├── .gitignore ├── 3rd_party ├── .gitkeep ├── crewai_agentic.py ├── data │ ├── 10k │ │ ├── lyft_2021.pdf │ │ └── uber_2021.pdf │ ├── lyft_index │ │ ├── default__vector_store.json │ │ ├── docstore.json │ │ ├── graph_store.json │ │ ├── image__vector_store.json │ │ └── index_store.json │ └── uber_index │ │ ├── default__vector_store.json │ │ ├── docstore.json │ │ ├── graph_store.json │ │ ├── image__vector_store.json │ │ └── index_store.json ├── langchain_agentic.py ├── llamaindex_agentic.py └── smol_agents.py ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── agentic_chatbot ├── agent_tools.py ├── chatbot_st.py └── images │ └── .gitkeep ├── agentic_workflow ├── agentic_tools.py └── utils.py ├── gen_ai_stack.png ├── lambda_function_tools └── read_csv_tool.py ├── reinvent_2024_agentic ├── README.md ├── agent_chatbot_st.py ├── agent_tools.py ├── lambda_functions │ ├── create_lambda_functions.py │ ├── describe_image.py │ ├── gen_aws_diag_docker │ │ ├── Dockerfile │ │ ├── diag_mapping.json │ │ ├── lambda_handler.py │ │ └── requirements.txt │ └── website_to_text.py ├── lambda_layers │ ├── make_pil_layer.sh │ └── make_requests_layer.sh └── media_streaming_aws_diag_example.png ├── requirements.txt ├── requirements_crewai.txt ├── requirements_langchain.txt ├── requirements_llama_index.txt ├── requirements_smolagents.txt ├── sagemaker_ai ├── bedrock_example.py ├── cato_capital.py └── images │ └── CatCapital.png └── strands_agents ├── mcp_docs_diag.py ├── multi_agent_ppt.py └── weather_word_count.py /.gitignore: -------------------------------------------------------------------------------- 1 | # added 2 | strands_agents/generated-diagrams/ 3 | notes.txt 4 | .vscode/ 5 | dependencies/ 6 | agentic_chatbot/chatbot_full_st.py 7 | agentic_workflow/agentic_tools_full.py 8 | 9 | # Byte-compiled / optimized / DLL files 10 | __pycache__/ 11 | *.py[cod] 12 | *$py.class 13 | 14 | # C extensions 15 | *.so 16 | 17 | # Distribution / packaging 18 | .Python 19 | build/ 
20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | .eggs/ 25 | lib/ 26 | lib64/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | wheels/ 31 | pip-wheel-metadata/ 32 | share/python-wheels/ 33 | *.egg-info/ 34 | .installed.cfg 35 | *.egg 36 | MANIFEST 37 | 38 | # PyInstaller 39 | # Usually these files are written by a python script from a template 40 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 41 | *.manifest 42 | *.spec 43 | 44 | # Installer logs 45 | pip-log.txt 46 | pip-delete-this-directory.txt 47 | 48 | # Unit test / coverage reports 49 | htmlcov/ 50 | .tox/ 51 | .nox/ 52 | .coverage 53 | .coverage.* 54 | .cache 55 | nosetests.xml 56 | coverage.xml 57 | *.cover 58 | *.py,cover 59 | .hypothesis/ 60 | .pytest_cache/ 61 | 62 | # Translations 63 | *.mo 64 | *.pot 65 | 66 | # Django stuff: 67 | *.log 68 | local_settings.py 69 | db.sqlite3 70 | db.sqlite3-journal 71 | 72 | # Flask stuff: 73 | instance/ 74 | .webassets-cache 75 | 76 | # Scrapy stuff: 77 | .scrapy 78 | 79 | # Sphinx documentation 80 | docs/_build/ 81 | 82 | # PyBuilder 83 | target/ 84 | 85 | # Jupyter Notebook 86 | .ipynb_checkpoints 87 | 88 | # IPython 89 | profile_default/ 90 | ipython_config.py 91 | 92 | # pyenv 93 | .python-version 94 | 95 | # pipenv 96 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 97 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 98 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 99 | # install all needed dependencies. 100 | #Pipfile.lock 101 | 102 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 103 | __pypackages__/ 104 | 105 | # Celery stuff 106 | celerybeat-schedule 107 | celerybeat.pid 108 | 109 | # SageMath parsed files 110 | *.sage.py 111 | 112 | # Environments 113 | .env 114 | .venv 115 | env/ 116 | venv/ 117 | ENV/ 118 | env.bak/ 119 | venv.bak/ 120 | 121 | # Spyder project settings 122 | .spyderproject 123 | .spyproject 124 | 125 | # Rope project settings 126 | .ropeproject 127 | 128 | # mkdocs documentation 129 | /site 130 | 131 | # mypy 132 | .mypy_cache/ 133 | .dmypy.json 134 | dmypy.json 135 | 136 | # Pyre type checker 137 | .pyre/ 138 | -------------------------------------------------------------------------------- /3rd_party/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/build-on-aws/agentic-workshop/7d7c0cd0a6607fd761537b1602cd75f7dd75d264/3rd_party/.gitkeep -------------------------------------------------------------------------------- /3rd_party/crewai_agentic.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | 3 | from dotenv import load_dotenv 4 | 5 | load_dotenv() 6 | 7 | from crewai import LLM, Agent, Crew, Task 8 | 9 | # Load Claude from Amazon Bedrock 10 | llm = LLM(model="bedrock/anthropic.claude-3-5-haiku-20241022-v1:0", temperature=0.7) 11 | 12 | 13 | class TravelListicleAgents: 14 | def travel_researcher_agent(self): 15 | return Agent( 16 | role="Travel Researcher", 17 | goal="Research and compile interesting activities and attractions for a given location", 18 | backstory=dedent( 19 | """You are an experienced travel researcher with a knack for 20 | discovering both popular attractions and hidden gems in any 21 | location. 
Your expertise lies in gathering comprehensive 22 | information about various activities, their historical 23 | significance, and practical details for visitors.""" 24 | ), 25 | allow_delegation=False, 26 | verbose=True, 27 | llm=llm, 28 | ) 29 | 30 | def content_writer_agent(self): 31 | return Agent( 32 | role="Travel Content Writer", 33 | goal="Create engaging and informative content for the top 10 listicle", 34 | backstory=dedent( 35 | """You are a skilled travel writer with a flair for creating 36 | captivating content. Your writing style is engaging, 37 | informative, and tailored to inspire readers to explore new 38 | destinations. You excel at crafting concise yet compelling 39 | descriptions of attractions and activities.""" 40 | ), 41 | allow_delegation=False, 42 | verbose=True, 43 | llm=llm, 44 | ) 45 | 46 | def editor_agent(self): 47 | return Agent( 48 | role="Content Editor", 49 | goal="Ensure the listicle is well-structured, engaging, and error-free", 50 | backstory=dedent( 51 | """You are a meticulous editor with years of experience in 52 | travel content. Your keen eye for detail helps polish articles 53 | to perfection. You focus on improving flow, maintaining 54 | consistency, and enhancing the overall readability of the 55 | content while ensuring it appeals to the target audience.""" 56 | ), 57 | allow_delegation=True, 58 | verbose=True, 59 | llm=llm, 60 | ) 61 | 62 | 63 | class TravelListicleTasks: 64 | def research_task(self, agent, location): 65 | return Task( 66 | description=dedent( 67 | f"""Research and compile a list of at least 15 interesting 68 | activities and attractions in {location}. Include a mix of 69 | popular tourist spots and lesser-known local favorites. For 70 | each item, provide: 71 | 1. Name of the attraction/activity 72 | 2. Brief description (2-3 sentences) 73 | 3. Why it's worth visiting 74 | 4. 
Any practical information (e.g., best time to visit, cost) 75 | 76 | Your final answer should be a structured list of these items. 77 | """ 78 | ), 79 | agent=agent, 80 | expected_output="Structured list of 15+ attractions/activities", 81 | ) 82 | 83 | def write_listicle_task(self, agent, location): 84 | return Task( 85 | description=dedent( 86 | f"""Create an engaging top 10 listicle article about things to 87 | do in {location}. Use the research provided to: 88 | 1. Write a catchy title and introduction (100-150 words) 89 | 2. Select and write about the top 10 activities/attractions 90 | 3. For each item, write 2-3 paragraphs (100-150 words total) 91 | 4. Include a brief conclusion (50-75 words) 92 | 93 | Ensure the content is engaging, informative, and inspiring. 94 | Your final answer should be the complete listicle article. 95 | """ 96 | ), 97 | agent=agent, 98 | expected_output="Complete top 10 listicle article", 99 | ) 100 | 101 | def edit_listicle_task(self, agent, location): 102 | return Task( 103 | description=dedent( 104 | f"""Review and edit the top 10 listicle article about things to 105 | do in {location}. Focus on: 106 | 1. Improving the overall structure and flow 107 | 2. Enhancing the engagement factor of the content 108 | 3. Ensuring consistency in tone and style 109 | 4. Correcting any grammatical or spelling errors 110 | 5. Optimizing for SEO (if possible, suggest relevant keywords) 111 | 112 | Your final answer should be the polished, publication-ready 113 | version of the article. 
114 | """ 115 | ), 116 | agent=agent, 117 | expected_output="Edited and polished listicle article", 118 | ) 119 | 120 | 121 | tasks = TravelListicleTasks() 122 | agents = TravelListicleAgents() 123 | 124 | print("## Welcome to the Travel Listicle Crew") 125 | print("--------------------------------------") 126 | location = input("What location would you like to create a top 10 listicle for?\n") 127 | 128 | # Create Agents 129 | travel_researcher = agents.travel_researcher_agent() 130 | content_writer = agents.content_writer_agent() 131 | editor = agents.editor_agent() 132 | 133 | # Create Tasks 134 | research_location = tasks.research_task(travel_researcher, location) 135 | write_listicle = tasks.write_listicle_task(content_writer, location) 136 | edit_listicle = tasks.edit_listicle_task(editor, location) 137 | 138 | # Create Crew for Listicle Production 139 | crew = Crew( 140 | agents=[travel_researcher, content_writer, editor], 141 | tasks=[research_location, write_listicle, edit_listicle], 142 | verbose=True, 143 | ) 144 | 145 | listicle_result = crew.kickoff() 146 | 147 | # Print results 148 | print("\n\n########################") 149 | print("## Here is the result") 150 | print("########################\n") 151 | print(f"Top 10 Things to Do in {location}:") 152 | print(listicle_result) 153 | -------------------------------------------------------------------------------- /3rd_party/data/10k/lyft_2021.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/build-on-aws/agentic-workshop/7d7c0cd0a6607fd761537b1602cd75f7dd75d264/3rd_party/data/10k/lyft_2021.pdf -------------------------------------------------------------------------------- /3rd_party/data/10k/uber_2021.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/build-on-aws/agentic-workshop/7d7c0cd0a6607fd761537b1602cd75f7dd75d264/3rd_party/data/10k/uber_2021.pdf 
-------------------------------------------------------------------------------- /3rd_party/data/lyft_index/graph_store.json: -------------------------------------------------------------------------------- 1 | {"graph_dict": {}} -------------------------------------------------------------------------------- /3rd_party/data/lyft_index/image__vector_store.json: -------------------------------------------------------------------------------- 1 | {"embedding_dict": {}, "text_id_to_ref_doc_id": {}, "metadata_dict": {}} -------------------------------------------------------------------------------- /3rd_party/data/lyft_index/index_store.json: -------------------------------------------------------------------------------- 1 | {"index_store/data": {"88311ab3-5673-4bb9-a5c5-cf780bbd5847": {"__type__": "vector_store", "__data__": "{\"index_id\": \"88311ab3-5673-4bb9-a5c5-cf780bbd5847\", \"summary\": null, \"nodes_dict\": {\"d28f3903-c672-461f-a284-0994f00b911c\": \"d28f3903-c672-461f-a284-0994f00b911c\", \"81de1aa9-dca8-43aa-bf64-0daec39d7d18\": \"81de1aa9-dca8-43aa-bf64-0daec39d7d18\", \"40075582-7424-4974-838b-cc8f179d9d41\": \"40075582-7424-4974-838b-cc8f179d9d41\", \"f44ef3bd-7b8d-4bd0-8fe5-af171ecd7504\": \"f44ef3bd-7b8d-4bd0-8fe5-af171ecd7504\", \"1f0271e6-8c16-4d0e-a709-2da171ffd144\": \"1f0271e6-8c16-4d0e-a709-2da171ffd144\", \"30317ed7-fda0-4130-8d9a-4eda23acc02f\": \"30317ed7-fda0-4130-8d9a-4eda23acc02f\", \"73ae3003-f079-4c12-a7b8-5acc11213600\": \"73ae3003-f079-4c12-a7b8-5acc11213600\", \"dbf36c89-e524-4039-93d0-59dca4062405\": \"dbf36c89-e524-4039-93d0-59dca4062405\", \"f643e485-084d-4ebb-93ba-dee7483a8f21\": \"f643e485-084d-4ebb-93ba-dee7483a8f21\", \"b8d816f9-913b-4aee-bfdd-b63e509cd362\": \"b8d816f9-913b-4aee-bfdd-b63e509cd362\", \"915473fe-bba6-46f8-a7ac-c5b348b25bbf\": \"915473fe-bba6-46f8-a7ac-c5b348b25bbf\", \"a77fe452-df6e-4e43-ae16-afc13c774d62\": \"a77fe452-df6e-4e43-ae16-afc13c774d62\", \"e69a1bff-7cfc-4f49-ab72-948b50b65ecc\": 
\"e69a1bff-7cfc-4f49-ab72-948b50b65ecc\", \"e58e385a-611a-4109-a3af-2ce38199d67b\": \"e58e385a-611a-4109-a3af-2ce38199d67b\", \"1a83bd88-9269-44e8-9f24-91712e3348b4\": \"1a83bd88-9269-44e8-9f24-91712e3348b4\", \"9663409c-bc90-4ace-8f89-802f406c214f\": \"9663409c-bc90-4ace-8f89-802f406c214f\", \"3210c208-b5da-4649-9b8a-520fac2f9e80\": \"3210c208-b5da-4649-9b8a-520fac2f9e80\", \"1564b472-c0a5-4fe8-8541-a56a47b430f4\": \"1564b472-c0a5-4fe8-8541-a56a47b430f4\", \"c189792c-604a-4591-b822-296d2a992234\": \"c189792c-604a-4591-b822-296d2a992234\", \"97ce1854-ddd4-4bf9-8a28-6d493a5b6203\": \"97ce1854-ddd4-4bf9-8a28-6d493a5b6203\", \"b7da85a0-7229-4bc9-ac71-6696a957270e\": \"b7da85a0-7229-4bc9-ac71-6696a957270e\", \"314503af-6a85-4de9-b0fb-1d9f65c8f332\": \"314503af-6a85-4de9-b0fb-1d9f65c8f332\", \"a1c95344-6ec5-4c10-9f54-09d74c0e38c6\": \"a1c95344-6ec5-4c10-9f54-09d74c0e38c6\", \"3802ca29-4d5c-4a38-919b-80ada3a25310\": \"3802ca29-4d5c-4a38-919b-80ada3a25310\", \"a0a21cad-b117-43eb-aa4f-22b51a16a1fa\": \"a0a21cad-b117-43eb-aa4f-22b51a16a1fa\", \"38040677-6e0e-4015-bf91-f3695350fd2f\": \"38040677-6e0e-4015-bf91-f3695350fd2f\", \"258205a9-42de-4656-af63-85bac15239b1\": \"258205a9-42de-4656-af63-85bac15239b1\", \"af761e9f-e543-4754-bced-e1146955759d\": \"af761e9f-e543-4754-bced-e1146955759d\", \"176ecd92-bb01-4d3f-bcc8-3cea563fa026\": \"176ecd92-bb01-4d3f-bcc8-3cea563fa026\", \"2c88fd88-ed55-4a7c-954f-ff18d6d54811\": \"2c88fd88-ed55-4a7c-954f-ff18d6d54811\", \"109b7538-b3b6-4377-8c1c-df6b45adeefb\": \"109b7538-b3b6-4377-8c1c-df6b45adeefb\", \"03d14d90-9045-47f2-8c39-078aaac9a9a8\": \"03d14d90-9045-47f2-8c39-078aaac9a9a8\", \"df917145-b9af-46ae-8cf1-2fb6ff96a3bf\": \"df917145-b9af-46ae-8cf1-2fb6ff96a3bf\", \"2ffe50b2-7120-4bdf-b3bc-cf1b363342c0\": \"2ffe50b2-7120-4bdf-b3bc-cf1b363342c0\", \"8dd55d6d-c223-47a8-8efd-e33b661f6acf\": \"8dd55d6d-c223-47a8-8efd-e33b661f6acf\", \"831f65ae-c0d7-4706-986d-0a31d23fdeb9\": \"831f65ae-c0d7-4706-986d-0a31d23fdeb9\", 
\"76460186-9b04-4659-a79c-2c0889593550\": \"76460186-9b04-4659-a79c-2c0889593550\", \"460c0b8b-f602-4a43-8e1a-c263623271b6\": \"460c0b8b-f602-4a43-8e1a-c263623271b6\", \"c36954ad-774b-4062-a677-1f1ba807223d\": \"c36954ad-774b-4062-a677-1f1ba807223d\", \"ae6edb58-3310-410f-ab16-03fd3690c5d0\": \"ae6edb58-3310-410f-ab16-03fd3690c5d0\", \"05174a3d-1f63-414f-b174-3b4cedb0a797\": \"05174a3d-1f63-414f-b174-3b4cedb0a797\", \"5e9ba4f0-7f2c-4abf-9df3-485e9dd72468\": \"5e9ba4f0-7f2c-4abf-9df3-485e9dd72468\", \"a1646012-7b3d-4c9d-9977-4bd5235e343b\": \"a1646012-7b3d-4c9d-9977-4bd5235e343b\", \"3f5132d0-833b-43ac-806e-5852f6ce75cc\": \"3f5132d0-833b-43ac-806e-5852f6ce75cc\", \"c5428b07-e4f5-49e4-946b-8e00ff7c1087\": \"c5428b07-e4f5-49e4-946b-8e00ff7c1087\", \"b47218cd-7f0a-46fd-b68e-b57d134cc08c\": \"b47218cd-7f0a-46fd-b68e-b57d134cc08c\", \"02d3a200-84d3-4a70-80a4-37828ca893fb\": \"02d3a200-84d3-4a70-80a4-37828ca893fb\", \"b60ae012-8b76-4ee5-a3f9-c34352aded37\": \"b60ae012-8b76-4ee5-a3f9-c34352aded37\", \"a1c99c45-f099-4294-8009-113d0d541561\": \"a1c99c45-f099-4294-8009-113d0d541561\", \"b619a46d-ffd4-4b72-83c1-ed17c6babb77\": \"b619a46d-ffd4-4b72-83c1-ed17c6babb77\", \"707dc8d8-bcb1-4d9b-a712-647181048fb8\": \"707dc8d8-bcb1-4d9b-a712-647181048fb8\", \"32b86934-1060-4bf9-a7ea-f445fb8f6975\": \"32b86934-1060-4bf9-a7ea-f445fb8f6975\", \"539c0dbb-21a2-4919-afbb-a91ab4c2ab5c\": \"539c0dbb-21a2-4919-afbb-a91ab4c2ab5c\", \"db48dc0a-cb0c-4ed4-8588-6638e1e562de\": \"db48dc0a-cb0c-4ed4-8588-6638e1e562de\", \"1f5618ed-da2d-48a5-8baa-ab1de5e2af73\": \"1f5618ed-da2d-48a5-8baa-ab1de5e2af73\", \"3cb8c83c-cd73-4746-ac2b-f43d13b0718e\": \"3cb8c83c-cd73-4746-ac2b-f43d13b0718e\", \"34e2d8b4-0e25-4c20-a4bb-beb7e1a08e4f\": \"34e2d8b4-0e25-4c20-a4bb-beb7e1a08e4f\", \"f5860771-9fab-4372-8aea-4c062518f711\": \"f5860771-9fab-4372-8aea-4c062518f711\", \"e772f6a8-9ab9-4cfb-bcb9-4c1d9e52e58c\": \"e772f6a8-9ab9-4cfb-bcb9-4c1d9e52e58c\", \"094eec31-f55a-4dce-98b7-727027df3046\": 
\"094eec31-f55a-4dce-98b7-727027df3046\", \"9a788229-bb83-4961-a394-6bb0b23027e7\": \"9a788229-bb83-4961-a394-6bb0b23027e7\", \"5e1b068b-baff-4e7e-92df-dcecd1e853f9\": \"5e1b068b-baff-4e7e-92df-dcecd1e853f9\", \"4ee70bc9-4c14-4f6c-9f6f-548309f44c00\": \"4ee70bc9-4c14-4f6c-9f6f-548309f44c00\", \"1477268a-f5d8-4df3-a476-5f87c7b0c208\": \"1477268a-f5d8-4df3-a476-5f87c7b0c208\", \"382d6f59-efcd-4058-8190-7db845b9cf06\": \"382d6f59-efcd-4058-8190-7db845b9cf06\", \"d63a1fef-d692-4043-b46b-04e452fcd61a\": \"d63a1fef-d692-4043-b46b-04e452fcd61a\", \"fd897c47-6d1b-4407-ac34-993d36739265\": \"fd897c47-6d1b-4407-ac34-993d36739265\", \"9dafdc4f-16c0-4a8a-9acf-bed647810f8f\": \"9dafdc4f-16c0-4a8a-9acf-bed647810f8f\", \"ef126ae8-8288-4bc4-93af-ca16e33af027\": \"ef126ae8-8288-4bc4-93af-ca16e33af027\", \"06d3bf7d-9ea5-4d88-a9dc-8fc4e6ecd63c\": \"06d3bf7d-9ea5-4d88-a9dc-8fc4e6ecd63c\", \"8be0c48d-d546-4569-ad79-a3501464afc8\": \"8be0c48d-d546-4569-ad79-a3501464afc8\", \"d0601cc8-f91b-4842-b018-0673c3f38c9c\": \"d0601cc8-f91b-4842-b018-0673c3f38c9c\", \"32d65f46-3a3b-4a0b-964b-0cefa4e5f35b\": \"32d65f46-3a3b-4a0b-964b-0cefa4e5f35b\", \"043b6161-12c0-4d1d-9392-b2f689e5159a\": \"043b6161-12c0-4d1d-9392-b2f689e5159a\", \"dbe23afb-1a7d-46d0-a106-2242b56b8c5d\": \"dbe23afb-1a7d-46d0-a106-2242b56b8c5d\", \"6b3cc9b1-1120-4248-b43d-8e3dfabbccce\": \"6b3cc9b1-1120-4248-b43d-8e3dfabbccce\", \"029dd438-5002-498d-bd3d-31da5f86edc1\": \"029dd438-5002-498d-bd3d-31da5f86edc1\", \"28b90124-749b-4bcc-914b-7d9a3f4d9f4d\": \"28b90124-749b-4bcc-914b-7d9a3f4d9f4d\", \"1cb0f9c4-c0fa-4ae6-af8f-2fe2576e6f05\": \"1cb0f9c4-c0fa-4ae6-af8f-2fe2576e6f05\", \"02220e79-621f-4f32-afcd-c3cdb3dd5275\": \"02220e79-621f-4f32-afcd-c3cdb3dd5275\", \"c20145d7-211f-465a-9292-2b1185f271b0\": \"c20145d7-211f-465a-9292-2b1185f271b0\", \"a80ec2b8-044b-4194-b3f2-e2128bf69ecf\": \"a80ec2b8-044b-4194-b3f2-e2128bf69ecf\", \"95de5222-5220-4ff2-a8fc-4156b9c0d72e\": \"95de5222-5220-4ff2-a8fc-4156b9c0d72e\", 
\"80f295bf-9161-4a2e-8ae1-e89c3b40e0cb\": \"80f295bf-9161-4a2e-8ae1-e89c3b40e0cb\", \"f477f2d9-6c1b-41fd-bc8b-e70352555648\": \"f477f2d9-6c1b-41fd-bc8b-e70352555648\", \"9f23fb51-b839-4b92-bb98-b7568cc89533\": \"9f23fb51-b839-4b92-bb98-b7568cc89533\", \"e76dfb43-0dbc-4e31-ad40-c95017639e2a\": \"e76dfb43-0dbc-4e31-ad40-c95017639e2a\", \"b2b40507-ff91-4f80-bfd4-852a0fbca7a0\": \"b2b40507-ff91-4f80-bfd4-852a0fbca7a0\", \"4584b411-b00e-4650-8638-11e117f27241\": \"4584b411-b00e-4650-8638-11e117f27241\", \"a997ce99-07c4-41a1-92a6-c6e3291b8c64\": \"a997ce99-07c4-41a1-92a6-c6e3291b8c64\", \"64a01f93-5e14-41a5-8d0c-3c04cecf84f9\": \"64a01f93-5e14-41a5-8d0c-3c04cecf84f9\", \"f6380dbc-7ef4-41c3-942c-90258c16f404\": \"f6380dbc-7ef4-41c3-942c-90258c16f404\", \"a4e32399-cedf-45d6-b354-bcb3fc19f3a0\": \"a4e32399-cedf-45d6-b354-bcb3fc19f3a0\", \"59ddb4b9-6eb1-4adc-b0eb-4266923cea7b\": \"59ddb4b9-6eb1-4adc-b0eb-4266923cea7b\", \"52873a9a-5fc1-4981-b96e-f5cb55bae402\": \"52873a9a-5fc1-4981-b96e-f5cb55bae402\", \"56b41e2a-8a75-473c-9a66-44b12a845e38\": \"56b41e2a-8a75-473c-9a66-44b12a845e38\", \"350c09de-f343-4341-95d9-5f6be0563c88\": \"350c09de-f343-4341-95d9-5f6be0563c88\", \"3c70ba36-1387-453e-976b-be1c05e5b763\": \"3c70ba36-1387-453e-976b-be1c05e5b763\", \"2e5c75c6-bff4-4aa9-ba21-25c2a75853c9\": \"2e5c75c6-bff4-4aa9-ba21-25c2a75853c9\", \"8b01cef2-c52d-4196-aac0-5f27253856bb\": \"8b01cef2-c52d-4196-aac0-5f27253856bb\", \"23171cff-63c0-4411-a40b-6aed9eb2b09e\": \"23171cff-63c0-4411-a40b-6aed9eb2b09e\", \"24759d7f-7298-469c-be66-079609f83836\": \"24759d7f-7298-469c-be66-079609f83836\", \"da5f7149-db64-4cac-b959-1b7b2c258f3f\": \"da5f7149-db64-4cac-b959-1b7b2c258f3f\", \"5a8c7a45-86e4-4abf-aeab-9d759f995108\": \"5a8c7a45-86e4-4abf-aeab-9d759f995108\", \"e3d3ce84-e73d-4cbd-837e-de8fa6f6e8b7\": \"e3d3ce84-e73d-4cbd-837e-de8fa6f6e8b7\", \"b7f2410f-b05b-4309-a826-630a3d059522\": \"b7f2410f-b05b-4309-a826-630a3d059522\", \"37ce2290-3a39-49e2-ac14-19e9a4fd8466\": 
\"37ce2290-3a39-49e2-ac14-19e9a4fd8466\", \"db45a47b-cb22-424e-b43e-b736d3c0e8bc\": \"db45a47b-cb22-424e-b43e-b736d3c0e8bc\", \"8a1ab7c1-abb9-4740-a589-5b1d61b18180\": \"8a1ab7c1-abb9-4740-a589-5b1d61b18180\", \"a17ead68-d3e6-4684-8289-e3aa7915644e\": \"a17ead68-d3e6-4684-8289-e3aa7915644e\", \"acc26a05-eed3-4ae4-84d6-1a0c67140926\": \"acc26a05-eed3-4ae4-84d6-1a0c67140926\", \"e6a1ca2a-fd8a-452f-b109-fe4fd6c54c2e\": \"e6a1ca2a-fd8a-452f-b109-fe4fd6c54c2e\", \"dd8bfc30-041f-4dac-87d6-1a72285e261b\": \"dd8bfc30-041f-4dac-87d6-1a72285e261b\", \"2cc8f39b-e51e-4916-a3c4-ff60af71461e\": \"2cc8f39b-e51e-4916-a3c4-ff60af71461e\", \"8b61d4a7-b37d-4f89-9c0d-c687e3aad333\": \"8b61d4a7-b37d-4f89-9c0d-c687e3aad333\", \"5cc2e1ed-986c-436c-820b-2020a9a4b1de\": \"5cc2e1ed-986c-436c-820b-2020a9a4b1de\", \"10d4bee7-b972-49c9-8758-5a84640a9001\": \"10d4bee7-b972-49c9-8758-5a84640a9001\", \"1d5493a6-0783-473f-8ead-727c08f29ea7\": \"1d5493a6-0783-473f-8ead-727c08f29ea7\", \"799a0ac1-f1ca-40cf-a9af-bf59ad2d2471\": \"799a0ac1-f1ca-40cf-a9af-bf59ad2d2471\", \"bd792fb1-e40f-4a93-b7a4-f706047583df\": \"bd792fb1-e40f-4a93-b7a4-f706047583df\", \"f6f97953-5d9c-48dd-b4e9-98e2b714c5df\": \"f6f97953-5d9c-48dd-b4e9-98e2b714c5df\", \"e3941b5a-3ee5-405f-9be2-47b35ba74b9d\": \"e3941b5a-3ee5-405f-9be2-47b35ba74b9d\", \"bc8aaea2-852c-414a-a785-6ba18848a391\": \"bc8aaea2-852c-414a-a785-6ba18848a391\", \"b0b83c59-861d-4921-aee7-f2c97d859076\": \"b0b83c59-861d-4921-aee7-f2c97d859076\", \"191ddcc7-75b1-44fd-a096-f1a5e011edeb\": \"191ddcc7-75b1-44fd-a096-f1a5e011edeb\", \"920691e2-952f-485f-8300-ebd7956bdffd\": \"920691e2-952f-485f-8300-ebd7956bdffd\", \"6e624b1e-387a-4e06-aec0-30049eb8566e\": \"6e624b1e-387a-4e06-aec0-30049eb8566e\", \"2d1f7a94-4e0b-49ff-baeb-7f201f9a09e9\": \"2d1f7a94-4e0b-49ff-baeb-7f201f9a09e9\", \"cbf8201f-bf1c-45a9-ab6b-c15eaff8e00a\": \"cbf8201f-bf1c-45a9-ab6b-c15eaff8e00a\", \"79c3c91f-97b5-4174-a14a-3863d0d3d0c6\": \"79c3c91f-97b5-4174-a14a-3863d0d3d0c6\", 
\"a434cfaf-bfd2-415b-a315-d01fc8edcafe\": \"a434cfaf-bfd2-415b-a315-d01fc8edcafe\", \"8b3c5270-8686-4d6f-8ad1-d8d375bd7828\": \"8b3c5270-8686-4d6f-8ad1-d8d375bd7828\", \"fb18cf21-f7b8-430d-a324-2fd1ce78840e\": \"fb18cf21-f7b8-430d-a324-2fd1ce78840e\", \"7be7179e-d19c-4661-a8d9-4b5513a259ee\": \"7be7179e-d19c-4661-a8d9-4b5513a259ee\", \"6a34d8fb-b48e-4753-a601-e50c2700c60b\": \"6a34d8fb-b48e-4753-a601-e50c2700c60b\", \"e0acc5a1-abc1-4cb8-be28-a2a2b00187cf\": \"e0acc5a1-abc1-4cb8-be28-a2a2b00187cf\", \"4c8130b1-610b-4741-bdd2-8dcc3cade683\": \"4c8130b1-610b-4741-bdd2-8dcc3cade683\", \"ddd5fad1-1a23-4502-a32c-439b9b2721bb\": \"ddd5fad1-1a23-4502-a32c-439b9b2721bb\", \"55aae380-0259-4402-bfb7-d4aabfebf87a\": \"55aae380-0259-4402-bfb7-d4aabfebf87a\", \"e40838e8-abdb-451c-9cd3-2f75ef1fdb32\": \"e40838e8-abdb-451c-9cd3-2f75ef1fdb32\", \"0f3e1f57-8406-4947-85b0-53401bb92456\": \"0f3e1f57-8406-4947-85b0-53401bb92456\", \"971ee6bc-9a66-4485-ad9b-2c098126e740\": \"971ee6bc-9a66-4485-ad9b-2c098126e740\", \"964583e1-3d08-496c-a655-2e76e64d43dd\": \"964583e1-3d08-496c-a655-2e76e64d43dd\", \"c90f37de-8ae6-4074-8e3b-0c158cfe1f3f\": \"c90f37de-8ae6-4074-8e3b-0c158cfe1f3f\", \"d360eba1-871b-4cb8-b7c6-69262c6e0136\": \"d360eba1-871b-4cb8-b7c6-69262c6e0136\", \"e0314cbe-7753-4253-8ba3-c9c69ffb06e8\": \"e0314cbe-7753-4253-8ba3-c9c69ffb06e8\", \"a236b199-3f6a-4f17-a927-c98741af7826\": \"a236b199-3f6a-4f17-a927-c98741af7826\", \"f0142ff3-956f-4a78-908f-b1e8c676b8af\": \"f0142ff3-956f-4a78-908f-b1e8c676b8af\", \"59768367-8115-44db-8a6b-1fe75e6888e6\": \"59768367-8115-44db-8a6b-1fe75e6888e6\", \"1c2eb89a-2426-4b9b-bb07-2fce02975e18\": \"1c2eb89a-2426-4b9b-bb07-2fce02975e18\", \"8843aa43-8d8b-46a6-91d6-31e225ed7ba2\": \"8843aa43-8d8b-46a6-91d6-31e225ed7ba2\", \"2fa7754a-ed3a-42b9-97df-43e81a8c130b\": \"2fa7754a-ed3a-42b9-97df-43e81a8c130b\", \"5828e762-c2e4-483c-ade4-4e5f9084ca5f\": \"5828e762-c2e4-483c-ade4-4e5f9084ca5f\", \"ae9f3396-ae94-491f-9251-c5c071375ffc\": 
\"ae9f3396-ae94-491f-9251-c5c071375ffc\", \"1ff30800-572a-4445-a9b5-65198133a590\": \"1ff30800-572a-4445-a9b5-65198133a590\", \"90ce3a7e-a7ac-427f-bafb-d32647290500\": \"90ce3a7e-a7ac-427f-bafb-d32647290500\", \"1fc7996a-94e5-4bce-895f-219b213e12b4\": \"1fc7996a-94e5-4bce-895f-219b213e12b4\", \"ed8c92ee-475c-4325-919d-f7810e37ef59\": \"ed8c92ee-475c-4325-919d-f7810e37ef59\", \"3de3137b-199f-4b1a-a447-8df4dd5d1f5a\": \"3de3137b-199f-4b1a-a447-8df4dd5d1f5a\", \"37c9d6f6-f7b3-477f-aa37-b4c17262761d\": \"37c9d6f6-f7b3-477f-aa37-b4c17262761d\", \"cc3a9c87-4d33-4f98-ab93-8dec002d77e9\": \"cc3a9c87-4d33-4f98-ab93-8dec002d77e9\", \"0d1ec0bc-c1ff-4e63-b701-5cbfc1b736c4\": \"0d1ec0bc-c1ff-4e63-b701-5cbfc1b736c4\", \"9be485f5-4cb0-45ae-85e0-87922dcfabf8\": \"9be485f5-4cb0-45ae-85e0-87922dcfabf8\", \"48c86110-a7ba-4d28-a552-ddfdc4255c94\": \"48c86110-a7ba-4d28-a552-ddfdc4255c94\", \"98213de2-5835-4569-93b1-52d4fa9bfc51\": \"98213de2-5835-4569-93b1-52d4fa9bfc51\", \"bad819de-5f17-41bf-9b80-7aee2252919a\": \"bad819de-5f17-41bf-9b80-7aee2252919a\", \"8ebb27fb-7a6b-4440-bb62-8b91ea442f45\": \"8ebb27fb-7a6b-4440-bb62-8b91ea442f45\", \"92ef1ae9-abb6-489c-9e80-b57fd9933d05\": \"92ef1ae9-abb6-489c-9e80-b57fd9933d05\", \"cef6f1d8-e1d4-431b-94af-7c5ee789dc00\": \"cef6f1d8-e1d4-431b-94af-7c5ee789dc00\", \"83e85dca-272e-4c19-934f-71ba490a12b0\": \"83e85dca-272e-4c19-934f-71ba490a12b0\", \"56ebbf50-cdca-462e-8b40-2dfac1ef8fcc\": \"56ebbf50-cdca-462e-8b40-2dfac1ef8fcc\", \"16317398-292a-4812-b51c-52368c571fec\": \"16317398-292a-4812-b51c-52368c571fec\", \"2f7f8b99-e846-4d7d-bce8-6f3f8b484c44\": \"2f7f8b99-e846-4d7d-bce8-6f3f8b484c44\", \"76f3ec66-73e2-4ea8-bd4c-ab215b3bcad8\": \"76f3ec66-73e2-4ea8-bd4c-ab215b3bcad8\", \"1851f700-6108-4d96-bfee-41c25b3f839d\": \"1851f700-6108-4d96-bfee-41c25b3f839d\", \"926ce9bd-1845-40a9-a21b-2bf4160d9f7e\": \"926ce9bd-1845-40a9-a21b-2bf4160d9f7e\", \"765cc808-710c-4ef0-a45c-15d0ef4896d3\": \"765cc808-710c-4ef0-a45c-15d0ef4896d3\", 
\"849b2322-da75-4e66-a4ed-0db79be63acb\": \"849b2322-da75-4e66-a4ed-0db79be63acb\", \"ef6310b0-b7b8-438d-89f4-523369f7baee\": \"ef6310b0-b7b8-438d-89f4-523369f7baee\", \"08650821-5510-440c-9b79-ace47c65a9de\": \"08650821-5510-440c-9b79-ace47c65a9de\", \"53fd8911-5b8d-40be-8649-4a4907230681\": \"53fd8911-5b8d-40be-8649-4a4907230681\", \"60003c9d-f09d-4906-8cb5-6ac4df954f54\": \"60003c9d-f09d-4906-8cb5-6ac4df954f54\", \"2754b315-7ca4-4209-83a6-61e10684fc78\": \"2754b315-7ca4-4209-83a6-61e10684fc78\", \"fae3c3cb-f82d-47d7-9207-7156432e0632\": \"fae3c3cb-f82d-47d7-9207-7156432e0632\", \"238bb45c-7c04-43bd-a759-7e5d614d2243\": \"238bb45c-7c04-43bd-a759-7e5d614d2243\", \"87228a75-2926-45a3-8fcd-6d817448a03a\": \"87228a75-2926-45a3-8fcd-6d817448a03a\", \"709efd8c-a350-4648-acee-94de50d18598\": \"709efd8c-a350-4648-acee-94de50d18598\", \"f7afc4b5-3a82-4e56-9790-ff369c439faf\": \"f7afc4b5-3a82-4e56-9790-ff369c439faf\", \"156f5e06-cad5-41d0-bc06-2b15b9241cf0\": \"156f5e06-cad5-41d0-bc06-2b15b9241cf0\", \"253b67cf-15a0-49db-9a62-6e8d55915e7a\": \"253b67cf-15a0-49db-9a62-6e8d55915e7a\", \"b287de83-5641-497a-9fc3-1c48c453c296\": \"b287de83-5641-497a-9fc3-1c48c453c296\", \"9c2931b5-8d13-43b7-949c-7366af728591\": \"9c2931b5-8d13-43b7-949c-7366af728591\", \"5d0d88cb-7067-445a-9257-00e0f2d10a9b\": \"5d0d88cb-7067-445a-9257-00e0f2d10a9b\", \"72ae48d1-ac5d-463a-9a6e-ae3d9ca605ba\": \"72ae48d1-ac5d-463a-9a6e-ae3d9ca605ba\", \"94cf3647-433e-439d-8490-031d667cc528\": \"94cf3647-433e-439d-8490-031d667cc528\", \"90ae5807-da7c-42b7-975e-c61adb0ac24d\": \"90ae5807-da7c-42b7-975e-c61adb0ac24d\", \"1726f6c3-aa36-4632-8f48-0a681cbbb1ff\": \"1726f6c3-aa36-4632-8f48-0a681cbbb1ff\", \"eae26102-1cc4-45ec-979f-a6b39b8667f3\": \"eae26102-1cc4-45ec-979f-a6b39b8667f3\", \"68009d50-2d08-4008-8fe1-430d13cab22d\": \"68009d50-2d08-4008-8fe1-430d13cab22d\", \"f88337a5-d37f-4630-be98-8b6744e91492\": \"f88337a5-d37f-4630-be98-8b6744e91492\", \"aa154f61-a268-4a3f-b3e9-ee8304f066df\": 
\"aa154f61-a268-4a3f-b3e9-ee8304f066df\", \"8746263f-2620-4c9a-9962-a9028fd59921\": \"8746263f-2620-4c9a-9962-a9028fd59921\", \"236a0d06-ebb0-4cd0-a130-d8e408326a14\": \"236a0d06-ebb0-4cd0-a130-d8e408326a14\", \"2c90d70b-8c34-40c6-804e-5d9c37879738\": \"2c90d70b-8c34-40c6-804e-5d9c37879738\", \"04ce3aca-2fb0-4b57-9638-07e8f5f7640c\": \"04ce3aca-2fb0-4b57-9638-07e8f5f7640c\", \"87453432-52da-4dc9-aa96-3b2e3517031b\": \"87453432-52da-4dc9-aa96-3b2e3517031b\", \"2dff7e30-60c3-4686-9ecf-63729ddf9135\": \"2dff7e30-60c3-4686-9ecf-63729ddf9135\", \"a85c335b-9c5b-4903-84aa-f43e9a6de951\": \"a85c335b-9c5b-4903-84aa-f43e9a6de951\", \"8037c8b9-5471-4983-92f2-b6bf958aad64\": \"8037c8b9-5471-4983-92f2-b6bf958aad64\", \"551d703c-785a-4299-9057-2a0f43aa4876\": \"551d703c-785a-4299-9057-2a0f43aa4876\", \"693b99f0-1096-4e4c-ae01-798d3190a7ab\": \"693b99f0-1096-4e4c-ae01-798d3190a7ab\", \"be5694d4-a332-4f34-9e61-f78fcdb0f117\": \"be5694d4-a332-4f34-9e61-f78fcdb0f117\", \"52a46bc3-73c0-4041-8a9f-652a9ce1ca9f\": \"52a46bc3-73c0-4041-8a9f-652a9ce1ca9f\", \"a133048d-90f2-46f8-819f-e62b8ea32cf6\": \"a133048d-90f2-46f8-819f-e62b8ea32cf6\", \"e6141b83-b903-46bd-934f-a55f74dd2ed5\": \"e6141b83-b903-46bd-934f-a55f74dd2ed5\", \"d61dd3e1-2e64-44a9-b75e-e206c1a93d2b\": \"d61dd3e1-2e64-44a9-b75e-e206c1a93d2b\", \"2911be31-a76e-4cf5-81e0-cbdfb808219d\": \"2911be31-a76e-4cf5-81e0-cbdfb808219d\", \"8d9c1b2e-e206-413d-b96c-1b495d04fb16\": \"8d9c1b2e-e206-413d-b96c-1b495d04fb16\", \"def36d7e-49a2-4b12-bad0-12d2a8e162c7\": \"def36d7e-49a2-4b12-bad0-12d2a8e162c7\", \"a2fabfb0-1125-4a3a-8323-37473168cc01\": \"a2fabfb0-1125-4a3a-8323-37473168cc01\", \"44daee71-45f1-40ba-a981-dcfde76e79d6\": \"44daee71-45f1-40ba-a981-dcfde76e79d6\", \"fdaf3e5b-2762-44dd-971b-03cdd0b237dc\": \"fdaf3e5b-2762-44dd-971b-03cdd0b237dc\", \"9c6b4ee3-a082-4d0b-b08e-decc8f9abc16\": \"9c6b4ee3-a082-4d0b-b08e-decc8f9abc16\", \"98c78684-35da-42db-bc2a-946d26007a1c\": \"98c78684-35da-42db-bc2a-946d26007a1c\", 
\"63f0b64e-93c2-4834-ad5f-c984836d8ebe\": \"63f0b64e-93c2-4834-ad5f-c984836d8ebe\", \"4d528b89-efe4-4196-bdfd-e51d64883151\": \"4d528b89-efe4-4196-bdfd-e51d64883151\", \"279a8f28-51af-405d-b2a2-ec7d6019cf28\": \"279a8f28-51af-405d-b2a2-ec7d6019cf28\", \"37afede8-c63f-4a7a-878d-653ff38b08da\": \"37afede8-c63f-4a7a-878d-653ff38b08da\", \"de6970a1-dc76-4a57-9843-e465b54a9cf4\": \"de6970a1-dc76-4a57-9843-e465b54a9cf4\", \"43663467-2ba3-4d20-8b0b-9603604c1875\": \"43663467-2ba3-4d20-8b0b-9603604c1875\", \"1e5c177c-1c7a-4163-896d-95ea44286ee4\": \"1e5c177c-1c7a-4163-896d-95ea44286ee4\", \"19f37b4f-9439-456d-a612-2f186651e1bd\": \"19f37b4f-9439-456d-a612-2f186651e1bd\", \"45ab33de-bb02-4d0e-83e6-057cd7a833bf\": \"45ab33de-bb02-4d0e-83e6-057cd7a833bf\", \"a86516ac-1c0c-49d5-a750-20e8ced2afed\": \"a86516ac-1c0c-49d5-a750-20e8ced2afed\", \"74aa20b3-ae17-4c8b-80b1-5eb5c27d534d\": \"74aa20b3-ae17-4c8b-80b1-5eb5c27d534d\", \"fb998343-f4d8-4d64-8240-84919b7a0a2d\": \"fb998343-f4d8-4d64-8240-84919b7a0a2d\", \"d03d823d-f90e-4c63-9973-4cc742900e2c\": \"d03d823d-f90e-4c63-9973-4cc742900e2c\", \"e38d39a7-a543-4c64-8eee-7621fa7a8db0\": \"e38d39a7-a543-4c64-8eee-7621fa7a8db0\", \"6bf32d52-1603-4d5a-bb77-0f6769f305d9\": \"6bf32d52-1603-4d5a-bb77-0f6769f305d9\", \"7926f6c4-bc25-4c48-b5b4-f0f181623504\": \"7926f6c4-bc25-4c48-b5b4-f0f181623504\", \"9a5ce88b-ff09-468b-8ebc-6f049a287d4e\": \"9a5ce88b-ff09-468b-8ebc-6f049a287d4e\", \"abc2d475-06e6-4bac-8fb2-c79774584363\": \"abc2d475-06e6-4bac-8fb2-c79774584363\", \"04fdefca-bd5c-42c5-a4a0-0a7946dc6974\": \"04fdefca-bd5c-42c5-a4a0-0a7946dc6974\", \"1be3f597-4c6b-40e3-9b9a-907056f76b42\": \"1be3f597-4c6b-40e3-9b9a-907056f76b42\", \"01050a88-32d1-4955-99de-22ed7beb50b7\": \"01050a88-32d1-4955-99de-22ed7beb50b7\", \"47232ac8-e397-4ab3-9d44-52bb590f620e\": \"47232ac8-e397-4ab3-9d44-52bb590f620e\", \"a7ead492-7586-41f4-ab8a-b9511fca8a79\": \"a7ead492-7586-41f4-ab8a-b9511fca8a79\", \"9a2fed2d-0ac6-48c6-9bc4-52e96ab5680b\": 
\"9a2fed2d-0ac6-48c6-9bc4-52e96ab5680b\", \"fb787da1-12ad-44e2-9974-daf5df7892d7\": \"fb787da1-12ad-44e2-9974-daf5df7892d7\", \"45f586f3-0f6e-4d9f-adef-23d866073ed3\": \"45f586f3-0f6e-4d9f-adef-23d866073ed3\", \"f63aa6f4-1a30-429c-8a42-5814b4915870\": \"f63aa6f4-1a30-429c-8a42-5814b4915870\", \"7c591a38-1c63-4bff-aa36-6ae46f995927\": \"7c591a38-1c63-4bff-aa36-6ae46f995927\", \"632623b0-6e91-4175-b87d-79b8d9f2b9a6\": \"632623b0-6e91-4175-b87d-79b8d9f2b9a6\", \"5668535b-8119-401d-8d7b-f8478bf0788f\": \"5668535b-8119-401d-8d7b-f8478bf0788f\", \"cea73181-f6a2-4bd7-a047-7768159be7f6\": \"cea73181-f6a2-4bd7-a047-7768159be7f6\", \"36b04184-ae8e-40a6-9d58-81f8a7748e7c\": \"36b04184-ae8e-40a6-9d58-81f8a7748e7c\", \"506f6753-bf7e-4df3-9dfa-0a47adfb17f9\": \"506f6753-bf7e-4df3-9dfa-0a47adfb17f9\", \"f816ee43-b251-4826-9fec-c1b29b31b6d7\": \"f816ee43-b251-4826-9fec-c1b29b31b6d7\", \"76e2727a-056b-4f6e-b1c9-c031833b0587\": \"76e2727a-056b-4f6e-b1c9-c031833b0587\", \"e9bc45dd-a87c-454f-a2f5-17ab408fcd8c\": \"e9bc45dd-a87c-454f-a2f5-17ab408fcd8c\", \"c3058dde-b4fb-44b7-9a4e-4d068e857542\": \"c3058dde-b4fb-44b7-9a4e-4d068e857542\", \"cc4ec547-85b6-4e34-a423-07f7f5e4f00a\": \"cc4ec547-85b6-4e34-a423-07f7f5e4f00a\", \"e8ae8a90-e39c-4960-855d-b4a0f357e200\": \"e8ae8a90-e39c-4960-855d-b4a0f357e200\", \"6d347334-e756-4112-bd83-be3ad9782a99\": \"6d347334-e756-4112-bd83-be3ad9782a99\", \"7d5c7045-a45c-446f-877a-38f3fca9200d\": \"7d5c7045-a45c-446f-877a-38f3fca9200d\", \"41c2768b-97cf-400d-beb9-5c05fc3cfdea\": \"41c2768b-97cf-400d-beb9-5c05fc3cfdea\", \"268bcad0-933c-4a5f-a9c7-3d3a92cba1e1\": \"268bcad0-933c-4a5f-a9c7-3d3a92cba1e1\", \"9ffd5a49-622f-4de0-ae17-2f3d0e76307c\": \"9ffd5a49-622f-4de0-ae17-2f3d0e76307c\", \"2ed53da5-54f1-49b2-8307-76544d63f62c\": \"2ed53da5-54f1-49b2-8307-76544d63f62c\", \"ff58f032-47d9-4510-9ea7-38fb63494cf6\": \"ff58f032-47d9-4510-9ea7-38fb63494cf6\", \"63972a22-881f-4e80-9f6e-e48ad4495e30\": \"63972a22-881f-4e80-9f6e-e48ad4495e30\", 
\"810c07af-82ec-4934-9cc2-04cfee3466e6\": \"810c07af-82ec-4934-9cc2-04cfee3466e6\", \"5e8bd7c2-e286-4ef9-821e-add5842b116c\": \"5e8bd7c2-e286-4ef9-821e-add5842b116c\", \"dfc6385a-fd39-4fe2-a07b-13d4f58e1ad6\": \"dfc6385a-fd39-4fe2-a07b-13d4f58e1ad6\", \"07a759fa-ae35-4eff-8863-82992bf4f945\": \"07a759fa-ae35-4eff-8863-82992bf4f945\", \"6b207ede-b736-4830-93e7-b953003a0fdc\": \"6b207ede-b736-4830-93e7-b953003a0fdc\", \"6da6d430-f92c-4e27-96b2-2740e39842a4\": \"6da6d430-f92c-4e27-96b2-2740e39842a4\", \"2cbdf965-dff5-47ed-967a-08e23e06119b\": \"2cbdf965-dff5-47ed-967a-08e23e06119b\", \"f35a1a30-1c1c-4c28-aba5-d585cee56914\": \"f35a1a30-1c1c-4c28-aba5-d585cee56914\", \"598ec9c5-69c4-445f-91c6-3ea644d890ad\": \"598ec9c5-69c4-445f-91c6-3ea644d890ad\", \"3f6b1b26-1966-4b30-b247-dcf2d403e708\": \"3f6b1b26-1966-4b30-b247-dcf2d403e708\", \"4a82091d-4fc0-4d25-b9af-54259bfe7045\": \"4a82091d-4fc0-4d25-b9af-54259bfe7045\", \"9cf57b4c-1022-412a-82d5-37cfab3afc54\": \"9cf57b4c-1022-412a-82d5-37cfab3afc54\", \"bc637128-2637-4c04-ac75-c82abf9d98f3\": \"bc637128-2637-4c04-ac75-c82abf9d98f3\", \"88c8bf4e-edee-471e-aea8-a48c867ef527\": \"88c8bf4e-edee-471e-aea8-a48c867ef527\", \"631b3596-8f09-4bf6-bf44-cdb5712c21cb\": \"631b3596-8f09-4bf6-bf44-cdb5712c21cb\", \"ed2f3ecf-210e-4e9e-a1d9-b044822dc461\": \"ed2f3ecf-210e-4e9e-a1d9-b044822dc461\", \"aa3b2263-a1f0-473a-b610-7cbd0672ea11\": \"aa3b2263-a1f0-473a-b610-7cbd0672ea11\", \"2d8f47e2-b382-4abb-be56-1deeee5905b7\": \"2d8f47e2-b382-4abb-be56-1deeee5905b7\", \"4c892d2f-9909-45ef-8afe-1ea870eaec5f\": \"4c892d2f-9909-45ef-8afe-1ea870eaec5f\", \"de240060-4020-457a-8d60-d7c7440f69b1\": \"de240060-4020-457a-8d60-d7c7440f69b1\", \"571ebfd5-7477-47ba-af56-e7c88d6ddbef\": \"571ebfd5-7477-47ba-af56-e7c88d6ddbef\", \"5dd2a67d-71e6-448d-8161-1e79ac9c2828\": \"5dd2a67d-71e6-448d-8161-1e79ac9c2828\", \"73361ade-a9b9-4511-b9e1-a9f72e840919\": \"73361ade-a9b9-4511-b9e1-a9f72e840919\", \"2109a730-236c-40a3-a3fe-eaf089f4c1f4\": 
\"2109a730-236c-40a3-a3fe-eaf089f4c1f4\", \"f039806b-d832-4611-bfc0-b9720c3bb58f\": \"f039806b-d832-4611-bfc0-b9720c3bb58f\", \"c54fa8fd-2e11-486a-86d7-d8bdd888ea9c\": \"c54fa8fd-2e11-486a-86d7-d8bdd888ea9c\", \"c891f453-b214-44b6-b7eb-2e24fc407b32\": \"c891f453-b214-44b6-b7eb-2e24fc407b32\", \"d90f5556-81a9-4eea-ae03-a4c46394ef51\": \"d90f5556-81a9-4eea-ae03-a4c46394ef51\", \"6438df47-f975-4169-adfd-a93928a0f51b\": \"6438df47-f975-4169-adfd-a93928a0f51b\", \"b0cb57c5-a6d3-4e27-ab08-244f44bf6bfb\": \"b0cb57c5-a6d3-4e27-ab08-244f44bf6bfb\", \"adc73254-bdb4-420e-b20f-e489125e94eb\": \"adc73254-bdb4-420e-b20f-e489125e94eb\", \"71c83b18-5829-4976-930b-082620ec6376\": \"71c83b18-5829-4976-930b-082620ec6376\", \"b28d2719-14fa-4e63-8cc4-fe8289e68c13\": \"b28d2719-14fa-4e63-8cc4-fe8289e68c13\", \"7b77204f-643a-480c-9e4d-b360a531e5c8\": \"7b77204f-643a-480c-9e4d-b360a531e5c8\", \"3a857606-7279-474d-8b17-924943da0963\": \"3a857606-7279-474d-8b17-924943da0963\", \"4eb03441-da67-4831-9e24-3b71b049cbc5\": \"4eb03441-da67-4831-9e24-3b71b049cbc5\", \"506f46af-f492-4aa1-b7bc-a60668631838\": \"506f46af-f492-4aa1-b7bc-a60668631838\", \"99b0fc7d-c8aa-4dd2-9155-bc73eb703020\": \"99b0fc7d-c8aa-4dd2-9155-bc73eb703020\", \"1db6a284-3a00-436b-9743-e9429ed9618e\": \"1db6a284-3a00-436b-9743-e9429ed9618e\", \"547ace8a-b7b6-41bc-afda-af42a6f3da5b\": \"547ace8a-b7b6-41bc-afda-af42a6f3da5b\", \"d548724c-4a2d-4795-93aa-cc0f3456e928\": \"d548724c-4a2d-4795-93aa-cc0f3456e928\", \"2674ad53-61ac-42de-9ca3-75d220235455\": \"2674ad53-61ac-42de-9ca3-75d220235455\", \"a8bf260b-dd65-4850-a1c7-35dd4f6ee92b\": \"a8bf260b-dd65-4850-a1c7-35dd4f6ee92b\", \"a8b71d3e-ed95-4939-8480-acf2510301d2\": \"a8b71d3e-ed95-4939-8480-acf2510301d2\", \"6cdcf151-c980-437e-b0f3-b064d480030a\": \"6cdcf151-c980-437e-b0f3-b064d480030a\", \"5ee7ae89-7ff0-43a1-a9b4-f2e324d1f009\": \"5ee7ae89-7ff0-43a1-a9b4-f2e324d1f009\", \"858dee29-ad71-45e0-9566-c3b56d20d404\": \"858dee29-ad71-45e0-9566-c3b56d20d404\", 
\"4b1afc39-002a-414e-b35a-6d2d2b8aa2b3\": \"4b1afc39-002a-414e-b35a-6d2d2b8aa2b3\", \"e2b8548b-a427-47cd-837f-b8708468c2a0\": \"e2b8548b-a427-47cd-837f-b8708468c2a0\", \"f577553d-ae2b-4f07-9949-b4a88a34d41b\": \"f577553d-ae2b-4f07-9949-b4a88a34d41b\", \"43e14ed8-79f5-4e3a-a847-f09968dba80e\": \"43e14ed8-79f5-4e3a-a847-f09968dba80e\", \"9ce311a1-d9ed-4506-80e4-640e5a253afb\": \"9ce311a1-d9ed-4506-80e4-640e5a253afb\", \"084f0d04-e515-46ea-b17a-9fcf911d3218\": \"084f0d04-e515-46ea-b17a-9fcf911d3218\", \"30da1fe5-6851-4379-b395-7a2ef4bc5d8c\": \"30da1fe5-6851-4379-b395-7a2ef4bc5d8c\", \"9da77f2c-53ac-4880-9efb-2b9bc98f4e00\": \"9da77f2c-53ac-4880-9efb-2b9bc98f4e00\", \"5791ef0c-205f-4d51-8244-2e9002254f2a\": \"5791ef0c-205f-4d51-8244-2e9002254f2a\", \"5d385fe6-41a9-4eb4-aa08-a6d34204cfc6\": \"5d385fe6-41a9-4eb4-aa08-a6d34204cfc6\", \"3298abee-aecf-4687-94bd-d638738eb206\": \"3298abee-aecf-4687-94bd-d638738eb206\", \"4fa26393-6999-4968-a4c3-cfcbe42adf67\": \"4fa26393-6999-4968-a4c3-cfcbe42adf67\", \"55074c82-f18a-4420-ba69-ffc27f93ba18\": \"55074c82-f18a-4420-ba69-ffc27f93ba18\", \"4b4a35c4-cc71-4720-b344-3f83088f06b2\": \"4b4a35c4-cc71-4720-b344-3f83088f06b2\", \"a89b1170-e083-4270-8896-886a28268e15\": \"a89b1170-e083-4270-8896-886a28268e15\", \"49834d66-2555-4b0a-9dc6-81aafa4d6c29\": \"49834d66-2555-4b0a-9dc6-81aafa4d6c29\", \"87f7d012-96a7-45c8-b1ea-c9091de34270\": \"87f7d012-96a7-45c8-b1ea-c9091de34270\", \"a0d37892-d3b1-4887-afc8-5f783d13f099\": \"a0d37892-d3b1-4887-afc8-5f783d13f099\", \"60a993a3-6e64-4ae2-b5c4-0783bbfb0249\": \"60a993a3-6e64-4ae2-b5c4-0783bbfb0249\", \"97297d92-bb2c-4b73-b96d-c7bd5026c14e\": \"97297d92-bb2c-4b73-b96d-c7bd5026c14e\", \"1d6dc6b6-a8e7-4f80-8260-406385cc707e\": \"1d6dc6b6-a8e7-4f80-8260-406385cc707e\", \"f7eedfeb-04cd-42a1-ab56-2d4f70e2644e\": \"f7eedfeb-04cd-42a1-ab56-2d4f70e2644e\", \"9e1d2df7-3c0f-4d0d-beea-ff8b30d75771\": \"9e1d2df7-3c0f-4d0d-beea-ff8b30d75771\", \"c6682aa3-3376-4299-ade7-b51006acf77a\": 
\"c6682aa3-3376-4299-ade7-b51006acf77a\", \"a306ace1-edb6-4deb-bd75-8d24754c0b03\": \"a306ace1-edb6-4deb-bd75-8d24754c0b03\", \"ba2b1673-3774-4222-bb3f-27b5ea923fcb\": \"ba2b1673-3774-4222-bb3f-27b5ea923fcb\"}, \"doc_id_dict\": {}, \"embeddings_dict\": {}}"}}} -------------------------------------------------------------------------------- /3rd_party/data/uber_index/graph_store.json: -------------------------------------------------------------------------------- 1 | {"graph_dict": {}} -------------------------------------------------------------------------------- /3rd_party/data/uber_index/image__vector_store.json: -------------------------------------------------------------------------------- 1 | {"embedding_dict": {}, "text_id_to_ref_doc_id": {}, "metadata_dict": {}} -------------------------------------------------------------------------------- /3rd_party/data/uber_index/index_store.json: -------------------------------------------------------------------------------- 1 | {"index_store/data": {"53c391c3-d4c3-4e68-9997-bbdfb72e1b57": {"__type__": "vector_store", "__data__": "{\"index_id\": \"53c391c3-d4c3-4e68-9997-bbdfb72e1b57\", \"summary\": null, \"nodes_dict\": {\"500cbce0-7211-4af4-97c8-449f639e3efe\": \"500cbce0-7211-4af4-97c8-449f639e3efe\", \"36b1d807-68c1-4d74-843c-100aed419d2f\": \"36b1d807-68c1-4d74-843c-100aed419d2f\", \"17e5df3a-1d49-4891-a490-063cfc66971f\": \"17e5df3a-1d49-4891-a490-063cfc66971f\", \"852fc65b-6cfe-4807-9a4a-1d4301bf5f9a\": \"852fc65b-6cfe-4807-9a4a-1d4301bf5f9a\", \"f1fee21b-5ee7-4790-990e-9537e8880247\": \"f1fee21b-5ee7-4790-990e-9537e8880247\", \"84c82621-a41f-4d42-87ec-2178fa9be4dd\": \"84c82621-a41f-4d42-87ec-2178fa9be4dd\", \"b4fb3f3e-6944-4161-9521-90492b2933f7\": \"b4fb3f3e-6944-4161-9521-90492b2933f7\", \"d3fa75f4-3711-490d-bf64-71dc76874487\": \"d3fa75f4-3711-490d-bf64-71dc76874487\", \"9c7f7b46-8e20-48aa-8257-c76c8999e81e\": \"9c7f7b46-8e20-48aa-8257-c76c8999e81e\", \"c3342d8b-f9a7-49c3-8df3-0b4da365cd0b\": 
\"c3342d8b-f9a7-49c3-8df3-0b4da365cd0b\", \"25cfc1ee-d129-4f1d-ab7f-d1ba8f06f02a\": \"25cfc1ee-d129-4f1d-ab7f-d1ba8f06f02a\", \"d0d57d84-79fd-4b03-9d5a-801048f82f04\": \"d0d57d84-79fd-4b03-9d5a-801048f82f04\", \"8d2c1b9e-7705-4ad1-bc62-17f6395a2760\": \"8d2c1b9e-7705-4ad1-bc62-17f6395a2760\", \"4337ba14-d340-4470-8bc6-728308bf2b78\": \"4337ba14-d340-4470-8bc6-728308bf2b78\", \"65b53352-cb6d-4e84-83ab-3f312c4dff49\": \"65b53352-cb6d-4e84-83ab-3f312c4dff49\", \"6a256f98-b553-486f-a13e-df6b1de6ef7f\": \"6a256f98-b553-486f-a13e-df6b1de6ef7f\", \"94410f8e-2045-489b-a315-c00e34999fe3\": \"94410f8e-2045-489b-a315-c00e34999fe3\", \"e1b459a5-59fe-4590-b83d-044f8db2be7d\": \"e1b459a5-59fe-4590-b83d-044f8db2be7d\", \"72bbf1a8-b219-4c50-9164-49165c4bf406\": \"72bbf1a8-b219-4c50-9164-49165c4bf406\", \"554af160-263e-471d-8972-1f29ec4583fb\": \"554af160-263e-471d-8972-1f29ec4583fb\", \"f281f92b-edcd-46f7-a719-95ac88ab95e5\": \"f281f92b-edcd-46f7-a719-95ac88ab95e5\", \"7e753c1e-4b4f-42c1-8e0d-badd9531bbe7\": \"7e753c1e-4b4f-42c1-8e0d-badd9531bbe7\", \"9a2f2de4-bffa-4afc-b4dd-4d6015002cb2\": \"9a2f2de4-bffa-4afc-b4dd-4d6015002cb2\", \"ad1fec3f-8f1f-402d-b36d-57f028dac94e\": \"ad1fec3f-8f1f-402d-b36d-57f028dac94e\", \"200ac4ce-4924-4f85-90f8-d5136ea98cea\": \"200ac4ce-4924-4f85-90f8-d5136ea98cea\", \"afa27913-5fde-4b1d-909f-88e9d233adfa\": \"afa27913-5fde-4b1d-909f-88e9d233adfa\", \"d357fb09-ea9b-4823-a844-3400323e59b5\": \"d357fb09-ea9b-4823-a844-3400323e59b5\", \"172518ad-8aed-4849-953a-527362d9289b\": \"172518ad-8aed-4849-953a-527362d9289b\", \"0f087e89-fd59-42c6-bd93-75a54ec165f8\": \"0f087e89-fd59-42c6-bd93-75a54ec165f8\", \"0af912c0-f88d-4f90-b96b-47f36bac6692\": \"0af912c0-f88d-4f90-b96b-47f36bac6692\", \"0ea6abc5-88bb-4d53-8cd6-e37e499732bf\": \"0ea6abc5-88bb-4d53-8cd6-e37e499732bf\", \"7f4cf809-43a0-484f-85ad-a99b03fc990c\": \"7f4cf809-43a0-484f-85ad-a99b03fc990c\", \"642df7c7-342c-459d-9a26-431a13f3717b\": \"642df7c7-342c-459d-9a26-431a13f3717b\", 
\"5dfc4008-a979-4393-965a-cb60d4383560\": \"5dfc4008-a979-4393-965a-cb60d4383560\", \"5e576ec8-325f-4be8-a4ce-886b5e01acbf\": \"5e576ec8-325f-4be8-a4ce-886b5e01acbf\", \"8d713e44-538c-4eb2-926c-a42a959eb7ab\": \"8d713e44-538c-4eb2-926c-a42a959eb7ab\", \"f57bd565-fde4-40cb-935e-08022fff439b\": \"f57bd565-fde4-40cb-935e-08022fff439b\", \"2886d4c0-f340-420e-9d01-529276946c44\": \"2886d4c0-f340-420e-9d01-529276946c44\", \"9f361958-62ee-4a79-bab0-bd0c5e97a202\": \"9f361958-62ee-4a79-bab0-bd0c5e97a202\", \"defec0b3-92d2-4e74-b08e-b61c4d14a886\": \"defec0b3-92d2-4e74-b08e-b61c4d14a886\", \"2c55a15f-3088-446b-9824-ca0223df6248\": \"2c55a15f-3088-446b-9824-ca0223df6248\", \"b037a97f-1e63-43f4-8aa0-79537c17b239\": \"b037a97f-1e63-43f4-8aa0-79537c17b239\", \"9c1bfd12-80aa-404b-a6f6-c416bdd0c06d\": \"9c1bfd12-80aa-404b-a6f6-c416bdd0c06d\", \"7068c700-3322-41e0-a044-227d5d863731\": \"7068c700-3322-41e0-a044-227d5d863731\", \"8cb8a0e4-bea1-47b1-a3cd-583e23838ca2\": \"8cb8a0e4-bea1-47b1-a3cd-583e23838ca2\", \"d2f951e7-d14c-4252-a66a-86aecc35671d\": \"d2f951e7-d14c-4252-a66a-86aecc35671d\", \"f59b009b-3d19-4f6f-8dc9-2bbb902a0e77\": \"f59b009b-3d19-4f6f-8dc9-2bbb902a0e77\", \"ee64cdc8-97b8-4b7f-b4f4-616157732948\": \"ee64cdc8-97b8-4b7f-b4f4-616157732948\", \"4a5f9596-a219-4b9f-b12b-9af763cd343b\": \"4a5f9596-a219-4b9f-b12b-9af763cd343b\", \"9b3cca48-c54a-457d-9190-9ef779a39776\": \"9b3cca48-c54a-457d-9190-9ef779a39776\", \"d81fb76d-7afe-4d0c-93f1-cf7efe90fb40\": \"d81fb76d-7afe-4d0c-93f1-cf7efe90fb40\", \"a61b40eb-82c7-486f-a48c-d50d430202cd\": \"a61b40eb-82c7-486f-a48c-d50d430202cd\", \"3fd52898-aa21-4e2d-bae2-696962740922\": \"3fd52898-aa21-4e2d-bae2-696962740922\", \"3f6aebfc-55c7-4243-a100-f93835719139\": \"3f6aebfc-55c7-4243-a100-f93835719139\", \"4cebbe4d-a884-44fc-8678-c75100a16b57\": \"4cebbe4d-a884-44fc-8678-c75100a16b57\", \"3fa2a693-8ad0-4955-90b7-1ece48eab37e\": \"3fa2a693-8ad0-4955-90b7-1ece48eab37e\", \"14881f88-80b1-48cf-bce5-79aac912c51b\": 
\"14881f88-80b1-48cf-bce5-79aac912c51b\", \"8308cfa6-c342-4316-8968-d475177da88c\": \"8308cfa6-c342-4316-8968-d475177da88c\", \"aa0283ef-03af-4659-a145-93f53b9c98ac\": \"aa0283ef-03af-4659-a145-93f53b9c98ac\", \"c8a62267-5540-4025-8e84-7cffdf2d7315\": \"c8a62267-5540-4025-8e84-7cffdf2d7315\", \"1750bff7-1553-46b4-a0f6-c40419224f0f\": \"1750bff7-1553-46b4-a0f6-c40419224f0f\", \"476b2116-404e-45c0-adde-80a820c70750\": \"476b2116-404e-45c0-adde-80a820c70750\", \"9a749b6b-a471-470c-b77a-67a58160e271\": \"9a749b6b-a471-470c-b77a-67a58160e271\", \"c21a763e-98b5-4084-84a1-e7d49ce2fd4d\": \"c21a763e-98b5-4084-84a1-e7d49ce2fd4d\", \"f78b19bd-5225-4624-9a6e-fdfdf48b3ffb\": \"f78b19bd-5225-4624-9a6e-fdfdf48b3ffb\", \"1180f80c-2aef-4914-8ddc-3d3d5ff1638e\": \"1180f80c-2aef-4914-8ddc-3d3d5ff1638e\", \"e3572053-8508-47e4-90eb-34adacb5261f\": \"e3572053-8508-47e4-90eb-34adacb5261f\", \"fccfeda4-fa43-4fec-bc60-0ffb690a24df\": \"fccfeda4-fa43-4fec-bc60-0ffb690a24df\", \"104b0399-b096-448c-a801-a4a6f770e743\": \"104b0399-b096-448c-a801-a4a6f770e743\", \"3b64d91d-0094-40b6-bb5b-892ad4e105a3\": \"3b64d91d-0094-40b6-bb5b-892ad4e105a3\", \"01db7d5b-6e01-4dc3-aa12-16273c5c0c85\": \"01db7d5b-6e01-4dc3-aa12-16273c5c0c85\", \"049c1ce0-7d9a-4411-801a-359380e767b7\": \"049c1ce0-7d9a-4411-801a-359380e767b7\", \"9d7d982e-7161-4b55-9809-ed47aa04f6b3\": \"9d7d982e-7161-4b55-9809-ed47aa04f6b3\", \"80df6e7b-925b-4735-86f5-fb90ae71cdd7\": \"80df6e7b-925b-4735-86f5-fb90ae71cdd7\", \"89db7977-17d2-4328-abcc-f0354a24581a\": \"89db7977-17d2-4328-abcc-f0354a24581a\", \"896928a6-2f07-4f2b-b9e9-06c9fe3e3372\": \"896928a6-2f07-4f2b-b9e9-06c9fe3e3372\", \"f3662877-9fac-46d8-b36e-daa9b2d40415\": \"f3662877-9fac-46d8-b36e-daa9b2d40415\", \"860b74ef-09ea-41c0-98d6-8087ab6fbd05\": \"860b74ef-09ea-41c0-98d6-8087ab6fbd05\", \"207f4e50-0e35-4080-ab94-897afadfd71e\": \"207f4e50-0e35-4080-ab94-897afadfd71e\", \"42274869-4578-452c-aaad-3e17f1254c22\": \"42274869-4578-452c-aaad-3e17f1254c22\", 
\"6f95f237-62d0-415e-a8cf-adcec52331f0\": \"6f95f237-62d0-415e-a8cf-adcec52331f0\", \"32cac13d-22d9-4e49-a3f3-bd789b763bd8\": \"32cac13d-22d9-4e49-a3f3-bd789b763bd8\", \"30412b12-d77a-4b37-9016-5e0a4f039747\": \"30412b12-d77a-4b37-9016-5e0a4f039747\", \"8529abbe-299f-42ee-a9a0-d06664b26317\": \"8529abbe-299f-42ee-a9a0-d06664b26317\", \"90fb3049-2964-48c3-9cb1-48c6dcdd3abf\": \"90fb3049-2964-48c3-9cb1-48c6dcdd3abf\", \"246e063f-4310-4b64-9bbc-340c12f29539\": \"246e063f-4310-4b64-9bbc-340c12f29539\", \"43c5ce9c-0c26-48e2-a803-db308e551081\": \"43c5ce9c-0c26-48e2-a803-db308e551081\", \"a43024d6-5314-46d5-b8c1-00d9326b480b\": \"a43024d6-5314-46d5-b8c1-00d9326b480b\", \"e36ae248-0f52-416b-b0ee-d65c9db8233a\": \"e36ae248-0f52-416b-b0ee-d65c9db8233a\", \"591415c4-8080-4a51-ad19-1bf401941f0c\": \"591415c4-8080-4a51-ad19-1bf401941f0c\", \"7d99cb6d-571e-4fd6-af9f-8e7a0c22a830\": \"7d99cb6d-571e-4fd6-af9f-8e7a0c22a830\", \"f6bb7f82-dca1-4004-a9e2-0b5b1753638a\": \"f6bb7f82-dca1-4004-a9e2-0b5b1753638a\", \"d4fd4551-5d73-4ef9-919b-af18f65895a5\": \"d4fd4551-5d73-4ef9-919b-af18f65895a5\", \"73b0b7eb-a821-4fbb-a4d1-cbf98479976e\": \"73b0b7eb-a821-4fbb-a4d1-cbf98479976e\", \"66736422-5e90-4197-af7a-baf789d2cd4c\": \"66736422-5e90-4197-af7a-baf789d2cd4c\", \"3c014a2c-b183-44df-a11e-2016918c4a67\": \"3c014a2c-b183-44df-a11e-2016918c4a67\", \"2f4231f1-2927-4bf5-a1e6-caa53599aa99\": \"2f4231f1-2927-4bf5-a1e6-caa53599aa99\", \"04ef44e5-7692-412e-bf54-71e5796d47af\": \"04ef44e5-7692-412e-bf54-71e5796d47af\", \"b1e0e03d-3996-4006-a3ea-c149670717ce\": \"b1e0e03d-3996-4006-a3ea-c149670717ce\", \"27b94c1d-3237-46d7-baa0-554b03d05e78\": \"27b94c1d-3237-46d7-baa0-554b03d05e78\", \"8c113bca-11ba-47be-a7e3-3183fc69ab3f\": \"8c113bca-11ba-47be-a7e3-3183fc69ab3f\", \"b2eda367-36cf-4e8f-bba4-3421937f6c96\": \"b2eda367-36cf-4e8f-bba4-3421937f6c96\", \"6228b35e-9f52-40d5-a2f9-c3f3b889f3f1\": \"6228b35e-9f52-40d5-a2f9-c3f3b889f3f1\", \"0da2fced-baa6-4def-8de0-a857597bd30f\": 
\"0da2fced-baa6-4def-8de0-a857597bd30f\", \"0bd84587-122a-45ae-943a-6a63b5ff78d3\": \"0bd84587-122a-45ae-943a-6a63b5ff78d3\", \"83f034ac-bc8c-41dd-a277-01c6fefd8229\": \"83f034ac-bc8c-41dd-a277-01c6fefd8229\", \"b445e301-49e8-41fd-ae0f-ffa01e573d65\": \"b445e301-49e8-41fd-ae0f-ffa01e573d65\", \"ca578ec1-c6d4-4126-80d7-58f57761c12d\": \"ca578ec1-c6d4-4126-80d7-58f57761c12d\", \"54807469-4245-4ae9-97be-93d80d99181d\": \"54807469-4245-4ae9-97be-93d80d99181d\", \"62f90cac-da8b-4aac-a78f-d3b43d1f7c72\": \"62f90cac-da8b-4aac-a78f-d3b43d1f7c72\", \"42fa8728-833c-44a4-a53a-4f4aae19e98e\": \"42fa8728-833c-44a4-a53a-4f4aae19e98e\", \"fcd75eeb-cff1-42e2-b35c-161950a14ef4\": \"fcd75eeb-cff1-42e2-b35c-161950a14ef4\", \"fc3e8807-c272-4754-a3b0-84f2397cc5d4\": \"fc3e8807-c272-4754-a3b0-84f2397cc5d4\", \"0eebcdf5-c8f0-45f0-b17f-e6a3cae1d24c\": \"0eebcdf5-c8f0-45f0-b17f-e6a3cae1d24c\", \"2331d6ac-fbf7-4830-839d-229583333ba0\": \"2331d6ac-fbf7-4830-839d-229583333ba0\", \"da0923cf-fce7-44cc-915d-ef808ee4039e\": \"da0923cf-fce7-44cc-915d-ef808ee4039e\", \"843d62f6-8f70-4491-a11c-6e7406def8db\": \"843d62f6-8f70-4491-a11c-6e7406def8db\", \"8b33d5cf-486b-4163-98ed-6fce95f8c7f3\": \"8b33d5cf-486b-4163-98ed-6fce95f8c7f3\", \"2c2d3601-47ed-44c5-9e44-21efdf0bcf94\": \"2c2d3601-47ed-44c5-9e44-21efdf0bcf94\", \"186dbf6e-f31a-4373-881d-42351f32a4c8\": \"186dbf6e-f31a-4373-881d-42351f32a4c8\", \"beb7b775-c9ff-43a4-92b8-3f412b19ccd3\": \"beb7b775-c9ff-43a4-92b8-3f412b19ccd3\", \"35867be0-e3cc-46de-acba-386fe6ec1bea\": \"35867be0-e3cc-46de-acba-386fe6ec1bea\", \"fdbc1677-26a4-4b27-89f3-0395994e3895\": \"fdbc1677-26a4-4b27-89f3-0395994e3895\", \"ab2bc76b-8a2a-4359-89c1-367278e6739c\": \"ab2bc76b-8a2a-4359-89c1-367278e6739c\", \"2e1acd0d-3cd2-4a2b-b3f8-8194d246b2f0\": \"2e1acd0d-3cd2-4a2b-b3f8-8194d246b2f0\", \"8f2b6010-e9cc-4b7f-846a-ad6d96bef731\": \"8f2b6010-e9cc-4b7f-846a-ad6d96bef731\", \"70407c45-9579-4ddd-8297-c7cd4b6ad776\": \"70407c45-9579-4ddd-8297-c7cd4b6ad776\", 
\"2912e764-47a5-4f26-ad53-bd7f60025ea7\": \"2912e764-47a5-4f26-ad53-bd7f60025ea7\", \"d88b14e2-d005-46c6-9332-6b734960a42f\": \"d88b14e2-d005-46c6-9332-6b734960a42f\", \"7bcc7afa-17a5-40f2-9797-34c5d9b3cc63\": \"7bcc7afa-17a5-40f2-9797-34c5d9b3cc63\", \"01b87b3a-3948-4cb3-81ed-d57cbc61df32\": \"01b87b3a-3948-4cb3-81ed-d57cbc61df32\", \"7ed0dbc4-f5cb-4a84-9852-d8dbf3c09bd0\": \"7ed0dbc4-f5cb-4a84-9852-d8dbf3c09bd0\", \"c13afbc0-7d26-4686-a446-20ed95f0c838\": \"c13afbc0-7d26-4686-a446-20ed95f0c838\", \"1356722b-0f9e-443f-8311-15dd72340f01\": \"1356722b-0f9e-443f-8311-15dd72340f01\", \"ab75e9d0-cc74-4192-9444-638ed9640224\": \"ab75e9d0-cc74-4192-9444-638ed9640224\", \"99cfd751-d150-4ba7-8617-3a92e1a0bf18\": \"99cfd751-d150-4ba7-8617-3a92e1a0bf18\", \"3116d5de-fe47-4036-b55c-3337be14b568\": \"3116d5de-fe47-4036-b55c-3337be14b568\", \"0942673a-7bd1-412b-8eb0-cddd2de535dd\": \"0942673a-7bd1-412b-8eb0-cddd2de535dd\", \"2cef4bc0-a398-4e0b-b35c-7780d290c4ab\": \"2cef4bc0-a398-4e0b-b35c-7780d290c4ab\", \"70cd2740-9d75-4ce5-913c-4261cb9290ae\": \"70cd2740-9d75-4ce5-913c-4261cb9290ae\", \"62b7f367-a98c-4ed8-89a9-2035ee2f807e\": \"62b7f367-a98c-4ed8-89a9-2035ee2f807e\", \"ac12f8c9-d049-47d2-a4bb-7adbf2ff95a5\": \"ac12f8c9-d049-47d2-a4bb-7adbf2ff95a5\", \"bf941fa9-0b6f-4d92-8db7-114c198f8108\": \"bf941fa9-0b6f-4d92-8db7-114c198f8108\", \"c9b476a1-f269-42f9-8862-8cbfeeac4d22\": \"c9b476a1-f269-42f9-8862-8cbfeeac4d22\", \"8c680393-387b-43f6-866c-a0d8b02edea3\": \"8c680393-387b-43f6-866c-a0d8b02edea3\", \"32e88b0e-9a88-4f3f-870e-07bd06f31af7\": \"32e88b0e-9a88-4f3f-870e-07bd06f31af7\", \"e1c5eb9a-033a-40dc-9122-7a4feac529a6\": \"e1c5eb9a-033a-40dc-9122-7a4feac529a6\", \"28af87f3-8c68-497d-b32e-5fb1fa8ed018\": \"28af87f3-8c68-497d-b32e-5fb1fa8ed018\", \"494e6e03-f071-4b1c-892a-df6a84562d0a\": \"494e6e03-f071-4b1c-892a-df6a84562d0a\", \"cc48b069-b16f-4e2c-9e59-738e28a7334b\": \"cc48b069-b16f-4e2c-9e59-738e28a7334b\", \"013acf66-63ab-4e61-a9fb-f20c8108d7a4\": 
\"013acf66-63ab-4e61-a9fb-f20c8108d7a4\", \"648c3917-9b22-4f00-88b1-15a8e70bc959\": \"648c3917-9b22-4f00-88b1-15a8e70bc959\", \"181a0d18-243f-49e6-ba8c-847157cf3714\": \"181a0d18-243f-49e6-ba8c-847157cf3714\", \"9cff322e-43e0-4424-bed1-8cf271596651\": \"9cff322e-43e0-4424-bed1-8cf271596651\", \"fc3ed2d2-89bc-49a6-bda7-3a139afb6b51\": \"fc3ed2d2-89bc-49a6-bda7-3a139afb6b51\", \"8c1342a3-f191-442e-a039-413fdb87356d\": \"8c1342a3-f191-442e-a039-413fdb87356d\", \"0a41467f-e9be-4b5e-876b-208f1a16cbe9\": \"0a41467f-e9be-4b5e-876b-208f1a16cbe9\", \"6c9a8e17-3595-4aac-978f-b2eb68661c5c\": \"6c9a8e17-3595-4aac-978f-b2eb68661c5c\", \"e14bef9a-c717-427a-a418-fc17ccea1c93\": \"e14bef9a-c717-427a-a418-fc17ccea1c93\", \"e01342cf-f138-41e6-8b9e-5730702ef687\": \"e01342cf-f138-41e6-8b9e-5730702ef687\", \"f223ca17-8c2c-4ddd-9eef-4a34cc3f470f\": \"f223ca17-8c2c-4ddd-9eef-4a34cc3f470f\", \"eb842e76-7e5d-43df-99d9-a55c0db67011\": \"eb842e76-7e5d-43df-99d9-a55c0db67011\", \"39682fc5-0e6e-477c-818a-4617a7860fc3\": \"39682fc5-0e6e-477c-818a-4617a7860fc3\", \"bf7c5f49-3104-4bef-98f2-98ea7db01c2d\": \"bf7c5f49-3104-4bef-98f2-98ea7db01c2d\", \"131891d3-71df-4420-9917-395d90b50305\": \"131891d3-71df-4420-9917-395d90b50305\", \"e0bedde0-9c17-4e57-9867-5aab61aedcd6\": \"e0bedde0-9c17-4e57-9867-5aab61aedcd6\", \"fd442971-6d6b-4bff-9655-0a87b1121236\": \"fd442971-6d6b-4bff-9655-0a87b1121236\", \"b488378c-3cfe-4d0a-b639-939612f7c5b6\": \"b488378c-3cfe-4d0a-b639-939612f7c5b6\", \"7f525da1-6b9e-4deb-8325-a979a52cc819\": \"7f525da1-6b9e-4deb-8325-a979a52cc819\", \"404828e2-ab65-404e-afd3-44e491956963\": \"404828e2-ab65-404e-afd3-44e491956963\", \"3d78dff6-ef10-4984-a625-5598ac8df9b2\": \"3d78dff6-ef10-4984-a625-5598ac8df9b2\", \"4a6af5db-7bac-47f8-9f4b-8fa9a0c81c7f\": \"4a6af5db-7bac-47f8-9f4b-8fa9a0c81c7f\", \"fe68f542-cea0-44cc-882a-3fff1877fd37\": \"fe68f542-cea0-44cc-882a-3fff1877fd37\", \"5a63c205-eb8c-45a6-8ad6-6b9562ed163f\": \"5a63c205-eb8c-45a6-8ad6-6b9562ed163f\", 
\"6142c39d-5e80-4f53-b3ec-f5fa7ab12201\": \"6142c39d-5e80-4f53-b3ec-f5fa7ab12201\", \"8bbb5bbb-2345-4ec8-b10d-79da4cfc3788\": \"8bbb5bbb-2345-4ec8-b10d-79da4cfc3788\", \"7a347cd2-7821-4dd7-bb4d-b54acaeb5d75\": \"7a347cd2-7821-4dd7-bb4d-b54acaeb5d75\", \"2bd041ee-d016-4d45-94a1-b2427c1d44d2\": \"2bd041ee-d016-4d45-94a1-b2427c1d44d2\", \"e85e84cd-14aa-4882-8431-c7a349bd5cf3\": \"e85e84cd-14aa-4882-8431-c7a349bd5cf3\", \"54a51086-dfed-4a88-9083-adf7ad18a41e\": \"54a51086-dfed-4a88-9083-adf7ad18a41e\", \"d5b0d2c9-7ca0-48f6-aa8d-15ca3398d42f\": \"d5b0d2c9-7ca0-48f6-aa8d-15ca3398d42f\", \"8a440997-7715-4c63-85a1-1597cfb0a014\": \"8a440997-7715-4c63-85a1-1597cfb0a014\", \"2c98bc8f-5874-4af7-a38f-90671d222aaf\": \"2c98bc8f-5874-4af7-a38f-90671d222aaf\", \"56672962-cde6-47b8-8fec-2ff4f701b774\": \"56672962-cde6-47b8-8fec-2ff4f701b774\", \"9de58ddf-c353-4c3d-b80e-daa721e93991\": \"9de58ddf-c353-4c3d-b80e-daa721e93991\", \"ddfde823-7dd0-4567-867d-3cd17509ba84\": \"ddfde823-7dd0-4567-867d-3cd17509ba84\", \"c78fc8aa-e653-436a-8150-f0688fab732b\": \"c78fc8aa-e653-436a-8150-f0688fab732b\", \"88e31eef-09e2-4043-af03-579ae2b8bd3a\": \"88e31eef-09e2-4043-af03-579ae2b8bd3a\", \"40375eae-931b-475f-8d06-d6284b62a895\": \"40375eae-931b-475f-8d06-d6284b62a895\", \"be1c4de5-3d2b-4245-a844-0b3e8e022a23\": \"be1c4de5-3d2b-4245-a844-0b3e8e022a23\", \"a88bd25e-1bc1-4d67-900e-2d78ecb7833c\": \"a88bd25e-1bc1-4d67-900e-2d78ecb7833c\", \"e7fd24bf-b0ad-47e2-a0a0-3385d383827d\": \"e7fd24bf-b0ad-47e2-a0a0-3385d383827d\", \"dcef90bf-680c-48a4-ab63-18b37ce9b1c6\": \"dcef90bf-680c-48a4-ab63-18b37ce9b1c6\", \"038a15c4-42fc-4bd8-9081-c8e9fe8e12db\": \"038a15c4-42fc-4bd8-9081-c8e9fe8e12db\", \"1fc6d6f6-7bd0-45a6-9651-f9762a2f7cf0\": \"1fc6d6f6-7bd0-45a6-9651-f9762a2f7cf0\", \"d1d36bec-4b9f-44fd-ab06-4fefe90a455b\": \"d1d36bec-4b9f-44fd-ab06-4fefe90a455b\", \"7b8a47e0-2c85-4bf2-9aab-61452c0073f6\": \"7b8a47e0-2c85-4bf2-9aab-61452c0073f6\", \"6939c849-ed3f-40ba-bcef-a46ed36a347b\": 
\"6939c849-ed3f-40ba-bcef-a46ed36a347b\", \"b149c267-1c6c-4420-bfa1-c6c4cefb8340\": \"b149c267-1c6c-4420-bfa1-c6c4cefb8340\", \"b6bc982e-7595-4df6-81f8-bb79e58c77b6\": \"b6bc982e-7595-4df6-81f8-bb79e58c77b6\", \"17e41f78-4a6a-4281-a61e-58bf64ea86ca\": \"17e41f78-4a6a-4281-a61e-58bf64ea86ca\", \"1fbe309c-6e82-46c8-91e0-44b49b1e46ea\": \"1fbe309c-6e82-46c8-91e0-44b49b1e46ea\", \"fe57caeb-9ad4-44d9-b87b-ad195c1ae28e\": \"fe57caeb-9ad4-44d9-b87b-ad195c1ae28e\", \"72aceaa3-d3dd-48ba-98fc-7f15db36b9d5\": \"72aceaa3-d3dd-48ba-98fc-7f15db36b9d5\", \"d3a9f914-a6f8-4a1d-ab90-b93e6976e252\": \"d3a9f914-a6f8-4a1d-ab90-b93e6976e252\", \"0a51de64-d4bf-4213-afbc-9ae61715ecbf\": \"0a51de64-d4bf-4213-afbc-9ae61715ecbf\", \"c152c32e-715b-45cd-bc96-b953a64c98bc\": \"c152c32e-715b-45cd-bc96-b953a64c98bc\", \"7bd8213d-55de-41af-bf1c-3f7e108d677d\": \"7bd8213d-55de-41af-bf1c-3f7e108d677d\", \"1b4a9980-42b4-4d57-9ef5-f0fb2a926a46\": \"1b4a9980-42b4-4d57-9ef5-f0fb2a926a46\", \"3e12a1e6-f794-4348-aec4-081835d6ded2\": \"3e12a1e6-f794-4348-aec4-081835d6ded2\", \"eefa6d17-f124-43bb-bd0e-57ad5ad38b9f\": \"eefa6d17-f124-43bb-bd0e-57ad5ad38b9f\", \"ac703ee8-310e-4c47-ac94-0113d30255d9\": \"ac703ee8-310e-4c47-ac94-0113d30255d9\", \"23561c0b-26e3-4279-92e4-cbd261be98ff\": \"23561c0b-26e3-4279-92e4-cbd261be98ff\", \"da098905-7270-4ee5-a110-e810b8d4fc2d\": \"da098905-7270-4ee5-a110-e810b8d4fc2d\", \"78436056-efeb-4e79-b37c-bed21999c687\": \"78436056-efeb-4e79-b37c-bed21999c687\", \"be218591-bcfc-4a18-8ce9-015bd255aece\": \"be218591-bcfc-4a18-8ce9-015bd255aece\", \"9f5df959-8ba5-4c64-8495-2ca6c076c79b\": \"9f5df959-8ba5-4c64-8495-2ca6c076c79b\", \"7f7102f8-ad68-4391-b3fb-7e8889dbb0b9\": \"7f7102f8-ad68-4391-b3fb-7e8889dbb0b9\", \"d36ed56c-5c16-42c4-abfe-93e1d7016f80\": \"d36ed56c-5c16-42c4-abfe-93e1d7016f80\", \"90fbd42e-5d3c-4580-b720-28951e083d33\": \"90fbd42e-5d3c-4580-b720-28951e083d33\", \"896b601b-7ba9-4ed5-b01c-9d61c09b4bbd\": \"896b601b-7ba9-4ed5-b01c-9d61c09b4bbd\", 
\"c1869db3-2fab-4c61-8672-44d3d3154e68\": \"c1869db3-2fab-4c61-8672-44d3d3154e68\", \"60ea3ed0-fee0-4a63-a27f-339936304c07\": \"60ea3ed0-fee0-4a63-a27f-339936304c07\", \"dca5128e-039b-47b5-b1c9-b48363b25ae0\": \"dca5128e-039b-47b5-b1c9-b48363b25ae0\", \"7de541ba-a4f8-45be-bbf2-3e5b4910e5f1\": \"7de541ba-a4f8-45be-bbf2-3e5b4910e5f1\", \"3aec6282-3105-40e1-b44b-c4be56a373e1\": \"3aec6282-3105-40e1-b44b-c4be56a373e1\", \"460b54d4-99a6-425b-bc83-3222b48cfb98\": \"460b54d4-99a6-425b-bc83-3222b48cfb98\", \"2ecb9347-4c5f-41dc-a784-00d83ae620a9\": \"2ecb9347-4c5f-41dc-a784-00d83ae620a9\", \"dcbabeab-18d2-4658-a09f-cd75bd9625e2\": \"dcbabeab-18d2-4658-a09f-cd75bd9625e2\", \"6ec4f9a6-4066-4f88-a70e-e4c2576ec850\": \"6ec4f9a6-4066-4f88-a70e-e4c2576ec850\", \"f633dbb0-d358-40a4-b8a4-305284161619\": \"f633dbb0-d358-40a4-b8a4-305284161619\", \"2e0acaa3-b5c7-4ab6-bcd6-1ded93ee7c88\": \"2e0acaa3-b5c7-4ab6-bcd6-1ded93ee7c88\", \"211814f8-b681-4f32-a3fa-0f7714c7bdd1\": \"211814f8-b681-4f32-a3fa-0f7714c7bdd1\", \"f992980a-7b06-473a-b551-73041685631b\": \"f992980a-7b06-473a-b551-73041685631b\", \"bc5deff4-fe8b-4c6d-a5e6-c77819824ecb\": \"bc5deff4-fe8b-4c6d-a5e6-c77819824ecb\", \"e88a0454-b1ca-49b8-ad1c-f227cb207a09\": \"e88a0454-b1ca-49b8-ad1c-f227cb207a09\", \"d5d8bf24-c246-4887-8bbd-3f347eeafdf7\": \"d5d8bf24-c246-4887-8bbd-3f347eeafdf7\", \"77099ea5-3c7c-4f54-9404-f05645dc95fa\": \"77099ea5-3c7c-4f54-9404-f05645dc95fa\", \"2775fe1f-282c-4c7d-9ab1-15f13fa61e6a\": \"2775fe1f-282c-4c7d-9ab1-15f13fa61e6a\", \"5dd581b9-f10d-42e9-a163-d5e1e068f5b5\": \"5dd581b9-f10d-42e9-a163-d5e1e068f5b5\", \"c538a865-4eb7-4b63-a98e-868110fd13ed\": \"c538a865-4eb7-4b63-a98e-868110fd13ed\", \"8df5217c-c0d6-44c7-b31d-e06a90d31541\": \"8df5217c-c0d6-44c7-b31d-e06a90d31541\", \"5d31911d-811c-4ae3-a191-402ac27dcbac\": \"5d31911d-811c-4ae3-a191-402ac27dcbac\", \"a040d503-0063-4844-9dc3-aaeef5262699\": \"a040d503-0063-4844-9dc3-aaeef5262699\", \"0b1b142e-42a0-4c3b-8cfb-6a3b37d75075\": 
\"0b1b142e-42a0-4c3b-8cfb-6a3b37d75075\", \"859048ec-bad6-40ca-a991-f33252260698\": \"859048ec-bad6-40ca-a991-f33252260698\", \"94eb9118-5907-401d-a528-006da2198e10\": \"94eb9118-5907-401d-a528-006da2198e10\", \"40182ade-e27b-4073-bfc8-c87b016a16e4\": \"40182ade-e27b-4073-bfc8-c87b016a16e4\", \"48c8e065-24e3-4620-81da-f43cf4f69d83\": \"48c8e065-24e3-4620-81da-f43cf4f69d83\", \"240517cd-d351-4f81-998c-bbefba7a002f\": \"240517cd-d351-4f81-998c-bbefba7a002f\", \"6f2d843a-424b-4e2d-805e-45c791808083\": \"6f2d843a-424b-4e2d-805e-45c791808083\", \"1f17dc5d-50cb-45f5-812d-cf546c710638\": \"1f17dc5d-50cb-45f5-812d-cf546c710638\", \"4979a211-6fad-4ac1-81c7-0268a28ef6ad\": \"4979a211-6fad-4ac1-81c7-0268a28ef6ad\", \"03525f46-0f5e-44ea-acf5-feead629d503\": \"03525f46-0f5e-44ea-acf5-feead629d503\", \"40c18041-890a-4ad6-a82e-099e3c18ab05\": \"40c18041-890a-4ad6-a82e-099e3c18ab05\", \"b4bd8506-8a1b-4653-981a-04d1704d30a3\": \"b4bd8506-8a1b-4653-981a-04d1704d30a3\", \"df18a697-f1ae-484b-919d-5d8b717e14ce\": \"df18a697-f1ae-484b-919d-5d8b717e14ce\", \"397a65cc-3601-4b5e-97b4-b1aed57f1986\": \"397a65cc-3601-4b5e-97b4-b1aed57f1986\", \"52d9d806-099b-4981-b7ca-da5b24efa9cc\": \"52d9d806-099b-4981-b7ca-da5b24efa9cc\", \"ec1404d1-26a0-4386-8cd8-a335ca2464b7\": \"ec1404d1-26a0-4386-8cd8-a335ca2464b7\", \"e727596f-8ab0-4f11-8d37-213f8e71580b\": \"e727596f-8ab0-4f11-8d37-213f8e71580b\", \"5ab01fe7-6fd7-45cf-957d-93606c735f52\": \"5ab01fe7-6fd7-45cf-957d-93606c735f52\", \"deee6a7e-7bc3-4f94-9500-59f7cadc2885\": \"deee6a7e-7bc3-4f94-9500-59f7cadc2885\", \"5aca1a29-a34b-408e-a3d3-0aa413969b02\": \"5aca1a29-a34b-408e-a3d3-0aa413969b02\", \"2cc668c3-09a8-427e-b457-669730f99b58\": \"2cc668c3-09a8-427e-b457-669730f99b58\", \"0f3d700d-1e23-4edd-86bd-2b0ca098f3de\": \"0f3d700d-1e23-4edd-86bd-2b0ca098f3de\", \"061bbd25-c931-4a0c-8b81-0f132d9e0f9b\": \"061bbd25-c931-4a0c-8b81-0f132d9e0f9b\", \"18187a46-1780-4ba9-9a50-554843fb329f\": \"18187a46-1780-4ba9-9a50-554843fb329f\", 
\"0d7ee83d-10ce-4aba-ab50-1cf1d1568d3b\": \"0d7ee83d-10ce-4aba-ab50-1cf1d1568d3b\", \"6a4d74ab-77c9-4178-86d0-adec3cc62886\": \"6a4d74ab-77c9-4178-86d0-adec3cc62886\", \"596f1a79-c8bf-49b8-9d6b-dad0e4aade88\": \"596f1a79-c8bf-49b8-9d6b-dad0e4aade88\", \"1141cb19-4d20-4cb6-8397-d690446046b5\": \"1141cb19-4d20-4cb6-8397-d690446046b5\", \"a61c27cb-fb51-476f-afa4-dfe2777372a6\": \"a61c27cb-fb51-476f-afa4-dfe2777372a6\", \"2ccd1d06-9e56-4e1e-ae37-a43af3b5d150\": \"2ccd1d06-9e56-4e1e-ae37-a43af3b5d150\", \"4b22e8c7-849c-4cf2-9d08-760b18f887f0\": \"4b22e8c7-849c-4cf2-9d08-760b18f887f0\", \"ee900e47-6b34-4877-8acb-007f8cd10fc3\": \"ee900e47-6b34-4877-8acb-007f8cd10fc3\", \"2851c470-d262-4c8a-96fe-b2cc8b834d48\": \"2851c470-d262-4c8a-96fe-b2cc8b834d48\", \"15347b43-70a5-4af8-93d9-05aa61b906b1\": \"15347b43-70a5-4af8-93d9-05aa61b906b1\", \"12c4d4cc-ecbf-41d4-a0c0-5b5b780d527d\": \"12c4d4cc-ecbf-41d4-a0c0-5b5b780d527d\", \"2928563d-f0ae-4158-8df1-cb1020e11178\": \"2928563d-f0ae-4158-8df1-cb1020e11178\", \"3a4a1afa-2277-4757-b903-6b91c8256ff0\": \"3a4a1afa-2277-4757-b903-6b91c8256ff0\", \"456d0b06-cfe7-4135-8283-ee8658ff2f1d\": \"456d0b06-cfe7-4135-8283-ee8658ff2f1d\", \"ad368b69-0348-4238-a315-69a1df24d40d\": \"ad368b69-0348-4238-a315-69a1df24d40d\", \"55568171-8b9b-473e-acfe-e03a3323f15b\": \"55568171-8b9b-473e-acfe-e03a3323f15b\", \"8d7e39b4-9e12-43eb-b458-093faedc905e\": \"8d7e39b4-9e12-43eb-b458-093faedc905e\", \"5472cb5f-5636-4e10-9db9-9d69eced5103\": \"5472cb5f-5636-4e10-9db9-9d69eced5103\", \"e902ca6a-3e3b-4770-8c87-ba1306da8e33\": \"e902ca6a-3e3b-4770-8c87-ba1306da8e33\", \"604f0ac3-d3b6-4024-b956-24280618850a\": \"604f0ac3-d3b6-4024-b956-24280618850a\", \"986df7d3-2b20-4e14-a132-3083e095667b\": \"986df7d3-2b20-4e14-a132-3083e095667b\", \"870810eb-4fb1-4863-956a-ceebac9f6a64\": \"870810eb-4fb1-4863-956a-ceebac9f6a64\", \"2e3ab93d-4704-48ab-b536-024457323097\": \"2e3ab93d-4704-48ab-b536-024457323097\", \"dc4a587e-4113-4d0e-9f76-aa90aceab203\": 
\"dc4a587e-4113-4d0e-9f76-aa90aceab203\", \"5386fc96-83c5-43a2-9cf2-e11fdd6242b5\": \"5386fc96-83c5-43a2-9cf2-e11fdd6242b5\", \"2b046865-4ca2-4c00-a69f-d83617632e39\": \"2b046865-4ca2-4c00-a69f-d83617632e39\", \"f1bf2813-b8c3-4b03-b280-e84f91c3e9f7\": \"f1bf2813-b8c3-4b03-b280-e84f91c3e9f7\", \"6db76df1-2ef2-4fa5-8fb0-ac91d7f48c72\": \"6db76df1-2ef2-4fa5-8fb0-ac91d7f48c72\", \"6985b293-d21f-439d-8f05-46c872f69e5d\": \"6985b293-d21f-439d-8f05-46c872f69e5d\", \"5d970e4c-cda3-43e6-a43a-f0fdeee67de5\": \"5d970e4c-cda3-43e6-a43a-f0fdeee67de5\", \"205e76c3-0725-4c75-baa6-f5ac3fab135d\": \"205e76c3-0725-4c75-baa6-f5ac3fab135d\", \"09b04b3b-3035-4858-b16b-2a11bc5c9b40\": \"09b04b3b-3035-4858-b16b-2a11bc5c9b40\", \"c274f977-3edf-4821-8324-a8b010121cd2\": \"c274f977-3edf-4821-8324-a8b010121cd2\", \"43a792aa-8d04-4041-8cb6-828108ea5983\": \"43a792aa-8d04-4041-8cb6-828108ea5983\", \"746bd124-abac-4500-94fa-d4aefd6cf9a8\": \"746bd124-abac-4500-94fa-d4aefd6cf9a8\", \"598a94bc-0a29-417d-ae3f-163b5a9b2565\": \"598a94bc-0a29-417d-ae3f-163b5a9b2565\", \"ce2d4dd5-f1d9-4293-bb6e-72e614d48147\": \"ce2d4dd5-f1d9-4293-bb6e-72e614d48147\", \"0cf96eff-3453-4720-9edf-abe5237ac7b7\": \"0cf96eff-3453-4720-9edf-abe5237ac7b7\", \"f46f39f1-3dc4-486e-b19e-23f0cdf38a55\": \"f46f39f1-3dc4-486e-b19e-23f0cdf38a55\", \"5c141c0d-77c4-491c-8781-a877e876f2e2\": \"5c141c0d-77c4-491c-8781-a877e876f2e2\", \"47d9fbb6-8e04-41bd-88a0-dad0a9900d12\": \"47d9fbb6-8e04-41bd-88a0-dad0a9900d12\", \"831fe6fb-94bf-49d7-90b5-78a1e99de993\": \"831fe6fb-94bf-49d7-90b5-78a1e99de993\", \"6fac3364-c110-4a65-a24f-4d5e53a6f1c6\": \"6fac3364-c110-4a65-a24f-4d5e53a6f1c6\", \"918dcbfa-b1f9-4a35-95ba-44c840842f3f\": \"918dcbfa-b1f9-4a35-95ba-44c840842f3f\", \"255afedc-6bb0-4a8c-95d7-88a35cff8b8c\": \"255afedc-6bb0-4a8c-95d7-88a35cff8b8c\", \"5c3f46b5-ca29-441f-ba7e-32a9dca5d145\": \"5c3f46b5-ca29-441f-ba7e-32a9dca5d145\", \"c6809cd7-cd79-4952-ad66-9ab1d24e92c8\": \"c6809cd7-cd79-4952-ad66-9ab1d24e92c8\", 
\"53852912-ce8e-4f55-b97c-9230896bdb6b\": \"53852912-ce8e-4f55-b97c-9230896bdb6b\", \"84845d72-4e86-4bdf-90ae-accb01fdc348\": \"84845d72-4e86-4bdf-90ae-accb01fdc348\", \"6c2efd40-b1b3-48c4-93dd-959d162df0be\": \"6c2efd40-b1b3-48c4-93dd-959d162df0be\", \"ecd073bc-6484-45e8-9b4e-492593a0363a\": \"ecd073bc-6484-45e8-9b4e-492593a0363a\", \"60792bd0-2dbd-417f-98ce-62319743f32d\": \"60792bd0-2dbd-417f-98ce-62319743f32d\", \"fb20af6a-5036-4e00-9301-2846194b95b0\": \"fb20af6a-5036-4e00-9301-2846194b95b0\", \"ab529cde-bfb3-48ff-adbf-fb8e4bbe6f73\": \"ab529cde-bfb3-48ff-adbf-fb8e4bbe6f73\", \"9e5a5818-b5cb-4243-a111-c72fbc8511ba\": \"9e5a5818-b5cb-4243-a111-c72fbc8511ba\", \"857d6c23-dafb-43cb-acf5-4a9bcf0e8389\": \"857d6c23-dafb-43cb-acf5-4a9bcf0e8389\", \"ab9d693e-4669-4f97-ad2b-3839ebaf1baf\": \"ab9d693e-4669-4f97-ad2b-3839ebaf1baf\", \"423b5aad-8e95-417c-968c-f1813b8e0860\": \"423b5aad-8e95-417c-968c-f1813b8e0860\", \"5f8be354-1cfe-4396-937c-02da0384462b\": \"5f8be354-1cfe-4396-937c-02da0384462b\", \"c591a399-bbaf-491e-941b-550134cd9298\": \"c591a399-bbaf-491e-941b-550134cd9298\", \"92989dd1-b07a-4c23-8a49-610a6f74f8f0\": \"92989dd1-b07a-4c23-8a49-610a6f74f8f0\", \"fd42e0be-a619-4f8c-856c-1ae885dade76\": \"fd42e0be-a619-4f8c-856c-1ae885dade76\", \"e27e0f4d-b40f-4f57-b21a-8cef73678f7f\": \"e27e0f4d-b40f-4f57-b21a-8cef73678f7f\", \"57490e82-0ae4-4305-b87d-fc76ff915c4a\": \"57490e82-0ae4-4305-b87d-fc76ff915c4a\", \"451c5765-ff57-457d-9161-7f66cc8c117c\": \"451c5765-ff57-457d-9161-7f66cc8c117c\", \"af2a305e-9a4f-43f5-9597-edd664071be9\": \"af2a305e-9a4f-43f5-9597-edd664071be9\", \"0476ca73-5a65-4314-a080-53e6ff270df5\": \"0476ca73-5a65-4314-a080-53e6ff270df5\", \"e22e0645-0811-431d-aed8-d607a2c716bc\": \"e22e0645-0811-431d-aed8-d607a2c716bc\", \"59fdfa48-b47b-4631-b938-5deaa966e377\": \"59fdfa48-b47b-4631-b938-5deaa966e377\", \"db032d00-e915-46a5-ac54-ccabac7cb4a9\": \"db032d00-e915-46a5-ac54-ccabac7cb4a9\", \"3c031299-96bf-4df6-b805-f4d0404cb218\": 
\"3c031299-96bf-4df6-b805-f4d0404cb218\", \"c5fcef4a-3cba-44f9-8c2a-051a8218b7f9\": \"c5fcef4a-3cba-44f9-8c2a-051a8218b7f9\", \"4deedf34-76a9-4695-ab7d-1e60eacea654\": \"4deedf34-76a9-4695-ab7d-1e60eacea654\", \"2f43ff2b-b70c-4a0a-b56f-bb29ad347fd6\": \"2f43ff2b-b70c-4a0a-b56f-bb29ad347fd6\", \"5b3e5656-da72-4164-9579-34a355e25e4e\": \"5b3e5656-da72-4164-9579-34a355e25e4e\", \"afd56c4a-be07-43a1-8d41-ad156615dbff\": \"afd56c4a-be07-43a1-8d41-ad156615dbff\", \"d44d2870-68f1-47bc-8e6d-d69c3d48919e\": \"d44d2870-68f1-47bc-8e6d-d69c3d48919e\", \"ffb4dfb7-8952-4eb1-ad5c-f8744eb0f239\": \"ffb4dfb7-8952-4eb1-ad5c-f8744eb0f239\", \"f97237f0-b976-4876-bbb8-a66581cee794\": \"f97237f0-b976-4876-bbb8-a66581cee794\", \"02668a10-bc22-40c1-9796-5f1f49bbe387\": \"02668a10-bc22-40c1-9796-5f1f49bbe387\", \"dcb725af-e6da-44a5-b9f4-bd4d0398429d\": \"dcb725af-e6da-44a5-b9f4-bd4d0398429d\", \"a9bf3539-f2b3-4d3e-b6e8-c124fc22481a\": \"a9bf3539-f2b3-4d3e-b6e8-c124fc22481a\", \"c5c86efd-b1ce-498b-9bc8-447eab405f64\": \"c5c86efd-b1ce-498b-9bc8-447eab405f64\", \"92606e3c-caeb-43bc-b0f9-daa4d2fb9477\": \"92606e3c-caeb-43bc-b0f9-daa4d2fb9477\", \"06244ed9-eae6-40bf-aecc-6523116acc10\": \"06244ed9-eae6-40bf-aecc-6523116acc10\", \"3d90a0ad-aa08-431b-bddf-fd60ba11bd90\": \"3d90a0ad-aa08-431b-bddf-fd60ba11bd90\", \"7068fd5c-33a3-4ab7-890c-8e85173e7f95\": \"7068fd5c-33a3-4ab7-890c-8e85173e7f95\", \"05bc0393-b6a5-4862-a783-a49c385dde03\": \"05bc0393-b6a5-4862-a783-a49c385dde03\", \"5b573ca8-1da5-4edb-ad00-86ebf1098632\": \"5b573ca8-1da5-4edb-ad00-86ebf1098632\", \"4be9102f-9557-4c16-bce2-e6378d3e6abf\": \"4be9102f-9557-4c16-bce2-e6378d3e6abf\", \"af54bc85-6e6b-4518-8e2b-8ebe37029341\": \"af54bc85-6e6b-4518-8e2b-8ebe37029341\", \"acb64576-7c15-46dc-ad3b-9bf98904611f\": \"acb64576-7c15-46dc-ad3b-9bf98904611f\", \"822a7f44-42bf-44d3-a61f-9387d544d3fd\": \"822a7f44-42bf-44d3-a61f-9387d544d3fd\", \"ef4cfd39-1f13-41cf-a477-e11a2c596549\": \"ef4cfd39-1f13-41cf-a477-e11a2c596549\", 
\"96e656f5-a9d8-4343-b688-c4cd9c499ed0\": \"96e656f5-a9d8-4343-b688-c4cd9c499ed0\", \"87e2312b-4e6b-4e67-a93e-cc80e6f7a650\": \"87e2312b-4e6b-4e67-a93e-cc80e6f7a650\", \"9dee0917-4759-4672-8e8b-8cef23041650\": \"9dee0917-4759-4672-8e8b-8cef23041650\", \"22498608-e54f-462f-b5fd-9d33c5e853e3\": \"22498608-e54f-462f-b5fd-9d33c5e853e3\", \"95f71786-c3e8-4adc-b286-a8073dd15a59\": \"95f71786-c3e8-4adc-b286-a8073dd15a59\", \"f75ba1c4-a0ee-4758-8570-7adff000db70\": \"f75ba1c4-a0ee-4758-8570-7adff000db70\", \"d772e55c-4f92-4cbd-9ebe-f74a26cbe4dd\": \"d772e55c-4f92-4cbd-9ebe-f74a26cbe4dd\", \"7e37e090-4d2b-415d-92f4-346df4871277\": \"7e37e090-4d2b-415d-92f4-346df4871277\", \"8940f0fd-00f5-4569-83e8-853a13e7b354\": \"8940f0fd-00f5-4569-83e8-853a13e7b354\", \"04f8f2ca-b5b4-4acd-a4f7-da8e1e5e6464\": \"04f8f2ca-b5b4-4acd-a4f7-da8e1e5e6464\", \"4cec7112-2dad-446a-bafa-e0736952cbfe\": \"4cec7112-2dad-446a-bafa-e0736952cbfe\", \"b5171e28-a3df-4fbc-9bee-4a2d11c9d084\": \"b5171e28-a3df-4fbc-9bee-4a2d11c9d084\", \"80dcb4ab-ea99-4306-907e-912aadefc162\": \"80dcb4ab-ea99-4306-907e-912aadefc162\", \"8a0b87ae-1fa6-4554-9d14-cff73dbb48b9\": \"8a0b87ae-1fa6-4554-9d14-cff73dbb48b9\", \"2b12d339-bc03-4173-b2de-efaa6b3ca0b0\": \"2b12d339-bc03-4173-b2de-efaa6b3ca0b0\", \"ec995ebe-ae7c-4fa2-aa21-47aa82d8aff4\": \"ec995ebe-ae7c-4fa2-aa21-47aa82d8aff4\", \"4246d9aa-cc50-4237-b2ed-55c8de9fdc13\": \"4246d9aa-cc50-4237-b2ed-55c8de9fdc13\", \"a4567121-a0e8-480e-8c68-8639792bece7\": \"a4567121-a0e8-480e-8c68-8639792bece7\", \"32237403-6748-4a45-9101-fa27a6396d9e\": \"32237403-6748-4a45-9101-fa27a6396d9e\", \"50282e43-6f11-4ee6-a669-2db46dc2f40c\": \"50282e43-6f11-4ee6-a669-2db46dc2f40c\", \"d1dd1dd3-e1a3-46f1-94ea-c66d160724bd\": \"d1dd1dd3-e1a3-46f1-94ea-c66d160724bd\", \"f7db1c52-9cc5-44cf-b0b7-9ce1a07df386\": \"f7db1c52-9cc5-44cf-b0b7-9ce1a07df386\", \"a265403a-d912-43a4-8e2f-370c9ee6a52c\": \"a265403a-d912-43a4-8e2f-370c9ee6a52c\", \"0c15b9f4-3774-4ef3-a4a9-f1085c78dc40\": 
\"0c15b9f4-3774-4ef3-a4a9-f1085c78dc40\", \"d40ca92c-f945-4f53-8935-e7dbce45853a\": \"d40ca92c-f945-4f53-8935-e7dbce45853a\", \"441fe1da-2134-4d85-8fdf-1dcbbdcfd16a\": \"441fe1da-2134-4d85-8fdf-1dcbbdcfd16a\", \"d9171754-6018-44af-8131-2f011b82b434\": \"d9171754-6018-44af-8131-2f011b82b434\", \"d0aa2195-158c-45b5-a651-2f526c9da392\": \"d0aa2195-158c-45b5-a651-2f526c9da392\", \"76cb3b88-8d7e-4c38-9fd2-778837184c31\": \"76cb3b88-8d7e-4c38-9fd2-778837184c31\", \"c92dc809-5344-4964-9fdb-c1e94d0be7fd\": \"c92dc809-5344-4964-9fdb-c1e94d0be7fd\", \"1493a825-c25b-45d7-aff1-12bb1f06dd86\": \"1493a825-c25b-45d7-aff1-12bb1f06dd86\", \"044e9e32-423c-4e27-b93a-5e322ff01060\": \"044e9e32-423c-4e27-b93a-5e322ff01060\", \"8ba707cf-2b9e-4dc5-a2b1-270c87d29cc2\": \"8ba707cf-2b9e-4dc5-a2b1-270c87d29cc2\", \"7563c224-6738-453d-ba1f-bbc2e1c72a5a\": \"7563c224-6738-453d-ba1f-bbc2e1c72a5a\", \"2b181d93-efce-4a15-893e-3ae29a515a21\": \"2b181d93-efce-4a15-893e-3ae29a515a21\", \"93eb3b90-1c64-4d2c-93ad-2e678cd73ce0\": \"93eb3b90-1c64-4d2c-93ad-2e678cd73ce0\", \"55d4ce88-0196-4b42-9a05-25623d45f069\": \"55d4ce88-0196-4b42-9a05-25623d45f069\", \"46b82b8b-a87c-435c-a5e0-4be51d979c89\": \"46b82b8b-a87c-435c-a5e0-4be51d979c89\", \"a2ccb35b-a2ba-4e0e-9871-4ba522bdaf22\": \"a2ccb35b-a2ba-4e0e-9871-4ba522bdaf22\", \"693231e9-cfe7-455b-84cf-2dcb58ce0a2e\": \"693231e9-cfe7-455b-84cf-2dcb58ce0a2e\", \"94530892-9b89-4cc6-b270-02704950b505\": \"94530892-9b89-4cc6-b270-02704950b505\", \"72f6f545-7217-475a-b940-61816c09a1b0\": \"72f6f545-7217-475a-b940-61816c09a1b0\", \"1b9f3e7a-8a80-42d1-baaf-6ee9505e67e8\": \"1b9f3e7a-8a80-42d1-baaf-6ee9505e67e8\", \"217d517e-c451-424b-b5bb-562edaf49196\": \"217d517e-c451-424b-b5bb-562edaf49196\", \"60fc1e4e-dd5e-4650-ba4d-2dac0bae20fb\": \"60fc1e4e-dd5e-4650-ba4d-2dac0bae20fb\", \"67d4eb8b-d72c-4d18-9cb2-acf3747233a6\": \"67d4eb8b-d72c-4d18-9cb2-acf3747233a6\", \"ae756c20-bc09-42f8-a84d-ef5f50b1640a\": \"ae756c20-bc09-42f8-a84d-ef5f50b1640a\", 
# Define the state object passed between nodes in the graph
class AgentState(TypedDict):
    """Shared LangGraph state threaded through every node in the workflow."""

    # Annotated with operator.add so LangGraph appends new messages to the
    # running transcript instead of overwriting it on each node update.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Name of the agent node that produced the most recent message; the
    # tool node routes back to this sender after executing a tool call.
    sender: str
def create_agent(llm, tools, system_message: str):
    """Build a collaborating tool-calling agent as a prompt | LLM runnable.

    Args:
        llm: Chat model to bind the tools to.
        tools: Tools this agent may call; their names are injected into the
            system prompt so the model knows what is available.
        system_message: Agent-specific instructions appended to the shared
            collaboration preamble.

    Returns:
        A runnable (prompt piped into the tool-bound LLM) that consumes the
        graph state's ``messages`` and emits the agent's next message.
    """
    # Shared preamble for all agents; {tool_names} and {system_message}
    # are filled in below via partial().
    collaboration_preamble = (
        "You are a helpful AI assistant, collaborating with other assistants. "
        "Use the provided tools to progress towards answering the question. "
        "If you are unable to fully answer, that's OK, another assistant with different tools "
        "will help where you left off. Execute what you can to make progress. "
        "If you or any of the other assistants have the final answer or deliverable, "
        "prefix your response with FINAL ANSWER so the team knows to stop. "
        "You have access to the following tools: {tool_names}.\n{system_message}"
    )
    chat_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", collaboration_preamble),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )
    tool_name_listing = ", ".join(t.name for t in tools)
    chat_prompt = chat_prompt.partial(
        system_message=system_message,
        tool_names=tool_name_listing,
    )
    return chat_prompt | llm.bind_tools(tools)
# Router function
def router(state) -> Literal["call_tool", "__end__", "continue"]:
    """Decide the next workflow step from the most recent message.

    Returns:
        "call_tool" when the last message has pending tool calls,
        "__end__"  when the agent declared FINAL ANSWER,
        "continue" otherwise (hand off to the other agent).
    """
    latest = state["messages"][-1]
    # Pending tool calls take priority: run the tool node before anything else.
    if latest.tool_calls:
        return "call_tool"
    # An agent signals completion by embedding the sentinel phrase in its reply.
    return "__end__" if "FINAL ANSWER" in latest.content else "continue"
def initialize_settings():
    """Configure LlamaIndex global Settings to use Amazon Bedrock.

    Registers Claude 3.5 Haiku as the LLM (2000-token context window) and
    Amazon Titan Text v2 as the embedding model, both in us-west-2.
    """
    # Language model: Claude 3.5 Haiku served through Bedrock.
    Settings.llm = Bedrock(
        region_name="us-west-2",
        model="anthropic.claude-3-5-haiku-20241022-v1:0",
        context_size=2000,
    )
    # Embedding model: Amazon Titan Text Embeddings v2.
    Settings.embed_model = BedrockEmbedding(
        region_name="us-west-2",
        model="amazon.titan-embed-text-v2:0",
    )
def create_query_engine_tool(query_engine, name, description):
    """Wrap a query engine as a tool the ReActAgent can invoke.

    Args:
        query_engine: The underlying query engine to expose.
        name (str): Tool name shown to the agent.
        description (str): Tool description the agent uses to pick this tool.

    Returns:
        QueryEngineTool: The agent-callable wrapper.
    """
    tool_metadata = ToolMetadata(name=name, description=description)
    return QueryEngineTool(query_engine=query_engine, metadata=tool_metadata)
@tool
def model_download_tool(task: str) -> str:
    """
    This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub.
    It returns the name of the checkpoint.

    Args:
        task: The task for which to find the most downloaded model on the Hugging Face Hub (e.g. "text-generation").
    """
    # list_models with sort="downloads", direction=-1 yields models ordered by
    # download count descending, so the first item is the most downloaded.
    # NOTE: smolagents' @tool parses this docstring and requires a complete
    # description for every argument; the previous Args entry was truncated.
    most_downloaded_model = next(
        iter(list_models(filter=task, sort="downloads", direction=-1))
    )
    return most_downloaded_model.id
5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 
38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 17 | 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Building Agentic Workflows on AWS 2 | 3 | This workshop is designed for builders ready to create Agentic Workflows on AWS. Agentic Workflows enable large language models (LLMs) to act autonomously, performing tasks or providing assistance on behalf of users. 4 | 5 | In this workshop, you'll learn to: 6 | 7 | 1. Leverage capabilities in Amazon Bedrock to create both chatbot and non-chatbot workflows 8 | 2. Incorporate features such as code interpreter, guardrails, knowledge bases and more! 9 | 3. Use Amazon Q Developer to assist in coding and debugging 10 | 11 | ![Gen AI Stack](gen_ai_stack.png) 12 | 13 | ### Workshop Modules 14 | 15 | You will complete the following 3 modules: 16 | 17 | 1. **Build an Agentic Chatbot:** Create a Chatbot that can plan, use tools, and reflect to help with tasks. 18 | 2. 
def generate_random_15digit():
    """Return a string of 15 random decimal digits (leading zeros allowed).

    NOTE(review): appears to serve as a pseudo-unique session id for agent
    invocations — confirm with callers. Uses `random`, so it is NOT
    cryptographically secure; switch to `secrets` if unpredictability matters.

    Returns:
        str: Exactly 15 characters, each '0'-'9'.
    """
    # str.join builds the string in one pass instead of quadratic `+=` in a loop.
    return "".join(str(random.randint(0, 9)) for _ in range(15))
trace 55 | if "trace" in event: 56 | if ( 57 | "trace" in event["trace"] 58 | and "orchestrationTrace" in event["trace"]["trace"] 59 | ): 60 | trace_event = event["trace"]["trace"]["orchestrationTrace"] 61 | if "rationale" in trace_event: 62 | trace_text = trace_event["rationale"]["text"] 63 | trace_object = {"trace_type": "rationale", "text": trace_text} 64 | model_response["traces"].append(trace_object) 65 | 66 | with trace_container.expander("rationale"): 67 | st.markdown(trace_text) 68 | 69 | # for invocationInput type 70 | if "invocationInput" in trace_event: 71 | if ( 72 | "codeInterpreterInvocationInput" 73 | in trace_event["invocationInput"] 74 | ): 75 | trace_code = trace_event["invocationInput"][ 76 | "codeInterpreterInvocationInput" 77 | ]["code"] 78 | trace_object = { 79 | "trace_type": "codeInterpreter", 80 | "text": trace_code, 81 | } 82 | model_response["traces"].append(trace_object) 83 | 84 | with trace_container.expander("codeInterpreter"): 85 | st.code(trace_code) 86 | if "knowledgeBaseLookupInput" in trace_event["invocationInput"]: 87 | trace_text = trace_event["invocationInput"][ 88 | "knowledgeBaseLookupInput" 89 | ]["text"] 90 | trace_object = { 91 | "trace_type": "knowledgeBaseLookup", 92 | "text": trace_text, 93 | } 94 | model_response["traces"].append(trace_object) 95 | 96 | with trace_container.expander("knowledgeBaseLookup"): 97 | st.markdown(trace_text) 98 | 99 | if ( 100 | "actionGroupInvocationInput" 101 | in trace_event["invocationInput"] 102 | ): 103 | trace_text = trace_event["invocationInput"][ 104 | "actionGroupInvocationInput" 105 | ]["function"] 106 | trace_object = { 107 | "trace_type": "actionGroupInvocation", 108 | "text": trace_text, 109 | } 110 | model_response["traces"].append(trace_object) 111 | 112 | with trace_container.expander("actionGroupInvocation"): 113 | st.markdown(f"Calling function: {trace_text}") 114 | 115 | # for observation type 116 | if "observation" in trace_event: 117 | if ( 118 | 
"codeInterpreterInvocationOutput" 119 | in trace_event["observation"] 120 | ): 121 | if ( 122 | "executionOutput" 123 | in trace_event["observation"][ 124 | "codeInterpreterInvocationOutput" 125 | ] 126 | ): 127 | trace_resp = trace_event["observation"][ 128 | "codeInterpreterInvocationOutput" 129 | ]["executionOutput"] 130 | trace_object = { 131 | "trace_type": "observation", 132 | "text": trace_resp, 133 | } 134 | model_response["traces"].append(trace_object) 135 | 136 | with trace_container.expander("observation"): 137 | st.markdown(trace_resp) 138 | if ( 139 | "executionError" 140 | in trace_event["observation"][ 141 | "codeInterpreterInvocationOutput" 142 | ] 143 | ): 144 | trace_resp = trace_event["observation"][ 145 | "codeInterpreterInvocationOutput" 146 | ]["executionError"] 147 | trace_object = { 148 | "trace_type": "observation", 149 | "text": trace_resp, 150 | } 151 | model_response["traces"].append(trace_object) 152 | 153 | with trace_container.expander("observation"): 154 | st.error(trace_resp) 155 | 156 | if "knowledgeBaseLookupOutput" in trace_event["observation"]: 157 | # trace_text = trace_event["observation"][ 158 | # "knowledgeBaseLookupOutput" 159 | # ]["text"] 160 | trace_object = { 161 | "trace_type": "knowledgeBaseLookupOutput", 162 | "text": trace_event["observation"][ 163 | "knowledgeBaseLookupOutput" 164 | ]["retrievedReferences"], 165 | } 166 | model_response["traces"].append(trace_object) 167 | 168 | with trace_container.expander("knowledgeBaseLookupOutput"): 169 | # st.markdown(trace_text) 170 | 171 | if ( 172 | "retrievedReferences" 173 | in trace_event["observation"][ 174 | "knowledgeBaseLookupOutput" 175 | ] 176 | ): 177 | references = trace_event["observation"][ 178 | "knowledgeBaseLookupOutput" 179 | ]["retrievedReferences"] 180 | for reference in references: 181 | st.markdown( 182 | f'{reference["location"]["s3Location"]["uri"]}' 183 | ) 184 | st.markdown(f'{reference["content"]["text"]}') 185 | 186 | if 
"actionGroupInvocationOutput" in trace_event["observation"]: 187 | trace_resp = trace_event["observation"][ 188 | "actionGroupInvocationOutput" 189 | ]["text"] 190 | trace_object = { 191 | "trace_type": "observation", 192 | "text": trace_resp, 193 | } 194 | model_response["traces"].append(trace_object) 195 | 196 | with trace_container.expander("observation"): 197 | st.markdown(trace_resp) 198 | 199 | if "finalResponse" in trace_event["observation"]: 200 | trace_resp = trace_event["observation"]["finalResponse"][ 201 | "text" 202 | ] 203 | trace_object = { 204 | "trace_type": "finalResponse", 205 | "text": trace_resp, 206 | } 207 | model_response["traces"].append(trace_object) 208 | 209 | with trace_container.expander("finalResponse"): 210 | st.markdown(trace_resp) 211 | 212 | elif "guardrailTrace" in event["trace"]["trace"]: 213 | 214 | guardrail_trace = event["trace"]["trace"]["guardrailTrace"] 215 | if "inputAssessments" in guardrail_trace: 216 | assessments = guardrail_trace["inputAssessments"] 217 | for assessment in assessments: 218 | if "contentPolicy" in assessment: 219 | filters = assessment["contentPolicy"]["filters"] 220 | for filter in filters: 221 | if filter["action"] == "BLOCKED": 222 | st.error( 223 | f"Guardrail blocked {filter['type']} confidence: {filter['confidence']}" 224 | ) 225 | if "topicPolicy" in assessment: 226 | topics = assessment["topicPolicy"]["topics"] 227 | for topic in topics: 228 | if topic["action"] == "BLOCKED": 229 | st.error( 230 | f"Guardrail blocked topic {topic['name']}" 231 | ) 232 | # Handle text chunks 233 | if "chunk" in event: 234 | chunk = event["chunk"] 235 | if "bytes" in chunk: 236 | text = chunk["bytes"].decode("utf-8") 237 | print(f"Chunk: {text}") 238 | model_response["text"] += text 239 | return model_response 240 | 241 | # Handle file outputs 242 | if "files" in event: 243 | print("Files received") 244 | files = event["files"]["files"] 245 | for file in files: 246 | name = file["name"] 247 | type = file["type"] 
248 | bytes_data = file["bytes"] 249 | 250 | # Display PNG images using matplotlib 251 | if type == "image/png": 252 | 253 | # save image to disk 254 | img = plt.imread(io.BytesIO(bytes_data)) 255 | img_name = f"{IMAGE_FOLDER}/{name}" 256 | plt.imsave(img_name, img) 257 | 258 | # if image name not in images 259 | if img_name not in model_response["images"]: 260 | model_response["images"].append(img_name) 261 | print(f"Image '{name}' saved to disk.") 262 | # Save other file types to disk 263 | else: 264 | with open(name, "wb") as f: 265 | f.write(bytes_data) 266 | model_response["files"].append(name) 267 | print(f"File '{name}' saved to disk.") 268 | except Exception as e: 269 | print(f"Error processing event: {e}") 270 | continue 271 | -------------------------------------------------------------------------------- /agentic_chatbot/chatbot_st.py: -------------------------------------------------------------------------------- 1 | import agent_tools 2 | import streamlit as st 3 | from PIL import Image 4 | 5 | st.title("Amazon Bedrock Agentic Chatbot") # Title of the application 6 | 7 | 8 | st.sidebar.markdown( 9 | "This app shows an Agentic Chatbot powered by Amazon Bedrock to answer questions." 
10 | ) 11 | clear_button = st.sidebar.button("Clear Conversation", key="clear") 12 | # reset sessions state on clear 13 | if clear_button: 14 | st.session_state.messages = [] 15 | st.session_state.session_id = agent_tools.generate_random_15digit() 16 | 17 | 18 | if "messages" not in st.session_state: 19 | st.session_state.messages = [] 20 | st.session_state.session_id = agent_tools.generate_random_15digit() 21 | 22 | for message in st.session_state.messages: 23 | with st.chat_message(message["role"]): 24 | 25 | if "traces" in message: 26 | trace_container = st.container() 27 | for trace in message["traces"]: 28 | # Show an expander for each trace type 29 | with trace_container.expander(trace["trace_type"]): 30 | # If trace_type is codeInterpreter use st.code, else use st.markdown 31 | if trace["trace_type"] == "codeInterpreter": 32 | st.code(trace["text"], language="python") 33 | else: 34 | st.markdown(trace["text"]) 35 | 36 | st.markdown(message["content"][0]["text"]) 37 | # TODO show images 38 | 39 | if prompt := st.chat_input("How can I help??"): 40 | st.session_state.messages.append({"role": "user", "content": [{"text": prompt}]}) 41 | with st.chat_message("user"): 42 | st.markdown(prompt) 43 | 44 | with st.chat_message("assistant"): 45 | message_placeholder = st.empty() 46 | full_response = "" 47 | 48 | trace_container = st.container() 49 | 50 | result = agent_tools.invoke_bedrock_agent( 51 | prompt, st.session_state.session_id, trace_container 52 | ) 53 | 54 | message_placeholder.markdown(result["text"]) 55 | 56 | # TODO show images 57 | 58 | st.session_state.messages.append( 59 | { 60 | "role": "assistant", 61 | "content": [{"text": f"{full_response}"}], 62 | "images": result["images"], 63 | "traces": result["traces"], 64 | } 65 | ) 66 | -------------------------------------------------------------------------------- /agentic_chatbot/images/.gitkeep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/build-on-aws/agentic-workshop/7d7c0cd0a6607fd761537b1602cd75f7dd75d264/agentic_chatbot/images/.gitkeep -------------------------------------------------------------------------------- /agentic_workflow/agentic_tools.py: -------------------------------------------------------------------------------- 1 | import json 2 | import math 3 | import os 4 | from typing import List 5 | 6 | import boto3 7 | import utils as lambda_helpers 8 | from botocore.exceptions import ClientError 9 | 10 | # Retrieve environment variables 11 | LAMBDA_ROLE = os.environ["LAMBDA_ROLE"] 12 | S3_BUCKET = os.environ["S3_BUCKET"] 13 | REGION = "us-west-2" 14 | 15 | 16 | def initialize_clients(): 17 | """Initialize and return the AWS Bedrock, Lambda, and S3 clients.""" 18 | session = boto3.Session() 19 | bedrock = session.client(service_name="bedrock-runtime", region_name=REGION) 20 | lambda_client = session.client("lambda", region_name=REGION) 21 | s3 = session.client("s3", region_name=REGION) 22 | return bedrock, lambda_client, s3 23 | 24 | 25 | def get_tool_list(): 26 | """Define and return the tool list for the LLM to use.""" 27 | return [ 28 | { 29 | "toolSpec": { 30 | "name": "cosine", 31 | "description": "Calculate the cosine of x.", 32 | "inputSchema": { 33 | "json": { 34 | "type": "object", 35 | "properties": { 36 | "x": { 37 | "type": "number", 38 | "description": "The number to pass to the function.", 39 | } 40 | }, 41 | "required": ["x"], 42 | } 43 | }, 44 | } 45 | }, 46 | { 47 | "toolSpec": { 48 | "name": "create_lambda_function", 49 | "description": "Create and deploy a Lambda function.", 50 | "inputSchema": { 51 | "json": { 52 | "type": "object", 53 | "properties": { 54 | "code": { 55 | "type": "string", 56 | "description": "The Python code for the Lambda function.", 57 | }, 58 | "function_name": { 59 | "type": "string", 60 | "description": "The name of the Lambda function.", 61 | }, 62 | "description": { 63 | "type": "string", 64 | "description": 
"A description of the Lambda function.", 65 | }, 66 | "has_external_python_libraries": { 67 | "type": "boolean", 68 | "description": "Whether the function uses external Python libraries.", 69 | }, 70 | "external_python_libraries": { 71 | "type": "array", 72 | "items": {"type": "string"}, 73 | "description": "List of external Python libraries to include.", 74 | }, 75 | }, 76 | "required": [ 77 | "code", 78 | "function_name", 79 | "description", 80 | "has_external_python_libraries", 81 | "external_python_libraries", 82 | ], 83 | } 84 | }, 85 | } 86 | }, 87 | ] 88 | 89 | 90 | def query_llm(bedrock, messages, tools, system_prompt): 91 | """Make a request to the LLM and return the response.""" 92 | return bedrock.converse( 93 | modelId="anthropic.claude-3-5-sonnet-20241022-v2:0", 94 | messages=messages, 95 | inferenceConfig={"maxTokens": 2000, "temperature": 0}, 96 | toolConfig={"tools": tools}, 97 | system=[{"text": system_prompt}], 98 | ) 99 | 100 | 101 | def create_lambda_function( 102 | lambda_client, 103 | s3, 104 | code: str, 105 | function_name: str, 106 | description: str, 107 | has_external_python_libraries: bool, 108 | external_python_libraries: List[str], 109 | ) -> str: 110 | """ 111 | Creates and deploys a Lambda Function, based on what the customer requested. 
112 | Returns the name of the created Lambda function 113 | """ 114 | print("Creating Lambda function") 115 | runtime = "python3.12" 116 | handler = "lambda_function.handler" 117 | 118 | # Create a zip file for the code 119 | if has_external_python_libraries: 120 | zipfile = lambda_helpers.create_deployment_package_with_dependencies( 121 | code, function_name, f"{function_name}.zip", external_python_libraries 122 | ) 123 | else: 124 | zipfile = lambda_helpers.create_deployment_package_no_dependencies( 125 | code, function_name, f"{function_name}.zip" 126 | ) 127 | 128 | try: 129 | # Upload zip file 130 | zip_key = f"lambda_resources/{function_name}.zip" 131 | s3.upload_file(zipfile, S3_BUCKET, zip_key) 132 | print(f"Uploaded zip to {S3_BUCKET}/{zip_key}") 133 | 134 | response = lambda_client.create_function( 135 | Code={ 136 | "S3Bucket": S3_BUCKET, 137 | "S3Key": zip_key, 138 | }, 139 | Description=description, 140 | FunctionName=function_name, 141 | Handler=handler, 142 | Timeout=30, 143 | Publish=True, 144 | Role=LAMBDA_ROLE, 145 | Runtime=runtime, 146 | ) 147 | print("Lambda function created successfully") 148 | print(response) 149 | deployed_function = response["FunctionName"] 150 | user_response = f"The function {deployed_function} has been deployed to the customer's AWS account. I will now provide my final answer to the customer on how to invoke the {deployed_function} function with boto3 and print the result." 151 | return user_response 152 | except ClientError as e: 153 | print(e) 154 | return f"Error: {e}\n Let me try again..." 
155 | 156 | 157 | def process_llm_response(response_message, lambda_client, s3): 158 | """Process the LLM's response, handling tool usage and text output.""" 159 | response_content_blocks = response_message["content"] 160 | follow_up_content_blocks = [] 161 | 162 | for content_block in response_content_blocks: 163 | if "toolUse" in content_block: 164 | tool_use_block = content_block["toolUse"] 165 | tool_use_name = tool_use_block["name"] 166 | print(f"Using tool {tool_use_name}") 167 | if tool_use_name == "cosine": 168 | tool_result_value = math.cos(tool_use_block["input"]["x"]) 169 | print(f"Cosine result: {tool_result_value}") 170 | follow_up_content_blocks.append( 171 | { 172 | "toolResult": { 173 | "toolUseId": tool_use_block["toolUseId"], 174 | "content": [{"json": {"result": tool_result_value}}], 175 | } 176 | } 177 | ) 178 | elif tool_use_name == "create_lambda_function": 179 | result = create_lambda_function( 180 | lambda_client, 181 | s3, 182 | tool_use_block["input"]["code"], 183 | tool_use_block["input"]["function_name"], 184 | tool_use_block["input"]["description"], 185 | tool_use_block["input"]["has_external_python_libraries"], 186 | tool_use_block["input"]["external_python_libraries"], 187 | ) 188 | print(f"Lambda function creation result: {result}") 189 | follow_up_content_blocks.append( 190 | { 191 | "toolResult": { 192 | "toolUseId": tool_use_block["toolUseId"], 193 | "content": [{"json": {"result": result}}], 194 | } 195 | } 196 | ) 197 | elif "text" in content_block: 198 | print(f"LLM response: {content_block['text']}") 199 | 200 | return follow_up_content_blocks 201 | 202 | 203 | def main(): 204 | # Initialize the AWS clients 205 | bedrock, lambda_client, s3 = initialize_clients() 206 | 207 | # Get the tool list 208 | tool_list = get_tool_list() 209 | 210 | # Initialize the message list for the conversation 211 | message_list = [ 212 | { 213 | "role": "user", 214 | "content": [ 215 | { 216 | "text": "Create a Lambda function that calculates the 
factorial of a number." 217 | } 218 | ], 219 | } 220 | ] 221 | 222 | # Set the system prompt 223 | system_prompt = "You are an AI assistant capable of creating Lambda functions and performing mathematical calculations. Use the provided tools when necessary." 224 | 225 | # Make the initial request to the LLM 226 | response = query_llm(bedrock, message_list, tool_list, system_prompt) 227 | response_message = response["output"]["message"] 228 | print(json.dumps(response_message, indent=4)) 229 | message_list.append(response_message) 230 | 231 | # Process the LLM's response 232 | follow_up_content_blocks = process_llm_response(response_message, lambda_client, s3) 233 | 234 | # If there are follow-up content blocks, make another request to the LLM 235 | if follow_up_content_blocks: 236 | follow_up_message = { 237 | "role": "user", 238 | "content": follow_up_content_blocks, 239 | } 240 | message_list.append(follow_up_message) 241 | 242 | response = query_llm(bedrock, message_list, tool_list, system_prompt) 243 | response_message = response["output"]["message"] 244 | print(json.dumps(response_message, indent=4)) 245 | message_list.append(response_message) 246 | 247 | # Process the final response 248 | process_llm_response(response_message, lambda_client, s3) 249 | 250 | 251 | if __name__ == "__main__": 252 | main() 253 | -------------------------------------------------------------------------------- /agentic_workflow/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import subprocess 4 | import zipfile 5 | 6 | 7 | def create_deployment_package_no_dependencies( 8 | lambda_code, project_name, output_zip_name 9 | ): 10 | """ 11 | Create a deployment package without dependencies. 
12 | """ 13 | # Create the project directory 14 | os.makedirs(project_name, exist_ok=True) 15 | 16 | # Write the lambda code to the lambda_function.py file 17 | with open(os.path.join(project_name, "lambda_function.py"), "w") as f: 18 | f.write(lambda_code) 19 | 20 | # Create a .zip file for the deployment package 21 | with zipfile.ZipFile(output_zip_name, "w") as zipf: 22 | zipf.write( 23 | os.path.join(project_name, "lambda_function.py"), "lambda_function.py" 24 | ) 25 | 26 | # Clean up the project directory 27 | shutil.rmtree(project_name) 28 | 29 | return output_zip_name 30 | 31 | 32 | def create_deployment_package_with_dependencies( 33 | lambda_code, project_name, output_zip_name, dependencies 34 | ): 35 | """ 36 | Create a deployment package with dependencies. 37 | """ 38 | # Create the project directory 39 | os.makedirs(project_name, exist_ok=True) 40 | 41 | # Write the lambda code to the lambda_function.py file 42 | with open(os.path.join(project_name, "lambda_function.py"), "w") as f: 43 | f.write(lambda_code) 44 | 45 | # Install the dependencies to the package directory 46 | package_dir = os.path.join(project_name, "package") 47 | os.makedirs(package_dir, exist_ok=True) 48 | 49 | # Turn dependencies into a list 50 | # dependencies = dependencies.split(",") 51 | 52 | for dependency in dependencies: 53 | subprocess.run(["pip", "install", "--target", package_dir, dependency]) 54 | 55 | # Create a .zip file for the deployment package 56 | with zipfile.ZipFile(output_zip_name, "w") as zipf: 57 | # Add the installed dependencies to the .zip file 58 | for root, _, files in os.walk(package_dir): 59 | for file in files: 60 | zipf.write( 61 | os.path.join(root, file), 62 | os.path.relpath(os.path.join(root, file), package_dir), 63 | ) 64 | # Add the lambda_function.py file to the .zip file 65 | zipf.write( 66 | os.path.join(project_name, "lambda_function.py"), "lambda_function.py" 67 | ) 68 | 69 | # Clean up the project directory 70 | shutil.rmtree(project_name) 71 
| 72 | return output_zip_name 73 | -------------------------------------------------------------------------------- /gen_ai_stack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/build-on-aws/agentic-workshop/7d7c0cd0a6607fd761537b1602cd75f7dd75d264/gen_ai_stack.png -------------------------------------------------------------------------------- /lambda_function_tools/read_csv_tool.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import json 3 | import os 4 | 5 | import boto3 6 | 7 | S3_BUCKET = os.environ["S3_BUCKET"] 8 | S3_OBJECT = os.environ["S3_OBJECT"] 9 | 10 | 11 | def lambda_handler(event, context): 12 | # Print the received event to the logs 13 | print("Received event: ") 14 | print(event) 15 | 16 | # Initialize response code to None 17 | response_code = None 18 | 19 | # Extract the action group, api path, and parameters from the prediction 20 | agent = event["agent"] 21 | actionGroup = event["actionGroup"] 22 | function = event["function"] 23 | parameters = event.get("parameters", []) 24 | 25 | # Check the api path to determine which tool function to call 26 | s3 = boto3.client("s3") 27 | s3.download_file(S3_BUCKET, S3_OBJECT, "/tmp/data.csv") 28 | 29 | # Read CSV file and count rows 30 | with open("/tmp/data.csv", "r") as file: 31 | csv_reader = csv.reader(file) 32 | count = sum(1 for row in csv_reader) - 1 # Subtract 1 to exclude header row 33 | 34 | response_body = {"TEXT": {"body": str(count)}} 35 | response_code = 200 36 | 37 | # Print the response body to the logs 38 | print(f"Response body: {response_body}") 39 | 40 | # Create a dictionary containing the response details 41 | action_response = { 42 | "actionGroup": actionGroup, 43 | "function": function, 44 | "functionResponse": {"responseBody": response_body}, 45 | } 46 | 47 | # Return the list of responses as a dictionary 48 | api_response = { 49 | "messageVersion": 
event["messageVersion"], 50 | "response": action_response, 51 | } 52 | return api_response 53 | -------------------------------------------------------------------------------- /reinvent_2024_agentic/README.md: -------------------------------------------------------------------------------- 1 | # Building an AWS Solutions Architect Agentic App with Amazon Bedrock 2 | 3 | This project implements an agentic chatbot using Amazon Bedrock and AWS Lambda, capable of processing images, generating AWS architecture diagrams, and analyzing websites. 4 | 5 | The chatbot leverages various AWS services to provide a rich, interactive experience. It can answer questions, display images, generate AWS architecture diagrams based on user requests, and analyze website content. The system is built using Streamlit for the frontend and integrates seamlessly with AWS services like S3, Lambda, and Bedrock. 6 | 7 | This is the video that explains the code: 8 | https://www.youtube.com/watch?v=XPHOybnXCd4 9 | 10 | ## Repository Structure 11 | 12 | - `agent_tools.py`: Contains utility functions for the chatbot, including image processing and Bedrock agent interactions. 13 | - `chatbot_st.py`: The main Streamlit application file for the chatbot interface. 14 | - `lambda_functions/`: Directory containing Lambda function implementations: 15 | * `create_lambda_functions.py`: Creates and deploys Lambda functions dynamically. 16 | * `describe_image.py`: Generates captions for images stored in S3. 17 | * `gen_aws_diag_docker/`: Contains files for generating AWS architecture diagrams: 18 | - `diag_mapping.json`: Maps AWS service names to diagram categories. 19 | - `lambda_handler.py`: Handles the diagram generation process. 20 | * `website_to_text.py`: Extracts and processes text content from websites. 21 | - `lambda_layers/`: Scripts for creating Lambda layers: 22 | * `make_pil_layer.sh`: Creates a layer for the Pillow library. 
23 | * `make_requests_layer.sh`: Creates a layer for the Requests library. 24 | - `test.py`: Contains test code for sentiment analysis using AWS Lambda. 25 | 26 | ## Usage Instructions 27 | 28 | You will need to setup the agent in Amazon Bedrock 29 | 30 | Follow the workshop instructions, to learn how to configure the agent and create action groups. 31 | 32 | https://catalog.workshops.aws/building-agentic-workflows/en-US/chatbot-agent 33 | 34 | Once your agent and action groups are setup you can replace `AGENT_ID` in agent_tools.py with your Agent ID. 35 | 36 | ### Running the Chatbot 37 | 38 | To start the Streamlit application: 39 | 40 | ``` 41 | streamlit run agent_chatbot_st.py 42 | ``` 43 | 44 | This will launch the chatbot interface in your default web browser. 45 | 46 | ### Creating Lambda Layers 47 | 48 | To create Lambda layers for Pillow and Requests libraries: 49 | 50 | 1. Navigate to the `lambda_layers` directory: 51 | ``` 52 | cd lambda_layers 53 | ``` 54 | 55 | 2. Run the shell scripts: 56 | ``` 57 | ./make_pil_layer.sh 58 | ./make_requests_layer.sh 59 | ``` 60 | 61 | 3. Upload the resulting ZIP files (`pillow-layer.zip` and `requests-layer.zip`) to AWS Lambda as layers. 
62 | -------------------------------------------------------------------------------- /reinvent_2024_agentic/agent_chatbot_st.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import uuid 4 | from datetime import datetime 5 | from io import BytesIO 6 | 7 | import agent_tools 8 | import boto3 9 | import streamlit as st 10 | from PIL import Image 11 | 12 | # Initialize S3 client 13 | s3_client = boto3.client("s3") 14 | 15 | # Sample questions 16 | SAMPLE_QUESTIONS = [ 17 | "What are the best practices for cloud security?", 18 | "Can you draw an AWS diagram that shows an ecommerce architecture", 19 | "What are the top 5 stories from https://aws.amazon.com/blogs/aws/", 20 | "Can you create a lambda function that can do sentiment analysis on text?", 21 | ] 22 | 23 | 24 | def upload_to_s3(file_bytes, file_name): 25 | """ 26 | Upload a file to S3 and return the URL 27 | """ 28 | try: 29 | s3_client = boto3.client("s3") 30 | bucket_name = os.getenv("S3_BUCKET_NAME") 31 | 32 | # Generate a unique file name to avoid collisions 33 | timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") 34 | unique_id = str(uuid.uuid4())[:8] 35 | s3_key = f"uploaded_images/{timestamp}_{unique_id}_{file_name}" 36 | 37 | content_type = ( 38 | "image/jpeg" 39 | if file_name.lower().endswith((".jpg", ".jpeg")) 40 | else "image/png" 41 | ) 42 | 43 | s3_client.put_object( 44 | Bucket=bucket_name, Key=s3_key, Body=file_bytes, ContentType=content_type 45 | ) 46 | 47 | url = f"https://{bucket_name}.s3.amazonaws.com/{s3_key}" 48 | return url 49 | except Exception as e: 50 | st.error(f"Error uploading to S3: {str(e)}") 51 | return None 52 | 53 | 54 | def extract_and_display_s3_images(text, s3_client): 55 | """ 56 | Extract S3 URLs from text, download images, and return them for display 57 | """ 58 | s3_pattern = r"https://[\w\-\.]+\.s3\.amazonaws\.com/[\w\-\./]+" 59 | s3_urls = re.findall(s3_pattern, text) 60 | 61 | images = [] 62 | for 
url in s3_urls: 63 | try: 64 | bucket = url.split(".s3.amazonaws.com/")[0].split("//")[1] 65 | key = url.split(".s3.amazonaws.com/")[1] 66 | 67 | response = s3_client.get_object(Bucket=bucket, Key=key) 68 | image_data = response["Body"].read() 69 | 70 | image = Image.open(BytesIO(image_data)) 71 | images.append(image) 72 | 73 | except Exception as e: 74 | st.error(f"Error downloading image from S3: {str(e)}") 75 | continue 76 | 77 | return images 78 | 79 | 80 | def process_query(prompt, uploaded_file=None): 81 | """Handle the query processing and response""" 82 | # Check if there's an uploaded file 83 | image_url = None 84 | if uploaded_file is not None: 85 | # Upload the file to S3 86 | image_url = upload_to_s3(uploaded_file.getvalue(), uploaded_file.name) 87 | 88 | # If image was uploaded successfully, append it to the message 89 | if image_url: 90 | prompt = f"{prompt}\nhere is the image: {image_url}" 91 | 92 | # Add user message to chat 93 | st.session_state.messages.append( 94 | { 95 | "role": "user", 96 | "content": [{"text": prompt}], 97 | "images": [image_url] if image_url else [], 98 | } 99 | ) 100 | 101 | with st.chat_message("user"): 102 | st.markdown(prompt) 103 | 104 | # Generate and display assistant response 105 | with st.chat_message("assistant"): 106 | trace_container = st.container() 107 | 108 | result = agent_tools.invoke_bedrock_agent( 109 | prompt, st.session_state.session_id, trace_container 110 | ) 111 | 112 | st.markdown(result["text"]) 113 | 114 | if "images" in result: 115 | for image in result["images"]: 116 | if isinstance(image, str) and image.startswith("http"): 117 | st.image(image) 118 | elif isinstance(image, Image.Image): 119 | st.image(image, use_column_width=True) 120 | else: 121 | image_data = Image.open(image) 122 | st.image(image_data, use_column_width=True) 123 | 124 | st.session_state.messages.append( 125 | { 126 | "role": "assistant", 127 | "content": [{"text": f"{result['text']}"}], 128 | "images": result["images"] if 
"images" in result else [], 129 | "traces": result["traces"] if "traces" in result else [], 130 | } 131 | ) 132 | 133 | 134 | st.title("Amazon Bedrock Agentic Chatbot") 135 | st.sidebar.markdown( 136 | "This app shows an Agentic Chatbot powered by Amazon Bedrock to answer questions." 137 | ) 138 | 139 | # Add file uploader to sidebar 140 | st.sidebar.subheader("Upload Image") 141 | uploaded_file = st.sidebar.file_uploader("Choose an image", type=["png", "jpg", "jpeg"]) 142 | 143 | # Preview the uploaded image in the sidebar 144 | if uploaded_file is not None: 145 | st.image(uploaded_file, caption="Preview of uploaded image", use_column_width=True) 146 | if st.button("Clear Image"): 147 | uploaded_file = None 148 | st.rerun() 149 | 150 | clear_button = st.sidebar.button("Clear Conversation", key="clear") 151 | 152 | # Initialize session state for sample questions visibility 153 | if "show_sample_questions" not in st.session_state: 154 | st.session_state.show_sample_questions = True 155 | 156 | # Reset sessions state on clear 157 | if clear_button: 158 | st.session_state.messages = [] 159 | st.session_state.session_id = agent_tools.generate_random_15digit() 160 | st.session_state.show_sample_questions = ( 161 | True # Show sample questions again after clearing 162 | ) 163 | 164 | if "messages" not in st.session_state: 165 | st.session_state.messages = [] 166 | st.session_state.session_id = agent_tools.generate_random_15digit() 167 | 168 | # Display chat messages 169 | for message in st.session_state.messages: 170 | with st.chat_message(message["role"]): 171 | if "traces" in message: 172 | trace_container = st.container() 173 | for trace in message["traces"]: 174 | with trace_container.expander(trace["trace_type"]): 175 | if trace["trace_type"] == "codeInterpreter": 176 | st.code(trace["text"], language="python") 177 | else: 178 | st.markdown(trace["text"]) 179 | 180 | message_text = message["content"][0]["text"] 181 | st.markdown(message_text) 182 | 183 | # Display 
images in the message 184 | if "images" in message and message["images"]: 185 | for image_url in message["images"]: 186 | if image_url: # Only display if image_url is not None 187 | st.image(image_url) 188 | 189 | 190 | # Display sample questions in a 2x2 grid if they should be shown 191 | if st.session_state.show_sample_questions: 192 | st.write("Try asking one of these questions:") 193 | col1, col2 = st.columns(2) 194 | 195 | # First row 196 | if col1.button(SAMPLE_QUESTIONS[0], key="q1", use_container_width=True): 197 | st.session_state.show_sample_questions = False 198 | process_query(SAMPLE_QUESTIONS[0], uploaded_file) 199 | st.rerun() 200 | if col2.button(SAMPLE_QUESTIONS[1], key="q2", use_container_width=True): 201 | st.session_state.show_sample_questions = False 202 | process_query(SAMPLE_QUESTIONS[1], uploaded_file) 203 | st.rerun() 204 | 205 | # Second row 206 | if col1.button(SAMPLE_QUESTIONS[2], key="q3", use_container_width=True): 207 | st.session_state.show_sample_questions = False 208 | process_query(SAMPLE_QUESTIONS[2], uploaded_file) 209 | st.rerun() 210 | if col2.button(SAMPLE_QUESTIONS[3], key="q4", use_container_width=True): 211 | st.session_state.show_sample_questions = False 212 | process_query(SAMPLE_QUESTIONS[3], uploaded_file) 213 | st.rerun() 214 | 215 | # Always show the chat input 216 | if user_input := st.chat_input("How can I help??"): 217 | process_query(user_input, uploaded_file) 218 | -------------------------------------------------------------------------------- /reinvent_2024_agentic/agent_tools.py: -------------------------------------------------------------------------------- 1 | import io 2 | import json 3 | import random 4 | from io import BytesIO 5 | 6 | import boto3 7 | import matplotlib.pyplot as plt 8 | import streamlit as st 9 | from PIL import Image 10 | 11 | AGENT_ID = "REPLACE_WITH_YOUR_AGENT" 12 | REGION = "us-west-2" 13 | IMAGE_FOLDER = "images" 14 | 15 | # Initialize S3 client 16 | s3_client = boto3.client("s3") 
17 | 18 | bedrock_runtime = boto3.client( 19 | service_name="bedrock-runtime", 20 | region_name=REGION, 21 | ) 22 | 23 | bedrock_agent_runtime = boto3.client( 24 | service_name="bedrock-agent-runtime", region_name=REGION 25 | ) 26 | 27 | 28 | def generate_random_15digit(): 29 | number = "" 30 | 31 | for _ in range(15): 32 | number += str(random.randint(0, 9)) 33 | 34 | return number 35 | 36 | 37 | def download_image(url): 38 | bucket = url.split(".s3.amazonaws.com/")[0].split("//")[1] 39 | key = url.split(".s3.amazonaws.com/")[1] 40 | 41 | response = s3_client.get_object(Bucket=bucket, Key=key) 42 | image_data = response["Body"].read() 43 | 44 | image = Image.open(BytesIO(image_data)) 45 | return image 46 | 47 | 48 | def invoke_bedrock_agent(inputText, sessionId, trace_container, endSession=False): 49 | # Invoke the Bedrock agent with the given input text 50 | response = bedrock_agent_runtime.invoke_agent( 51 | agentAliasId="TSTALIASID", 52 | agentId=AGENT_ID, 53 | sessionId=sessionId, 54 | inputText=inputText, 55 | endSession=endSession, 56 | enableTrace=True, 57 | ) 58 | 59 | # Get the event stream from the response 60 | event_stream = response["completion"] 61 | 62 | model_response = {"text": "", "images": [], "files": [], "traces": []} 63 | 64 | for index, event in enumerate(event_stream): 65 | print(f"Event {index}:") 66 | print(str(event)) 67 | print("\n") 68 | 69 | try: 70 | # Check trace 71 | if "trace" in event: 72 | if ( 73 | "trace" in event["trace"] 74 | and "orchestrationTrace" in event["trace"]["trace"] 75 | ): 76 | trace_event = event["trace"]["trace"]["orchestrationTrace"] 77 | if "rationale" in trace_event: 78 | trace_text = trace_event["rationale"]["text"] 79 | trace_object = {"trace_type": "rationale", "text": trace_text} 80 | model_response["traces"].append(trace_object) 81 | 82 | with trace_container.expander("rationale"): 83 | st.markdown(trace_text) 84 | 85 | # for invocationInput type 86 | if "invocationInput" in trace_event: 87 | if ( 88 | 
"codeInterpreterInvocationInput" 89 | in trace_event["invocationInput"] 90 | ): 91 | trace_code = trace_event["invocationInput"][ 92 | "codeInterpreterInvocationInput" 93 | ]["code"] 94 | trace_object = { 95 | "trace_type": "codeInterpreter", 96 | "text": trace_code, 97 | } 98 | model_response["traces"].append(trace_object) 99 | 100 | with trace_container.expander("codeInterpreter"): 101 | st.code(trace_code) 102 | if "knowledgeBaseLookupInput" in trace_event["invocationInput"]: 103 | trace_text = trace_event["invocationInput"][ 104 | "knowledgeBaseLookupInput" 105 | ]["text"] 106 | trace_object = { 107 | "trace_type": "knowledgeBaseLookup", 108 | "text": trace_text, 109 | } 110 | model_response["traces"].append(trace_object) 111 | 112 | with trace_container.expander("knowledgeBaseLookup"): 113 | st.markdown(trace_text) 114 | 115 | if ( 116 | "actionGroupInvocationInput" 117 | in trace_event["invocationInput"] 118 | ): 119 | trace_text = trace_event["invocationInput"][ 120 | "actionGroupInvocationInput" 121 | ]["function"] 122 | trace_object = { 123 | "trace_type": "actionGroupInvocation", 124 | "text": trace_text, 125 | } 126 | model_response["traces"].append(trace_object) 127 | 128 | with trace_container.expander("actionGroupInvocation"): 129 | st.markdown(f"Calling function: {trace_text}") 130 | 131 | # for observation type 132 | if "observation" in trace_event: 133 | if ( 134 | "codeInterpreterInvocationOutput" 135 | in trace_event["observation"] 136 | ): 137 | if ( 138 | "executionOutput" 139 | in trace_event["observation"][ 140 | "codeInterpreterInvocationOutput" 141 | ] 142 | ): 143 | trace_resp = trace_event["observation"][ 144 | "codeInterpreterInvocationOutput" 145 | ]["executionOutput"] 146 | trace_object = { 147 | "trace_type": "observation", 148 | "text": trace_resp, 149 | } 150 | model_response["traces"].append(trace_object) 151 | 152 | with trace_container.expander("observation"): 153 | st.markdown(trace_resp) 154 | 155 | if ( 156 | "executionError" 
157 | in trace_event["observation"][ 158 | "codeInterpreterInvocationOutput" 159 | ] 160 | ): 161 | trace_resp = trace_event["observation"][ 162 | "codeInterpreterInvocationOutput" 163 | ]["executionError"] 164 | trace_object = { 165 | "trace_type": "observation", 166 | "text": trace_resp, 167 | } 168 | model_response["traces"].append(trace_object) 169 | 170 | with trace_container.expander("observation"): 171 | st.error(trace_resp) 172 | 173 | if "image_url" in trace_resp: 174 | print("got image") 175 | image_url = trace_resp["image_url"] 176 | # download image 177 | image = download_image(image_url) 178 | # add image to model response 179 | model_response["images"].append(image) 180 | 181 | if "knowledgeBaseLookupOutput" in trace_event["observation"]: 182 | # trace_text = trace_event["observation"][ 183 | # "knowledgeBaseLookupOutput" 184 | # ]["text"] 185 | trace_object = { 186 | "trace_type": "knowledgeBaseLookupOutput", 187 | "text": trace_event["observation"][ 188 | "knowledgeBaseLookupOutput" 189 | ]["retrievedReferences"], 190 | } 191 | model_response["traces"].append(trace_object) 192 | 193 | with trace_container.expander("knowledgeBaseLookupOutput"): 194 | # st.markdown(trace_text) 195 | 196 | if ( 197 | "retrievedReferences" 198 | in trace_event["observation"][ 199 | "knowledgeBaseLookupOutput" 200 | ] 201 | ): 202 | references = trace_event["observation"][ 203 | "knowledgeBaseLookupOutput" 204 | ]["retrievedReferences"] 205 | for reference in references: 206 | st.markdown( 207 | f'{reference["location"]["s3Location"]["uri"]}' 208 | ) 209 | st.markdown(f'{reference["content"]["text"]}') 210 | 211 | if "actionGroupInvocationOutput" in trace_event["observation"]: 212 | trace_resp = trace_event["observation"][ 213 | "actionGroupInvocationOutput" 214 | ]["text"] 215 | trace_object = { 216 | "trace_type": "observation", 217 | "text": trace_resp, 218 | } 219 | model_response["traces"].append(trace_object) 220 | 221 | with 
trace_container.expander("observation"): 222 | st.markdown(trace_resp) 223 | 224 | print("checking trace resp") 225 | print(trace_resp) 226 | 227 | # try to covnert to json 228 | try: 229 | trace_resp = trace_resp.replace("'", '"') 230 | trace_resp = json.loads(trace_resp) 231 | print("converted to json") 232 | print(trace_resp) 233 | 234 | # check if image_url is in trace_response, if it is download the image and add it to the images object of mdoel response 235 | if "image_url" in trace_resp: 236 | print("got image") 237 | image_url = trace_resp["image_url"] 238 | # download image 239 | image = download_image(image_url) 240 | # add image to model response 241 | model_response["images"].append(image) 242 | 243 | except: 244 | print("not json") 245 | pass 246 | 247 | if "finalResponse" in trace_event["observation"]: 248 | trace_resp = trace_event["observation"]["finalResponse"][ 249 | "text" 250 | ] 251 | trace_object = { 252 | "trace_type": "finalResponse", 253 | "text": trace_resp, 254 | } 255 | model_response["traces"].append(trace_object) 256 | 257 | with trace_container.expander("finalResponse"): 258 | st.markdown(trace_resp) 259 | 260 | elif "guardrailTrace" in event["trace"]["trace"]: 261 | 262 | guardrail_trace = event["trace"]["trace"]["guardrailTrace"] 263 | if "inputAssessments" in guardrail_trace: 264 | assessments = guardrail_trace["inputAssessments"] 265 | for assessment in assessments: 266 | if "contentPolicy" in assessment: 267 | filters = assessment["contentPolicy"]["filters"] 268 | for filter in filters: 269 | if filter["action"] == "BLOCKED": 270 | st.error( 271 | f"Guardrail blocked {filter['type']} confidence: {filter['confidence']}" 272 | ) 273 | if "topicPolicy" in assessment: 274 | topics = assessment["topicPolicy"]["topics"] 275 | for topic in topics: 276 | if topic["action"] == "BLOCKED": 277 | st.error( 278 | f"Guardrail blocked topic {topic['name']}" 279 | ) 280 | # Handle text chunks 281 | if "chunk" in event: 282 | chunk = 
event["chunk"] 283 | if "bytes" in chunk: 284 | text = chunk["bytes"].decode("utf-8") 285 | print(f"Chunk: {text}") 286 | model_response["text"] += text 287 | return model_response 288 | 289 | # Handle file outputs 290 | if "files" in event: 291 | print("Files received") 292 | files = event["files"]["files"] 293 | for file in files: 294 | name = file["name"] 295 | type = file["type"] 296 | bytes_data = file["bytes"] 297 | 298 | # Display PNG images using matplotlib 299 | if type == "image/png": 300 | 301 | # save image to disk 302 | img = plt.imread(io.BytesIO(bytes_data)) 303 | img_name = f"{IMAGE_FOLDER}/{name}" 304 | plt.imsave(img_name, img) 305 | 306 | # if image name not in images 307 | if img_name not in model_response["images"]: 308 | model_response["images"].append(img_name) 309 | print(f"Image '{name}' saved to disk.") 310 | # Save other file types to disk 311 | else: 312 | with open(name, "wb") as f: 313 | f.write(bytes_data) 314 | model_response["files"].append(name) 315 | print(f"File '{name}' saved to disk.") 316 | except Exception as e: 317 | print(f"Error processing event: {e}") 318 | continue 319 | -------------------------------------------------------------------------------- /reinvent_2024_agentic/lambda_functions/create_lambda_functions.py: -------------------------------------------------------------------------------- 1 | import json 2 | import math 3 | import os 4 | import shutil 5 | import subprocess 6 | import zipfile 7 | from typing import List 8 | 9 | import boto3 10 | from botocore.exceptions import ClientError 11 | 12 | # Retrieve environment variables 13 | LAMBDA_ROLE = os.environ["LAMBDA_ROLE"] 14 | S3_BUCKET = os.environ["S3_BUCKET"] 15 | REGION = "us-west-2" 16 | 17 | 18 | def initialize_clients(): 19 | """Initialize and return the AWS Bedrock, Lambda, and S3 clients.""" 20 | session = boto3.Session() 21 | bedrock = session.client(service_name="bedrock-runtime", region_name=REGION) 22 | lambda_client = session.client("lambda", 
def initialize_clients():
    """Initialize and return the AWS Bedrock runtime, Lambda, and S3 clients."""
    session = boto3.Session()
    bedrock = session.client(service_name="bedrock-runtime", region_name=REGION)
    lambda_client = session.client("lambda", region_name=REGION)
    s3 = session.client("s3", region_name=REGION)
    return bedrock, lambda_client, s3


def get_working_directory():
    """
    Return the writable working directory for the current environment.

    In AWS Lambda only /tmp is writable, so prefer it when it exists and is
    writable; otherwise fall back to the current directory.
    """
    if os.path.exists("/tmp") and os.access("/tmp", os.W_OK):
        # We're likely in AWS Lambda
        return "/tmp"
    return os.getcwd()


def create_deployment_package_no_dependencies(
    lambda_code, project_name, output_zip_name
):
    """
    Create a Lambda deployment package (zip) containing only the handler code.

    Args:
        lambda_code: Python source to write as lambda_function.py.
        project_name: Scratch directory name used while building the package.
        output_zip_name: File name of the resulting zip archive.

    Returns:
        Path of the created zip file (inside the working directory).
    """
    working_dir = get_working_directory()

    project_path = os.path.join(working_dir, project_name)
    output_zip_path = os.path.join(working_dir, output_zip_name)

    try:
        os.makedirs(project_path, exist_ok=True)

        # Write the handler source where the Lambda runtime expects it.
        lambda_file_path = os.path.join(project_path, "lambda_function.py")
        with open(lambda_file_path, "w") as f:
            f.write(lambda_code)

        with zipfile.ZipFile(output_zip_path, "w") as zipf:
            zipf.write(lambda_file_path, "lambda_function.py")  # path inside zip

        return output_zip_path

    finally:
        # Always clean up the scratch directory, even on failure.
        if os.path.exists(project_path):
            shutil.rmtree(project_path)


def create_deployment_package_with_dependencies(
    lambda_code, project_name, output_zip_name, dependencies
):
    """
    Create a Lambda deployment package (zip) bundling pip-installed dependencies.

    Args:
        lambda_code: Python source to write as lambda_function.py.
        project_name: Scratch directory name used while building the package.
        output_zip_name: File name of the resulting zip archive.
        dependencies: A list of pip requirement strings, or a single
            comma-separated string of requirements.

    Returns:
        Path of the created zip file (inside the working directory).

    Raises:
        subprocess.CalledProcessError: If any pip install fails.
    """
    import sys  # local import: only needed to locate the current interpreter

    working_dir = get_working_directory()

    project_path = os.path.join(working_dir, project_name)
    output_zip_path = os.path.join(working_dir, output_zip_name)
    package_dir = os.path.join(project_path, "package")

    try:
        os.makedirs(project_path, exist_ok=True)

        # Write the handler source where the Lambda runtime expects it.
        lambda_file_path = os.path.join(project_path, "lambda_function.py")
        with open(lambda_file_path, "w") as f:
            f.write(lambda_code)

        os.makedirs(package_dir, exist_ok=True)

        # Accept a comma-separated string as well as a list.
        if isinstance(dependencies, str):
            dependencies = dependencies.split(",")

        for dependency in dependencies:
            dependency = dependency.strip()
            if dependency:
                # Use the running interpreter's pip (`python -m pip`) so the
                # install targets the same environment as this process; a bare
                # `pip` found on PATH does not guarantee that.
                subprocess.run(
                    [
                        sys.executable,
                        "-m",
                        "pip",
                        "install",
                        "--target",
                        package_dir,
                        dependency,
                    ],
                    check=True,
                )

        with zipfile.ZipFile(output_zip_path, "w") as zipf:
            # Add the installed dependencies, preserving their layout
            # relative to package_dir.
            for root, _, files in os.walk(package_dir):
                for file in files:
                    file_path = os.path.join(root, file)
                    arc_name = os.path.relpath(file_path, package_dir)
                    zipf.write(file_path, arc_name)

            # Then the handler itself.
            zipf.write(lambda_file_path, "lambda_function.py")

        return output_zip_path

    finally:
        # Always clean up the scratch directory, even on failure.
        if os.path.exists(project_path):
            shutil.rmtree(project_path)


def get_tool_list():
    """Define and return the tool list for the LLM to use."""
    return [
        {
            "toolSpec": {
                "name": "create_lambda_function",
                "description": "Create and deploy a Lambda function.",
                "inputSchema": {
                    "json": {
                        "type": "object",
                        "properties": {
                            "code": {
                                "type": "string",
                                "description": "The Python code for the Lambda function.",
                            },
                            "function_name": {
                                "type": "string",
                                "description": "The name of the Lambda function.",
                            },
                            "description": {
                                "type": "string",
                                "description": "A description of the Lambda function.",
                            },
                            "has_external_python_libraries": {
                                "type": "boolean",
                                "description": "Whether the function uses external Python libraries.",
                            },
                            "external_python_libraries": {
                                "type": "array",
                                "items": {"type": "string"},
                                "description": "List of external Python libraries to include.",
                            },
                        },
                        "required": [
                            "code",
                            "function_name",
                            "description",
                            "has_external_python_libraries",
                            "external_python_libraries",
                        ],
                    }
                },
            }
        },
    ]


def query_llm(bedrock, messages, tools, system_prompt):
    """Make a request to the LLM and return the response."""
    return bedrock.converse(
        modelId="anthropic.claude-3-5-sonnet-20241022-v2:0",
        messages=messages,
        inferenceConfig={"maxTokens": 2000, "temperature": 0.7},
        toolConfig={"tools": tools},
        system=[{"text": system_prompt}],
    )


def create_lambda_function(
    lambda_client,
    s3,
    code: str,
    function_name: str,
    description: str,
    has_external_python_libraries: bool,
    external_python_libraries: List[str],
) -> str:
    """
    Creates and deploys a Lambda Function, based on what the customer requested.
    Returns a status message naming the deployed function, or an error message
    if the deployment failed.
    """
    print("Creating Lambda function")
    runtime = "python3.13"
    handler = "lambda_function.lambda_handler"

    # Build the deployment package. NOTE: this local used to be named
    # `zipfile`, shadowing the stdlib module imported at the top of the file;
    # renamed to zip_path to avoid the collision.
    if has_external_python_libraries:
        zip_path = create_deployment_package_with_dependencies(
            code, function_name, f"{function_name}.zip", external_python_libraries
        )
    else:
        zip_path = create_deployment_package_no_dependencies(
            code, function_name, f"{function_name}.zip"
        )

    try:
        # Upload zip file
        zip_key = f"lambda_resources/{function_name}.zip"
        s3.upload_file(zip_path, S3_BUCKET, zip_key)
        print(f"Uploaded zip to {S3_BUCKET}/{zip_key}")

        response = lambda_client.create_function(
            Code={
                "S3Bucket": S3_BUCKET,
                "S3Key": zip_key,
            },
            Description=description,
            FunctionName=function_name,
            Handler=handler,
            Timeout=30,
            Publish=True,
            Role=LAMBDA_ROLE,
            Runtime=runtime,
        )
        print("Lambda function created successfully")
        print(response)
        deployed_function = response["FunctionName"]
        user_response = f"The function {deployed_function} has been deployed to the customer's AWS account. I will now provide my final answer to the customer on how to invoke the {deployed_function} function with boto3 and print the result."
        return user_response
    except ClientError as e:
        print(e)
        return f"Error: {e}\n Let me try again..."
def process_llm_response(response_message, lambda_client, s3):
    """Process the LLM's response, handling tool usage and text output.

    Returns a list of toolResult content blocks to send back to the model
    (empty when no tool was invoked).
    """
    tool_results = []

    for content_block in response_message["content"]:
        if "toolUse" in content_block:
            tool_use = content_block["toolUse"]
            name = tool_use["name"]
            print(f"Using tool {name}")
            if name == "create_lambda_function":
                args = tool_use["input"]
                result = create_lambda_function(
                    lambda_client,
                    s3,
                    args["code"],
                    args["function_name"],
                    args["description"],
                    args["has_external_python_libraries"],
                    args["external_python_libraries"],
                )
                print(f"Lambda function creation result: {result}")
                tool_results.append(
                    {
                        "toolResult": {
                            "toolUseId": tool_use["toolUseId"],
                            "content": [{"json": {"result": result}}],
                        }
                    }
                )
        elif "text" in content_block:
            print(f"LLM response: {content_block['text']}")

    return tool_results


def lambda_function_pipeline(input_text):
    """Run one user request through the LLM, executing at most one tool round.

    Returns the full conversation (list of Bedrock Converse messages).
    """
    bedrock, lambda_client, s3 = initialize_clients()
    tool_list = get_tool_list()

    # Seed the conversation with the user's request.
    conversation = [
        {
            "role": "user",
            "content": [{"text": input_text}],
        }
    ]

    system_prompt = "You are an AI assistant capable of creating Python Lambda functions. Use the provided tools when necessary."

    # First round-trip to the model.
    response = query_llm(bedrock, conversation, tool_list, system_prompt)
    response_message = response["output"]["message"]
    print(json.dumps(response_message, indent=4))
    conversation.append(response_message)

    # Execute any requested tool calls.
    follow_up_content_blocks = process_llm_response(response_message, lambda_client, s3)
    print("THIS IS FOLLOW UP CONTEXT")
    print(follow_up_content_blocks)

    # If a tool ran, feed its result back for a final model turn.
    if follow_up_content_blocks:
        print("MAKING FOLLOW UP REQUEST")
        conversation.append(
            {
                "role": "user",
                "content": follow_up_content_blocks,
            }
        )

        response = query_llm(bedrock, conversation, tool_list, system_prompt)
        response_message = response["output"]["message"]
        print(json.dumps(response_message, indent=4))
        conversation.append(response_message)

        follow_up_content_blocks = process_llm_response(
            response_message, lambda_client, s3
        )
    print("THIS IS MESSAGE LIST")
    print(conversation)
    return conversation


def lambda_handler(event, context):
    # Print the received event to the logs
    print("Received event: ")
    print(event)

    # Extract the action group, function, and parameters from the agent event.
    actionGroup = event["actionGroup"]
    function = event.get("function", "")
    parameters = event.get("parameters", [])
    inputText = event.get("inputText", "")

    results = lambda_function_pipeline(inputText)

    response_body = {"TEXT": {"body": str(results)}}

    # Print the response body to the logs
    print(f"Response body: {response_body}")

    # Create a dictionary containing the response
details 358 | action_response = { 359 | "actionGroup": actionGroup, 360 | "function": function, 361 | "functionResponse": {"responseBody": response_body}, 362 | } 363 | 364 | # Return the list of responses as a dictionary 365 | api_response = { 366 | "messageVersion": event["messageVersion"], 367 | "response": action_response, 368 | } 369 | 370 | return api_response 371 | -------------------------------------------------------------------------------- /reinvent_2024_agentic/lambda_functions/describe_image.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import io 3 | import json 4 | from typing import Any, Dict, List, Type, Union 5 | 6 | import boto3 7 | from PIL import Image 8 | 9 | s3 = boto3.client("s3") 10 | 11 | bedrock_runtime = boto3.client( 12 | service_name="bedrock-runtime", 13 | region_name="us-west-2", 14 | ) 15 | 16 | 17 | # function to convert a PIL image to a base64 string 18 | def pil_to_base64(image, format="png"): 19 | with io.BytesIO() as buffer: 20 | image.save(buffer, format) 21 | return base64.b64encode(buffer.getvalue()).decode() 22 | 23 | 24 | def gen_image_caption(base64_string): 25 | 26 | system_prompt = """ 27 | 28 | You are an experienced AWS Solutions Architect with deep knowledge of AWS services and best practices for designing and implementing cloud architectures. Maintain a professional and consultative tone, providing clear and detailed explanations tailored for technical audiences. Your task is to describe and explain AWS architecture diagrams presented by users. Your descriptions should cover the purpose and functionality of the included AWS services, their interactions, data flows, and any relevant design patterns or best practices. 
29 | """ 30 | 31 | prompt_config = { 32 | "anthropic_version": "bedrock-2023-05-31", 33 | "max_tokens": 4096, 34 | "system": system_prompt, 35 | "messages": [ 36 | { 37 | "role": "user", 38 | "content": [ 39 | { 40 | "type": "image", 41 | "source": { 42 | "type": "base64", 43 | "media_type": "image/png", 44 | "data": base64_string, 45 | }, 46 | }, 47 | { 48 | "type": "text", 49 | "text": "Please describe the following AWS architecture diagram, explaining the purpose of each service, their interactions, and any relevant design considerations or best practices.", 50 | }, 51 | ], 52 | } 53 | ], 54 | } 55 | 56 | body = json.dumps(prompt_config) 57 | 58 | modelId = "anthropic.claude-3-5-sonnet-20241022-v2:0" 59 | accept = "application/json" 60 | contentType = "application/json" 61 | 62 | response = bedrock_runtime.invoke_model( 63 | body=body, modelId=modelId, accept=accept, contentType=contentType 64 | ) 65 | response_body = json.loads(response.get("body").read()) 66 | 67 | results = response_body.get("content")[0].get("text") 68 | return results 69 | 70 | 71 | def lambda_handler(event, context): 72 | # Print the received event to the logs 73 | print("Received event: ") 74 | print(event) 75 | 76 | # Initialize response code to None 77 | 78 | # Extract the action group, api path, and parameters from the prediction 79 | actionGroup = event["actionGroup"] 80 | function = event.get("function", "") 81 | parameters = event.get("parameters", []) 82 | inputText = event.get("inputText", "") 83 | 84 | image_url = parameters[0]["value"] 85 | 86 | # Download image from s3 87 | 88 | bucket_name = image_url.split("/")[2].split(".")[0] 89 | key = "/".join(image_url.split("/")[3:]) 90 | response = s3.get_object(Bucket=bucket_name, Key=key) 91 | image_content = response["Body"].read() 92 | # Create a PIL Image object from the image content 93 | image = Image.open(io.BytesIO(image_content)) 94 | # Convert the PIL Image object to a base64 string 95 | base64_string = pil_to_base64(image) 
96 | 97 | results = gen_image_caption(base64_string) 98 | 99 | response_body = {"TEXT": {"body": str(results)}} 100 | 101 | # Print the response body to the logs 102 | print(f"Response body: {response_body}") 103 | 104 | # Create a dictionary containing the response details 105 | action_response = { 106 | "actionGroup": actionGroup, 107 | "function": function, 108 | "functionResponse": {"responseBody": response_body}, 109 | } 110 | 111 | # Return the list of responses as a dictionary 112 | api_response = { 113 | "messageVersion": event["messageVersion"], 114 | "response": action_response, 115 | } 116 | 117 | return api_response 118 | -------------------------------------------------------------------------------- /reinvent_2024_agentic/lambda_functions/gen_aws_diag_docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM amazon/aws-lambda-python:3.13 2 | 3 | # Install system dependencies 4 | RUN dnf update -y && dnf install -y \ 5 | mesa-libGL \ 6 | libX11 \ 7 | zip \ 8 | unzip \ 9 | tar \ 10 | bzip2 \ 11 | graphviz \ 12 | && dnf clean all 13 | 14 | # Set up work directory 15 | WORKDIR /var/task 16 | 17 | # Copy application files 18 | COPY . . 
19 | 20 | # Install Python dependencies 21 | RUN pip install -r requirements.txt 22 | 23 | # Run the application 24 | CMD ["lambda_handler.lambda_handler"] -------------------------------------------------------------------------------- /reinvent_2024_agentic/lambda_functions/gen_aws_diag_docker/diag_mapping.json: -------------------------------------------------------------------------------- 1 | { 2 | "Analytics": "analytics", 3 | "Athena": "analytics", 4 | "Cloudsearch": "analytics", 5 | "CloudsearchSearchDocuments": "analytics", 6 | "DataLakeResource": "analytics", 7 | "DataPipeline": "analytics", 8 | "EMR": "analytics", 9 | "EMRCluster": "analytics", 10 | "EMREngine": "analytics", 11 | "EMREngineMaprM3": "analytics", 12 | "EMREngineMaprM5": "analytics", 13 | "EMREngineMaprM7": "analytics", 14 | "EMRHdfsCluster": "analytics", 15 | "ES": "analytics", 16 | "ElasticsearchService": "analytics", 17 | "Glue": "analytics", 18 | "GlueCrawlers": "analytics", 19 | "GlueDataCatalog": "analytics", 20 | "Kinesis": "analytics", 21 | "KinesisDataAnalytics": "analytics", 22 | "KinesisDataFirehose": "analytics", 23 | "KinesisDataStreams": "analytics", 24 | "KinesisVideoStreams": "media", 25 | "LakeFormation": "analytics", 26 | "ManagedStreamingForKafka": "analytics", 27 | "Quicksight": "analytics", 28 | "Redshift": "database", 29 | "RedshiftDenseComputeNode": "database", 30 | "RedshiftDenseStorageNode": "database", 31 | "_Analytics": "analytics", 32 | "ArVr": "ar", 33 | "Sumerian": "ar", 34 | "_Ar": "ar", 35 | "Blockchain": "blockchain", 36 | "BlockchainResource": "blockchain", 37 | "ManagedBlockchain": "blockchain", 38 | "QLDB": "database", 39 | "QuantumLedgerDatabaseQldb": "database", 40 | "_Blockchain": "blockchain", 41 | "A4B": "business", 42 | "AlexaForBusiness": "business", 43 | "BusinessApplications": "business", 44 | "Chime": "business", 45 | "Workmail": "business", 46 | "_Business": "business", 47 | "AMI": "compute", 48 | "AppRunner": "compute", 49 | 
"ApplicationAutoScaling": "compute", 50 | "AutoScaling": "management", 51 | "Batch": "compute", 52 | "Compute": "compute", 53 | "ComputeOptimizer": "compute", 54 | "EB": "compute", 55 | "EC2": "compute", 56 | "EC2Ami": "compute", 57 | "EC2AutoScaling": "compute", 58 | "EC2ContainerRegistry": "compute", 59 | "EC2ContainerRegistryImage": "compute", 60 | "EC2ContainerRegistryRegistry": "compute", 61 | "EC2ElasticIpAddress": "compute", 62 | "EC2ImageBuilder": "compute", 63 | "EC2Instance": "compute", 64 | "EC2Instances": "compute", 65 | "EC2Rescue": "compute", 66 | "EC2SpotInstance": "compute", 67 | "ECR": "compute", 68 | "ECS": "compute", 69 | "EKS": "compute", 70 | "ElasticBeanstalk": "compute", 71 | "ElasticBeanstalkApplication": "compute", 72 | "ElasticBeanstalkDeployment": "compute", 73 | "ElasticContainerService": "compute", 74 | "ElasticContainerServiceContainer": "compute", 75 | "ElasticContainerServiceService": "compute", 76 | "ElasticKubernetesService": "compute", 77 | "Fargate": "compute", 78 | "Lambda": "compute", 79 | "LambdaFunction": "compute", 80 | "Lightsail": "compute", 81 | "LocalZones": "compute", 82 | "Outposts": "compute", 83 | "SAR": "compute", 84 | "ServerlessApplicationRepository": "compute", 85 | "ThinkboxDeadline": "compute", 86 | "ThinkboxDraft": "compute", 87 | "ThinkboxFrost": "compute", 88 | "ThinkboxKrakatoa": "compute", 89 | "ThinkboxSequoia": "compute", 90 | "ThinkboxStoke": "compute", 91 | "ThinkboxXmesh": "compute", 92 | "VmwareCloudOnAWS": "compute", 93 | "Wavelength": "compute", 94 | "_Compute": "compute", 95 | "Budgets": "cost", 96 | "CostAndUsageReport": "cost", 97 | "CostExplorer": "cost", 98 | "CostManagement": "cost", 99 | "ReservedInstanceReporting": "cost", 100 | "SavingsPlans": "cost", 101 | "_Cost": "cost", 102 | "Aurora": "database", 103 | "AuroraInstance": "database", 104 | "DAX": "database", 105 | "DB": "database", 106 | "DDB": "database", 107 | "DMS": "migration", 108 | "Database": "database", 109 | 
"DatabaseMigrationService": "migration", 110 | "DatabaseMigrationServiceDatabaseMigrationWorkflow": "database", 111 | "DocumentDB": "database", 112 | "DocumentdbMongodbCompatibility": "database", 113 | "Dynamodb": "database", 114 | "DynamodbAttribute": "database", 115 | "DynamodbAttributes": "database", 116 | "DynamodbDax": "database", 117 | "DynamodbGSI": "database", 118 | "DynamodbGlobalSecondaryIndex": "database", 119 | "DynamodbItem": "database", 120 | "DynamodbItems": "database", 121 | "DynamodbTable": "database", 122 | "ElastiCache": "database", 123 | "Elasticache": "database", 124 | "ElasticacheCacheNode": "database", 125 | "ElasticacheForMemcached": "database", 126 | "ElasticacheForRedis": "database", 127 | "KeyspacesManagedApacheCassandraService": "database", 128 | "Neptune": "database", 129 | "RDS": "database", 130 | "RDSInstance": "database", 131 | "RDSMariadbInstance": "database", 132 | "RDSMysqlInstance": "database", 133 | "RDSOnVmware": "database", 134 | "RDSOracleInstance": "database", 135 | "RDSPostgresqlInstance": "database", 136 | "RDSSqlServerInstance": "database", 137 | "Timestream": "database", 138 | "_Database": "database", 139 | "CLI": "devtools", 140 | "Cloud9": "devtools", 141 | "Cloud9Resource": "devtools", 142 | "CloudDevelopmentKit": "devtools", 143 | "Codebuild": "devtools", 144 | "Codecommit": "devtools", 145 | "Codedeploy": "devtools", 146 | "Codepipeline": "devtools", 147 | "Codestar": "devtools", 148 | "CommandLineInterface": "management", 149 | "DevTools": "devtools", 150 | "DeveloperTools": "devtools", 151 | "ToolsAndSdks": "devtools", 152 | "XRay": "devtools", 153 | "_Devtools": "devtools", 154 | "CustomerEnablement": "enablement", 155 | "Iq": "enablement", 156 | "ManagedServices": "management", 157 | "ProfessionalServices": "enablement", 158 | "Support": "enablement", 159 | "_Enablement": "enablement", 160 | "Appstream20": "enduser", 161 | "DesktopAndAppStreaming": "enduser", 162 | "Workdocs": "enduser", 163 | "Worklink": 
"enduser", 164 | "Workspaces": "enduser", 165 | "_Enduser": "enduser", 166 | "Connect": "engagement", 167 | "CustomerEngagement": "engagement", 168 | "Pinpoint": "mobile", 169 | "SES": "engagement", 170 | "SimpleEmailServiceSes": "engagement", 171 | "SimpleEmailServiceSesEmail": "engagement", 172 | "_Engagement": "engagement", 173 | "GameTech": "game", 174 | "Gamelift": "game", 175 | "_Game": "game", 176 | "Client": "general", 177 | "Disk": "general", 178 | "Forums": "general", 179 | "General": "general", 180 | "GenericDatabase": "general", 181 | "GenericFirewall": "general", 182 | "GenericOfficeBuilding": "general", 183 | "GenericSDK": "general", 184 | "GenericSamlToken": "general", 185 | "InternetAlt1": "general", 186 | "InternetAlt2": "general", 187 | "InternetGateway": "network", 188 | "Marketplace": "general", 189 | "MobileClient": "general", 190 | "Multimedia": "general", 191 | "OfficeBuilding": "general", 192 | "SDK": "general", 193 | "SamlToken": "general", 194 | "SslPadlock": "general", 195 | "TapeStorage": "general", 196 | "Toolkit": "general", 197 | "TraditionalServer": "general", 198 | "User": "general", 199 | "Users": "general", 200 | "_General": "general", 201 | "ApplicationIntegration": "integration", 202 | "Appsync": "mobile", 203 | "ConsoleMobileApplication": "integration", 204 | "EventResource": "integration", 205 | "Eventbridge": "integration", 206 | "EventbridgeCustomEventBusResource": "integration", 207 | "EventbridgeDefaultEventBusResource": "integration", 208 | "EventbridgeSaasPartnerEventBusResource": "integration", 209 | "ExpressWorkflows": "integration", 210 | "MQ": "integration", 211 | "SF": "integration", 212 | "SNS": "integration", 213 | "SQS": "integration", 214 | "SimpleNotificationServiceSns": "integration", 215 | "SimpleNotificationServiceSnsEmailNotification": "integration", 216 | "SimpleNotificationServiceSnsHttpNotification": "integration", 217 | "SimpleNotificationServiceSnsTopic": "integration", 218 | "SimpleQueueServiceSqs": 
"integration", 219 | "SimpleQueueServiceSqsMessage": "integration", 220 | "SimpleQueueServiceSqsQueue": "integration", 221 | "StepFunctions": "integration", 222 | "_Integration": "integration", 223 | "FreeRTOS": "iot", 224 | "Freertos": "iot", 225 | "InternetOfThings": "iot", 226 | "Iot1Click": "iot", 227 | "IotAction": "iot", 228 | "IotActuator": "iot", 229 | "IotAlexaEcho": "iot", 230 | "IotAlexaEnabledDevice": "iot", 231 | "IotAlexaSkill": "iot", 232 | "IotAlexaVoiceService": "iot", 233 | "IotAnalytics": "iot", 234 | "IotAnalyticsChannel": "iot", 235 | "IotAnalyticsDataSet": "iot", 236 | "IotAnalyticsDataStore": "iot", 237 | "IotAnalyticsNotebook": "iot", 238 | "IotAnalyticsPipeline": "iot", 239 | "IotBank": "iot", 240 | "IotBicycle": "iot", 241 | "IotBoard": "iot", 242 | "IotButton": "iot", 243 | "IotCamera": "iot", 244 | "IotCar": "iot", 245 | "IotCart": "iot", 246 | "IotCertificate": "iot", 247 | "IotCoffeePot": "iot", 248 | "IotCore": "iot", 249 | "IotDesiredState": "iot", 250 | "IotDeviceDefender": "iot", 251 | "IotDeviceGateway": "iot", 252 | "IotDeviceManagement": "iot", 253 | "IotDoorLock": "iot", 254 | "IotEvents": "iot", 255 | "IotFactory": "iot", 256 | "IotFireTv": "iot", 257 | "IotFireTvStick": "iot", 258 | "IotGeneric": "iot", 259 | "IotGreengrass": "iot", 260 | "IotGreengrassConnector": "iot", 261 | "IotHardwareBoard": "iot", 262 | "IotHouse": "iot", 263 | "IotHttp": "iot", 264 | "IotHttp2": "iot", 265 | "IotJobs": "iot", 266 | "IotLambda": "iot", 267 | "IotLightbulb": "iot", 268 | "IotMedicalEmergency": "iot", 269 | "IotMqtt": "iot", 270 | "IotOverTheAirUpdate": "iot", 271 | "IotPolicy": "iot", 272 | "IotPolicyEmergency": "iot", 273 | "IotReportedState": "iot", 274 | "IotRule": "iot", 275 | "IotSensor": "iot", 276 | "IotServo": "iot", 277 | "IotShadow": "iot", 278 | "IotSimulator": "iot", 279 | "IotSitewise": "iot", 280 | "IotThermostat": "iot", 281 | "IotThingsGraph": "iot", 282 | "IotTopic": "iot", 283 | "IotTravel": "iot", 284 | "IotUtility": 
"iot", 285 | "IotWindfarm": "iot", 286 | "_Iot": "iot", 287 | "Chatbot": "management", 288 | "Cloudformation": "management", 289 | "CloudformationChangeSet": "management", 290 | "CloudformationStack": "management", 291 | "CloudformationTemplate": "management", 292 | "Cloudtrail": "management", 293 | "Cloudwatch": "management", 294 | "CloudwatchAlarm": "management", 295 | "CloudwatchEventEventBased": "management", 296 | "CloudwatchEventTimeBased": "management", 297 | "CloudwatchRule": "management", 298 | "Codeguru": "management", 299 | "Config": "management", 300 | "ControlTower": "management", 301 | "LicenseManager": "management", 302 | "ManagementAndGovernance": "management", 303 | "ManagementConsole": "management", 304 | "Opsworks": "management", 305 | "OpsworksApps": "management", 306 | "OpsworksDeployments": "management", 307 | "OpsworksInstances": "management", 308 | "OpsworksLayers": "management", 309 | "OpsworksMonitoring": "management", 310 | "OpsworksPermissions": "management", 311 | "OpsworksResources": "management", 312 | "OpsworksStack": "management", 313 | "Organizations": "management", 314 | "OrganizationsAccount": "management", 315 | "OrganizationsOrganizationalUnit": "management", 316 | "ParameterStore": "management", 317 | "PersonalHealthDashboard": "management", 318 | "SSM": "management", 319 | "ServiceCatalog": "management", 320 | "SystemsManager": "management", 321 | "SystemsManagerAutomation": "management", 322 | "SystemsManagerDocuments": "management", 323 | "SystemsManagerInventory": "management", 324 | "SystemsManagerMaintenanceWindows": "management", 325 | "SystemsManagerOpscenter": "management", 326 | "SystemsManagerParameterStore": "management", 327 | "SystemsManagerPatchManager": "management", 328 | "SystemsManagerRunCommand": "management", 329 | "SystemsManagerStateManager": "management", 330 | "TrustedAdvisor": "management", 331 | "TrustedAdvisorChecklist": "management", 332 | "TrustedAdvisorChecklistCost": "management", 333 | 
"TrustedAdvisorChecklistFaultTolerant": "management", 334 | "TrustedAdvisorChecklistPerformance": "management", 335 | "TrustedAdvisorChecklistSecurity": "management", 336 | "WellArchitectedTool": "management", 337 | "_Management": "management", 338 | "ElasticTranscoder": "media", 339 | "ElementalConductor": "media", 340 | "ElementalDelta": "media", 341 | "ElementalLive": "media", 342 | "ElementalMediaconnect": "media", 343 | "ElementalMediaconvert": "media", 344 | "ElementalMedialive": "media", 345 | "ElementalMediapackage": "media", 346 | "ElementalMediastore": "media", 347 | "ElementalMediatailor": "media", 348 | "ElementalServer": "media", 349 | "MediaServices": "media", 350 | "_Media": "media", 351 | "ADS": "migration", 352 | "ApplicationDiscoveryService": "migration", 353 | "CEM": "migration", 354 | "CloudendureMigration": "migration", 355 | "Datasync": "migration", 356 | "DatasyncAgent": "migration", 357 | "MAT": "migration", 358 | "MigrationAndTransfer": "migration", 359 | "MigrationHub": "migration", 360 | "SMS": "migration", 361 | "ServerMigrationService": "migration", 362 | "Snowball": "storage", 363 | "SnowballEdge": "storage", 364 | "Snowmobile": "storage", 365 | "TransferForSftp": "migration", 366 | "_Migration": "migration", 367 | "ApacheMxnetOnAWS": "ml", 368 | "AugmentedAi": "ml", 369 | "Comprehend": "ml", 370 | "DLC": "ml", 371 | "DeepLearningAmis": "ml", 372 | "DeepLearningContainers": "ml", 373 | "Deepcomposer": "ml", 374 | "Deeplens": "ml", 375 | "Deepracer": "ml", 376 | "ElasticInference": "ml", 377 | "Forecast": "ml", 378 | "FraudDetector": "ml", 379 | "Kendra": "ml", 380 | "Lex": "ml", 381 | "MachineLearning": "ml", 382 | "Personalize": "ml", 383 | "Polly": "ml", 384 | "Rekognition": "ml", 385 | "RekognitionImage": "ml", 386 | "RekognitionVideo": "ml", 387 | "Sagemaker": "ml", 388 | "SagemakerGroundTruth": "ml", 389 | "SagemakerModel": "ml", 390 | "SagemakerNotebook": "ml", 391 | "SagemakerTrainingJob": "ml", 392 | "TensorflowOnAWS": "ml", 
393 | "Textract": "ml", 394 | "Transcribe": "ml", 395 | "Translate": "ml", 396 | "_ML": "ml", 397 | "APIGateway": "network", 398 | "APIGatewayEndpoint": "network", 399 | "Amplify": "mobile", 400 | "DeviceFarm": "mobile", 401 | "Mobile": "mobile", 402 | "_Mobile": "mobile", 403 | "ALB": "network", 404 | "AppMesh": "network", 405 | "CF": "network", 406 | "CLB": "network", 407 | "ClientVpn": "network", 408 | "CloudFront": "network", 409 | "CloudFrontDownloadDistribution": "network", 410 | "CloudFrontEdgeLocation": "network", 411 | "CloudFrontStreamingDistribution": "network", 412 | "CloudMap": "network", 413 | "DirectConnect": "network", 414 | "ELB": "network", 415 | "ElasticLoadBalancing": "network", 416 | "ElbApplicationLoadBalancer": "network", 417 | "ElbClassicLoadBalancer": "network", 418 | "ElbNetworkLoadBalancer": "network", 419 | "Endpoint": "network", 420 | "GAX": "network", 421 | "GlobalAccelerator": "network", 422 | "NATGateway": "network", 423 | "NLB": "network", 424 | "Nacl": "network", 425 | "NetworkingAndContentDelivery": "network", 426 | "PrivateSubnet": "network", 427 | "Privatelink": "network", 428 | "PublicSubnet": "network", 429 | "Route53": "network", 430 | "Route53HostedZone": "network", 431 | "RouteTable": "network", 432 | "SiteToSiteVpn": "network", 433 | "TransitGateway": "network", 434 | "VPC": "network", 435 | "VPCCustomerGateway": "network", 436 | "VPCElasticNetworkAdapter": "network", 437 | "VPCElasticNetworkInterface": "network", 438 | "VPCFlowLogs": "network", 439 | "VPCPeering": "network", 440 | "VPCRouter": "network", 441 | "VPCTrafficMirroring": "network", 442 | "VpnConnection": "network", 443 | "VpnGateway": "network", 444 | "_Network": "network", 445 | "Braket": "quantum", 446 | "QuantumTechnologies": "quantum", 447 | "_Quantum": "quantum", 448 | "Robomaker": "robotics", 449 | "RobomakerCloudExtensionRos": "robotics", 450 | "RobomakerDevelopmentEnvironment": "robotics", 451 | "RobomakerFleetManagement": "robotics", 452 | 
"RobomakerSimulator": "robotics", 453 | "Robotics": "robotics", 454 | "_Robotics": "robotics", 455 | "GroundStation": "satellite", 456 | "Satellite": "satellite", 457 | "_Satellite": "satellite", 458 | "ACM": "security", 459 | "AdConnector": "security", 460 | "Artifact": "security", 461 | "CertificateAuthority": "security", 462 | "CertificateManager": "security", 463 | "CloudDirectory": "security", 464 | "CloudHSM": "security", 465 | "Cloudhsm": "security", 466 | "Cognito": "security", 467 | "DS": "security", 468 | "Detective": "security", 469 | "DirectoryService": "security", 470 | "FMS": "security", 471 | "FirewallManager": "security", 472 | "Guardduty": "security", 473 | "IAM": "security", 474 | "IAMAWSSts": "security", 475 | "IAMAccessAnalyzer": "security", 476 | "IAMPermissions": "security", 477 | "IAMRole": "security", 478 | "IdentityAndAccessManagementIam": "security", 479 | "IdentityAndAccessManagementIamAWSSts": "security", 480 | "IdentityAndAccessManagementIamAWSStsAlternate": "security", 481 | "IdentityAndAccessManagementIamAccessAnalyzer": "security", 482 | "IdentityAndAccessManagementIamAddOn": "security", 483 | "IdentityAndAccessManagementIamDataEncryptionKey": "security", 484 | "IdentityAndAccessManagementIamEncryptedData": "security", 485 | "IdentityAndAccessManagementIamLongTermSecurityCredential": "security", 486 | "IdentityAndAccessManagementIamMfaToken": "security", 487 | "IdentityAndAccessManagementIamPermissions": "security", 488 | "IdentityAndAccessManagementIamRole": "security", 489 | "IdentityAndAccessManagementIamTemporarySecurityCredential": "security", 490 | "Inspector": "security", 491 | "InspectorAgent": "security", 492 | "KMS": "security", 493 | "KeyManagementService": "security", 494 | "Macie": "security", 495 | "ManagedMicrosoftAd": "security", 496 | "RAM": "security", 497 | "ResourceAccessManager": "security", 498 | "SecretsManager": "security", 499 | "SecurityHub": "security", 500 | "SecurityHubFinding": "security", 501 | 
"SecurityIdentityAndCompliance": "security", 502 | "Shield": "security", 503 | "ShieldAdvanced": "security", 504 | "SimpleAd": "security", 505 | "SingleSignOn": "security", 506 | "WAF": "security", 507 | "WAFFilteringRule": "security", 508 | "_Security": "security", 509 | "Backup": "storage", 510 | "CDR": "storage", 511 | "CloudendureDisasterRecovery": "storage", 512 | "EBS": "storage", 513 | "EFS": "storage", 514 | "EFSInfrequentaccessPrimaryBg": "storage", 515 | "EFSStandardPrimaryBg": "storage", 516 | "ElasticBlockStoreEBS": "storage", 517 | "ElasticBlockStoreEBSSnapshot": "storage", 518 | "ElasticBlockStoreEBSVolume": "storage", 519 | "ElasticFileSystemEFS": "storage", 520 | "ElasticFileSystemEFSFileSystem": "storage", 521 | "FSx": "storage", 522 | "Fsx": "storage", 523 | "FsxForLustre": "storage", 524 | "FsxForWindowsFileServer": "storage", 525 | "MultipleVolumesResource": "storage", 526 | "S3": "storage", 527 | "S3Glacier": "storage", 528 | "S3GlacierArchive": "storage", 529 | "S3GlacierVault": "storage", 530 | "SimpleStorageServiceS3": "storage", 531 | "SimpleStorageServiceS3Bucket": "storage", 532 | "SimpleStorageServiceS3BucketWithObjects": "storage", 533 | "SimpleStorageServiceS3Object": "storage", 534 | "SnowFamilySnowballImportExport": "storage", 535 | "Storage": "storage", 536 | "StorageGateway": "storage", 537 | "StorageGatewayCachedVolume": "storage", 538 | "StorageGatewayNonCachedVolume": "storage", 539 | "StorageGatewayVirtualTapeLibrary": "storage", 540 | "_Storage": "storage" 541 | } 542 | -------------------------------------------------------------------------------- /reinvent_2024_agentic/lambda_functions/gen_aws_diag_docker/lambda_handler.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import io 3 | import json 4 | import logging 5 | import os 6 | import re 7 | import subprocess 8 | import sys 9 | import time 10 | import uuid 11 | from datetime import datetime 12 | from typing import 
def retry_with_backoff(func, *args, max_retries=3, initial_delay=1):
    """
    Retry a function with exponential backoff.

    A result is considered a failure (and retried) only when it is a tuple
    containing a None element — the convention used by diagram_tool, which
    returns (None, None) on error. Any other result is returned as-is.

    Args:
        func: Function to retry
        args: Arguments to pass to the function
        max_retries: Maximum number of retries (default: 3)
        initial_delay: Initial delay in seconds (default: 1)

    Returns:
        The first successful result, or (None, None) if all retries failed.
    """
    for attempt in range(max_retries):
        try:
            result = func(*args)
            # Fix: the original generator `(r is not None for r in result
            # if isinstance(result, tuple))` iterated `result` even when it
            # was not a tuple, raising TypeError for non-iterable results
            # and vacuously passing for non-tuple iterables. Only tuple
            # results participate in the None-check.
            if not (isinstance(result, tuple) and any(r is None for r in result)):
                return result

            # If we get here, some part of the tuple result was None.
            print(f"Attempt {attempt + 1} failed with None result")
        except Exception as e:
            print(f"Attempt {attempt + 1} failed with error: {str(e)}")

        if attempt < max_retries - 1:  # Don't sleep on the last attempt
            sleep_time = initial_delay * (2**attempt)  # Exponential backoff
            print(f"Waiting {sleep_time} seconds before retry...")
            time.sleep(sleep_time)

    return None, None  # Return None pair if all retries failed
def load_json(path_to_json: str) -> Dict[str, Any]:
    """
    Purpose:
        Load a JSON file from disk.
    Args:
        path_to_json (String): Path to json file
    Returns:
        Conf: Parsed JSON content as a dict
    Raises:
        TypeError: If the file is missing, unreadable, or not valid JSON.
    """
    try:
        # Explicit encoding: JSON files are UTF-8 by specification.
        with open(path_to_json, "r", encoding="utf-8") as config_file:
            return json.load(config_file)
    except Exception as error:
        logging.error(error)
        # Chain the original exception so the root cause (missing file vs.
        # malformed JSON) is preserved in the traceback.
        raise TypeError("Invalid JSON file") from error
def save_and_run_python_code(code: str, file_name: str = "/tmp/test_diag.py"):
    """
    Write `code` to `file_name` and execute it with the current interpreter.

    The script runs with its containing directory as the working directory —
    the diagrams library writes its output PNG to the CWD — without mutating
    this process's working directory (the original os.chdir("/tmp") was
    never undone, as its own "# go back..." comment admitted).

    Args:
        code: Python source to execute.
        file_name: Destination path for the script (default: /tmp/test_diag.py).

    Returns:
        subprocess.CompletedProcess of the successful run.

    Raises:
        Exception: If the script exits with a non-zero status; stderr is
            included in the message for easier debugging.
    """
    # Save the code to a file
    with open(file_name, "w") as file:
        file.write(code)

    # Run where the script lives so relative output files land beside it.
    work_dir = os.path.dirname(os.path.abspath(file_name))

    try:
        result = subprocess.run(
            [sys.executable, file_name],
            capture_output=True,
            text=True,
            check=True,
            cwd=work_dir,
        )
        return result
    except subprocess.CalledProcessError as e:
        print("Error occurred while running the code:")
        print(e.stdout)
        print(e.stderr)
        # Surface stderr in the exception so retry logs show the real cause.
        raise Exception(f"Error running the Python code: {e.stderr}") from e
def correct_imports(code):
    """
    Rewrite the `from diagrams.aws.<module> import ...` lines in generated
    code so every referenced AWS node class is imported from the module the
    diagrams library actually defines it in (per diag_mapping.json).

    Detection uses word-boundary matching so a short class name does not
    match inside a longer one (the original substring test made "Lambda"
    match "IotLambda", pulling in spurious imports).
    """
    # Detect all AWS services actually referenced in the code.
    detected_services = [
        service
        for service in aws_service_to_module_mapping
        if re.search(rf"\b{re.escape(service)}\b", code)
    ]

    # Group the detected services by the diagrams module that provides them.
    module_to_services = {}
    for service in detected_services:
        module = aws_service_to_module_mapping[service]
        module_to_services.setdefault(module, []).append(service)

    # Construct the corrected import lines, one per module.
    corrected_imports = [
        f"from diagrams.aws.{module} import {', '.join(services)}"
        for module, services in module_to_services.items()
    ]

    # Strip the original (possibly wrong) diagrams.aws import lines; dots
    # are escaped so the pattern matches only real import statements.
    code_without_imports = re.sub(r"from diagrams\.aws\.\S+ import .*", "", code)
    corrected_code = "\n".join(corrected_imports) + "\n" + code_without_imports

    return corrected_code.strip()
def remove_first_line(text):
    """Return `text` with its first line removed.

    Single-line input (no newline present) is returned unchanged.
    """
    _, newline, remainder = text.partition("\n")
    return remainder if newline else text
image.save(img_byte_array, format=image.format or "PNG") 339 | img_byte_array.seek(0) 340 | 341 | # Upload image to s3 342 | image_url = upload_to_s3(img_byte_array, file_name) 343 | if image_url is None: 344 | return { 345 | "messageVersion": event["messageVersion"], 346 | "response": { 347 | "actionGroup": actionGroup, 348 | "function": function, 349 | "functionResponse": { 350 | "responseBody": {"TEXT": {"body": "Error uploading to S3"}} 351 | }, 352 | }, 353 | } 354 | 355 | results = {"image_url": image_url} 356 | response_body = {"TEXT": {"body": str(results)}} 357 | 358 | # Print the response body to the logs 359 | print(f"Response body: {response_body}") 360 | 361 | # Create the response 362 | action_response = { 363 | "actionGroup": actionGroup, 364 | "function": function, 365 | "functionResponse": {"responseBody": response_body}, 366 | } 367 | 368 | api_response = { 369 | "messageVersion": event["messageVersion"], 370 | "response": action_response, 371 | } 372 | 373 | return api_response 374 | -------------------------------------------------------------------------------- /reinvent_2024_agentic/lambda_functions/gen_aws_diag_docker/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 2 | pillow 3 | diagrams -------------------------------------------------------------------------------- /reinvent_2024_agentic/lambda_functions/website_to_text.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | import boto3 5 | import requests 6 | 7 | # Can get an API KEY here: https://jina.ai/reader/ 8 | JINA_KEY = os.getenv("JINA_KEY") 9 | 10 | bedrock_runtime = boto3.client( 11 | service_name="bedrock-runtime", 12 | region_name="us-west-2", 13 | ) 14 | 15 | 16 | def process_website(input_text, website_text): 17 | 18 | prompt = f"{input_text} {website_text}" 19 | 20 | prompt_config = { 21 | "anthropic_version": "bedrock-2023-05-31", 22 | 
def lambda_handler(event, context):
    """
    Bedrock Agents action-group handler: fetch a web page as LLM-friendly
    text via the Jina Reader API, process it against the user's request with
    process_website, and return the result in the agent response envelope.

    Expects the website URL as the first entry of event["parameters"].
    """
    # Print the received event to the logs
    print("Received event: ")
    print(event)

    # Extract the action group, api path, and parameters from the prediction
    actionGroup = event["actionGroup"]
    function = event.get("function", "")
    parameters = event.get("parameters", [])
    inputText = event.get("inputText", "")

    # Guard: the agent must supply the website URL as the first parameter;
    # the original indexed parameters[0] unconditionally and raised
    # IndexError on a malformed invocation.
    if not parameters:
        return {
            "messageVersion": event["messageVersion"],
            "response": {
                "actionGroup": actionGroup,
                "function": function,
                "functionResponse": {
                    "responseBody": {
                        "TEXT": {"body": "Error: no website URL parameter provided"}
                    }
                },
            },
        }
    website_url = parameters[0]["value"]

    # Jina Reader (https://jina.ai/reader/) converts the page to plain text.
    url = f"https://r.jina.ai/{website_url}"
    headers = {"Authorization": f"Bearer {JINA_KEY}"}
    # Timeout prevents a slow site from hanging until the Lambda itself
    # times out (requests has no default timeout).
    response = requests.get(url, headers=headers, timeout=30)

    # process request
    result = process_website(inputText, response.text)

    response_body = {"TEXT": {"body": result}}

    # Print the response body to the logs
    print(f"Response body: {response_body}")

    # Create a dictionary containing the response details
    action_response = {
        "actionGroup": actionGroup,
        "function": function,
        "functionResponse": {"responseBody": response_body},
    }

    # Return the list of responses as a dictionary
    api_response = {
        "messageVersion": event["messageVersion"],
        "response": action_response,
    }

    return api_response
#!/bin/bash
# Build an AWS Lambda layer zip containing Pillow, using the official
# Lambda Python base image so the compiled wheels match the runtime.

# Create a directory for our project; abort if we cannot enter it.
mkdir -p pil-lambda-layer
cd pil-lambda-layer || exit 1

# Create Dockerfile
cat > Dockerfile << 'EOF'
FROM amazon/aws-lambda-python:3.13

# Create directory for the layer
RUN mkdir -p /opt/python

# Install Pillow
RUN pip install Pillow -t /opt/python/

# Remove unnecessary files to reduce size.
# NOTE: *.dist-info entries are DIRECTORIES, so they must be matched with
# -type d (the original -type f never matched anything, leaving the
# metadata in the layer).
RUN cd /opt/python && \
    find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true && \
    find . -type f -name "*.pyc" -delete && \
    find . -type f -name "*.pyo" -delete && \
    find . -type d -name "*.dist-info" -exec rm -rf {} + 2>/dev/null || true && \
    find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true
EOF

# Build the Docker image
echo "Building Docker image..."
docker build -t pil-lambda-layer .

# Create a container and copy the layer contents
echo "Creating container and extracting layer..."
docker create --name temp_container pil-lambda-layer
mkdir -p python
docker cp temp_container:/opt/python/. python/
docker rm temp_container

# Create the ZIP file
echo "Creating ZIP file..."
zip -r pillow-layer.zip python/

# Clean up
echo "Cleaning up..."
rm -rf python

echo "Layer has been created as pillow-layer.zip"
echo "You can now upload this to AWS Lambda as a layer"
#!/bin/bash
# Build an AWS Lambda layer zip containing the requests library, using the
# official Lambda Python base image so the layer matches the runtime.

# Create a directory for our project; abort if we cannot enter it.
mkdir -p requests-lambda-layer
cd requests-lambda-layer || exit 1

# Create Dockerfile
cat > Dockerfile << 'EOF'
FROM amazon/aws-lambda-python:3.13

# Create directory for the layer
RUN mkdir -p /opt/python

# Install Requests
RUN pip install requests -t /opt/python/

# Remove unnecessary files to reduce size.
# NOTE: *.dist-info entries are DIRECTORIES, so they must be matched with
# -type d (the original -type f never matched anything, leaving the
# metadata in the layer).
RUN cd /opt/python && \
    find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true && \
    find . -type f -name "*.pyc" -delete && \
    find . -type f -name "*.pyo" -delete && \
    find . -type d -name "*.dist-info" -exec rm -rf {} + 2>/dev/null || true && \
    find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true
EOF

# Build the Docker image
echo "Building Docker image..."
docker build -t requests-lambda-layer .

# Create a container and copy the layer contents
echo "Creating container and extracting layer..."
docker create --name temp_container requests-lambda-layer
mkdir -p python
docker cp temp_container:/opt/python/. python/
docker rm temp_container

# Create the ZIP file
echo "Creating ZIP file..."
zip -r requests-layer.zip python/

# Clean up
echo "Cleaning up..."
rm -rf python

echo "Layer has been created as requests-layer.zip"
echo "You can now upload this to AWS Lambda as a layer"
# Use the native inference API to send a text message to Meta Llama 3.
"""Invoke a SageMaker JumpStart endpoint through the Bedrock Runtime API.

Sends a single Llama-3-formatted prompt and prints the generated text.
`model_id` must be replaced with the real SageMaker endpoint ARN before use.
"""

import json

import boto3
from botocore.exceptions import ClientError

# Create a Bedrock Runtime client in the AWS Region of your choice.
client = boto3.client("bedrock-runtime", region_name="us-west-2")

# Set the model ID
# Should be arn:aws:sagemaker:us-east-1:ACCOUNT_ID:endpoint/jumpstart-dft-deepseek-llm-r1-disti-20250207-153847
model_id = "REPLACE_WITH_ARN"

# Define the prompt for the model.
prompt = "Describe the purpose of a 'hello world' program in one line."

# Embed the prompt in Llama 3's instruction format.
# (The special <|...|> tokens delimit the user turn and cue the assistant.)
formatted_prompt = f"""
<|begin_of_text|><|start_header_id|>user<|end_header_id|>
{prompt}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
"""

# Format the request payload using the model's native structure.
native_request = {
    "prompt": formatted_prompt,
    "max_gen_len": 512,
    "temperature": 0.5,
}

# Convert the native request to JSON.
request = json.dumps(native_request)

try:
    # Invoke the model with the request.
    response = client.invoke_model(modelId=model_id, body=request)

# NOTE(review): ClientError is already a subclass of Exception, so this
# tuple is redundant — `except Exception` alone would behave identically.
except (ClientError, Exception) as e:
    print(f"ERROR: Can't invoke '{model_id}'. Reason: {e}")
    exit(1)

# Decode the response body.
model_response = json.loads(response["body"].read())

# Extract and print the response text.
# Name of the SageMaker AI real-time endpoint hosting the DeepSeek-R1
# distilled Llama model (deployed via JumpStart).
MODEL_ENDPOINT = "jumpstart-dft-deepseek-llm-r1-disti-20250306-141311"

# CrewAI LLM wrapper that routes inference to the SageMaker endpoint above.
deepseek_llama = LLM(
    model=f"sagemaker/{MODEL_ENDPOINT}",
    temperature=0.7,
    max_tokens=4096,
)

# Amazon Bedrock Nova model tiers, in increasing capability (and cost):
# micro < lite < pro. Agents below pick the tier matching their workload.
bedrock_MICRO_nova = LLM(model="bedrock/us.amazon.nova-micro-v1:0")
bedrock_LITE_nova = LLM(model="bedrock/us.amazon.nova-lite-v1:0")
bedrock_PRO_nova = LLM(model="bedrock/us.amazon.nova-pro-v1:0")
31 | ) 32 | 33 | def _run(self, symbol: str) -> str: 34 | """Fetches stock data from Yahoo Finance.""" 35 | try: 36 | stock = yf.Ticker(symbol) 37 | info = stock.info 38 | 39 | return ( 40 | f"Symbol: {symbol}\n" 41 | f"Name: {info.get('longName', 'N/A')}\n" 42 | f"Current Price: {info.get('regularMarketPrice', 'N/A')}\n" 43 | f"52-Week High: {info.get('fiftyTwoWeekHigh', 'N/A')}\n" 44 | f"52-Week Low: {info.get('fiftyTwoWeekLow', 'N/A')}\n" 45 | f"Market Cap: {info.get('marketCap', 'N/A')}\n" 46 | f"Dividend Yield: {info.get('dividendYield', 'N/A')}\n" 47 | ) 48 | except Exception as e: 49 | return f"Error fetching data: {str(e)}" 50 | 51 | 52 | # Instantiate the tool 53 | yahoo_finance_tool = YahooFinanceTool() 54 | 55 | 56 | class InvestmentReportPDF(FPDF): 57 | def __init__(self, symbol): 58 | super().__init__() 59 | self.symbol = symbol 60 | 61 | def header(self): 62 | self.image(os.path.join("images", "CatCapital.png"), 10, 8, 33) 63 | self.set_font("Arial", "B", 16) 64 | self.cell(0, 10, f"{self.symbol} Investment Analysis Report", align="C") 65 | self.ln(20) 66 | 67 | def footer(self): 68 | self.set_y(-15) 69 | self.set_font("Arial", "I", 8) 70 | self.cell(0, 10, f"Page {self.page_no()}", align="C") 71 | 72 | 73 | class PDFReportTool(BaseTool): 74 | name: str = "PDF Report Generator" 75 | description: str = "Generates a professionally formatted PDF investment report" 76 | 77 | def format_text(self, line: str) -> tuple: 78 | """Determine text format based on line prefix""" 79 | if line.startswith("# "): 80 | return ("h1", line[2:], "Arial", "B", 16, True) 81 | elif line.startswith("## "): 82 | return ("h2", line[3:], "Arial", "B", 14, True) 83 | elif line.startswith("### "): 84 | return ("h3", line[4:], "Arial", "B", 12, False) 85 | return ("body", line, "Arial", "", 11, False) 86 | 87 | def _run(self, content: str, symbol: str = "STOCK") -> str: 88 | try: 89 | pdf = InvestmentReportPDF(symbol) 90 | pdf.add_page() 91 | 92 | # Process content in a single pass 
# Agent 1: data retrieval — uses the cheap Nova Lite model, since its only
# job is to call the Yahoo Finance tool and relay raw metrics.
data_collector = Agent(
    role="Stock Data Collector",
    goal="Retrieve stock and ETF data from Yahoo Finance.",
    backstory="""
    A data retrieval bot specialized in fetching stock market data.
    """,
    tools=[yahoo_finance_tool],
    llm=bedrock_LITE_nova,
    verbose=True,
)


# Agent 2: analysis — runs on the SageMaker-hosted DeepSeek distilled Llama
# model, which does the heavier reasoning over the fetched data.
financial_analyst = Agent(
    role="Financial Analysis Bot",
    goal="Analyze financial data and provide investment insights.",
    backstory="A talented financial analysis bot producing structured insights for a hedge fund.",
    llm=deepseek_llama,
    verbose=True,
)

# Report Writer Agent (Uses AWS Bedrock NOVA)
report_writer = Agent(
    role="Hedge Fund Report Writer",
    goal="Generate professional reports summarizing financial insights.",
    backstory="An AI-powered financial writer producing hedge fund reports.",
    llm=bedrock_PRO_nova,  # Uses AWS Bedrock NOVA for text generation
    verbose=True,
)
    agent=data_collector,
    tools=[yahoo_finance_tool],
)

# Task 2: Analyze Stock Data
# Consumes fetch_stock_task's output via `context`; the long description is
# the analyst agent's full prompt.
analyze_stock_task = Task(
    description="""
    You are an expert financial analyst tasked with analyzing stock market data and providing insights. Your analysis should be thorough, data-driven, and actionable.

    Your task is to analyze this data and provide a comprehensive report. Follow these steps:

    1. Review the data carefully, noting any patterns, anomalies, or significant trends.
    2. Conduct a detailed analysis, considering factors such as price movements, trading volumes, market capitalization changes, and any other relevant metrics.
    3. Identify potential causes for notable changes or patterns in the data.
    4. Develop actionable insights and recommendations based on your analysis.

    Before providing your final report, think about the following:
    - List out key data points and metrics from the stock data.
    - Identify potential patterns or anomalies by comparing different time periods or metrics.
    - Consider both bullish and bearish arguments based on the data.
    It's OK for this section to be quite long.

    After your analysis, provide a structured report with the following sections:

    1. Overview: A brief summary of the key findings from your analysis.
    2. Key Metrics: Important statistical measures and their implications.
    3. Trends: Significant patterns or movements observed in the data.
    4. Recommendations: Actionable insights for investors or stakeholders based on your analysis.

    Use appropriate headers for each section of your report.

    Remember to base all your conclusions and recommendations solely on the provided data. Do not introduce external information or assumptions unless explicitly stated in the data.

    Please begin your analysis now.
    """,
    expected_output="A structured financial analysis including risk assessment and investment potential.",
    agent=financial_analyst,
    context=[fetch_stock_task],
)


# Task 3: write the final report and export it as a PDF (uses the PDF tool).
# NOTE(review): "conduct your analysis inside tags" below reads as if an XML
# tag name (e.g. <analysis>) was lost in formatting — confirm against the
# original prompt before relying on it.
generate_report_task = Task(
    description=f"""
    You are a professional financial analyst tasked with creating a hedge fund investment report. Your report will be based on thorough financial analysis and will be saved as a PDF document using the PDF Report Generator tool.

    Here is the stock symbol for the investment report:

    {{symbol}}


    Please follow these steps to complete your task:

    1. Conduct a comprehensive financial analysis of the stock with the given symbol.
    2. Write a detailed investment report based on your analysis.
    3. Generate a PDF document of your report.
    4. Save the PDF document with a filename that includes the stock symbol.

    Before writing the report, conduct your analysis inside tags. Include the following steps:

    1. List and interpret key financial metrics (e.g., P/E ratio, EPS growth, debt-to-equity ratio).
    2. Perform a comparative analysis with industry peers.
    3. Outline the company's financial strengths and weaknesses.
    4. Consider both bull and bear arguments for the stock.
    5. Summarize your key findings and overall financial health assessment.

    This structured analysis will ensure a thorough and well-reasoned report.

    Your investment report should include the following sections:

    1. Executive Summary
    2. Company Overview
    3. Industry Analysis
    4. Financial Performance Analysis
    5. Valuation
    6. Risk Assessment
    7. Investment Recommendation
    8. Conclusion

    Please ensure that your report is comprehensive, well-structured, and provides valuable insights for hedge fund investment decisions.
230 | 231 | After completing your analysis and report, use the PDF Report Generator tool to create the PDF document. The filename of the PDF should follow this format: "Hedge_Fund_Report_.pdf", where is replaced with the actual stock symbol provided. 232 | 233 | Begin your analysis now, followed by the investment report. 234 | """, 235 | expected_output="A professional hedge fund investment report saved as PDF with the stock symbol in the filename.", 236 | agent=report_writer, 237 | tools=[pdf_report_tool], 238 | context=[analyze_stock_task], 239 | ) 240 | 241 | hedge_fund_crew = Crew( 242 | agents=[data_collector, financial_analyst, report_writer], 243 | tasks=[fetch_stock_task, analyze_stock_task, generate_report_task], 244 | process=Process.sequential, # Ensures tasks run in order 245 | ) 246 | 247 | 248 | result = hedge_fund_crew.kickoff( 249 | inputs={"symbol": "AMZN"} 250 | ) # Only provide initial input 251 | print(result) 252 | -------------------------------------------------------------------------------- /sagemaker_ai/images/CatCapital.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/build-on-aws/agentic-workshop/7d7c0cd0a6607fd761537b1602cd75f7dd75d264/sagemaker_ai/images/CatCapital.png -------------------------------------------------------------------------------- /strands_agents/mcp_docs_diag.py: -------------------------------------------------------------------------------- 1 | from mcp import StdioServerParameters, stdio_client 2 | from strands import Agent 3 | from strands.models import BedrockModel 4 | from strands.tools.mcp import MCPClient 5 | 6 | aws_docs_client = MCPClient( 7 | lambda: stdio_client( 8 | StdioServerParameters( 9 | command="uvx", args=["awslabs.aws-documentation-mcp-server@latest"] 10 | ) 11 | ) 12 | ) 13 | 14 | aws_diag_client = MCPClient( 15 | lambda: stdio_client( 16 | StdioServerParameters( 17 | command="uvx", 
args=["awslabs.aws-diagram-mcp-server@latest"] 18 | ) 19 | ) 20 | ) 21 | 22 | 23 | bedrock_model = BedrockModel( 24 | model_id="us.anthropic.claude-3-5-haiku-20241022-v1:0", 25 | temperature=0.7, 26 | ) 27 | 28 | SYSTEM_PROMPT = """ 29 | You are an expert AWS Certified Solutions Architect. Your role is to help customers understand best practices on building on AWS. You can querying the AWS Documentation and generate diagrams. Make sure to tell the customer the full file path of the diagram. 30 | """ 31 | 32 | with aws_diag_client, aws_docs_client: 33 | all_tools = aws_diag_client.list_tools_sync() + aws_docs_client.list_tools_sync() 34 | agent = Agent(tools=all_tools, model=bedrock_model, system_prompt=SYSTEM_PROMPT) 35 | 36 | response = agent( 37 | "Get the documentation for AWS Lambda then create a diagram of a website that uses AWS Lambda for a static website hosted on S3" 38 | ) 39 | -------------------------------------------------------------------------------- /strands_agents/multi_agent_ppt.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from mcp import StdioServerParameters, stdio_client 4 | from strands import Agent, tool 5 | from strands.models import BedrockModel 6 | from strands.tools.mcp import MCPClient 7 | 8 | aws_docs_client = MCPClient( 9 | lambda: stdio_client( 10 | StdioServerParameters( 11 | command="uvx", args=["awslabs.aws-documentation-mcp-server@latest"] 12 | ) 13 | ) 14 | ) 15 | 16 | aws_diag_client = MCPClient( 17 | lambda: stdio_client( 18 | StdioServerParameters( 19 | command="uvx", args=["awslabs.aws-diagram-mcp-server@latest"] 20 | ) 21 | ) 22 | ) 23 | 24 | # Cost Analysis MCP Client 25 | cost_analysis_client = MCPClient( 26 | lambda: stdio_client( 27 | StdioServerParameters( 28 | command="uvx", args=["awslabs.cost-analysis-mcp-server@latest"] 29 | ) 30 | ) 31 | ) 32 | 33 | # PowerPoint MCP Client 34 | ppt_client = MCPClient( 35 | lambda: stdio_client( 36 | 
        StdioServerParameters(
            command="uvx",
            args=["--from", "office-powerpoint-mcp-server", "ppt_mcp_server"],
        )
    )
)


bedrock_model = BedrockModel(
    model_id="us.anthropic.claude-3-5-haiku-20241022-v1:0",
    # model_id="us.anthropic.claude-sonnet-4-20250514-v1:0",
    temperature=0.7,
)

# System prompt for the cost-analysis tool agent.
COST_ANALYSIS_AGENT_PROMPT = """
You are a cost analysis specialist with expertise in:
- Analyzing AWS cost structures and pricing models
- Performing detailed cost projections and optimization recommendations
- Creating cost comparison scenarios for migration planning
- Identifying cost-saving opportunities across AWS services
- Building cost monitoring and alerting strategies
- Analyzing Reserved Instance and Savings Plan opportunities
- Providing detailed cost breakdowns by service, region, and usage patterns
Use the cost analysis tools to provide accurate financial projections and optimization strategies.
"""

# System prompt for the solutions-architect tool agent.
SA_AGENT_PROMPT = """
You are an AWS Certified Solutions Architect with expertise in:
- Creating detailed architecture diagrams using AWS services
- Performing comprehensive cost analysis and optimization
- Writing technical documentation and runbooks
- Analyzing security and compliance requirements
- Designing for high availability, fault tolerance, and disaster recovery
Use the AWS documentation and diagram tools to create accurate, professional deliverables.
"""


# NOTE(review): the @tool docstring presumably doubles as the tool description
# surfaced to the orchestrating model — confirm before rewording it.
@tool
def cost_analysis_specialist(query: str) -> str:
    """
    Analyze costs and create financial projections for migration.
    This tool agent specializes in AWS cost analysis and optimization strategies.
78 | """ 79 | with aws_docs_client, cost_analysis_client: 80 | all_tools = ( 81 | aws_docs_client.list_tools_sync() + cost_analysis_client.list_tools_sync() 82 | ) 83 | cost_agent = Agent( 84 | system_prompt=COST_ANALYSIS_AGENT_PROMPT, 85 | tools=all_tools, 86 | model=bedrock_model, 87 | ) 88 | return str(cost_agent(query)) 89 | 90 | 91 | @tool 92 | def presentation_creator(query: str) -> str: 93 | """ 94 | Create executive presentations with PowerPoint. 95 | This tool agent specializes in creating professional presentations. 96 | """ 97 | with ppt_client: 98 | ppt_agent = Agent( 99 | system_prompt="""You create professional PowerPoint presentations for executive audiences. 100 | Focus on clear visualizations, key metrics, and strategic recommendations. 101 | Use charts, diagrams, and bullet points effectively.""", 102 | tools=ppt_client.list_tools_sync(), 103 | model=bedrock_model, 104 | ) 105 | return str(ppt_agent(query)) 106 | 107 | 108 | @tool 109 | def architecture_analyst(query: str) -> str: 110 | """ 111 | Create architecture diagrams and perform cost analysis. 112 | This tool agent specializes in AWS architecture design and cost optimization. 113 | """ 114 | with aws_docs_client, aws_diag_client: 115 | all_tools = ( 116 | aws_docs_client.list_tools_sync() + aws_diag_client.list_tools_sync() 117 | ) 118 | sa_agent = Agent( 119 | system_prompt=SA_AGENT_PROMPT, tools=all_tools, model=bedrock_model 120 | ) 121 | response = sa_agent(query) 122 | # Extract diagram path if created 123 | if "diagram" in str(response).lower(): 124 | return f"{response}\n\nNote: Check the output for the diagram file path." 125 | return str(response) 126 | 127 | 128 | def create_migration_orchestrator(): 129 | """ 130 | Create the main orchestrator agent for cloud migration planning. 131 | This orchestrator coordinates all specialized tool agents to deliver 132 | a comprehensive migration plan. 
    """

    # Orchestrator prompt: names the three wrapped agents exactly as the
    # tool functions are named so the model can call them.
    MIGRATION_ORCHESTRATOR_PROMPT = """
    You are a Cloud Migration Coordinator orchestrating a comprehensive migration plan to AWS.

    Your role is to:
    1. Analyze the migration requirements
    2. Delegate specific tasks to specialized agents
    3. Synthesize outputs into a cohesive migration strategy

    Available tool agents:
    - architecture_analyst: For diagrams and architectural design
    - cost_analysis_specialist: For cost analysis and financial projections
    - presentation_creator: For executive presentations

    For migration projects, follow this process:
    1. Use architecture_analyst to create diagrams and architectural documentation
    2. Use cost_analysis_specialist to analyze costs and create financial projections
    3. Use presentation_creator to build an executive presentation

    Always ensure:
    - Security best practices are followed
    - Cost optimization is considered
    - High availability and disaster recovery are planned
    - Final migration plan consolidates all technical and financial analysis
    """

    # "Agents as tools": the sub-agents are passed as plain tools.
    orchestrator = Agent(
        system_prompt=MIGRATION_ORCHESTRATOR_PROMPT,
        tools=[
            architecture_analyst,
            cost_analysis_specialist,
            presentation_creator,
        ],
        model=bedrock_model,
    )

    return orchestrator


def run_cloud_migration_demo():
    """
    Demo: Cloud Migration Planning using Agents as Tools pattern

    This demonstrates how an orchestrator agent delegates to specialized
    tool agents to create a comprehensive migration plan.
    """
    print("=" * 70)
    print("Cloud Migration Planning (Agents as Tools Pattern)")
    print("=" * 70)
    print("\nPattern: Hierarchical delegation with specialized tool agents")
    print("Scenario: Planning cloud migration for e-commerce company\n")

    # Track execution start time
    start_time = datetime.now()

    # Create the orchestrator
    orchestrator = create_migration_orchestrator()

    # Define the migration request
    migration_request = """
    Plan a comprehensive cloud migration for "ShopEasy" e-commerce company:

    Current State:
    - On-premise monolithic Java application
    - MySQL database with 50TB of data
    - 1 million daily active users
    - Peak traffic during sales events (10x normal)
    - Legacy file storage system with 100TB of product images

    Requirements:
    - Zero downtime migration
    - High availability across multiple regions
    - Cost optimization (current spend: $100K/month)
    - Compliance with PCI-DSS for payment processing
    - Improved performance and scalability

    Constraints:
    - 6-month migration timeline
    - $500K migration budget
    - Limited DevOps expertise in current team
    - Must maintain integration with existing ERP system

    Deliverables needed:
    1. Architecture diagrams with migration phases
    2. Detailed cost analysis and projections
    3. Migration runbook and documentation
    4. Executive presentation for board approval
    """

    print("📋 Processing migration request...")
    print("🤖 Orchestrator delegating to specialized agents...\n")

    # Execute the orchestrated migration planning
    result = orchestrator(migration_request)

    # Track execution end time
    end_time = datetime.now()
    execution_time = (end_time - start_time).total_seconds()
    print(f"Execution time: {execution_time:.2f} seconds")

    print("\n✅ Migration Plan Generated!")
    print("-" * 70)
    print(result)
    print("-" * 70)


# No __main__ guard: importing this module runs the demo immediately.
run_cloud_migration_demo()
--------------------------------------------------------------------------------
/strands_agents/weather_word_count.py:
--------------------------------------------------------------------------------
from strands import Agent, tool
from strands.models import BedrockModel
from strands_tools import http_request

# Define a weather-focused system prompt
WEATHER_SYSTEM_PROMPT = """You are a weather assistant with HTTP capabilities. You can:

1. Make HTTP requests to the National Weather Service API
2. Process and display weather forecast data
3. Provide weather information for locations in the United States

When retrieving weather information:
1. First get the coordinates or grid information using https://api.weather.gov/points/{latitude},{longitude} or https://api.weather.gov/points/{zipcode}
2. Then use the returned forecast URL to get the actual forecast

When displaying responses:
- Format weather data in a human-readable way
- Highlight important information like temperature, precipitation, and alerts
- Handle errors appropriately
- Convert technical terms to user-friendly language

Always explain the weather conditions clearly and provide context for the forecast.
"""


# NOTE(review): the docstring presumably becomes the tool description shown
# to the model by @tool — confirm before rewording it.
@tool
def word_count(text: str) -> int:
    """Count words in text."""
    # split() with no argument splits on runs of whitespace, so empty or
    # all-whitespace input yields 0.
    return len(text.split())


# Bedrock
# Low temperature keeps the forecast summaries fairly deterministic.
bedrock_model = BedrockModel(
    model_id="us.anthropic.claude-3-5-haiku-20241022-v1:0",
    temperature=0.3,
)

agent = Agent(
    system_prompt=WEATHER_SYSTEM_PROMPT,
    tools=[word_count, http_request],
    model=bedrock_model,
)
# Script-style entry: the query runs at import time (no __main__ guard).
response = agent(
    "What's the weather like in Washington D.C? Also how many words are in the response?"
)
--------------------------------------------------------------------------------