├── .gitignore
├── Architecture.drawio
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── app.py
├── architecture.png
├── artifacts
│   ├── bedrock_lambda
│   │   ├── index_lambda
│   │   │   ├── index.py
│   │   │   └── prompt_builder.py
│   │   └── query_lambda
│   │       ├── agent_executor_utils.py
│   │       ├── agents
│   │       │   └── retriever_agent.py
│   │       ├── prompt_utils.py
│   │       ├── query_rag_bedrock.py
│   │       ├── requirements.txt
│   │       └── strands_multi_agent
│   │           ├── casual_conversations_agent.py
│   │           ├── code_generator_agent.py
│   │           ├── index.md
│   │           ├── orchestrator.py
│   │           ├── ppt_generator_agent.py
│   │           ├── ppt_themes
│   │           │   ├── circuit_theme.pptx
│   │           │   └── ion_theme.pptx
│   │           ├── retriever_agent.py
│   │           ├── weather_agent.py
│   │           └── web_search_agent.py
│   ├── chat-ui
│   │   ├── .dockerignore
│   │   ├── .eslintrc.cjs
│   │   ├── .gitignore
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── index.html
│   │   ├── nginx.conf
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── postcss.config.js
│   │   ├── prettier.config.cjs
│   │   ├── public
│   │   │   ├── favicon.ico
│   │   │   ├── images
│   │   │   │   ├── android-chrome-192x192.png
│   │   │   │   ├── android-chrome-512x512.png
│   │   │   │   ├── apple-touch-icon.png
│   │   │   │   ├── document-chat.png
│   │   │   │   ├── favicon-16x16.png
│   │   │   │   ├── favicon-32x32.png
│   │   │   │   ├── favicon.ico
│   │   │   │   ├── header.png
│   │   │   │   ├── logo.png
│   │   │   │   ├── multi-agent.png
│   │   │   │   ├── ocr.png
│   │   │   │   └── sentiment.png
│   │   │   └── manifest.json
│   │   ├── src
│   │   │   ├── app.tsx
│   │   │   ├── authService.ts
│   │   │   ├── common
│   │   │   │   ├── constants.ts
│   │   │   │   ├── context.ts
│   │   │   │   ├── helpers
│   │   │   │   │   ├── auth-help.ts
│   │   │   │   │   └── storage-helper.ts
│   │   │   │   ├── hooks
│   │   │   │   │   ├── use-navigation-panel-state.ts
│   │   │   │   │   └── use-on-follow.ts
│   │   │   │   └── types.ts
│   │   │   ├── components
│   │   │   │   ├── chat-ui
│   │   │   │   │   ├── chat-ui-input-panel-backup.tsx
│   │   │   │   │   ├── chat-ui-input-panel.tsx
│   │   │   │   │   ├── chat-ui-message-list.tsx
│   │   │   │   │   ├── chat-ui-message.tsx
│   │   │   │   │   ├── chat-ui.tsx
│   │   │   │   │   └── types.tsx
│   │   │   │   ├── dynamic-agent
│   │   │   │   │   ├── agent-file-reader.tsx
│   │   │   │   │   ├── agent-message.tsx
│   │   │   │   │   ├── agent-ui-input-panel.tsx
│   │   │   │   │   ├── agent-ui-message-list.tsx
│   │   │   │   │   ├── agent-ui.tsx
│   │   │   │   │   └── types.tsx
│   │   │   │   ├── upload-ui
│   │   │   │   │   └── upload-ui.tsx
│   │   │   │   └── wrappers
│   │   │   │       ├── router-button-dropdown.tsx
│   │   │   │       ├── router-button.tsx
│   │   │   │       └── router-link.tsx
│   │   │   ├── default-properties.json
│   │   │   ├── help-properties.json
│   │   │   ├── main.tsx
│   │   │   ├── pages
│   │   │   │   ├── agent-page.tsx
│   │   │   │   ├── chat-page.tsx
│   │   │   │   ├── help-page.tsx
│   │   │   │   ├── home-page.tsx
│   │   │   │   ├── index.tsx
│   │   │   │   ├── not-found.tsx
│   │   │   │   ├── ocr-page.tsx
│   │   │   │   ├── pii-redact-page.tsx
│   │   │   │   ├── sentiment-page.tsx
│   │   │   │   └── upload-page.tsx
│   │   │   ├── styles
│   │   │   │   ├── agent-ui.module.scss
│   │   │   │   ├── app.scss
│   │   │   │   ├── chat-ui.module.scss
│   │   │   │   └── ocr-page.module.scss
│   │   │   └── vite-env.d.ts
│   │   ├── tsconfig.json
│   │   ├── tsconfig.node.json
│   │   └── vite.config.ts
│   └── html_lambda
│       ├── content
│       │   └── rag_bedrock.html
│       └── llm_html_generator.py
├── buildspec_bedrock.yml
├── buildspec_dockerize_ui.yml
├── cdk.json
├── creator.sh
├── infrastructure
│   ├── api_gw_stack.py
│   ├── apprunner_hosting_stack.py
│   ├── bedrock_layer_stack.py
│   ├── dynamodb_stack.py
│   ├── ecr_ui_stack.py
│   └── opensearch_vectordb_stack.py
├── llms_with_serverless_rag
│   ├── __init__.py
│   └── llms_with_serverless_rag_stack.py
├── media
│   ├── .gitkeep
│   ├── Add-lambda-arn-to-dataaccess.png
│   ├── AppRunner_UI.png
│   ├── Bedrock-KB-Integration.png
│   ├── Bedrock-Page.png
│   ├── CloudShell-deployment.png
│   ├── Collection-ARN-and-ModelId.png
│   ├── Home-Page.png
│   ├── Lambda-ARN.png
│   ├── Modify-DataAccessPolicy.png
│   └── Retrieve-Opensearch-Endpoint.png
├── requirements-dev.txt
├── requirements.txt
└── source.bat
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | */.DS_Store
3 | .DS_Store
4 | cdk.out
5 | */__pycache__/
6 | __pycache__/
7 | **/package-lock.json
8 | node_modules
9 | generate-react-cli.json
10 | artifacts/chat-ui/src/config.json
11 |
12 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
4 | opensource-codeofconduct@amazon.com with any additional questions or comments.
5 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
4 | documentation, we greatly value feedback and contributions from our community.
5 |
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
7 | information to effectively respond to your bug report or contribution.
8 |
9 |
10 | ## Reporting Bugs/Feature Requests
11 |
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features.
13 |
14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
16 |
17 | * A reproducible test case or series of steps
18 | * The version of our code being used
19 | * Any modifications you've made relevant to the bug
20 | * Anything unusual about your environment or deployment
21 |
22 |
23 | ## Contributing via Pull Requests
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
25 |
26 | 1. You are working against the latest source on the *main* branch.
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
29 |
30 | To send us a pull request, please:
31 |
32 | 1. Fork the repository.
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
34 | 3. Ensure local tests pass.
35 | 4. Commit to your fork using clear commit messages.
36 | 5. Send us a pull request, answering any default questions in the pull request interface.
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
38 |
39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
41 |
42 |
43 | ## Finding contributions to work on
44 | Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
45 |
46 |
47 | ## Code of Conduct
48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
50 | opensource-codeofconduct@amazon.com with any additional questions or comments.
51 |
52 |
53 | ## Security issue notifications
54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
55 |
56 |
57 | ## Licensing
58 |
59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT No Attribution
2 |
3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so.
10 |
11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
17 |
18 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | ### Scalable RAG solutions / Agentic Workflows with Amazon Bedrock and Amazon OpenSearch Serverless
3 |
4 |
5 | # Overview
6 |
7 | Widespread AI adoption is being driven by generative AI models that can generate human-like content. However, these foundation models are trained on general data, making them less effective for domain-specific tasks. Therein lies the importance of Retrieval Augmented Generation (RAG). RAG augments prompts with relevant external data to produce better domain-specific outputs. With RAG, documents and queries are converted to embeddings, the embeddings are compared to find relevant context, and that context is appended to the original prompt before it is passed to the LLM. Knowledge libraries can be updated asynchronously to provide the most relevant external data for augmenting prompts.
8 |
9 | [Amazon OpenSearch Serverless (AOSS) offers a vector engine to store embeddings for faster similarity searches](https://aws.amazon.com/blogs/big-data/introducing-the-vector-engine-for-amazon-opensearch-serverless-now-in-preview/). The vector engine provides a simple, scalable, and high-performing similarity search capability in Amazon OpenSearch Serverless that makes it easy for you to build generative artificial intelligence (AI) applications without having to manage the underlying vector database infrastructure.
10 |
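That query path is what this repository's `artifacts/bedrock_lambda/query_lambda` implements. The following is a minimal, illustrative sketch of the flow only, not the project's exact code: it assumes a Cohere Embed model on Bedrock, an AOSS index named `sample-embeddings-store-dev` with an `embedding` knn field and a `text` field, and Claude 3 Haiku; the endpoint, index, and XML-style tags are placeholders.

```python
import json
import boto3
from opensearchpy import OpenSearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth

region = "us-east-1"  # assumption: your deployment region
bedrock = boto3.client("bedrock-runtime", region_name=region)
creds = boto3.Session().get_credentials()
auth = AWS4Auth(creds.access_key, creds.secret_key, region, "aoss", session_token=creds.token)
# assumption: your AOSS collection endpoint host (without the https:// scheme)
aoss = OpenSearch(hosts=[{"host": "<collection-id>.us-east-1.aoss.amazonaws.com", "port": 443}],
                  http_auth=auth, use_ssl=True, verify_certs=True,
                  connection_class=RequestsHttpConnection)

def answer(question: str) -> str:
    # 1. Convert the query to an embedding (Cohere Embed expects "texts"/"input_type")
    emb = json.loads(bedrock.invoke_model(
        modelId="cohere.embed-english-v3",
        body=json.dumps({"texts": [question], "input_type": "search_query"}))["body"].read())
    vector = emb["embeddings"][0]

    # 2. Retrieve the nearest document chunks from the vector index
    hits = aoss.search(index="sample-embeddings-store-dev", body={
        "size": 5, "_source": False, "fields": ["text"],
        "query": {"knn": {"embedding": {"vector": vector, "k": 5}}}})["hits"]["hits"]
    context = " ".join(h["fields"]["text"][0] for h in hits)

    # 3. Append the retrieved context to the prompt and call the LLM
    resp = json.loads(bedrock.invoke_model(
        modelId="anthropic.claude-3-haiku-20240307-v1:0",
        body=json.dumps({
            "anthropic_version": "bedrock-2023-05-31", "max_tokens": 1024,
            "messages": [{"role": "user",
                          "content": f"<context>{context}</context>\n<question>{question}</question>"}]}))["body"].read())
    return resp["content"][0]["text"]
```
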
11 | > [!NOTE]
12 | > This repository offers a production-ready, easily deployable Generative AI solution with the below features:
13 | > 1. Document chat
14 | > 2. Multi-Agent collaboration with the Strands SDK
15 | > 3. Sentiment Analysis
16 | > 4. PII Redaction
17 | > 5. OCR
18 |
19 | > [!IMPORTANT]
20 | > The older UI is maintained in the v0.0.1 (Old-UI) branch.
21 |
22 | ### Demos
23 |
24 |
25 | Doc Chat/Doc Management (Multi-lingual)
26 |
27 | 
28 |
29 |
30 |
31 |
32 | Multi-Agent Demo
33 |
34 | 
35 |
36 |
37 |
38 |
39 | PII Redaction
40 |
41 |
42 |
43 | OCR
44 |
45 | 
46 |
47 |
48 |
49 |
50 | Sentiment Analysis
51 |
52 |
53 |
54 |
55 | Latest project updates
56 | * 28-May-2025 Multi-Agent Orchestration now through Strands SDK
57 | * 08-Nov-2024 Supports Claude-3.5 Haiku for RAG/OCR/PII Identification/Sentiment Analysis
58 | * 29-Oct-2024 Supports Claude-3.5 Sonnet V2/Opus for RAG/OCR/PII Identification/Sentiment Analysis
59 | * 1-Sept-2024 Document Aware chunking strategy, to answer questions comparing several documents. For example: What did I say in Doc 1 that I contradict in Doc 7?
60 |
61 |
62 |
63 | ### Prerequisites
64 |
66 |
67 | * [An AWS account](https://aws.amazon.com/console/)
68 | * [You should have access to Anthropic Claude-3 Haiku/Sonnet models on Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html)
69 | * [For RAG, you should have access to Cohere English Embed model on Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html)
70 | * [Amazon Bedrock supported regions](https://docs.aws.amazon.com/bedrock/latest/userguide/what-is-bedrock.html#bedrock-regions)
71 | * [Amazon OpenSearch Serverless (AOSS) supported regions](https://aws.amazon.com/about-aws/whats-new/2023/01/amazon-opensearch-serverless-available/)
72 |
73 | #### Familiarity with the below services
74 | * [AWS IAM](https://docs.aws.amazon.com/iam/index.html)
75 | * [AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html)
76 | * [Amazon API Gateway](https://docs.aws.amazon.com/apigateway/latest/developerguide/welcome.html)
77 | * [Amazon OpenSearch Serverless](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-overview.html)
78 |
79 |
80 |
81 |
82 | ### Architecture
83 | 
84 |
85 |
86 | ### Deploying the Solution to your AWS account with AWS CloudShell
87 |
88 |
91 | #### Section 1 - Create an IAM role with Administrator permissions (OPTIONAL: if you already have an Admin role, you may skip this step)
92 |
93 | 1. Search for the IAM service on the AWS Console, go to the IAM Dashboard, click on the “Roles” tab under “Access Management”, and click on “Create Role”
94 |
95 |
96 | 2. Select AWS Account and click “Next“
97 |
98 |
99 | 3. Under permissions, select AdministratorAccess
100 |
101 |
102 | 4. Give the role a name and create the role (a scripted alternative to steps 1-4 is sketched after this list)
103 |
104 |
105 | 5. You can now assume this role and proceed to deploy the stack. Click on Switch-Role
106 |
107 |
108 |
109 | 6. Switch role
110 |
111 |
112 | 7. Proceed to Section 2
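
If you prefer to script steps 1-4, the same role can be created with boto3 from CloudShell. This is a hedged sketch, not part of the stack: the role name is a placeholder, and the trust policy simply allows principals in your own account to assume the role.

```python
import json
import boto3

iam = boto3.client("iam")
account_id = boto3.client("sts").get_caller_identity()["Account"]

# Trust policy: allow principals in your own account to assume this role
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": f"arn:aws:iam::{account_id}:root"},
        "Action": "sts:AssumeRole",
    }],
}

iam.create_role(RoleName="serverless-rag-demo-admin",  # placeholder role name
                AssumeRolePolicyDocument=json.dumps(trust_policy))
iam.attach_role_policy(RoleName="serverless-rag-demo-admin",
                       PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
```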
113 |
114 |
115 |
116 |
119 | #### Section 2 - Deploy the RAG-based Solution (total deployment time ~40 minutes; run the commands below in the region of deployment)
120 |
121 | 1. Switch to the Admin role. Search for the CloudShell service on the AWS Console and follow the steps below to clone the GitHub repository
122 |
123 |
124 |
125 | 2. Git Clone the serverless-rag-demo repository from aws-samples
126 | ```
127 | git clone https://github.com/aws-samples/serverless-rag-demo.git
128 | ```
129 |
130 | 3. Go to the directory where we have the downloaded files.
131 | ```
132 | cd serverless-rag-demo
133 | ```
134 |
135 | 4. Run the bash script that creates the RAG-based solution. Pass the environment and region for deployment; the environment can be dev, qa, or sandbox. Refer to the Prerequisites to deploy in a supported region.
136 | ```
137 | sh creator.sh
138 | ```
139 |
140 | 5. Press **Enter** to proceed with deployment of the stack or **ctrl+c** to exit
141 |
142 | 
143 |
144 | 6. The UI is hosted on AppRunner. The AppRunner link can be found in CloudShell once the script execution is complete, or you can go to the AppRunner service on the AWS Console and obtain the https URL. The UI is authenticated through Amazon Cognito, so the very first time you will have to sign up and then sign in to the application
145 | 
146 |
148 | 7. On the Amazon Bedrock model access page, enable access to the below models
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 | ### (ADVANCED) Using an existing Bedrock Knowledge base
157 |
158 | > [!IMPORTANT]
159 | > You can query your existing Knowledge base created on Amazon Bedrock, provided it uses the Amazon OpenSearch Serverless service.
160 |
161 | #### Steps
162 | 1. Get the Collection ARN and the embedding model used by your Knowledge base on Bedrock
163 | 
164 | 2. Head to Amazon OpenSearch Serverless and search by ARN to fetch the OpenSearch endpoint
165 | 
166 | 3. Modify the configuration of your `bedrock_rag_query_*` Lambda function. Set the below environment variables (a scripted sketch follows the screenshots below):
167 | a. IS_BEDROCK_KB = yes
168 | b. OPENSEARCH_VECTOR_ENDPOINT = <>
169 | c. EMBED_MODEL_ID = <>. Find the base model Id here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
170 | d. VECTOR_INDEX_NAME = <>
171 | e. BEDROCK_KB_EMBEDDING_KEY = <>
172 | 
173 | 
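
The same environment variables can also be set programmatically. A minimal boto3 sketch; the function name and all values are placeholders for your own deployment:

```python
import boto3

lambda_client = boto3.client("lambda")

# Placeholder: use the full name of your bedrock_rag_query_* function.
# Note: this call replaces the function's entire environment, so include
# any existing variables you want to keep.
lambda_client.update_function_configuration(
    FunctionName="bedrock_rag_query_dev",
    Environment={"Variables": {
        "IS_BEDROCK_KB": "yes",
        "OPENSEARCH_VECTOR_ENDPOINT": "<your-collection-endpoint>",
        "EMBED_MODEL_ID": "<base-model-id>",
        "VECTOR_INDEX_NAME": "<your-kb-index-name>",
        "BEDROCK_KB_EMBEDDING_KEY": "<your-kb-vector-field>",
    }},
)
```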
174 |
175 | 4. Get the ARN of the Lambda role
176 | 
177 | 5. Head to Amazon OpenSearch Serverless on the AWS Console and click on Data Access Policies. Search for the Data Access Policy attached to your Bedrock KB and click on the `Edit` button
178 | 
179 | 6. In the principal section, add the ARN of your Lambda role and hit save (an illustrative policy snippet follows the screenshot below)
180 | 
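
For illustration only, a data access policy entry with the Lambda role added as a principal looks roughly like the following; collection name, permissions, and role ARNs are placeholders, and you should edit your existing policy in the console rather than replace it:

```json
[
  {
    "Rules": [
      {
        "ResourceType": "index",
        "Resource": ["index/<your-kb-collection>/*"],
        "Permission": ["aoss:ReadDocument", "aoss:DescribeIndex"]
      }
    ],
    "Principal": [
      "arn:aws:iam::<account-id>:role/<existing-bedrock-kb-role>",
      "arn:aws:iam::<account-id>:role/<bedrock_rag_query-lambda-role>"
    ]
  }
]
```
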
181 | 7. Now try Document Chat on the UI; it should query your Amazon Bedrock Knowledge base.
182 |
183 | > [!IMPORTANT]
184 | > We do not support indexing to an existing Knowledge base. That can be done through the Amazon Bedrock Console.
185 |
186 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 |
4 | import aws_cdk as cdk
5 | from aws_cdk import Stack, Tags
6 | from infrastructure.apprunner_hosting_stack import AppRunnerHostingStack
7 | from infrastructure.ecr_ui_stack import ECRUIStack
8 | from llms_with_serverless_rag.llms_with_serverless_rag_stack import LlmsWithServerlessRagStack
9 | from infrastructure.api_gw_stack import ApiGw_Stack
10 |
11 | app = cdk.App()
12 |
13 | def tag_my_stack(stack):
14 |     tags = Tags.of(stack)
15 |     tags.add("project", "llms-with-serverless-rag")
16 |
17 | account_id = os.getenv('CDK_DEFAULT_ACCOUNT')
18 | region = os.getenv('CDK_DEFAULT_REGION')
19 | env=cdk.Environment(account=account_id, region=region)
20 |
21 | env_name = app.node.try_get_context("environment_name")
22 | LlmsWithServerlessRagStack(app, f"LlmsWithServerlessRag{env_name}Stack", env=env
23 | # If you don't specify 'env', this stack will be environment-agnostic.
24 | # Account/Region-dependent features and context lookups will not work,
25 | # but a single synthesized template can be deployed anywhere.
26 |
27 | # Uncomment the next line to specialize this stack for the AWS Account
28 | # and Region that are implied by the current CLI configuration.
29 |
30 | #env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')),
31 |
32 | # Uncomment the next line if you know exactly what Account and Region you
33 | # want to deploy the stack to. */
34 |
35 | #env=cdk.Environment(account='123456789012', region='us-east-1'),
36 |
37 | # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html
38 | )
39 |
40 | api_gw_stack = ApiGw_Stack(app, f'ApiGwLlmsLambda{env_name}Stack')
41 | tag_my_stack(api_gw_stack)
42 |
43 | apprunner_stack = AppRunnerHostingStack(app, f"AppRunnerHosting{env_name}Stack")
44 | tag_my_stack(apprunner_stack)
45 |
46 | app.synth()
47 |
48 |
--------------------------------------------------------------------------------
/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/architecture.png
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/index_lambda/prompt_builder.py:
--------------------------------------------------------------------------------
1 | import base64
2 |
3 | # Prompt template for Claude 3 to extract text from a given image
4 | claude3_textract_prompt="""Your purpose is to extract the text from the given image (traditional OCR).
5 | If the text is in another language, you should first translate it to english and then extract it.
6 | Remember not to summarize or analyze the image. You should return the extracted text.
7 | Wrap the response as a json with key text and value the extracted text.
8 | Do not include any other words or characters in the output other than the json.
9 | """
10 |
11 | claude3_title_prompt="""Your purpose is to suggest a suitable title for the provided text.
12 | The text is provided within the tags.
13 | You should suggest a title that is short, concise, and descriptive.
14 | Remember not to summarize or analyze the text. You should return the title.
15 | If the text is in another language, you should first translate it to english and then generate the title.
16 | Wrap the response as a json with key text and value the title.
17 | {
18 | "text": "
"
19 | }
20 | Do not include any other words or characters in the output other than the json.
21 | """
22 |
23 | def generate_claude_3_ocr_prompt(image_bytes_list):
24 | image_content_list = []
25 | if len(image_bytes_list) > 0:
26 | for image_bytes in image_bytes_list:
27 | image_content_list.append({
28 | "type": "image",
29 | "source": {
30 | "type": "base64",
31 | "media_type": "image/jpeg",
32 | "data": base64.b64encode(image_bytes).decode("utf-8")
33 | }
34 | })
35 | image_content_list.append({
36 | "type": "text",
37 | "text": claude3_textract_prompt
38 | })
39 |
40 | ocr_prompt = [
41 | {
42 | "role": "user",
43 | "content": image_content_list
44 | }]
45 | prompt_template= {"anthropic_version": "bedrock-2023-05-31",
46 | "max_tokens": 600000,
47 | "messages": ocr_prompt
48 | }
49 | return prompt_template
50 |
51 |
52 |
53 | def generate_claude_3_title_prompt(text_value):
54 | title_prompt = [
55 | {
56 | "role": "user",
57 | "content": [
58 | {
59 | "type": "text",
60 | "text": f"""{claude3_title_prompt}
61 | {text_value}"""
62 | }
63 | ]
64 | }]
65 | prompt_template= {"anthropic_version": "bedrock-2023-05-31",
66 | "max_tokens": 100000,
67 | "messages": title_prompt
68 | }
69 | return prompt_template
70 |
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/agent_executor_utils.py:
--------------------------------------------------------------------------------
1 | from os import getenv
2 | import boto3
3 | import logging
4 | import json
5 | from datetime import datetime
6 | from strands.models import BedrockModel
7 |
8 | region = getenv("REGION", "us-east-1")
9 | bedrock_client = boto3.client('bedrock-runtime')
10 | s3_client = boto3.client("s3")
11 | model_id = getenv("MULTI_AGENT_MODEL", "us.anthropic.claude-3-5-sonnet-20241022-v2:0")
12 | LOG = logging.getLogger()
13 | LOG.setLevel(logging.INFO)
14 | s3_bucket_name = getenv("S3_BUCKET_NAME", "S3_BUCKET_NAME_MISSING")
15 |
16 | bedrockModel = BedrockModel(
17 | model_id=model_id
18 | )
19 |
20 | def agent_executor(system_prompt, chat_input, output, output_tags="", custom_impl=False) :
21 | LOG.info(f'method=agent_executor, sys_prompt={system_prompt}, user_query={chat_input}, output= {output}, output_tags={output_tags}, custom_impl={custom_impl}')
22 | system_prompt = f""" {system_prompt}. """
23 | output_start_tag=""
24 | output_end_tag=""
25 | # If custom_impl is set, we ignore what's sent in output and output_tags
26 | if not custom_impl:
27 | system_prompt = system_prompt + f""" You will wrap the {output} in {output_tags} tags only.
28 | If the {output} can't be found, you will return an empty string within {output_tags} tags.
29 |
30 | """
31 | output_start_tag = output_tags.split('><')[0] + '>'
32 | output_end_tag = '<' + output_tags.split('><')[1]
33 |
34 | prompt_template= {
35 | "anthropic_version": "bedrock-2023-05-31",
36 | "max_tokens": 10000,
37 | "system": system_prompt,
38 | "messages": chat_input
39 | }
40 |
41 | response = bedrock_client.invoke_model(
42 | body=json.dumps(prompt_template),
43 | modelId=model_id,
44 | accept='application/json',
45 | contentType='application/json'
46 | )
47 | llm_output = json.loads(response['body'].read())
48 |
49 | LOG.info(f'method=agent_executor, LLM_output={llm_output}')
50 |
51 | query_results = ''
52 | if 'content' in llm_output:
53 | query_results = llm_output['content'][0]['text']
54 | if not custom_impl and output_start_tag in query_results and output_end_tag in query_results:
55 | query_results = query_results.split(output_end_tag)[0]
56 | query_results = query_results.split(output_start_tag)[1]
57 |
58 | LOG.info(f'method=agent_executor, agent_output={query_results}')
59 | return query_results
60 |
61 |
62 | def upload_object_to_s3(artifact, file_extension, content_type):
63 | try:
64 | now = datetime.now()
65 | date_time = now.strftime("%Y-%m-%d-%H-%M-%S")
66 | s3_key = f"{file_extension}/sample_{date_time}.{file_extension}"
67 | s3_client.put_object(Body=artifact, Bucket=s3_bucket_name, Key=s3_key, ContentType=content_type)
68 | s3_presigned = generate_presigned_url(s3_key)
69 | if s3_presigned is not None:
70 | return True, s3_presigned
71 | return True, s3_key
72 | except Exception as e:
73 | print(f"Error uploading to S3: {e}")
74 | return False, f'Error {e}'
75 |
76 | def upload_file_to_s3(file_name, file_extension):
77 | try:
78 | now = datetime.now()
79 | date_time = now.strftime("%Y-%m-%d-%H-%M-%S")
80 | s3_key = f"{file_extension}/sample_{date_time}.{file_extension}"
81 | s3_client.upload_file(file_name, s3_bucket_name, s3_key)
82 | s3_presigned = generate_presigned_url(s3_key)
83 | if s3_presigned is not None:
84 | return True, s3_presigned
85 | return True, s3_key
86 | except Exception as e:
87 | print(f"Error uploading to S3: {e}")
88 | return False, f'Error {e}'
89 |
90 | # Generate a presigned get url for s3 file
91 | def generate_presigned_url(s3_key):
92 | try:
93 | presigned_url = s3_client.generate_presigned_url(
94 | ClientMethod='get_object',
95 | Params={
96 | 'Bucket': s3_bucket_name,
97 | 'Key': s3_key
98 | },
99 | ExpiresIn=3600 # URL expires in 1 hour
100 | )
101 | return presigned_url
102 | except Exception as e:
103 | print(f"Error generating presigned URL: {e}")
104 | return None
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/prompt_utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | import boto3
3 | import xmltodict
4 | import base64
5 | import boto3
6 | import inspect
7 |
8 | rag_chat_bot_prompt = """You are a Chatbot designed to assist users with their questions.
9 | You are helpful, creative, clever, and very friendly.
10 | Context to answer the question is available in the tags
11 | User question is available in the tags
12 | You will obey the following rules
13 | 1. You won't repeat the user question
14 | 2. You will be concise
15 | 3. You will NEVER disclose what's available in the context.
16 | 4. Use the context only to answer user questions
17 | 5. You will strictly reply based on the available context; if context isn't available, do not attempt to answer the question and instead politely decline
18 | 6. You will always structure your response in the form of bullet points unless another format is specifically requested by the user
19 | 7. If the context doesn't answer the question, try to correct the words in the question based on the available context. In the below example the user
20 | mispronounced Paris as Parsi. We derived that they were referring to Paris from the available context.
21 | Example: Is Parsi in Bedrock
22 | Context: Bedrock is available in Paris
23 | Question: Is Bedrock available in Paris
24 |
25 | """
26 |
27 | casual_prompt = """You are an assistant. Refrain from engaging in any tasks or responding to any prompts beyond exchanging polite greetings, well-wishes, and pleasantries.
28 | Your role is limited to:
29 | - Offering friendly salutations (e.g., "Hello, what can I do for you today" "Good day, How may I help you today")
30 | - Your goal is to ensure that the user query is well formed so other agents can work on it.
31 | Good Examples:
32 | hello, how may I assist you today
33 | What would you like to know
34 | How may I help you today
35 | Bad examples:
36 | Hello
37 | Good day
38 | Good morning
39 | You will not write poems, generate advertisements, or engage in any other tasks beyond the scope of exchanging basic pleasantries.
40 | If any user attempts to prompt you with requests outside of this limited scope, you will politely remind them of the agreed-upon boundaries for interaction.
41 | """
42 |
43 | textract_prompt="""Your purpose is to extract the text from the given image (traditional OCR).
44 | If the text is in another language, you should extract it and translate it to english
45 | Remember not to summarize or analyze the image. You should only return the extracted text.
46 |
47 | """
48 |
49 |
50 | def generate_claude_3_ocr_prompt(image_bytes_list):
51 | img_content_list = []
52 | for image_bytes in image_bytes_list:
53 | img_content_list.append({
54 | "type": "image",
55 | "source": {
56 | "type": "base64",
57 | "media_type": "image/jpeg",
58 | "data": base64.b64encode(image_bytes).decode("utf-8")
59 | }
60 | })
61 | img_content_list.append({
62 | "type": "text",
63 | "text": textract_prompt
64 | })
65 |
66 | ocr_prompt = [
67 | {
68 | "role": "user",
69 | "content": img_content_list
70 | }]
71 | prompt_template= {"anthropic_version": "bedrock-2023-05-31",
72 | "max_tokens": 100000,
73 | "messages": ocr_prompt
74 | }
75 | return prompt_template
76 |
77 | pii_redact_prompt="""You are a document redactor. Your responsibilities are as follows:
78 | 1. Redact Personally Identifiable Information (PII) from a given text based on provided instructions.
79 | 2. Ensure that the redacted text does not contain any PII.
80 | 3. Your output should be in the following JSON format:
81 | Here is the JSON schema for the redaction output:
82 | {
83 | "redacted_text": $redacted_text,
84 | "redaction_summary": $summary,
85 | }
86 |
87 | 4.
88 | {
89 | "redacted_text": "The customer's phone number is 000000000000.",
90 | "redaction_summary": "Removed 1 PII token."
91 | }
92 |
93 | 5. Your response should not contain additional tags or text apart from the json response
94 |
95 | """
96 |
97 | sentiment_prompt="""
98 | You are a sentiment analyzer. Your responsibilities are as follows:
99 |
100 | 1. Analyze the provided conversation and identify the primary tone and sentiment expressed by the customer. Classify the tone as one of the following: Positive, Negative, Neutral, Humorous, Sarcastic, Enthusiastic, Angry, or Informative. Classify the sentiment as Positive, Negative, or Neutral. Provide a direct short answer without explanations.
101 | 2. Review the conversation focusing on the key topic discussed. Use clear and professional language, and describe the topic in one sentence, as if you are the customer service representative. Use a maximum of 20 words.
102 | 3. Rate the sentiment on a scale of 1 to 10, where 1 is very negative and 10 is very positive
103 | 4. Identify the emotions conveyed in the conversation
104 | 5. Your output should be in the following JSON format:
105 | Here is the JSON schema for the sentiment analysis output:
106 | {
107 | "sentiment": $sentiment,
108 | "tone": $tone,
109 | "emotions": $emotions,
110 | "rating": $sentiment_score,
111 | "summary": $summary,
112 | }
113 |
114 | 6.
115 | {
116 | "sentiment": "Positive",
117 | "tone": "Informative",
118 | "emotions": ["Satisfied", "Impressed"],
119 | "rating":8,
120 | "summary": "The customer discusses their experience setting up and using multiple Echo Dot devices in their home, providing detailed setup instructions and highlighting the device's capabilities."
121 | }
122 |
123 | 7. Your response should not contain additional tags or text apart from the json response
124 |
125 | """
126 |
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/requirements.txt:
--------------------------------------------------------------------------------
1 | strands-agents
2 | strands-agents-tools
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/casual_conversations_agent.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from os import getenv
3 | import json
4 | import logging
5 | import datetime
6 | import json
7 | from datetime import datetime, timedelta
8 | from strands import Agent, tool
9 | from agent_executor_utils import bedrockModel
10 |
11 | date = datetime.now()
12 | next_date = datetime.now() + timedelta(days=30)
13 | year = date.year
14 | month = date.month
15 | month_label = date.strftime('%B')
16 | next_month_label = next_date.strftime('%B')
17 | day = date.day
18 |
19 | GENERAL_CONVERSATION_SYSTEM_PROMPT = """
20 | You are a helpful casual assistant. You can help with general knowledge, maths, physics, philosophy,
21 | and creative writing.
22 |
23 | Good Examples:
24 | hello, how may I assist you today
25 | What would you like to know
26 | How may I help you today
27 |
28 | Bad Examples:
29 | Hello
30 | Good day
31 | Good morning
32 | How are you
33 | """
34 |
35 | LOG = logging.getLogger()
36 | LOG.setLevel(logging.INFO)
37 |
38 |
39 | @tool
40 | def general_assistant_agent(user_query):
41 | print(f'In casual_conversations = {user_query}')
42 | agent = Agent(system_prompt=GENERAL_CONVERSATION_SYSTEM_PROMPT, model=bedrockModel, tools=[])
43 | agent_response = agent(user_query)
44 | return agent_response
45 |
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/code_generator_agent.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from os import getenv
3 | import json
4 | from strands import Agent, tool
5 | from os import getenv
6 | from datetime import datetime
7 | from agent_executor_utils import bedrockModel
8 |
9 | CODE_GENERATOR_SYSTEM_PROMPT = """
10 | You are a code generator agent. Your task is to generate code based on the user query.
11 |
12 | Key Responsibilities:
13 | - Generate code based on the user query
14 | - Use the tool upload_object_to_s3 to upload the generated code to S3
15 | - The upload_object_to_s3 tool returns the presigned S3 url where the code is uploaded.
16 | - You should use this S3 url to display the code to the user.
17 |
18 | General Instructions:
19 | - Use only the following tools:
20 | - upload_object_to_s3: To upload the generated code to S3
21 | - You will generate structured, syntactically correct HTML code only, based on the user query.
22 | - You will always generate syntactically correct single page HTML/Javascript/JQuery/CSS code.
23 | - The CSS/HTML/Javascript/JQuery code should be part of a single file within the tags.
24 | - If the code is not generated, return "Code generation failed"
25 | - If the user query is not clear, return "User query is not clear"
26 | - If the user query is not valid, return "User query is not valid"
27 | - You should pass on the presigned S3 url of the code to the user so it renders on the UI.
28 | """
29 |
30 | s3_client = boto3.client('s3')
31 | s3_bucket_name = getenv("S3_BUCKET_NAME", "S3_BUCKET_NAME_MISSING")
32 |
33 | @tool
34 | def code_generator_agent(user_query: str):
35 | agent = Agent(system_prompt=CODE_GENERATOR_SYSTEM_PROMPT, model=bedrockModel, tools=[upload_object_to_s3])
36 | agent_response = agent(user_query)
37 | upload_object_to_s3(agent_response)
38 | return agent_response
39 |
40 | @tool
41 | def upload_object_to_s3(artifact, file_extension="html", content_type="text/html"):
42 | try:
43 | now = datetime.now()
44 | artifact = artifact.replace("\n", "")
45 | date_time = now.strftime("%Y-%m-%d-%H-%M-%S")
46 | s3_key = f"{file_extension}/sample_{date_time}.{file_extension}"
47 | s3_client.put_object(Body=artifact, Bucket=s3_bucket_name, Key=s3_key, ContentType=content_type)
48 | s3_presigned = generate_presigned_url(s3_key)
49 | if s3_presigned is not None:
50 | return True, s3_presigned
51 | return True, s3_key
52 | except Exception as e:
53 | print(f"Error uploading to S3: {e}")
54 | return False, f'Error {e}'
55 |
56 | # Generate a presigned get url for s3 file
57 | def generate_presigned_url(s3_key):
58 | try:
59 | presigned_url = s3_client.generate_presigned_url(
60 | ClientMethod='get_object',
61 | Params={
62 | 'Bucket': s3_bucket_name,
63 | 'Key': s3_key
64 | },
65 | ExpiresIn=3600 # URL expires in 1 hour
66 | )
67 | return presigned_url
68 | except Exception as e:
69 | print(f"Error generating presigned URL: {e}")
70 | return None
71 |
72 | # if __name__ == "__main__":
73 | # print(code_generator("Generate a simple HTML page with a header and a footer"))
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/index.md:
--------------------------------------------------------------------------------
1 | # Multi-Agent Orchestration Made Simple
2 |
3 | [STRANDS](https://strandsagents.com/) is a powerful open-source multi-agent orchestration SDK. We've used the "Agents as Tools" architectural pattern, wherein our specialised agents are wrapped as callable functions that can be used by other agents. The primary Orchestrator handles user interaction and calls the relevant specialist agents, then reflects on whether the user question was successfully answered.
4 |
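The pattern itself is small. Below is a minimal sketch of "Agents as Tools" with Strands, modelled on the agents in this folder; the prompts, tool bodies, and the sample query are illustrative placeholders rather than the project's actual agents.

```python
from strands import Agent, tool
from strands.models import BedrockModel

model = BedrockModel(model_id="us.anthropic.claude-3-5-sonnet-20241022-v2:0")

@tool
def weather_agent(location: str):
    """Specialist agent: answers weather questions for a location."""
    specialist = Agent(system_prompt="You are a weather agent...", model=model, tools=[])
    return specialist(location)

@tool
def general_assistant_agent(user_query: str):
    """Specialist agent: handles everything outside the other specialities."""
    specialist = Agent(system_prompt="You are a helpful casual assistant...", model=model, tools=[])
    return specialist(user_query)

# The orchestrator is itself an Agent whose tools are the specialist agents.
orchestrator = Agent(
    system_prompt="Route each user query to the most appropriate specialist agent.",
    model=model,
    tools=[weather_agent, general_assistant_agent],
)

print(orchestrator("Will it rain in Mumbai tomorrow?"))
```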
5 |
6 | ## System Architecture
7 |
8 | ```mermaid
9 | graph TB
10 | subgraph "User Interface"
11 | UI[WebSocket Client]
12 | end
13 |
14 | subgraph "STRANDS Multi-Agent System"
15 | Orchestrator[Orchestrator Agent]
16 |
17 | subgraph "Specialist Agents"
18 | WebSearch[Web Search Agent]
19 | Retriever[RAG Agent]
20 | CodeGen[Code Generator]
21 | Weather[Weather Agent]
22 | PPTGen[PPT Generator]
23 | General[General Assistant]
24 | end
25 |
26 |
27 | subgraph "External Services"
28 | Bedrock[Amazon Bedrock]
29 | Web[DuckDuckGo Wikipedia YahooFin]
30 | OpenSearch[OpenSearch]
31 | S3[S3 Storage]
32 | WeatherAPI[OpenWeatherMap]
33 | end
34 | end
35 |
36 | UI -->|WebSocket| Orchestrator
37 | Orchestrator -->|Route Query| WebSearch
38 | Orchestrator -->|Route Query| Retriever
39 | Orchestrator -->|Route Query| CodeGen
40 | Orchestrator -->|Route Query| Weather
41 | Orchestrator -->|Route Query| PPTGen
42 | Orchestrator -->|Route Query| General
43 |
44 |
45 | WebSearch -->|Summarize| Bedrock
46 | WebSearch -->|Search| Web
47 | Retriever -->|Query| OpenSearch
48 | CodeGen -->|Generate| Bedrock
49 | CodeGen -->|Store| S3
50 | Weather -->|Query| WeatherAPI
51 | PPTGen -->|Generate xml| Bedrock
52 | PPTGen -->|Store| S3
53 | General -->|Chat| Bedrock
54 |
55 | classDef agent fill:#f9f,stroke:#333,stroke-width:2px
56 | classDef tool fill:#ffd,stroke:#333,stroke-width:2px
57 | classDef service fill:#bbf,stroke:#333,stroke-width:2px
58 | classDef external fill:#bfb,stroke:#333,stroke-width:2px
59 |
60 | class WebSearch,Retriever,CodeGen,Weather,PPTGen,General agent
61 | class Orchestrator service
62 | class Bedrock,OpenSearch,S3,Web,WeatherAPI external
63 | ```
64 |
65 | The architecture leverages the ["Agents as Tools"](https://strandsagents.com/0.1.x/user-guide/concepts/multi-agent/agents-as-tools/) pattern, with a centralized Orchestrator Agent that routes queries to six specialized agents:
66 | * [Web Search Agent](https://github.com/aws-samples/serverless-rag-demo/blob/strands/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/web_search_agent.py)
67 | * [Code Generator](https://github.com/aws-samples/serverless-rag-demo/blob/strands/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/code_generator_agent.py)
68 | * [General Assistant](https://github.com/aws-samples/serverless-rag-demo/blob/strands/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/casual_conversations_agent.py)
69 | * [PPT Generator](https://github.com/aws-samples/serverless-rag-demo/blob/strands/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/ppt_generator_agent.py)
70 | * [RAG Agent](https://github.com/aws-samples/serverless-rag-demo/blob/strands/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/retriever_agent.py)
71 | * [Weather Agent](https://github.com/aws-samples/serverless-rag-demo/blob/strands/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/weather_agent.py)
72 |
73 | Each specialist agent communicates with services like Amazon Bedrock, OpenSearch, S3 Storage, and OpenWeatherMap, and calls multiple other tools to fulfill its specific function.
74 |
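For reference, the orchestrator entry point in `orchestrator.py` takes the user query, the WebSocket connection id, and an API Gateway Management API client used to stream tokens back to the UI. The following invocation sketch is illustrative; the endpoint URL and connection id are placeholders, and `query_rag_bedrock.py` is the real caller in the deployed stack.

```python
import boto3
from strands_multi_agent.orchestrator import orchestrator

# Assumption: the API Gateway WebSocket callback endpoint of your deployment
websocket_client = boto3.client(
    "apigatewaymanagementapi",
    endpoint_url="https://<api-id>.execute-api.us-east-1.amazonaws.com/<stage>",
)

# connect_id is the WebSocket connection id that API Gateway passes to the Lambda
answer = orchestrator("What is the weather in Mumbai?", "<connection-id>", websocket_client)
print(answer)
```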
75 |
76 | ## Installation
77 |
78 | ```bash
79 | pip install strands-agents strands-agents-tools
80 | ```
81 |
82 | * You can also run these agents individually in standalone mode, for example:
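
A hedged sketch of standalone use, assuming you run it from the `query_lambda` directory with its requirements installed and the stack's environment variables exported locally (the sample queries are placeholders; the commented-out `__main__` blocks in each agent file show the same pattern):

```python
# Run single specialist agents outside the orchestrator (illustrative)
from strands_multi_agent.weather_agent import weather_agent
from strands_multi_agent.web_search_agent import web_search_agent

print(weather_agent("Mumbai"))
print(web_search_agent("Latest NVIDIA earnings"))
```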
83 |
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/orchestrator.py:
--------------------------------------------------------------------------------
1 | from strands import Agent
2 | from strands_tools import file_read, file_write, editor
3 | from strands.models import BedrockModel
4 | import logging
5 | from os import getenv
6 | import base64
7 | import json
8 | from strands_multi_agent.casual_conversations_agent import general_assistant_agent
9 | from strands_multi_agent.code_generator_agent import code_generator_agent
10 | from strands_multi_agent.ppt_generator_agent import ppt_generator_agent
11 | from strands_multi_agent.weather_agent import weather_agent
12 | from strands_multi_agent.web_search_agent import web_search_agent
13 | from strands_multi_agent.retriever_agent import retriever_agent
14 | import sys
15 | import os
16 | from agent_executor_utils import bedrockModel
17 |
18 | logger = logging.getLogger("orchestrator")
19 | wss_url = getenv("WSS_URL", "WEBSOCKET_URL_MISSING")
20 | # Define a focused system prompt for file operations
21 | ORCHESTRATOR_SYSTEM_PROMPT = """
22 | You are a Multi-Agent Orchestrator, designed to coordinate support across multiple agents. Your role is to:
23 |
24 | 1. Analyze incoming user queries and determine the most appropriate specialized agent to handle them:
25 | - WebSearch Agent: For searching the web if you don't have the information
26 | - Retriever Agent: For RAG if you have the information
27 | - Code Generator Agent: For code generation. The code generator agent will generate a code and upload it on s3.
28 | It will provide you the Presigned S3 url where the code is uploaded in ... tags. You can use this S3 key to display the code to the user.
29 | - Weather Agent: For weather information
30 | - PPT Generator Agent: For presentation generation. The PPT Generator Agent will generate a presentation and upload it on s3.
31 | It will provide you the Presigned S3 url where the presentation is uploaded in ... tags. You can use this S3 key to display the presentation to the user.
32 | You should pass on the presigned S3 url to the user.
33 | - General Assistant Agent: For all topics outside the specialized areas
34 |
35 | 2. Key Responsibilities:
36 | - Accurately classify user queries by domain area
37 | - Route requests to the appropriate specialized agent
38 | - Maintain context and coordinate multi-step problems
39 | - Ensure cohesive responses when multiple agents are needed
40 |
41 | 3. Decision Protocol:
42 | - If query involves weather forecast → WeatherAgent
43 | - If query involves unknown information → Web Search Agent
44 | - If query involves data in Knowledge Base → Retriever Agent
45 | - If query involves code generation → Code Generator Agent
46 | - If query is outside these specialized areas → General Assistant Agent
47 | - if query involves creating a presentation → PPT Generator Agent
48 | - For complex queries, coordinate multiple agents as needed
49 |
50 | Always confirm your understanding before routing to ensure accurate assistance.
51 | When using the PPT Generator Agent, you should pass on the presigned S3 url to the user.
52 | When using the Code Generator Agent, you should pass on the presigned S3 url to the user, so it renders on the UI.
53 | """
54 |
55 |
56 | tool_use_ids = []
57 | def orchestrator(user_query, connect_id, websocket_client):
58 |
59 | def callback_handler(**kwargs):
60 | if "data" in kwargs:
61 | # Log the streamed data chunks
62 | # logger.info(kwargs["data"])
63 | print(kwargs["data"])
64 | websocket_send(connect_id, kwargs["data"], websocket_client)
65 | elif "current_tool_use" in kwargs:
66 | tool = kwargs["current_tool_use"]
67 | if tool["toolUseId"] not in tool_use_ids:
68 | # Log the tool use
69 | logger.info(f"\n[Using tool: {tool.get('name')}]")
70 | websocket_send(connect_id, f"\n[Using tool: {tool.get('name')}]", websocket_client)
71 | tool_use_ids.append(tool["toolUseId"])
72 |
73 | # Create a new agent with the specified system prompt
74 | agent = Agent(system_prompt=ORCHESTRATOR_SYSTEM_PROMPT, model=bedrockModel, tools=[general_assistant_agent,
75 | code_generator_agent,
76 | ppt_generator_agent,
77 | weather_agent,
78 | web_search_agent,
79 | retriever_agent], callback_handler=callback_handler)
80 | # Invoke the agent with the user query
81 | agent_response = agent(user_query)
82 | # Return the response from the agent
83 | return str(agent_response)
84 |
85 |
86 | def websocket_send(connect_id, message, websocket_client):
87 | global wss_url
88 | logger.debug(f'WSS URL {wss_url}, connect_id {connect_id}, message {message}')
89 | response = websocket_client.post_to_connection(
90 | Data=base64.b64encode(json.dumps(message, indent=4).encode('utf-8')),
91 | ConnectionId=connect_id
92 | )
93 |
94 | # if __name__ == "__main__":
95 | # print(orchestrator("Create a presentation on NVIDIA for the year 2024 ?", "1234567890"))
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/ppt_themes/circuit_theme.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/ppt_themes/circuit_theme.pptx
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/ppt_themes/ion_theme.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/ppt_themes/ion_theme.pptx
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/retriever_agent.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from os import getenv
3 | from opensearchpy import OpenSearch, RequestsHttpConnection, exceptions
4 | from requests_aws4auth import AWS4Auth
5 | from requests.auth import HTTPBasicAuth
6 | import json
7 | from decimal import Decimal
8 | import logging
9 | import datetime
10 | import requests
11 |
12 | import json
13 |
14 | from datetime import datetime, timedelta
15 | from strands import Agent, tool
16 | from agent_executor_utils import bedrockModel
17 |
18 | date = datetime.now()
19 | next_date = datetime.now() + timedelta(days=30)
20 | year = date.year
21 | month = date.month
22 | month_label = date.strftime('%B')
23 | next_month_label = next_date.strftime('%B')
24 | day = date.day
25 |
26 | endpoint = getenv("OPENSEARCH_VECTOR_ENDPOINT", "https://admin:P@@search-opsearch-public-24k5tlpsu5whuqmengkfpeypqu.us-east-1.es.amazonaws.com:443")
27 | credentials = boto3.Session().get_credentials()
28 | service = 'aoss'
29 | region = getenv("REGION", "us-east-1")
30 | awsauth = AWS4Auth(credentials.access_key, credentials.secret_key,
31 | region, service, session_token=credentials.token)
32 | embed_model_id = getenv("EMBED_MODEL_ID", "amazon.titan-embed-image-v1")
33 | INDEX_NAME = getenv("VECTOR_INDEX_NAME", "sample-embeddings-store-dev")
34 | is_bedrock_kb = getenv("IS_BEDROCK_KB", "no")
35 | bedrock_embedding_key_name = getenv("BEDROCK_KB_EMBEDDING_KEY", "bedrock-knowledge-base-default-vector")
36 | LOG = logging.getLogger()
37 | LOG.setLevel(logging.INFO)
38 | bedrock_client = boto3.client('bedrock-runtime')
39 |
40 | ops_client = client = OpenSearch(
41 | hosts=[{'host': endpoint, 'port': 443}],
42 | http_auth=awsauth,
43 | use_ssl=True,
44 | verify_certs=True,
45 | connection_class=RequestsHttpConnection,
46 | timeout=300
47 | )
48 |
49 | RETRIEVER_SYSTEM_PROMPT = """
50 |
51 | You are a helpful retriever agent. You will use the tools to retrieve information from the knowledge base.
52 |
53 | Key Responsibilities:
54 | - Use the tools to retrieve information from the knowledge base
55 | - Use the tools to rewrite the user query
56 | - Use the tools to classify the user query
57 | - Use the tools to fetch data from the knowledge base
58 |
59 | General Instructions:
60 | - Use only the following tools:
61 | - query_translation: To translate the user query into english if its in any other language
62 | - query_rewrite: To rewrite the user query to be more relevant to the knowledge base
63 | - fetch_data: To fetch data from the knowledge base. This tool needs the user query and the proper nouns in the user query and also if we should do a hybrid search or not.
64 |
65 | """
66 |
67 |
68 | @tool
69 | def retriever_agent(user_query):
70 | agent = Agent(system_prompt=RETRIEVER_SYSTEM_PROMPT, model=bedrockModel, tools=[query_translation, query_rewrite, fetch_data])
71 | agent_response = agent(user_query)
72 | return agent_response
73 |
74 | # As our knowledge base is in English. We should translate the user query into english if its in any other language
75 | @tool
76 | def query_translation(user_query):
77 | print(f'In Query Translation = {user_query}')
78 |
79 | QUERY_TRANSLATION_SYSTEM_PROMPT = """
80 | You are a helpful query translator. You will translate the user query into english if its in any other language
81 | It should not contain any other tags or text in the response.
82 | """
83 |
84 | agent = Agent(system_prompt=QUERY_TRANSLATION_SYSTEM_PROMPT, model=bedrockModel, tools=[])
85 | agent_response = agent(user_query)
86 | return agent_response
87 |
88 | @tool
89 | def query_rewrite(user_query):
90 | # rewrite the user query to be more relevant to the knowledge base
91 | # Step back prompting technique
92 | print(f'In Query Rewrite = {user_query}')
93 | QUERY_REWRITE_SYSTEM_PROMPT = f"""
94 | You are a query rewriter. Your task is to step back and paraphrase a question to a more generic
95 | step-back question, which is easier to answer. Remember today's year is {year} and the month is {month_label}
96 | and day is {day}.
97 | Use this information to rewrite the user query to be more relevant to the knowledge base.
98 |
99 | Example 1:
100 | Original query : Amazon earnings
101 |
102 | Rewritten query
103 | What are the Amazon earnings over the last 5 years from {year - 5} to {year}?
104 |
105 | Example 2:
106 | Original query : Who is the Amazon CEO?
107 | Rewritten query : Who are all the CEOs in Amazon
108 |
109 | """
110 |
111 | agent = Agent(system_prompt=QUERY_REWRITE_SYSTEM_PROMPT, model=bedrockModel, tools=[])
112 | agent_response = agent(user_query)
113 | return agent_response
114 |
115 |
116 | # Here we combine the results of Keyword and Semantic search to produce better results
117 | @tool
118 | def fetch_data(user_query, proper_nouns: list, is_hybrid=False):
119 | global INDEX_NAME
120 | nearest_neighbours = 10
121 | result_set_size = 20
122 | print(f'In Fetch Data = {user_query}')
123 | context = ''
124 | embeddings_key="embedding"
125 | if 'cohere' in embed_model_id:
126 | response = bedrock_client.invoke_model(
127 | body=json.dumps({"texts": [user_query], "input_type": 'search_query'}),
128 | modelId=embed_model_id,
129 | accept='application/json',
130 | contentType='application/json'
131 | )
132 | embeddings_key="embeddings"
133 | else:
134 | response = bedrock_client.invoke_model(
135 | body=json.dumps({"inputText": user_query}),
136 | modelId=embed_model_id,
137 | accept='application/json',
138 | contentType='application/json'
139 | )
140 | result = json.loads(response['body'].read())
141 | finish_reason = result.get("message")
142 | if finish_reason is not None:
143 | print(f'Embed Error {finish_reason}')
144 | if 'cohere' in embed_model_id:
145 | embedded_search = result.get(embeddings_key)[0]
146 | else:
147 | embedded_search = result.get(embeddings_key)
148 |
149 | TEXT_CHUNK_FIELD = 'text'
150 | if is_bedrock_kb == 'yes':
151 | TEXT_CHUNK_FIELD="AMAZON_BEDROCK_TEXT_CHUNK"
152 |
153 | DOC_TYPE_FIELD = 'doc_type'
154 | vector_query = {
155 | "size": result_set_size,
156 | "query": {
157 | "bool": {
158 | "must": [
159 | {
160 | "knn": {"embedding": {"vector": embedded_search, "k": nearest_neighbours}}
161 | }
162 | ]
163 | }
164 | },
165 | "track_scores": True,
166 | "_source": False,
167 | "fields": [TEXT_CHUNK_FIELD, DOC_TYPE_FIELD]
168 | }
169 |
170 | if is_bedrock_kb == 'yes':
171 | LOG.info('Connecting to Bedrock KB')
172 | if not INDEX_NAME.startswith('bedrock'):
173 | INDEX_NAME = 'bedrock-knowledge-base*'
174 | vector_query = {
175 | "size": result_set_size,
176 | "query":{
177 | "bool": {
178 | "must": [
179 | {
180 | "knn": {bedrock_embedding_key_name: {"vector": embedded_search, "k": nearest_neighbours}}
181 | }
182 | ]
183 | }
184 | },
185 | "track_scores": True,
186 | "_source": False,
187 | "fields": [TEXT_CHUNK_FIELD]
188 | }
189 |
190 | keyword_results = []
191 |
192 | # Common for both Bedrock and non-Bedrock collections
193 | if is_hybrid and len(proper_nouns) > 0:
194 | # Default operator is OR. To narrow the scope you could change it to 'AND'
195 | # "minimum_should_match": len(proper_nouns)-1
196 | # fuzziness handles spelling errors in the query
197 | KEYWORD_SEARCH = {
198 | "match": {
199 | TEXT_CHUNK_FIELD: { "query": " ".join(proper_nouns), "operator": "or", "analyzer": "english",
200 | "fuzziness": "AUTO", "auto_generate_synonyms_phrase_query": False, "zero_terms_query": "all"}
201 | }
202 | }
203 |
204 | keyword_search_query = {
205 | "size": result_set_size,
206 | "query":{
207 | "bool": {
208 | "must": [
209 | KEYWORD_SEARCH
210 | ]
211 | }
212 | },
213 | "track_scores": True,
214 | "_source": False,
215 | "fields": [TEXT_CHUNK_FIELD]
216 | }
217 | LOG.info(f'AOSS Keyword search Query {keyword_search_query}')
218 |
219 |
220 | try:
221 | response = ops_client.search(body=keyword_search_query, index=INDEX_NAME)
222 | LOG.info(f'Opensearch response {response}')
223 | keyword_results.extend(response["hits"]["hits"])
224 | except Exception as e:
225 | LOG.error(f'Keyword search query error {e}')
226 | else:
227 | # When it's just semantic search, increase the result set size
228 | result_set_size=50
229 | vector_query['size']=result_set_size
230 |
231 | LOG.info(f'AOSS Vector search Query {vector_query}')
232 | semantic_results = []
233 |
234 | try:
235 | response = ops_client.search(body=vector_query, index=INDEX_NAME)
236 | LOG.info(f'Opensearch response {response}')
237 | semantic_results.extend(response["hits"]["hits"])
238 | except Exception as e:
239 | LOG.error(f'Vector Index query error {e}')
240 |
241 | all_results = keyword_results + semantic_results
242 | all_results = sorted(all_results, key=lambda x: x['_score'], reverse=True)
243 | for data in all_results:
244 | if context == '':
245 | context = data['fields'][TEXT_CHUNK_FIELD][0]
246 | else:
247 | context += data['fields'][TEXT_CHUNK_FIELD][0] + ' '
248 |
249 | return context.strip()
250 |
251 |
252 | # if __name__ == "__main__":
253 | # print(retriever_agent(" Kya hai captial France Ka batao ?"))
--------------------------------------------------------------------------------
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/weather_agent.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from datetime import datetime, timedelta
3 | from typing import Dict
4 | from geopy.geocoders import Nominatim
5 | from strands import Agent, tool
6 | from agent_executor_utils import bedrockModel
7 | from os import getenv
8 |
9 | WEATHER_SYSTEM_PROMPT = """
10 | You are a weather agent. Your task is to get the weather for a given location.
11 |
12 | Key Responsibilities:
13 | - Get the weather for a given location
14 | - Use the tools to get the weather
15 |
16 | General Instructions:
17 | - Use only the following tools:
18 | - get_lat_long: To get the latitude and longitude of a given location
19 | - get_weather: To get the weather for a given location
20 | - if you already know the latitude and longitude of a location, you can use the get_weather tool to get the weather
21 |
22 | - If the location is not found, return "Location not found"
23 | - If the weather is not found, return "Weather not found"
24 | - If the latitude and longitude are not found, return "Latitude and longitude not found"
25 | - If the weather is not found, return "Weather not found"
26 | """
27 |
28 | geo_locator = Nominatim(user_agent="openstreetmap.org")
29 |
30 | @tool
31 | def weather_agent(location: str):
32 | agent = Agent(system_prompt=WEATHER_SYSTEM_PROMPT, model=bedrockModel, tools=[get_lat_long, get_weather])
33 | agent_response = agent(location)
34 | return agent_response
35 |
36 | @tool
37 | def get_weather(latitude: str, longitude: str):
38 | url = f"https://api.open-meteo.com/v1/forecast?latitude={latitude}&longitude={longitude}¤t_weather=true"
39 | response = requests.get(url)
40 | return response.json()
41 |
42 | @tool
43 | def get_lat_long(place: str):
44 | location = geo_locator.geocode(place)
45 | if location:
46 | lat = location.latitude
47 | lon = location.longitude
48 | return {"latitude": lat, "longitude": lon}
49 | return "Location not found"
50 |
51 | # if __name__ == "__main__":
52 | # print(weather_agent("Mumbai"))
--------------------------------------------------------------------------------
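A minimal sketch of what the two tools in `weather_agent.py` do when chained directly, without the strands `Agent` wrapper: geocode the place with Nominatim, then call Open-Meteo's forecast endpoint with `current_weather=true` (the parameter that appears mis-encoded in the tool above). The `user_agent` string and timeout are illustrative.

```python
# Sketch of the get_lat_long -> get_weather chain without the Agent:
# geocode the place, then query Open-Meteo's current-weather endpoint.
import requests
from geopy.geocoders import Nominatim

def current_weather(place: str):
    location = Nominatim(user_agent="weather-agent-demo").geocode(place)
    if location is None:
        return "Location not found"
    url = (
        "https://api.open-meteo.com/v1/forecast"
        f"?latitude={location.latitude}&longitude={location.longitude}"
        "&current_weather=true"
    )
    return requests.get(url, timeout=10).json().get("current_weather", "Weather not found")

if __name__ == "__main__":
    print(current_weather("Mumbai"))
```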
/artifacts/bedrock_lambda/query_lambda/strands_multi_agent/web_search_agent.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | from os import getenv
4 | import logging
5 | from datetime import datetime, timedelta
6 | from agent_executor_utils import bedrockModel
7 | from strands import Agent, tool
8 | from strands_tools import http_request
9 |
10 |
11 | LOG = logging.getLogger("web_search_agent")
12 | LOG.setLevel(logging.INFO)
13 |
14 | WEB_SEARCH_SYSTEM_PROMPT = """
15 | You are a web search agent. Your task is to search the web for information based on the user query.
16 |
17 | You have access to the following tools:
18 | - search_ddg: To search DuckDuckGo for instant answers
19 | - search_wiki: To search Wikipedia for information on a specific topic
20 | - search_yahoo_finance: To search Yahoo Finance for stock market information
21 | - summarize_search_results: To summarize the search results
22 | - rewrite_user_query: To rewrite the user query
23 | - http_request: To make HTTP requests to the web in case the other tools cannot provide the information
24 |
25 | Key Responsibilities:
26 | - Rewrite the user query if needed
27 | - Search the web for information
28 | - Summarize the search results
29 |
30 | """
31 |
32 | date = datetime.now()
33 | next_date = datetime.now() + timedelta(days=30)
34 | year = date.year
35 | month = date.month
36 | month_label = date.strftime('%B')
37 | next_month_label = next_date.strftime('%B')
38 | day = date.day
39 |
40 |
41 | def callback_handler(**kwargs):
42 | tool_use_ids = []
43 | if "data" in kwargs:
44 | # Log the streamed data chunks
45 | print(kwargs["data"])
46 | elif "current_tool_use" in kwargs:
47 | tool = kwargs["current_tool_use"]
48 | if tool["toolUseId"] not in tool_use_ids:
49 | # Log the tool use
50 | print(f"\n[Using tool: {tool.get('name')}]")
51 | tool_use_ids.append(tool["toolUseId"])
52 |
53 |
54 | @tool
55 | def web_search_agent(user_query):
56 | agent = Agent(system_prompt=WEB_SEARCH_SYSTEM_PROMPT, model=bedrockModel,
57 | tools=[ rewrite_user_query, summarize_search_results, search_ddg, search_wiki, search_yahoo_finance, http_request ]
58 | )
59 | agent_response = agent(user_query)
60 | return agent_response
61 |
62 | @tool
63 | def search_ddg(query):
64 | url = "https://api.duckduckgo.com/"
65 | params = {"q": query, "format": "json"}
66 | response = requests.get(url, params=params)
67 | return response.json()
68 |
69 | @tool
70 | def search_wiki(query):
71 | url = "https://en.wikipedia.org/w/api.php"
72 | params = {"action": "query", "format": "json", "list": "search", "srsearch": query}
73 | response = requests.get(url, params=params)
74 | return response.json()
75 |
76 | @tool
77 | def search_yahoo_finance(query):
78 | url = "https://query2.finance.yahoo.com/v8/finance/chart/"
79 | params = {"q": query, "interval": "1d", "range": "1d"}
80 | response = requests.get(url, params=params)
81 | return response.json()
82 |
83 |
84 | @tool
85 | def summarize_search_results(search_data, user_query):
86 | summarizer_system_prompt = f""" You are a search results summarizer. Given the search results and a user query,
87 | your task is to provide a concise summary of the search results based on the user query.
88 | Remember today's year is {year}, the month is {month_label} and the day is {day}.
89 | The summary should be no more than 60 sentences.
90 | Do not include any other text or tags in the response.
91 | The search results are available in the tags.
92 | Your summarized output should be placed within the tags
93 | """
94 | query_list = [
95 | {
96 | "role": "user",
97 | "content": [{"type": "text", "text": search_data},
98 | {"type": "text", "text": user_query}
99 | ]
100 | }
101 | ]
102 |
103 | summarizer_agent = Agent(system_prompt=summarizer_system_prompt, model=bedrockModel)
104 | summarized = summarizer_agent(query_list)
105 |
106 | return summarized
107 |
108 |
109 | @tool
110 | def rewrite_user_query(chat_history):
111 | print(f'In Query rewrite = {chat_history}')
112 | system_prompt = f""" You are a query rewriter. Given a user query, your task is to step back and paraphrase the question into a more generic
113 | step-back question that is easier to answer.
114 | Remember today's year is {year}, the month is {month_label} and the day is {day}.
115 |
116 | The entire chat history is provided to you.
117 | You should identify the user query from the provided chat history.
118 | You should then rewrite the user query so we get accurate search results.
119 | The rewritten user query should be wrapped in tags.
120 | Do not include any other text or tags in the response.
121 |
122 | """
123 |
124 | agent = Agent(system_prompt=system_prompt, model=bedrockModel)
125 | rewritten_query = agent(chat_history)
126 | print(f'reformatted search_query text = {rewritten_query}')
127 | return rewritten_query
128 |
129 |
130 | # if __name__ == '__main__':
131 | # print(web_search_agent(' Amazon '))
132 |
--------------------------------------------------------------------------------
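For reference, a minimal sketch of the three search endpoints used by `web_search_agent.py`, called directly rather than through the `Agent`. The DuckDuckGo and Wikipedia calls mirror the tool bodies above; passing the ticker symbol in the Yahoo Finance chart URL path is an assumption (the v8 chart endpoint keys on the symbol, not a `q` parameter), matching the corrected `search_yahoo_finance` above.

```python
# Sketch of the search tools without the Agent wrapper. Timeouts are
# illustrative; the Yahoo Finance symbol-in-path form is an assumption.
import requests

def ddg_instant_answer(query: str) -> dict:
    return requests.get("https://api.duckduckgo.com/",
                        params={"q": query, "format": "json"}, timeout=10).json()

def wiki_search(query: str) -> dict:
    return requests.get("https://en.wikipedia.org/w/api.php",
                        params={"action": "query", "format": "json",
                                "list": "search", "srsearch": query}, timeout=10).json()

def yahoo_chart(symbol: str) -> dict:
    return requests.get(f"https://query2.finance.yahoo.com/v8/finance/chart/{symbol}",
                        params={"interval": "1d", "range": "1d"}, timeout=10).json()

if __name__ == "__main__":
    print(ddg_instant_answer("Amazon").get("AbstractText", ""))
```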
/artifacts/chat-ui/.dockerignore:
--------------------------------------------------------------------------------
1 | /node_modules
--------------------------------------------------------------------------------
/artifacts/chat-ui/.eslintrc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | root: true,
3 | env: { browser: true, es2020: true },
4 | extends: [
5 | 'eslint:recommended',
6 | 'plugin:@typescript-eslint/recommended',
7 | 'plugin:react-hooks/recommended',
8 | ],
9 | ignorePatterns: ['dist', '.eslintrc.cjs'],
10 | parser: '@typescript-eslint/parser',
11 | plugins: ['react-refresh'],
12 | rules: {
13 | 'react-refresh/only-export-components': ['warn', { allowConstantExport: true }],
14 | },
15 | };
16 |
--------------------------------------------------------------------------------
/artifacts/chat-ui/.gitignore:
--------------------------------------------------------------------------------
1 |
2 |
3 | #amplify-do-not-edit-begin
4 | amplify/\#current-cloud-backend
5 | amplify/.config/local-*
6 | amplify/logs
7 | amplify/mock-data
8 | amplify/mock-api-resources
9 | amplify/backend/amplify-meta.json
10 | amplify/backend/.temp
11 | build/
12 | dist/
13 | node_modules/
14 | aws-exports.js
15 | awsconfiguration.json
16 | amplifyconfiguration.json
17 | amplifyconfiguration.dart
18 | amplify-build-config.json
19 | amplify-gradle-config.json
20 | amplifytools.xcconfig
21 | .secret-*
22 | **.sample
23 | #amplify-do-not-edit-end
24 | .vite
--------------------------------------------------------------------------------
/artifacts/chat-ui/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM public.ecr.aws/docker/library/node:18-alpine AS builder
2 | WORKDIR /app
3 | COPY package*.json ./
4 | RUN npm i
5 | COPY . .
6 | RUN npm run build
7 | FROM public.ecr.aws/nginx/nginx:stable-alpine
8 | COPY --from=builder /app/dist /usr/share/nginx/html
9 | COPY nginx.conf /etc/nginx/nginx.conf
10 | CMD ["nginx", "-g", "daemon off;"]
--------------------------------------------------------------------------------
/artifacts/chat-ui/README.md:
--------------------------------------------------------------------------------
1 | # Chat UI Cloudscape App
2 |
3 | [https://cloudscape.design/](https://cloudscape.design/)
4 |
5 | Cloudscape is an open source design system for the cloud. Cloudscape offers user interface guidelines, front-end components, design resources, and development tools for building intuitive, engaging, and inclusive user experiences at scale.
6 |
7 |
8 | 
9 |
10 |
11 | ## Vite.js
12 |
13 | [https://vitejs.dev/](https://vitejs.dev/)
14 |
15 | Vite.js is a modern, fast front-end build tool that significantly improves the developer experience when building web applications.
16 |
17 | ## Development
18 | 1. Clone this repository to your local machine
19 | ```bash
20 | git clone https://github.com/aws-samples/cloudscape-examples
21 | cd cloudscape-examples/chat-ui-vite
22 | ```
23 | 2. Install the project dependencies by running:
24 | ```bash
25 | npm install
26 | ```
27 | 3. To start the development server, run:
28 | ```bash
29 | npm run dev
30 | ```
31 |
32 | This command will start a local development server at ``http://localhost:3000`` (or a different port if 3000 is in use). The server will hot-reload if you make edits to any of the source files.
33 |
34 | ## Building the App
35 | To build the application for production, run:
36 | ```bash
37 | npm run build
38 | ```
39 | This command will generate a dist folder containing the production build of your app. Vite optimizes your project for the best performance during this process.
40 |
41 | ## Running the App Locally
42 | After building the app, you can serve it locally using:
43 | ```bash
44 | npm run preview
45 | ```
46 | This command serves the production build from the dist folder, allowing you to preview the app before deployment.
--------------------------------------------------------------------------------
/artifacts/chat-ui/index.html:
--------------------------------------------------------------------------------
[HTML markup stripped during extraction; only the page title "Serverless RAG" is recoverable.]
--------------------------------------------------------------------------------
/artifacts/chat-ui/nginx.conf:
--------------------------------------------------------------------------------
1 | # nginx.conf
2 | user nginx;
3 | worker_processes auto;
4 | error_log /var/log/nginx/error.log warn;
5 | pid /var/run/nginx.pid;
6 | events {
7 | worker_connections 1024;
8 | }
9 | http {
10 | include /etc/nginx/mime.types;
11 | default_type application/octet-stream;
12 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
13 | '$status $body_bytes_sent "$http_referer" '
14 | '"$http_user_agent" "$http_x_forwarded_for"';
15 | access_log /var/log/nginx/access.log main;
16 | sendfile on;
17 | #tcp_nopush on;
18 |
19 | keepalive_timeout 65;
20 | #gzip on;
21 | #include /etc/nginx/conf.d/*.conf;
22 | server {
23 | listen 80;
24 | location / {
25 | root /usr/share/nginx/html;
26 | index index.html index.htm;
27 | try_files $uri $uri/ /index.html;
28 | }
29 | }
30 | }
--------------------------------------------------------------------------------
/artifacts/chat-ui/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "cloudscape-app",
3 | "private": true,
4 | "version": "0.0.0",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "vite build",
9 | "preview": "vite preview",
10 | "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
11 | "format": "npx prettier --ignore-path .gitignore --write \"**/*.+(tsx|js|ts|json)\""
12 | },
13 | "dependencies": {
14 | "@aws-amplify/ui-react": "^6.1.12",
15 | "@aws-sdk/client-cognito-identity-provider": "^3.629.0",
16 | "@cloudscape-design/chat-components": "^1.0.4",
17 | "@cloudscape-design/components": "^3.0.611",
18 | "@cloudscape-design/design-tokens": "^3.0.35",
19 | "@cloudscape-design/global-styles": "^1.0.27",
20 | "axios": "^1.7.4",
21 | "epoch-timeago": "^1.1.9",
22 | "react": "^18.2.0",
23 | "react-doc-viewer": "^0.1.13",
24 | "react-dom": "^18.2.0",
25 | "react-markdown": "^9.0.1",
26 | "react-router-dom": "^6.22.0",
27 | "react-textarea-autosize": "^8.5.3",
28 | "react-use-websocket": "^4.8.1",
29 | "remark-gfm": "^4.0.0",
30 | "vite": "^5.4.19"
31 | },
32 | "devDependencies": {
33 | "@types/react": "^18.2.55",
34 | "@types/react-dom": "^18.2.19",
35 | "@typescript-eslint/eslint-plugin": "^6.21.0",
36 | "@typescript-eslint/parser": "^6.21.0",
37 | "@vitejs/plugin-react": "^4.2.1",
38 | "autoprefixer": "^10.4.17",
39 | "eslint": "^8.56.0",
40 | "eslint-plugin-react-hooks": "^4.6.0",
41 | "eslint-plugin-react-refresh": "^0.4.5",
42 | "postcss": "^8.4.35",
43 | "sass": "^1.70.0",
44 | "typescript": "^5.2.2",
45 | "vite": "^5.4.19"
46 | },
47 | "optionalDependencies": {
48 | "@rollup/rollup-linux-x64-musl": "4.21.0"
49 | },
50 | "overrides": {
51 | "react-virtual": {
52 | "react": "^18.0.0"
53 | }
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/artifacts/chat-ui/postcss.config.js:
--------------------------------------------------------------------------------
1 | export default {
2 | plugins: {
3 | autoprefixer: {},
4 | },
5 | };
6 |
--------------------------------------------------------------------------------
/artifacts/chat-ui/prettier.config.cjs:
--------------------------------------------------------------------------------
1 | // eslint-disable-next-line no-undef
2 | module.exports = {
3 | semi: true,
4 | trailingComma: "es5",
5 | singleQuote: false,
6 | tabWidth: 2,
7 | useTabs: false,
8 | };
9 |
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/favicon.ico
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/android-chrome-192x192.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/android-chrome-512x512.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/apple-touch-icon.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/document-chat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/document-chat.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/favicon-16x16.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/favicon-32x32.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/favicon.ico
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/header.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/logo.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/multi-agent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/multi-agent.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/ocr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/ocr.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/images/sentiment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/serverless-rag-demo/f0fdc7f724836ae44d4c4c80f2d4d6ae85502c6e/artifacts/chat-ui/public/images/sentiment.png
--------------------------------------------------------------------------------
/artifacts/chat-ui/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "/",
3 | "start_url": "/",
4 | "name": "Serverless RAG",
5 | "short_name": "Serverless RAG",
6 | "description": "Serverless RAG demo",
7 | "theme_color": "#000000",
8 | "background_color": "#ffffff",
9 | "display": "standalone",
10 | "icons": [
11 | {
12 | "src": "/images/android-chrome-192x192.png",
13 | "sizes": "192x192",
14 | "type": "image/png"
15 | },
16 | {
17 | "src": "/images/android-chrome-512x512.png",
18 | "sizes": "512x512",
19 | "type": "image/png"
20 | }
21 | ]
22 | }
23 |
--------------------------------------------------------------------------------
/artifacts/chat-ui/src/app.tsx:
--------------------------------------------------------------------------------
1 | import { useState, useEffect } from "react";
2 | import { Routes, Route, HashRouter } from 'react-router-dom';
3 | import { AppLayout, TopNavigation, SideNavigation, Badge, Alert } from '@cloudscape-design/components';
4 | import { Hub } from 'aws-amplify/utils';
5 | import { signOut } from 'aws-amplify/auth';
6 | import { AppContext } from "./common/context";
7 | import { NotFound, ChatPage, AgentPage, OcrPage, SentimentPage, HomePage, Help, PIIPage } from './pages'
8 | import '@aws-amplify/ui-react/styles.css';
9 |
10 | export default function App() {
11 | const [activeHref, setActiveHref] = useState("#/");
12 | const [utility, setUtility] = useState([])
13 | const [appData, setAppData] = useState({ userinfo: null })
14 | const [notificationVisible, setNotificationVisible] = useState(false);
15 | const [notificationMsg, setNotificationMsg] = useState("");
16 | const Router = HashRouter;
17 |
18 | useEffect(() => {
19 | Hub.listen("auth", (data) => {
20 | // setNotificationVisible(true);
21 | // setNotificationMsg("Validating Authentication")
22 | switch (data.payload.event) {
23 | case "signedOut":
24 | setAppData({ userinfo: null })
25 | break;
26 | }
27 | })
28 | }, [])
29 |
30 | useEffect(() => {
31 | if (appData.userinfo != null) {
32 | setUtility([{
33 | type: "menu-dropdown",
34 | text: "Profile",
35 | description: appData.userinfo.signInDetails.loginId,
36 | iconName: "user-profile",
37 | onItemClick: (e) => {
38 | if (e.detail.id == 'signout') { signOut({ global: true }) }
39 | },
40 | items: [
41 | { id: "signout", text: "Sign out" }
42 | ]
43 | }])
44 | } else {
45 | setUtility([])
46 | }
47 | }, [appData])
48 |
49 |
50 | return (
51 |
52 |