├── lib ├── shared │ ├── layers │ │ ├── python-sdk │ │ │ └── python │ │ │ │ ├── README.md │ │ │ │ ├── genai_core │ │ │ │ ├── websites │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── sitemap.py │ │ │ │ ├── langchain │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── workspace_retriever.py │ │ │ │ ├── opensearch │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── client.py │ │ │ │ │ ├── create.py │ │ │ │ │ ├── chunks.py │ │ │ │ │ └── delete.py │ │ │ │ ├── registry │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── index.py │ │ │ │ ├── aurora │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── utils.py │ │ │ │ │ ├── connection.py │ │ │ │ │ └── delete.py │ │ │ │ ├── kendra │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── indexes.py │ │ │ │ │ ├── client.py │ │ │ │ │ └── data_sync.py │ │ │ │ ├── auth.py │ │ │ │ ├── utils │ │ │ │ │ ├── json.py │ │ │ │ │ ├── websocket.py │ │ │ │ │ ├── files.py │ │ │ │ │ ├── delete_files_with_prefix.py │ │ │ │ │ └── comprehend.py │ │ │ │ ├── upload.py │ │ │ │ ├── parameters.py │ │ │ │ ├── semantic_search.py │ │ │ │ ├── clients.py │ │ │ │ ├── types.py │ │ │ │ └── cross_encoder.py │ │ │ │ └── pyproject.toml │ │ └── common │ │ │ └── requirements.txt │ ├── alpine-zip │ │ └── Dockerfile │ ├── web-crawler-dockerfile │ ├── file-import-dockerfile │ ├── file-import-batch-job │ │ └── requirements.txt │ └── web-crawler-batch-job │ │ ├── requirements.txt │ │ └── index.py ├── user-interface │ └── react-app │ │ ├── .eslintignore │ │ ├── src │ │ ├── vite-env.d.ts │ │ ├── common │ │ │ ├── app-context.ts │ │ │ ├── helpers │ │ │ │ ├── text-helper.ts │ │ │ │ ├── embeddings-model-helper.ts │ │ │ │ ├── storage-helper.ts │ │ │ │ ├── options-helper.ts │ │ │ │ └── metrics-helper.ts │ │ │ ├── api-client │ │ │ │ ├── health-client.ts │ │ │ │ ├── models-client.ts │ │ │ │ ├── rag-engines-client.ts │ │ │ │ ├── semantic-search-client.ts │ │ │ │ ├── cross-encoders-client.ts │ │ │ │ ├── embeddings-client.ts │ │ │ │ ├── user-feedback-client.ts │ │ │ │ ├── kendra-client.ts │ │ │ │ └── sessions-client.ts │ │ │ ├── hooks │ │ │ │ ├── 
use-on-follow.ts │ │ │ │ ├── use-navigation-panel-state.ts │ │ │ │ └── use-form.ts │ │ │ ├── file-uploader.ts │ │ │ └── i18n │ │ │ │ └── property-filter-i18n-strings.ts │ │ ├── pages │ │ │ ├── rag │ │ │ │ ├── add-data │ │ │ │ │ └── types.ts │ │ │ │ ├── create-workspace │ │ │ │ │ ├── create-workspace-header.tsx │ │ │ │ │ ├── hybrid-search-field.tsx │ │ │ │ │ ├── language-selector-field.tsx │ │ │ │ │ ├── chunks-selector.tsx │ │ │ │ │ ├── select-engine-panel.tsx │ │ │ │ │ ├── cross-encoder-selector-field.tsx │ │ │ │ │ └── embeddings-selector-field.tsx │ │ │ │ ├── engines │ │ │ │ │ └── engines-page-header.tsx │ │ │ │ ├── dashboard │ │ │ │ │ ├── dashboard-header.tsx │ │ │ │ │ └── general-config.tsx │ │ │ │ └── workspaces │ │ │ │ │ └── workspaces.tsx │ │ │ ├── chatbot │ │ │ │ ├── sessions │ │ │ │ │ ├── sessions.tsx │ │ │ │ │ └── chat-sessions.tsx │ │ │ │ └── playground │ │ │ │ │ ├── multi-chat-playground.tsx │ │ │ │ │ ├── chat-playground.tsx │ │ │ │ │ └── playground.tsx │ │ │ └── not-found.tsx │ │ ├── components │ │ │ ├── wrappers │ │ │ │ ├── router-link.tsx │ │ │ │ ├── router-button.tsx │ │ │ │ └── router-button-dropdown.tsx │ │ │ ├── table-no-match-state.tsx │ │ │ ├── table-empty-state.tsx │ │ │ ├── base-app-layout.tsx │ │ │ ├── rag │ │ │ │ └── workspace-delete-modal.tsx │ │ │ └── global-header.tsx │ │ ├── main.tsx │ │ └── styles │ │ │ └── app.scss │ │ ├── postcss.config.js │ │ ├── public │ │ ├── favicon.ico │ │ ├── images │ │ │ ├── logo.png │ │ │ ├── welcome │ │ │ │ ├── 3p.png │ │ │ │ ├── ui.png │ │ │ │ ├── ui-dark.png │ │ │ │ ├── self-hosted.jpg │ │ │ │ ├── amazon-bedrock.png │ │ │ │ └── amazon-kendra.png │ │ │ ├── favicon-16x16.png │ │ │ ├── favicon-32x32.png │ │ │ ├── apple-touch-icon.png │ │ │ ├── android-chrome-192x192.png │ │ │ ├── android-chrome-512x512.png │ │ │ └── file_icon.svg │ │ ├── manifest.json │ │ └── aws-exports.json │ │ ├── .env.template │ │ ├── tsconfig.node.json │ │ ├── .eslintrc.cjs │ │ ├── tsconfig.json │ │ ├── index.html │ │ ├── README.md │ │ 
├── vite.config.ts │ │ └── package.json ├── rag-engines │ ├── sagemaker-rag-models │ │ ├── model │ │ │ └── requirements.txt │ │ └── index.ts │ ├── data-import │ │ └── functions │ │ │ ├── batch-crawl-rss-posts │ │ │ └── index.py │ │ │ ├── trigger-rss-ingestors │ │ │ └── index.py │ │ │ └── rss-ingestor │ │ │ └── index.py │ ├── aurora-pgvector │ │ └── functions │ │ │ ├── create-workflow │ │ │ └── create │ │ │ │ └── index.py │ │ │ └── pgvector-setup │ │ │ └── index.py │ ├── opensearch-vector │ │ └── functions │ │ │ └── create-workflow │ │ │ └── create │ │ │ └── index.py │ └── workspaces │ │ ├── functions │ │ └── delete-workspace-workflow │ │ │ └── delete │ │ │ └── index.py │ │ └── index.ts ├── model-interfaces │ ├── langchain │ │ └── functions │ │ │ └── request-handler │ │ │ └── adapters │ │ │ ├── openai │ │ │ ├── __init__.py │ │ │ └── gpt.py │ │ │ ├── azureopenai │ │ │ ├── __init__.py │ │ │ └── azuregpt.py │ │ │ ├── base │ │ │ └── __init__.py │ │ │ ├── sagemaker │ │ │ ├── meta │ │ │ │ └── __init__.py │ │ │ ├── amazon │ │ │ │ └── __init__.py │ │ │ ├── __init__.py │ │ │ └── mistralai │ │ │ │ └── __init__.py │ │ │ ├── shared │ │ │ ├── __init__.py │ │ │ └── meta │ │ │ │ └── __init__.py │ │ │ ├── __init__.py │ │ │ └── bedrock │ │ │ ├── __init__.py │ │ │ ├── mistral.py │ │ │ ├── ai21_j2.py │ │ │ ├── cohere.py │ │ │ ├── titan.py │ │ │ └── llama2_chat.py │ └── idefics │ │ └── functions │ │ └── request-handler │ │ ├── adapters │ │ ├── __init__.py │ │ └── base.py │ │ └── content_handler.py ├── sagemaker-model │ ├── hf-custom-script-model │ │ ├── build-script │ │ │ ├── requirements.txt │ │ │ └── script.py │ │ ├── samples │ │ │ ├── basic │ │ │ │ ├── requirements.txt │ │ │ │ └── inference.py │ │ │ └── pipeline │ │ │ │ └── requirements.txt │ │ └── build-function │ │ │ └── index.py │ ├── deploy-custom-script-model.ts │ ├── index.ts │ ├── types.ts │ ├── container-images.ts │ ├── deploy-package-model.ts │ └── image-repository-mapping.ts ├── chatbot-api │ ├── functions │ │ ├── 
api-handler │ │ │ ├── routes │ │ │ │ ├── health.py │ │ │ │ ├── models.py │ │ │ │ ├── kendra.py │ │ │ │ ├── rag.py │ │ │ │ ├── cross_encoders.py │ │ │ │ ├── embeddings.py │ │ │ │ └── user_feedback.py │ │ │ └── index.py │ │ ├── resolvers │ │ │ ├── publish-response-resolver.js │ │ │ ├── subscribe-resolver.js │ │ │ ├── lambda-resolver.js │ │ │ └── send-query-lambda-resolver │ │ │ │ └── index.py │ │ └── outgoing-message-appsync │ │ │ ├── graphql.ts │ │ │ └── index.ts │ ├── chatbot-dynamodb-tables │ │ └── index.ts │ └── chatbot-s3-buckets │ │ └── index.ts └── layer │ └── index.ts ├── cli ├── version.ts ├── magic.ts ├── aws-cron-validator.ts └── aws-cron-expressions.ts ├── assets ├── p1.png ├── p10.png ├── p11.png ├── p12.png ├── p13.png ├── p14.png ├── p15.png ├── p16.png ├── p17.png ├── p2.png ├── p3.png ├── p4.png ├── p5.png ├── p6.png ├── p7.png ├── p8.png ├── p9.png ├── architecture.png ├── example_file.xlsx └── RFPassistant0802.drawio.png ├── .npmignore ├── prettier.config.js ├── .eslintignore ├── .gitallowed ├── CODE_OF_CONDUCT.md ├── .prettierignore ├── .eslintrc.cjs ├── .graphqlconfig.yml ├── Config ├── bin ├── aws-genai-rfpassistant.ts ├── config.json └── config.ts ├── .github └── workflows │ ├── build.yaml │ └── stale.yml ├── tsconfig.json ├── LICENSE └── cdk.json /lib/shared/layers/python-sdk/python/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cli/version.ts: -------------------------------------------------------------------------------- 1 | export const LIB_VERSION = "0.0.1"; 2 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/.eslintignore: -------------------------------------------------------------------------------- 1 | src/graphql/* -------------------------------------------------------------------------------- 
/lib/rag-engines/sagemaker-rag-models/model/requirements.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /lib/shared/alpine-zip/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | 3 | RUN apk add zip -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/vite-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /assets/p1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p1.png -------------------------------------------------------------------------------- /assets/p10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p10.png -------------------------------------------------------------------------------- /assets/p11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p11.png -------------------------------------------------------------------------------- /assets/p12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p12.png -------------------------------------------------------------------------------- /assets/p13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p13.png 
-------------------------------------------------------------------------------- /assets/p14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p14.png -------------------------------------------------------------------------------- /assets/p15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p15.png -------------------------------------------------------------------------------- /assets/p16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p16.png -------------------------------------------------------------------------------- /assets/p17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p17.png -------------------------------------------------------------------------------- /assets/p2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p2.png -------------------------------------------------------------------------------- /assets/p3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p3.png -------------------------------------------------------------------------------- /assets/p4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p4.png -------------------------------------------------------------------------------- /assets/p5.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p5.png -------------------------------------------------------------------------------- /assets/p6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p6.png -------------------------------------------------------------------------------- /assets/p7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p7.png -------------------------------------------------------------------------------- /assets/p8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p8.png -------------------------------------------------------------------------------- /assets/p9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/p9.png -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/websites/__init__.py: -------------------------------------------------------------------------------- 1 | from .sitemap import * 2 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | *.ts 2 | !*.d.ts 3 | 4 | # CDK asset staging directory 5 | .cdk.staging 6 | cdk.out 7 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/openai/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .gpt import * 2 | -------------------------------------------------------------------------------- /lib/sagemaker-model/hf-custom-script-model/build-script/requirements.txt: -------------------------------------------------------------------------------- 1 | huggingface-hub 2 | hf-transfer 3 | boto3 -------------------------------------------------------------------------------- /assets/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/architecture.png -------------------------------------------------------------------------------- /assets/example_file.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/example_file.xlsx -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/azureopenai/__init__.py: -------------------------------------------------------------------------------- 1 | from .azuregpt import * 2 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/base/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import ModelAdapter, Mode 2 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/sagemaker/meta/__init__.py: -------------------------------------------------------------------------------- 1 | from .llama2_chat import * 2 | -------------------------------------------------------------------------------- 
/lib/model-interfaces/langchain/functions/request-handler/adapters/shared/__init__.py: -------------------------------------------------------------------------------- 1 | from .meta.llama2_chat import * 2 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/shared/meta/__init__.py: -------------------------------------------------------------------------------- 1 | from .llama2_chat import * 2 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/sagemaker/amazon/__init__.py: -------------------------------------------------------------------------------- 1 | from .falconlite import * 2 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/postcss.config.js: -------------------------------------------------------------------------------- 1 | export default { 2 | plugins: { 3 | autoprefixer: {}, 4 | }, 5 | }; 6 | -------------------------------------------------------------------------------- /assets/RFPassistant0802.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/assets/RFPassistant0802.drawio.png -------------------------------------------------------------------------------- /lib/model-interfaces/idefics/functions/request-handler/adapters/__init__.py: -------------------------------------------------------------------------------- 1 | from .idefics import Idefics 2 | from .claude import Claude3 3 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/langchain/__init__.py: -------------------------------------------------------------------------------- 1 | from .workspace_retriever import * 2 | from .chat_message_history 
import * 3 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/opensearch/__init__.py: -------------------------------------------------------------------------------- 1 | from .client import * 2 | from .create import * 3 | from .query import * 4 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/registry/__init__.py: -------------------------------------------------------------------------------- 1 | from .index import AdapterRegistry 2 | 3 | registry = AdapterRegistry() 4 | -------------------------------------------------------------------------------- /lib/sagemaker-model/hf-custom-script-model/samples/basic/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.19.0 2 | transformers>=4.29.2 3 | bitsandbytes>=0.39.0 4 | torch 5 | einops -------------------------------------------------------------------------------- /prettier.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | semi: true, 3 | trailingComma: "es5", 4 | singleQuote: false, 5 | tabWidth: 2, 6 | useTabs: false, 7 | }; 8 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/sagemaker/__init__.py: -------------------------------------------------------------------------------- 1 | from .meta import * 2 | from .amazon import * 3 | from .mistralai import * 4 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/sagemaker/mistralai/__init__.py: -------------------------------------------------------------------------------- 1 | from .mistral_instruct import * 2 | from .mixtral_instruct import * 
-------------------------------------------------------------------------------- /lib/user-interface/react-app/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/favicon.ico -------------------------------------------------------------------------------- /lib/sagemaker-model/hf-custom-script-model/samples/pipeline/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.19.0 2 | transformers>=4.29.2 3 | bitsandbytes>=0.39.0 4 | torch 5 | einops 6 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/aurora/__init__.py: -------------------------------------------------------------------------------- 1 | from .connection import * 2 | from .create import * 3 | from .query import * 4 | from .chunks import * 5 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/kendra/__init__.py: -------------------------------------------------------------------------------- 1 | from .indexes import * 2 | from .query import * 3 | from .client import * 4 | from .data_sync import * 5 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/logo.png -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/auth.py: -------------------------------------------------------------------------------- 1 | def get_user_id(router): 2 | user_id = router.current_event.get("identity", 
{}).get("sub") 3 | 4 | return user_id 5 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/welcome/3p.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/welcome/3p.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/welcome/ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/welcome/ui.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/favicon-16x16.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/favicon-32x32.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/apple-touch-icon.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/welcome/ui-dark.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/welcome/ui-dark.png -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | lib/user-interface/react-app/src/graphql/mutations.ts 2 | lib/user-interface/react-app/src/graphql/queries.ts 3 | lib/user-interface/react-app/src/graphql/subscriptions.ts 4 | cdk.out 5 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/welcome/self-hosted.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/welcome/self-hosted.jpg -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/android-chrome-192x192.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/android-chrome-512x512.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/welcome/amazon-bedrock.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/welcome/amazon-bedrock.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/images/welcome/amazon-kendra.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-rfpassistant/HEAD/lib/user-interface/react-app/public/images/welcome/amazon-kendra.png -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/app-context.ts: -------------------------------------------------------------------------------- 1 | import { createContext } from "react"; 2 | import { AppConfig } from "./types"; 3 | 4 | export const AppContext = createContext(null); 5 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/pages/rag/add-data/types.ts: -------------------------------------------------------------------------------- 1 | import { SelectProps } from "@cloudscape-design/components"; 2 | 3 | export interface AddDataData { 4 | workspace: SelectProps.Option | null; 5 | query: string; 6 | } 7 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/__init__.py: -------------------------------------------------------------------------------- 1 | from .openai import * 2 | from .azureopenai import * 3 | from .sagemaker import * 4 | from .bedrock import * 5 | from .base import Mode 6 | from .shared import * 7 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/bedrock/__init__.py: -------------------------------------------------------------------------------- 1 | from .claude import * 2 | from .titan import * 3 | 
from .ai21_j2 import * 4 | from .cohere import * 5 | from .llama2_chat import * 6 | from .mistral import * 7 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/.env.template: -------------------------------------------------------------------------------- 1 | # Replace with your configuration after you deploy 2 | AWS_PROJECT_REGION= 3 | AWS_COGNITO_REGION= 4 | AWS_USER_POOLS_ID= 5 | AWS_USER_POOLS_WEB_CLIENT_ID= 6 | API_DISTRIBUTION_DOMAIN_NAME= 7 | RAG_ENABLED= 8 | DEFAULT_EMBEDDINGS_MODEL= 9 | DEFAULT_CROSS_ENCODER_MODEL= -------------------------------------------------------------------------------- /lib/user-interface/react-app/tsconfig.node.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "composite": true, 4 | "skipLibCheck": true, 5 | "module": "ESNext", 6 | "moduleResolution": "bundler", 7 | "allowSyntheticDefaultImports": true 8 | }, 9 | "include": ["vite.config.ts"] 10 | } 11 | -------------------------------------------------------------------------------- /lib/shared/web-crawler-dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-alpine 2 | 3 | WORKDIR /app 4 | COPY web-crawler-batch-job/requirements.txt requirements.txt 5 | RUN pip install -r requirements.txt 6 | 7 | COPY layers/python-sdk/python/ . 
8 | COPY web-crawler-batch-job/index.py ./index.py 9 | 10 | CMD ["python3", "index.py"] -------------------------------------------------------------------------------- /.gitallowed: -------------------------------------------------------------------------------- 1 | 626614931356 2 | 871362719292 3 | 763104351884 4 | 364406365360 5 | 772153158452 6 | 907027046896 7 | 457447274322 8 | 727897471807 9 | 380420809688 10 | 692866216735 11 | 503227376785 12 | 217643126080 13 | 914824155844 14 | 446045086412 15 | 442386744353 16 | 886529160074 17 | 094389454867 18 | 111122223333 19 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /lib/shared/file-import-dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/unstructured-io/unstructured:0.11.2 2 | 3 | WORKDIR /app 4 | COPY file-import-batch-job/requirements.txt requirements.txt 5 | RUN pip install -r requirements.txt 6 | 7 | COPY layers/python-sdk/python/ . 
8 | COPY file-import-batch-job/main.py ./main.py 9 | 10 | CMD ["python3", "main.py"] -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/components/wrappers/router-link.tsx: -------------------------------------------------------------------------------- 1 | import { Link, LinkProps } from "@cloudscape-design/components"; 2 | import useOnFollow from "../../common/hooks/use-on-follow"; 3 | 4 | export default function RouterLink(props: LinkProps) { 5 | const onFollow = useOnFollow(); 6 | 7 | return ; 8 | } 9 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "genai-core" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["Amazon Web Services"] 6 | readme = "README.md" 7 | 8 | [tool.poetry.dependencies] 9 | python = "^3.10" 10 | 11 | 12 | [build-system] 13 | requires = ["poetry-core"] 14 | build-backend = "poetry.core.masonry.api" 15 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | .webpack 3 | node_modules 4 | native_modules 5 | tsconfig.json 6 | coverage 7 | build 8 | public 9 | out 10 | cdk.out 11 | lib/user-interface/react-app/src/API.ts 12 | lib/user-interface/react-app/src/graphql/mutations.ts 13 | lib/user-interface/react-app/src/graphql/queries.ts 14 | lib/user-interface/react-app/src/graphql/subscriptions.ts 15 | -------------------------------------------------------------------------------- /lib/chatbot-api/functions/api-handler/routes/health.py: -------------------------------------------------------------------------------- 1 | from aws_lambda_powertools import Logger, Tracer 2 | from aws_lambda_powertools.event_handler.appsync import Router 3 | 4 | 
tracer = Tracer() 5 | router = Router() 6 | logger = Logger() 7 | 8 | 9 | @router.resolver(field_name="checkHealth") 10 | @tracer.capture_method 11 | def health(): 12 | return True 13 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/components/wrappers/router-button.tsx: -------------------------------------------------------------------------------- 1 | import { ButtonProps, Button } from "@cloudscape-design/components"; 2 | import useOnFollow from "../../common/hooks/use-on-follow"; 3 | 4 | export default function RouterButton(props: ButtonProps) { 5 | const onFollow = useOnFollow(); 6 | 7 | return 17 | 18 | 19 | ); 20 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/pages/rag/engines/engines-page-header.tsx: -------------------------------------------------------------------------------- 1 | import { Header, HeaderProps } from "@cloudscape-design/components"; 2 | 3 | interface EnginesPageHeaderProps extends HeaderProps { 4 | title?: string; 5 | } 6 | 7 | export function EnginesPageHeader({ 8 | title = "Engines", 9 | ...props 10 | }: EnginesPageHeaderProps) { 11 | return ( 12 |
17 | {title} 18 |
19 | ); 20 | } 21 | -------------------------------------------------------------------------------- /.graphqlconfig.yml: -------------------------------------------------------------------------------- 1 | projects: 2 | chatbot: 3 | schemaPath: lib/chatbot-api/schema/schema.graphql 4 | includes: 5 | - lib/user-interface/react-app/src/graphql/*.ts 6 | excludes: 7 | - ./amplify/** 8 | extensions: 9 | amplify: 10 | codeGenTarget: typescript 11 | generatedFileName: lib/user-interface/react-app/src/API.ts 12 | docsFilePath: lib/user-interface/react-app/src/graphql/ #The field is not configured correctly and needs to be changed 13 | region: us-east-1 14 | apiId: null 15 | frontend: javascript 16 | framework: react 17 | maxDepth: 2 18 | extensions: 19 | amplify: 20 | version: 3 21 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/hooks/use-on-follow.ts: -------------------------------------------------------------------------------- 1 | import { useCallback } from "react"; 2 | import { useNavigate } from "react-router"; 3 | 4 | interface FollowDetail { 5 | external?: boolean; 6 | href?: string; 7 | } 8 | 9 | export default function useOnFollow() { 10 | const navigate = useNavigate(); 11 | 12 | return useCallback( 13 | (event: CustomEvent): void => { 14 | if ( 15 | event.detail.external === true || 16 | typeof event.detail.href === "undefined" 17 | ) { 18 | return; 19 | } 20 | 21 | event.preventDefault(); 22 | navigate(event.detail.href); 23 | }, 24 | [navigate] 25 | ); 26 | } 27 | -------------------------------------------------------------------------------- /lib/model-interfaces/idefics/functions/request-handler/content_handler.py: -------------------------------------------------------------------------------- 1 | import json 2 | from langchain.llms.sagemaker_endpoint import LLMContentHandler 3 | 4 | 5 | class ContentHandler(LLMContentHandler): 6 | content_type = "application/json" 7 | accepts 
= "application/json" 8 | 9 | def transform_input(self, prompt: str, model_kwargs) -> bytes: 10 | req = {"inputs": prompt, "parameters": model_kwargs} 11 | input_str = json.dumps(req) 12 | return input_str.encode("utf-8") 13 | 14 | def transform_output(self, output: bytes): 15 | response_json = json.loads(output.read().decode("utf-8")) 16 | return response_json[0]["generated_text"].split("Assistant:")[-1] 17 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/hooks/use-navigation-panel-state.ts: -------------------------------------------------------------------------------- 1 | import { useState } from "react"; 2 | import { StorageHelper } from "../helpers/storage-helper"; 3 | import { NavigationPanelState } from "../types"; 4 | 5 | export function useNavigationPanelState(): [ 6 | NavigationPanelState, 7 | (state: Partial) => void, 8 | ] { 9 | const [currentState, setCurrentState] = useState( 10 | StorageHelper.getNavigationPanelState() 11 | ); 12 | 13 | const onChange = (state: Partial) => { 14 | console.log(state); 15 | setCurrentState(StorageHelper.setNavigationPanelState(state)); 16 | }; 17 | 18 | return [currentState, onChange]; 19 | } 20 | -------------------------------------------------------------------------------- /Config: -------------------------------------------------------------------------------- 1 | package.Aws-genai-rfpassistant = { 2 | interfaces = (1.0); 3 | 4 | # Use NoOpBuild. See https://w.amazon.com/index.php/BrazilBuildSystem/NoOpBuild 5 | build-system = no-op; 6 | build-tools = { 7 | 1.0 = { 8 | NoOpBuild = 1.0; 9 | }; 10 | }; 11 | 12 | # Use runtime-dependencies for when you want to bring in additional 13 | # packages when deploying. 14 | # Use dependencies instead if you intend for these dependencies to 15 | # be exported to other packages that build against you. 
16 | dependencies = { 17 | 1.0 = { 18 | }; 19 | }; 20 | 21 | runtime-dependencies = { 22 | 1.0 = { 23 | }; 24 | }; 25 | 26 | }; 27 | -------------------------------------------------------------------------------- /bin/aws-genai-rfpassistant.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import * as cdk from "aws-cdk-lib"; 3 | import "source-map-support/register"; 4 | import { AwsGenaiRfpAssistantStack } from "../lib/aws-genai-rfpassistant-stack"; 5 | import { AwsSolutionsChecks } from "cdk-nag"; 6 | import { getConfig } from "./config"; 7 | import { Aspects } from "aws-cdk-lib"; 8 | 9 | const app = new cdk.App(); 10 | 11 | const config = getConfig(); 12 | 13 | new AwsGenaiRfpAssistantStack(app, `${config.prefix}GenaiRfpAssistantStack`, { 14 | config, 15 | env: { 16 | region: process.env.CDK_DEFAULT_REGION, 17 | account: process.env.CDK_DEFAULT_ACCOUNT, 18 | }, 19 | }); 20 | 21 | Aspects.of(app).add(new AwsSolutionsChecks({ verbose: true })); 22 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/.eslintrc.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | root: true, 3 | env: { browser: true, es2020: true }, 4 | extends: [ 5 | "eslint:recommended", 6 | "plugin:@typescript-eslint/recommended", 7 | "plugin:react-hooks/recommended", 8 | ], 9 | ignorePatterns: ["dist", ".eslintrc.cjs"], 10 | parser: "@typescript-eslint/parser", 11 | plugins: ["react-refresh"], 12 | rules: { 13 | "@typescript-eslint/no-explicit-any": ["off"], 14 | "@typescript-eslint/no-non-null-asserted-optional-chain": ["off"], 15 | "react-hooks/exhaustive-deps": ["off"], 16 | "react-refresh/only-export-components": [ 17 | "warn", 18 | { allowConstantExport: true }, 19 | ], 20 | }, 21 | }; 22 | -------------------------------------------------------------------------------- 
/lib/user-interface/react-app/public/images/file_icon.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "useDefineForClassFields": true, 5 | "lib": ["ES2020", "DOM", "DOM.Iterable"], 6 | "module": "ESNext", 7 | "skipLibCheck": true, 8 | 9 | /* Bundler mode */ 10 | "moduleResolution": "bundler", 11 | "allowImportingTsExtensions": true, 12 | "resolveJsonModule": true, 13 | "isolatedModules": true, 14 | "noEmit": true, 15 | "jsx": "react-jsx", 16 | 17 | /* Linting */ 18 | "strict": true, 19 | "noUnusedLocals": true, 20 | "noUnusedParameters": true, 21 | "noFallthroughCasesInSwitch": true 22 | }, 23 | "include": ["src"], 24 | "references": [{ "path": "./tsconfig.node.json" }] 25 | } 26 | -------------------------------------------------------------------------------- /lib/rag-engines/data-import/functions/trigger-rss-ingestors/index.py: -------------------------------------------------------------------------------- 1 | import os 2 | from botocore.exceptions import ClientError 3 | from aws_lambda_powertools import Logger, Tracer 4 | from aws_lambda_powertools.utilities.typing import LambdaContext 5 | import genai_core.documents 6 | 7 | logger = Logger() 8 | tracer = Tracer() 9 | 10 | 11 | @tracer.capture_lambda_handler() 12 | @logger.inject_lambda_context(log_event=True) 13 | def lambda_handler(event, context: LambdaContext): 14 | logger.info(f"Triggering daily checks for RSS Feed Posts") 15 | try: 16 | genai_core.documents.ingest_rss_feeds() 17 | except Exception as e: 18 | logger.error("Error triggering RSS Feed checks") 19 | logger.error(e) 20 | raise e 21 | -------------------------------------------------------------------------------- 
/.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: smoke-build 2 | on: 3 | push: 4 | pull_request: 5 | jobs: 6 | build-cdk: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v4 10 | - uses: actions/setup-node@v3 11 | with: 12 | node-version: "20" 13 | - name: Formatting 14 | run: | 15 | npm ci 16 | cd ./lib/user-interface/react-app 17 | npm ci 18 | cd - 19 | npm run lint 20 | - name: Backend 21 | run: | 22 | npm ci 23 | npm run build 24 | npx cdk synth 25 | - name: Frontend 26 | working-directory: ./lib/user-interface/react-app 27 | run: | 28 | npm ci 29 | npm run build -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "module": "commonjs", 5 | "lib": ["es2020", "dom"], 6 | "declaration": true, 7 | "strict": true, 8 | "noImplicitAny": true, 9 | "strictNullChecks": true, 10 | "noImplicitThis": true, 11 | "alwaysStrict": true, 12 | "noUnusedLocals": false, 13 | "noUnusedParameters": false, 14 | "noImplicitReturns": true, 15 | "noFallthroughCasesInSwitch": false, 16 | "inlineSourceMap": true, 17 | "inlineSources": true, 18 | "experimentalDecorators": true, 19 | "strictPropertyInitialization": false, 20 | "typeRoots": ["./node_modules/@types"] 21 | }, 22 | "exclude": ["node_modules", "cdk.out", "lib/user-interface/react-app"] 23 | } 24 | -------------------------------------------------------------------------------- /lib/rag-engines/aurora-pgvector/functions/create-workflow/create/index.py: -------------------------------------------------------------------------------- 1 | import genai_core.workspaces 2 | import genai_core.aurora.create 3 | from aws_lambda_powertools import Logger 4 | from aws_lambda_powertools.utilities.typing import LambdaContext 5 | 6 | logger = Logger() 7 | 8 | 9 | 
@logger.inject_lambda_context(log_event=True) 10 | def lambda_handler(event, context: LambdaContext): 11 | workspace_id = event["workspace_id"] 12 | logger.info(f"Creating workspace {workspace_id}") 13 | 14 | workspace = genai_core.workspaces.get_workspace(workspace_id) 15 | if not workspace: 16 | raise Exception(f"Workspace {workspace_id} does not exist") 17 | 18 | genai_core.aurora.create.create_workspace_table(workspace) 19 | 20 | return {"ok": True} 21 | -------------------------------------------------------------------------------- /lib/rag-engines/opensearch-vector/functions/create-workflow/create/index.py: -------------------------------------------------------------------------------- 1 | import genai_core.workspaces 2 | import genai_core.opensearch.create 3 | from aws_lambda_powertools import Logger 4 | from aws_lambda_powertools.utilities.typing import LambdaContext 5 | 6 | logger = Logger() 7 | 8 | 9 | @logger.inject_lambda_context(log_event=True) 10 | def lambda_handler(event, context: LambdaContext): 11 | workspace_id = event["workspace_id"] 12 | logger.info(f"Creating workspace {workspace_id}") 13 | 14 | workspace = genai_core.workspaces.get_workspace(workspace_id) 15 | if not workspace: 16 | raise Exception(f"Workspace {workspace_id} does not exist") 17 | 18 | genai_core.opensearch.create.create_workspace_index(workspace) 19 | 20 | return {"ok": True} 21 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close stale issues 2 | 3 | on: 4 | schedule: 5 | - cron: "38 1 * * *" 6 | 7 | permissions: 8 | issues: write 9 | pull-requests: write 10 | 11 | jobs: 12 | close-issues: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/stale@v9 16 | with: 17 | days-before-issue-stale: 60 18 | days-before-issue-close: 30 19 | stale-issue-label: "stale" 20 | stale-issue-message: "This issue is stale because 
it has been open for 60 days with no activity." 21 | close-issue-message: "This issue was closed because it has been inactive for 30 days since being marked as stale." 22 | days-before-pr-stale: -1 23 | days-before-pr-close: -1 24 | repo-token: ${{ secrets.GITHUB_TOKEN }} 25 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 12 | 17 | 18 | 19 | AWS GenAI RFP Assistant 20 | 23 | 24 | 25 |
26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /lib/rag-engines/data-import/functions/rss-ingestor/index.py: -------------------------------------------------------------------------------- 1 | import os 2 | from botocore.exceptions import ClientError 3 | from aws_lambda_powertools import Logger, Tracer 4 | from aws_lambda_powertools.utilities.typing import LambdaContext 5 | import genai_core.documents 6 | 7 | logger = Logger() 8 | tracer = Tracer() 9 | 10 | 11 | @tracer.capture_lambda_handler() 12 | @logger.inject_lambda_context(log_event=True) 13 | def lambda_handler(event, context: LambdaContext): 14 | logger.info(f"Starting scheduled RSS Feed poll") 15 | workspace_id = event["workspace_id"] 16 | document_id = event["document_id"] 17 | logger.info(f"workspace_id = {workspace_id}") 18 | logger.info(f"document_id = {document_id}") 19 | try: 20 | genai_core.documents.check_rss_feed_for_posts(workspace_id, document_id) 21 | except Exception as e: 22 | logger.error("Error checking for new posts from feed!") 23 | logger.error(e) 24 | raise e 25 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/registry/index.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | class AdapterRegistry: 5 | def __init__(self): 6 | # The registry is a dictionary where: 7 | # Keys are compiled regular expressions 8 | # Values are model IDs 9 | self.registry = {} 10 | 11 | def register(self, regex, model_id): 12 | # Compiles the regex and stores it in the registry 13 | self.registry[re.compile(regex)] = model_id 14 | 15 | def get_adapter(self, model): 16 | # Iterates over the registered regexes 17 | for regex, adapter in self.registry.items(): 18 | # If a match is found, returns the associated model ID 19 | if regex.match(model): 20 | return adapter 21 | # If no match is found, returns None 22 | raise 
ValueError( 23 | f"Adapter for model {model} not found in registry. Available adapters: {self.registry}" 24 | ) 25 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/pages/rag/create-workspace/hybrid-search-field.tsx: -------------------------------------------------------------------------------- 1 | import { FormField, Toggle } from "@cloudscape-design/components"; 2 | 3 | interface HybridSearchProps { 4 | submitting: boolean; 5 | onChange: (data: Partial<{ hybridSearch: boolean }>) => void; 6 | checked: boolean; 7 | errors: Record; 8 | } 9 | 10 | export function HybridSearchField(props: HybridSearchProps) { 11 | return ( 12 | 17 | 21 | props.onChange({ hybridSearch: checked }) 22 | } 23 | > 24 | Use hybrid search 25 | 26 | 27 | ); 28 | } 29 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/components/table-empty-state.tsx: -------------------------------------------------------------------------------- 1 | import { Box, SpaceBetween } from "@cloudscape-design/components"; 2 | import RouterButton from "./wrappers/router-button"; 3 | 4 | export const TableEmptyState = (props: { 5 | resourceName: string; 6 | createHref?: string; 7 | createText?: string; 8 | }) => ( 9 | 10 | 11 |
12 | No {props.resourceName}s 13 | 14 | No {props.resourceName}s associated with this resource. 15 | 16 |
17 | {props.createHref && ( 18 | 19 | {props.createText ? ( 20 | <>{props.createText} 21 | ) : ( 22 | <>Create {props.resourceName} 23 | )} 24 | 25 | )} 26 |
27 |
28 | ); 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
17 | 18 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/public/aws-exports.json: -------------------------------------------------------------------------------- 1 | {"aws_project_region":"us-east-1","aws_cognito_region":"us-east-1","aws_user_pools_id":"us-east-1_sCz41fJJ3","aws_user_pools_web_client_id":"6g8ei2aek3ce5v5fq3resf21tv","aws_cognito_identity_pool_id":"us-east-1:ef14d82b-1997-4e91-a8ad-efa582514490","Auth":{"region":"us-east-1","userPoolId":"us-east-1_sCz41fJJ3","userPoolWebClientId":"6g8ei2aek3ce5v5fq3resf21tv","identityPoolId":"us-east-1:ef14d82b-1997-4e91-a8ad-efa582514490"},"aws_appsync_graphqlEndpoint":"https://c5wiu7mknncrfnbavdn5ylmxsu.appsync-api.us-east-1.amazonaws.com/graphql","aws_appsync_region":"us-east-1","aws_appsync_authenticationType":"AMAZON_COGNITO_USER_POOLS","aws_appsync_apiKey":"da2-uhhn3urfdrhl5muiwj6jpa735q","Storage":{"AWSS3":{"bucket":"chatgenaichatbotstack-chatbotapichatbucketsfilesbu-uzwsul8r6glk","region":"us-east-1"}},"config":{"rag_enabled":true,"cross_encoders_enabled":true,"sagemaker_embeddings_enabled":true,"default_embeddings_model":"bedrock::1024::cohere.embed-english-v3","default_cross_encoder_model":"sagemaker::cross-encoder/ms-marco-MiniLM-L-12-v2","privateWebsite":false}} -------------------------------------------------------------------------------- /lib/chatbot-api/functions/api-handler/routes/kendra.py: -------------------------------------------------------------------------------- 1 | import genai_core.parameters 2 | import genai_core.kendra 3 | from pydantic import BaseModel 4 | from aws_lambda_powertools import Logger, Tracer 5 | from aws_lambda_powertools.event_handler.appsync import Router 6 | 7 | tracer = Tracer() 8 | router = Router() 9 | logger = Logger() 10 | 11 | 12 | class KendraDataSynchRequest(BaseModel): 13 | workspaceId: str 14 | 15 | 16 | @router.resolver(field_name="listKendraIndexes") 17 | @tracer.capture_method 18 | def 
kendra_indexes(): 19 | indexes = genai_core.kendra.get_kendra_indexes() 20 | 21 | return indexes 22 | 23 | 24 | @router.resolver(field_name="startKendraDataSync") 25 | @tracer.capture_method 26 | def kendra_data_sync(workspaceId: str): 27 | genai_core.kendra.start_kendra_data_sync(workspace_id=workspaceId) 28 | 29 | return True 30 | 31 | 32 | @router.resolver(field_name="isKendraDataSynching") 33 | @tracer.capture_method 34 | def kendra_is_syncing(workspaceId: str): 35 | result = genai_core.kendra.kendra_is_syncing(workspace_id=workspaceId) 36 | 37 | return result 38 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/opensearch/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | import boto3 3 | import urllib.parse 4 | from opensearchpy import OpenSearch, RequestsHttpConnection 5 | from requests_aws4auth import AWS4Auth 6 | 7 | 8 | OPEN_SEARCH_COLLECTION_ENDPOINT = os.environ.get("OPEN_SEARCH_COLLECTION_ENDPOINT") 9 | 10 | port = 443 11 | timeout = 300 12 | 13 | 14 | def get_open_search_client(): 15 | service = "aoss" 16 | session = boto3.Session() 17 | credentials = session.get_credentials() 18 | host = urllib.parse.urlparse(OPEN_SEARCH_COLLECTION_ENDPOINT).hostname 19 | 20 | awsauth = AWS4Auth( 21 | credentials.access_key, 22 | credentials.secret_key, 23 | session.region_name, 24 | service, 25 | session_token=credentials.token, 26 | ) 27 | 28 | opensearch = OpenSearch( 29 | hosts=[{"host": host, "port": port}], 30 | http_auth=awsauth, 31 | use_ssl=True, 32 | verify_certs=True, 33 | connection_class=RequestsHttpConnection, 34 | timeout=timeout, 35 | ) 36 | 37 | return opensearch 38 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/pages/rag/create-workspace/language-selector-field.tsx: 
-------------------------------------------------------------------------------- 1 | import { 2 | FormField, 3 | Multiselect, 4 | SelectProps, 5 | } from "@cloudscape-design/components"; 6 | import { languageList } from "../../../common/constants"; 7 | 8 | interface LanguageSelectorProps { 9 | selectedLanguages: SelectProps.Options; 10 | onChange: (data: Partial<{ languages: SelectProps.Options }>) => void; 11 | submitting: boolean; 12 | errors: Record; 13 | } 14 | 15 | export function LanguageSelectorField(props: LanguageSelectorProps) { 16 | return ( 17 | 18 | 26 | props.onChange({ languages: selectedOptions }) 27 | } 28 | /> 29 | 30 | ); 31 | } 32 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/components/base-app-layout.tsx: -------------------------------------------------------------------------------- 1 | import { AppLayout, AppLayoutProps } from "@cloudscape-design/components"; 2 | import { useNavigationPanelState } from "../common/hooks/use-navigation-panel-state"; 3 | import NavigationPanel from "./navigation-panel"; 4 | import { ReactElement, useState } from "react"; 5 | 6 | export default function BaseAppLayout( 7 | props: AppLayoutProps & { info?: ReactElement } 8 | ) { 9 | const [navigationPanelState, setNavigationPanelState] = 10 | useNavigationPanelState(); 11 | const [toolsOpen, setToolsOpen] = useState(false); 12 | 13 | return ( 14 | } 17 | navigationOpen={!navigationPanelState.collapsed} 18 | onNavigationChange={({ detail }) => 19 | setNavigationPanelState({ collapsed: !detail.open }) 20 | } 21 | toolsHide={props.info === undefined ? 
true : false} 22 | tools={props.info} 23 | toolsOpen={toolsOpen} 24 | onToolsChange={({ detail }) => setToolsOpen(detail.open)} 25 | {...props} 26 | /> 27 | ); 28 | } 29 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/bedrock/mistral.py: -------------------------------------------------------------------------------- 1 | from .llama2_chat import BedrockMetaLLama2ChatAdapter 2 | from genai_core.registry import registry 3 | import genai_core 4 | from .base import Bedrock 5 | 6 | 7 | class BedrockMistralAdapter(BedrockMetaLLama2ChatAdapter): 8 | def get_llm(self, model_kwargs={}): 9 | bedrock = genai_core.clients.get_bedrock_client() 10 | 11 | params = {} 12 | if "temperature" in model_kwargs: 13 | params["temperature"] = model_kwargs["temperature"] 14 | if "topP" in model_kwargs: 15 | params["top_p"] = model_kwargs["topP"] 16 | if "maxTokens" in model_kwargs: 17 | params["max_tokens"] = model_kwargs["maxTokens"] 18 | 19 | return Bedrock( 20 | client=bedrock, 21 | model_id=self.model_id, 22 | model_kwargs=params, 23 | streaming=model_kwargs.get("streaming", False), 24 | callbacks=[self.callback_handler], 25 | ) 26 | 27 | 28 | # Register the adapter 29 | registry.register( 30 | r"^bedrock.mistral.mi.*", 31 | BedrockMistralAdapter, 32 | ) 33 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/openai/gpt.py: -------------------------------------------------------------------------------- 1 | import os 2 | from langchain.chat_models import ChatOpenAI 3 | from ..base import ModelAdapter 4 | from genai_core.registry import registry 5 | 6 | 7 | class GPTAdapter(ModelAdapter): 8 | def __init__(self, model_id, *args, **kwargs): 9 | self.model_id = model_id 10 | 11 | super().__init__(*args, **kwargs) 12 | 13 | def get_llm(self, model_kwargs={}): 14 | if not 
os.environ.get("OPENAI_API_KEY"): 15 | raise Exception("OPENAI_API_KEY must be set in the environment") 16 | 17 | params = {} 18 | if "streaming" in model_kwargs: 19 | params["streaming"] = model_kwargs["streaming"] 20 | if "temperature" in model_kwargs: 21 | params["temperature"] = model_kwargs["temperature"] 22 | if "maxTokens" in model_kwargs: 23 | params["max_tokens"] = model_kwargs["maxTokens"] 24 | 25 | return ChatOpenAI( 26 | model_name=self.model_id, callbacks=[self.callback_handler], **params 27 | ) 28 | 29 | 30 | # Register the adapter 31 | registry.register(r"^openai*", GPTAdapter) 32 | -------------------------------------------------------------------------------- /lib/sagemaker-model/deploy-custom-script-model.ts: -------------------------------------------------------------------------------- 1 | import { Construct } from "constructs"; 2 | 3 | import { HuggingFaceCustomScriptModel } from "./hf-custom-script-model"; 4 | import { SageMakerModelProps, ModelCustomScriptConfig } from "./types"; 5 | import { createHash } from "crypto"; 6 | 7 | export function deployCustomScriptModel( 8 | scope: Construct, 9 | props: SageMakerModelProps, 10 | modelConfig: ModelCustomScriptConfig 11 | ) { 12 | const { vpc, region } = props; 13 | const { modelId, instanceType, codeFolder, container, env } = modelConfig; 14 | 15 | const endpointName = ( 16 | Array.isArray(modelId) 17 | ? 
@logger.inject_lambda_context(log_event=True)
def lambda_handler(event, context: LambdaContext):
    """Delete the storage backing a workspace, dispatching on its engine.

    :param event: expects ``{"workspace_id": str}``.
    :raises CommonError: if the workspace does not exist or uses an
        unsupported engine.
    """
    workspace_id = event["workspace_id"]
    workspace = genai_core.workspaces.get_workspace(workspace_id)
    if workspace is None:
        raise genai_core.types.CommonError("Workspace not found")

    # Map each supported engine to its deletion routine.
    delete_for_engine = {
        "aurora": genai_core.aurora.delete.delete_aurora_workspace,
        "opensearch": genai_core.opensearch.delete.delete_open_search_workspace,
        "kendra": genai_core.kendra.delete.delete_kendra_workspace,
    }

    delete_fn = delete_for_engine.get(workspace["engine"])
    if delete_fn is None:
        raise genai_core.types.CommonError("Workspace engine not supported")

    delete_fn(workspace)
def generate_presigned_post(workspace_id: str, file_name: str, expiration=3600):
    """Create a presigned S3 POST so a client can upload a file to a workspace.

    :param workspace_id: id of the target workspace (becomes the key prefix).
    :param file_name: client-supplied file name; path components are stripped.
    :param expiration: validity of the presigned POST, in seconds.
    :returns: the presigned POST dict (with ``url`` rewritten to the
        transfer-acceleration endpoint), or ``None`` if S3 returned nothing.
    :raises CommonError: if the workspace does not exist.
    """
    s3_client = boto3.client("s3")

    # Normalize to NFC so visually-identical names map to the same object key.
    file_name = unicodedata.normalize("NFC", file_name)
    workspace = genai_core.workspaces.get_workspace(workspace_id)
    if not workspace:
        # Was an f-string with no placeholders; plain literal is equivalent.
        raise genai_core.types.CommonError("Workspace not found")

    # Strip any directory components so the upload cannot escape the
    # workspace's key prefix.
    file_name = os.path.basename(file_name)
    object_name = f"{workspace_id}/{file_name}"

    conditions = [
        ["content-length-range", 0, MAX_FILE_SIZE],
    ]

    response = s3_client.generate_presigned_post(
        UPLOAD_BUCKET_NAME, object_name, Conditions=conditions, ExpiresIn=expiration
    )

    if not response:
        return None

    # Point clients at the S3 transfer-acceleration endpoint for faster uploads.
    response["url"] = f"https://{UPLOAD_BUCKET_NAME}.s3-accelerate.amazonaws.com"

    return response
@tracer.capture_lambda_handler
@logger.inject_lambda_context(log_event=True)
def handler(event, context: LambdaContext):
    """AppSync resolver: forward an incoming chatbot request to the SNS topic.

    The full event is already logged by ``log_event=True`` on the decorator,
    so the previous bare ``print`` calls were redundant and unstructured;
    structured logging is used instead.

    :param event: AppSync event; ``arguments.data`` is a JSON-encoded request
        and ``identity.sub`` identifies the caller.
    :returns: the raw ``sns.publish`` response.
    """
    request = json.loads(event["arguments"]["data"])
    message = {
        "action": request["action"],
        "modelInterface": request["modelInterface"],
        "direction": "IN",
        # Unix timestamp in whole seconds, serialized as a string.
        "timestamp": str(int(round(datetime.now().timestamp()))),
        "userId": event["identity"]["sub"],
        "data": request.get("data", {}),
    }
    logger.info({"outgoing_message": message})

    response = sns.publish(TopicArn=TOPIC_ARN, Message=json.dumps(message))

    return response
def get_external_api_key(name: str):
    """Look up a named third-party API key from the API-keys secret bundle."""
    secrets = parameters.get_secret(API_KEYS_SECRETS_ARN, transform="json", max_age=60)
    return secrets.get(name)


def get_origin_verify_header_value():
    """Return the shared header value used to verify requests come via CloudFront."""
    secret = parameters.get_secret(
        X_ORIGIN_VERIFY_SECRET_ARN, transform="json", max_age=60
    )
    return secret["headerValue"]


def get_config():
    """Fetch the application configuration from SSM (cached for five minutes)."""
    return parameters.get_parameter(
        CONFIG_PARAMETER_NAME, transform="json", max_age=60 * 5
    )


def get_sagemaker_models():
    """Fetch the SageMaker models parameter from SSM (cached for 30 seconds)."""
    return parameters.get_parameter(MODELS_PARAMETER_NAME, transform="json", max_age=30)
def main():
    """Resume a crawl job from the JSON state object stored in S3."""
    state_object = s3_client.get_object(Bucket=PROCESSING_BUCKET_NAME, Key=OBJECT_KEY)
    data = json.loads(state_object["Body"].read().decode("utf-8"))
    print(data)

    # The state file's keys match crawl_urls' keyword arguments one-to-one.
    crawl_kwargs = {
        key: data[key]
        for key in (
            "workspace",
            "document",
            "priority_queue",
            "processed_urls",
            "follow_links",
            "limit",
        )
    }

    return genai_core.websites.crawler.crawl_urls(**crawl_kwargs)


if __name__ == "__main__":
    main()
/lib/chatbot-api/functions/outgoing-message-appsync/graphql.ts: -------------------------------------------------------------------------------- 1 | import * as crypto from "@aws-crypto/sha256-js"; 2 | import { defaultProvider } from "@aws-sdk/credential-provider-node"; 3 | import { SignatureV4 } from "@aws-sdk/signature-v4"; 4 | import { HttpRequest } from "@aws-sdk/protocol-http"; 5 | 6 | const { Sha256 } = crypto; 7 | const AWS_REGION = process.env.AWS_REGION || "eu-west-1"; 8 | 9 | const endpoint = new URL(process.env.GRAPHQL_ENDPOINT ?? ""); 10 | 11 | export const graphQlQuery = async (query: string) => { 12 | const signer = new SignatureV4({ 13 | credentials: defaultProvider(), 14 | region: AWS_REGION, 15 | service: "appsync", 16 | sha256: Sha256, 17 | }); 18 | 19 | const requestToBeSigned = new HttpRequest({ 20 | method: "POST", 21 | headers: { 22 | "Content-Type": "application/json", 23 | host: endpoint.host, 24 | }, 25 | hostname: endpoint.host, 26 | body: JSON.stringify({ query }), 27 | path: endpoint.pathname, 28 | }); 29 | 30 | const signed = await signer.sign(requestToBeSigned); 31 | const request = new Request(endpoint, signed); 32 | 33 | const response = await fetch(request); 34 | const body = await response.json(); 35 | return body; 36 | }; -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/api-client/embeddings-client.ts: -------------------------------------------------------------------------------- 1 | import { API } from "aws-amplify"; 2 | import { GraphQLQuery, GraphQLResult } from "@aws-amplify/api"; 3 | import { 4 | listEmbeddingModels, 5 | calculateEmbeddings, 6 | } from "../../graphql/queries"; 7 | import { ListEmbeddingModelsQuery, CalculateEmbeddingsQuery } from "../../API"; 8 | 9 | export class EmbeddingsClient { 10 | async getModels(): Promise< 11 | GraphQLResult> 12 | > { 13 | const result = await API.graphql>({ 14 | query: listEmbeddingModels, 15 | }); 16 | return 
@router.resolver(field_name="listRagEngines")
@tracer.capture_method
def engines():
    """List the supported RAG engines and whether each is enabled in config."""
    config = genai_core.parameters.get_config()
    engines = config["rag"]["engines"]

    # Supported engines, in display order.
    supported = [
        ("aurora", "Amazon Aurora"),
        ("opensearch", "Amazon OpenSearch"),
        ("kendra", "Amazon Kendra"),
    ]

    return [
        {
            "id": engine_id,
            "name": engine_name,
            "enabled": engines.get(engine_id, {}).get("enabled", False) == True,
        }
        for engine_id, engine_name in supported
    ]
steps. 6 | 7 | ### Deploy infrastructure to AWS 8 | 9 | Follow instructions on the root folder README to deploy the cdk app. 10 | 11 | You will need the CloudFormation Output values displayed after completion in the following step. 12 | 13 | ### Option 1: Obtain environment configuration 14 | 15 | Grab the `aws-exports.json` from the CloudFront distribution endpoint you obtained from the CDK Output, and save it into `./lib/user-interface/react-app/public/` folder. Then run `npm run dev`. 16 | 17 | For example: 18 | 19 | ```bash 20 | cd lib/user-interface/react-app/public 21 | curl -O https://dxxxxxxxxxxxx.cloudfront.net/aws-exports.json 22 | cd .. 23 | npm run dev 24 | ``` 25 | 26 | ### Option 2: Set configuration as env variable 27 | 28 | ```bash 29 | export AWS_PROJECT_REGION="..." 30 | export AWS_COGNITO_REGION="..." 31 | export AWS_USER_POOLS_ID="..." 32 | export AWS_USER_POOLS_WEB_CLIENT_ID="..." 33 | export API_DISTRIBUTION_DOMAIN_NAME="..." 34 | export RAG_ENABLED=1|0 35 | export DEFAULT_EMBEDDINGS_MODEL="..." 36 | export DEFAULT_CROSS_ENCODER_MODEL="..." 
37 | npm run build:dev 38 | npm run dev 39 | ``` 40 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/kendra/indexes.py: -------------------------------------------------------------------------------- 1 | import os 2 | import genai_core.parameters 3 | 4 | DEFAULT_KENDRA_INDEX_ID = os.environ.get("DEFAULT_KENDRA_INDEX_ID", "") 5 | DEFAULT_KENDRA_INDEX_NAME = os.environ.get("DEFAULT_KENDRA_INDEX_NAME", "") 6 | 7 | 8 | def get_kendra_indexes(): 9 | config = genai_core.parameters.get_config() 10 | kendra_config = config.get("rag", {}).get("engines", {}).get("kendra", {}) 11 | external = kendra_config.get("external", {}) 12 | 13 | ret_value = [] 14 | if DEFAULT_KENDRA_INDEX_ID and DEFAULT_KENDRA_INDEX_NAME: 15 | ret_value.append( 16 | { 17 | "id": DEFAULT_KENDRA_INDEX_ID, 18 | "name": DEFAULT_KENDRA_INDEX_NAME, 19 | "external": False, 20 | } 21 | ) 22 | 23 | for kendraIndex in external: 24 | current_id = kendraIndex.get("kendraId", "") 25 | current_name = kendraIndex.get("name", "") 26 | 27 | if not current_id or not current_name: 28 | continue 29 | 30 | ret_value.append( 31 | { 32 | "id": current_id, 33 | "name": current_name, 34 | "external": True, 35 | } 36 | ) 37 | 38 | return ret_value 39 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/file-uploader.ts: -------------------------------------------------------------------------------- 1 | import { FileUploadResult } from "../API"; 2 | 3 | export class FileUploader { 4 | upload( 5 | file: File, 6 | signature: FileUploadResult, 7 | onProgress: (uploaded: number) => void 8 | ): Promise { 9 | return new Promise((resolve, reject) => { 10 | const formData = new FormData(); 11 | const fields = signature.fields!.replace("{", "").replace("}", ""); 12 | fields.split(",").forEach((f: any) => { 13 | const sepIdx = f.indexOf("="); 14 | const k = f.slice(0, sepIdx); 15 | 
const v = f.slice(sepIdx + 1); 16 | formData.append(k, v); 17 | }); 18 | 19 | formData.append("file", file); 20 | const xhr = new XMLHttpRequest(); 21 | xhr.onreadystatechange = function () { 22 | if (xhr.readyState === XMLHttpRequest.DONE) { 23 | if (xhr.status === 200 || xhr.status === 204) { 24 | resolve(true); 25 | } else { 26 | reject(false); 27 | } 28 | } 29 | }; 30 | 31 | xhr.open("POST", signature.url, true); 32 | xhr.upload.addEventListener("progress", (event) => { 33 | onProgress(event.loaded); 34 | }); 35 | xhr.send(formData); 36 | }); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /lib/sagemaker-model/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./container-images"; 2 | export * from "./types"; 3 | import * as sagemaker from "aws-cdk-lib/aws-sagemaker"; 4 | import { Construct } from "constructs"; 5 | import { deployContainerModel } from "./deploy-container-model"; 6 | import { deployCustomScriptModel } from "./deploy-custom-script-model"; 7 | import { deployPackageModel } from "./deploy-package-model"; 8 | import { DeploymentType, SageMakerModelProps } from "./types"; 9 | 10 | export class SageMakerModel extends Construct { 11 | public readonly endpoint: sagemaker.CfnEndpoint; 12 | public readonly modelId: string | string[]; 13 | 14 | constructor(scope: Construct, id: string, props: SageMakerModelProps) { 15 | super(scope, id); 16 | 17 | const { model } = props; 18 | this.modelId = model.modelId; 19 | 20 | if (model.type == DeploymentType.Container) { 21 | const { endpoint } = deployContainerModel(this, props, model); 22 | this.endpoint = endpoint; 23 | } else if (model.type == DeploymentType.ModelPackage) { 24 | const { endpoint } = deployPackageModel(this, props, model); 25 | this.endpoint = endpoint; 26 | } else if (model.type == DeploymentType.CustomInferenceScript) { 27 | const { endpoint } = deployCustomScriptModel(this, props, 
@router.resolver(field_name="rankPassages")
@tracer.capture_method
def cross_encoders(input: dict):
    """Score passages against a reference text with the requested cross-encoder.

    Returns one ``{"score", "passage"}`` dict per passage, in input order.
    """
    request = CrossEncodersRequest(**input)

    model = genai_core.cross_encoder.get_cross_encoder_model(
        request.provider, request.model
    )
    if model is None:
        raise genai_core.types.CommonError("Model not found")

    scores = genai_core.cross_encoder.rank_passages(
        model, request.reference, request.passages
    )

    results = []
    for score, passage in zip(scores, request.passages):
        results.append({"score": score, "passage": passage})
    return results
def semantic_search(
    workspace_id: str, query: str, limit: int = 5, full_response: bool = False
):
    """Run a semantic search in a workspace, dispatching on its engine.

    :param workspace_id: id of the workspace to search.
    :param query: the search query text.
    :param limit: maximum number of results.
    :param full_response: whether to return the engine's full response.
    :raises CommonError: if the workspace is missing, not ready, or uses an
        engine without search support.
    """
    workspace = genai_core.workspaces.get_workspace(workspace_id)

    if not workspace:
        raise genai_core.types.CommonError("Workspace not found")

    if workspace["status"] != "ready":
        raise genai_core.types.CommonError("Workspace is not ready")

    # Pick the engine-specific query implementation.
    query_fn = {
        "aurora": query_workspace_aurora,
        "opensearch": query_workspace_open_search,
        "kendra": query_workspace_kendra,
    }.get(workspace["engine"])

    if query_fn is None:
        raise genai_core.types.CommonError(
            "Semantic search is not supported for this workspace"
        )

    return query_fn(workspace_id, workspace, query, limit, full_response)
def delete_files_with_prefix(bucket_name, prefix):
    """Delete every object in ``bucket_name`` whose key starts with ``prefix``.

    Uses the boto3 list_objects_v2 paginator instead of manually threading
    ContinuationToken, and deletes each page's objects in a single batch
    (list_objects_v2 returns at most 1000 keys per page, which is also the
    delete_objects batch limit).

    :param bucket_name: name of the S3 bucket.
    :param prefix: key prefix selecting the objects to delete.
    """
    s3_client = boto3.client("s3")
    paginator = s3_client.get_paginator("list_objects_v2")

    for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
        contents = page.get("Contents", [])
        if not contents:
            continue

        delete_list = [{"Key": obj["Key"]} for obj in contents]
        s3_client.delete_objects(Bucket=bucket_name, Delete={"Objects": delete_list})

    print("Finished deleting all objects with the specified prefix.")
@router.resolver(field_name="calculateEmbeddings")
@tracer.capture_method
def embeddings(input: dict):
    """Compute embedding vectors for the supplied passages.

    Returns one ``{"vector", "passage"}`` dict per input passage, in order.

    :raises CommonError: if the requested model is not registered.
    """
    request = EmbeddingsRequest(**input)
    selected_model = genai_core.embeddings.get_embeddings_model(
        request.provider, request.model
    )

    if selected_model is None:
        raise CommonError("Model not found")

    ret_value = genai_core.embeddings.generate_embeddings(
        selected_model, request.passages, request.task
    )

    # Pair each vector with its source passage; zip mirrors the style of the
    # sibling rankPassages resolver for consistency.
    return [
        {"vector": vector, "passage": passage}
        for vector, passage in zip(ret_value, request.passages)
    ]
* {
  box-sizing: border-box;
}

:root {
  // Toggled between "light" and "dark" by the theme switcher.
  --app-color-scheme: light;
  color-scheme: var(--app-color-scheme);
}

html,
body,
#root,
div[data-amplify-authenticator],
div[data-amplify-theme] {
  height: 100%;
}

body {
  background-color: #ffffff;
  // Always reserve the scrollbar to avoid layout shift between pages.
  overflow-y: scroll;
}

body.awsui-dark-mode {
  background-color: #0e1b2a;
}

.matrix-table {
  border: 1px solid #d6d6d6;
  border-radius: 2px;
  border-collapse: collapse;
  font-size: 1.1rem;

  th {
    border: 1px solid #d6d6d6;
  }

  td {
    border: 1px solid #d6d6d6;
    padding: 10px;
  }
}

.awsui-dark-mode {
  .matrix-table {
    border: 1px solid rgb(95, 107, 122);

    th {
      border: 1px solid rgb(95, 107, 122);
    }

    td {
      border: 1px solid rgb(95, 107, 122);
      // NOTE(review): light mode uses 10px padding — confirm the 12px
      // difference in dark mode is intentional.
      padding: 12px;
    }
  }
}

// Syntax-highlight colors for rendered JSON payloads.
.jsonContainer {
  font-family: "Open Sans", sans-serif;
  font-size: 1em;
  background-color: #0e1b2ac3;
}

.jsonStrings {
  color: rgb(74, 234, 167);
}

.jsonNumbers {
  color: rgb(255, 223, 60);
}

.jsonBool {
  color: rgb(252, 178, 250);
  font-weight: 600;
}

.jsonNull {
  color: rgb(74, 205, 234);
  font-weight: 600;
}
SageMakerRagModelsProps { 9 | readonly config: SystemConfig; 10 | readonly shared: Shared; 11 | } 12 | 13 | export class SageMakerRagModels extends Construct { 14 | readonly model: SageMakerModel; 15 | 16 | constructor(scope: Construct, id: string, props: SageMakerRagModelsProps) { 17 | super(scope, id); 18 | 19 | const sageMakerEmbeddingsModelIds = props.config.rag.embeddingsModels 20 | .filter((c) => c.provider === "sagemaker") 21 | .map((c) => c.name); 22 | 23 | const sageMakerCrossEncoderModelIds = props.config.rag.crossEncoderModels 24 | .filter((c) => c.provider === "sagemaker") 25 | .map((c) => c.name); 26 | 27 | const model = new SageMakerModel(this, "Model", { 28 | vpc: props.shared.vpc, 29 | region: cdk.Aws.REGION, 30 | model: { 31 | type: DeploymentType.CustomInferenceScript, 32 | modelId: [ 33 | ...sageMakerEmbeddingsModelIds, 34 | ...sageMakerCrossEncoderModelIds, 35 | ], 36 | codeFolder: path.join(__dirname, "./model"), 37 | instanceType: "ml.g4dn.xlarge", 38 | }, 39 | }); 40 | 41 | this.model = model; 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/api-client/kendra-client.ts: -------------------------------------------------------------------------------- 1 | import { API } from "aws-amplify"; 2 | import { GraphQLQuery, GraphQLResult } from "@aws-amplify/api"; 3 | import { listKendraIndexes, isKendraDataSynching } from "../../graphql/queries"; 4 | import { startKendraDataSync } from "../../graphql/mutations"; 5 | import { 6 | ListKendraIndexesQuery, 7 | IsKendraDataSynchingQuery, 8 | StartKendraDataSyncMutation, 9 | } from "../../API"; 10 | 11 | export class KendraClient { 12 | async getKendraIndexes(): Promise< 13 | GraphQLResult> 14 | > { 15 | const result = await API.graphql>({ 16 | query: listKendraIndexes, 17 | }); 18 | return result; 19 | } 20 | 21 | async startKendraDataSync( 22 | workspaceId: string 23 | ): Promise>> { 24 | const result = await 
import os
import json
import boto3
import psycopg2
import psycopg2.extras
from pgvector.psycopg2 import register_vector

secretsmanager_client = boto3.client("secretsmanager")
AURORA_DB_SECRET_ID = os.environ.get("AURORA_DB_SECRET_ID")


class AuroraConnection(object):
    """Context manager that connects to the Aurora PostgreSQL cluster using
    credentials stored in Secrets Manager and yields a ready-to-use cursor.

    Usage:
        with AuroraConnection() as cursor:
            cursor.execute(...)

    By default the session runs with autocommit enabled.
    """

    def __init__(self, autocommit=True):
        # Resolve connection parameters eagerly so a missing/invalid secret
        # fails fast, before any connection attempt.
        secret_response = secretsmanager_client.get_secret_value(
            SecretId=AURORA_DB_SECRET_ID
        )
        database_secrets = json.loads(secret_response["SecretString"])
        self.autocommit = autocommit

        self.dbhost = database_secrets["host"]
        self.dbport = database_secrets["port"]
        self.dbuser = database_secrets["username"]
        self.dbpass = database_secrets["password"]

    def __enter__(self):
        connection = psycopg2.connect(
            host=self.dbhost,
            user=self.dbuser,
            password=self.dbpass,
            port=self.dbport,
            connect_timeout=10,
        )

        connection.set_session(autocommit=self.autocommit)
        # Enable transparent adaptation of UUID and pgvector values.
        psycopg2.extras.register_uuid()
        register_vector(connection)
        cursor = connection.cursor()
        self.connection = connection
        self.cursor = cursor

        return cursor

    def __exit__(self, *args):
        # Fix: close the connection even if closing the cursor raises, so a
        # failing cursor.close() can never leak a database connection.
        try:
            self.cursor.close()
        finally:
            self.connection.close()
import genai_core.semantic_search
from typing import List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document


class WorkspaceRetriever(BaseRetriever):
    """LangChain retriever backed by genai_core semantic search over a
    single workspace."""

    workspace_id: str

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        # Top-3 matches, condensed response shape.
        response = genai_core.semantic_search.semantic_search(
            self.workspace_id, query, limit=3, full_response=False
        )

        return [self._get_document(hit) for hit in response.get("items", [])]

    def _get_document(self, item):
        """Convert one semantic-search hit into a LangChain Document."""
        content = item["content"]
        complement = item.get("content_complement")

        # Prefer the complementary content when the chunk provides one.
        page_content = complement if complement else content

        metadata_keys = (
            "chunk_id",
            "workspace_id",
            "document_id",
            "document_sub_id",
            "document_type",
            "document_sub_type",
            "path",
            "title",
            "score",
        )
        metadata = {key: item[key] for key in metadata_keys}

        return Document(page_content=page_content, metadata=metadata)
import genai_core.types
import genai_core.auth
import genai_core.user_feedback
from typing import Optional
from pydantic import BaseModel
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.event_handler.appsync import Router

tracer = Tracer()
router = Router()
logger = Logger()


class CreateUserFeedbackRequest(BaseModel):
    # AppSync input payload for the addUserFeedback mutation.
    sessionId: str
    questionId: str
    feedback: str


@router.resolver(field_name="addUserFeedback")
@tracer.capture_method
def user_feedback(input: dict):
    """Store user feedback for a question in a chat session.

    Raises CommonError when the caller's user id cannot be resolved.
    """
    request = CreateUserFeedbackRequest(**input)

    user_id = genai_core.auth.get_user_id(router)
    if user_id is None:
        raise genai_core.types.CommonError("User not found")

    result = genai_core.user_feedback.add_user_feedback(
        request.sessionId, request.questionId, request.feedback, user_id
    )

    return {
        "questionId": result["questionId"],
        "sessionId": result["sessionId"],
        "message": result["message"],
    }


@router.resolver(field_name="downloadFile")
@tracer.capture_method
def download_file(SessionId: str, S3ObjectKey: Optional[str] = None):
    """Return a downloadable S3 object reference for the given session.

    Fix: the original annotation was the string literal "None" (an invalid
    forward reference); the argument is an S3 object key, so it is typed
    Optional[str].

    NOTE(review): unlike addUserFeedback, this resolver performs no user
    authorization check — confirm whether one is required.
    """
    return genai_core.user_feedback.download_file(SessionId, S3ObjectKey)
11 | 12 | Semantic search 13 | 14 | 38 | Add data 39 | 40 | 41 | } 42 | > 43 | Dashboard 44 |
45 | ); 46 | } 47 | -------------------------------------------------------------------------------- /lib/layer/index.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from "aws-cdk-lib"; 2 | import * as lambda from "aws-cdk-lib/aws-lambda"; 3 | import * as s3assets from "aws-cdk-lib/aws-s3-assets"; 4 | import { Construct } from "constructs"; 5 | 6 | interface LayerProps { 7 | runtime: lambda.Runtime; 8 | architecture: lambda.Architecture; 9 | path: string; 10 | autoUpgrade?: boolean; 11 | } 12 | 13 | export class Layer extends Construct { 14 | public layer: lambda.LayerVersion; 15 | 16 | constructor(scope: Construct, id: string, props: LayerProps) { 17 | super(scope, id); 18 | 19 | const { runtime, architecture, path, autoUpgrade } = props; 20 | 21 | const args = ["-t /asset-output/python"]; 22 | if (autoUpgrade) { 23 | args.push("--upgrade"); 24 | } 25 | 26 | const layerAsset = new s3assets.Asset(this, "LayerAsset", { 27 | path, 28 | bundling: { 29 | image: runtime.bundlingImage, 30 | platform: architecture.dockerPlatform, 31 | command: [ 32 | "bash", 33 | "-c", 34 | `pip install -r requirements.txt ${args.join(" ")}`, 35 | ], 36 | outputType: cdk.BundlingOutput.AUTO_DISCOVER, 37 | securityOpt: "no-new-privileges:true", 38 | network: "host", 39 | }, 40 | }); 41 | 42 | const layer = new lambda.LayerVersion(this, "Layer", { 43 | code: lambda.Code.fromBucket(layerAsset.bucket, layerAsset.s3ObjectKey), 44 | compatibleRuntimes: [runtime], 45 | compatibleArchitectures: [architecture], 46 | removalPolicy: cdk.RemovalPolicy.DESTROY, 47 | }); 48 | 49 | this.layer = layer; 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /lib/rag-engines/workspaces/index.ts: -------------------------------------------------------------------------------- 1 | import * as sfn from "aws-cdk-lib/aws-stepfunctions"; 2 | import { Construct } from "constructs"; 3 | import { Shared } from 
"../../shared"; 4 | import { SystemConfig } from "../../shared/types"; 5 | import { AuroraPgVector } from "../aurora-pgvector"; 6 | import { DataImport } from "../data-import"; 7 | import { KendraRetrieval } from "../kendra-retrieval"; 8 | import { OpenSearchVector } from "../opensearch-vector"; 9 | import { RagDynamoDBTables } from "../rag-dynamodb-tables"; 10 | import { DeleteWorkspace } from "./delete-workspace"; 11 | 12 | export interface WorkkspacesProps { 13 | readonly config: SystemConfig; 14 | readonly shared: Shared; 15 | readonly dataImport: DataImport; 16 | readonly ragDynamoDBTables: RagDynamoDBTables; 17 | readonly auroraPgVector?: AuroraPgVector; 18 | readonly openSearchVector?: OpenSearchVector; 19 | readonly kendraRetrieval?: KendraRetrieval; 20 | } 21 | 22 | export class Workspaces extends Construct { 23 | public readonly deleteWorkspaceWorkflow?: sfn.StateMachine; 24 | 25 | constructor(scope: Construct, id: string, props: WorkkspacesProps) { 26 | super(scope, id); 27 | 28 | const workflow = new DeleteWorkspace(this, "DeleteWorkspace", { 29 | config: props.config, 30 | shared: props.shared, 31 | dataImport: props.dataImport, 32 | ragDynamoDBTables: props.ragDynamoDBTables, 33 | auroraPgVector: props.auroraPgVector, 34 | openSearchVector: props.openSearchVector, 35 | kendraRetrieval: props.kendraRetrieval, 36 | }); 37 | 38 | this.deleteWorkspaceWorkflow = workflow.stateMachine; 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/pages/rag/create-workspace/chunks-selector.tsx: -------------------------------------------------------------------------------- 1 | import { ColumnLayout, FormField, Input } from "@cloudscape-design/components"; 2 | 3 | interface ChunkSelectorProps { 4 | errors: Record; 5 | data: { chunkSize: number; chunkOverlap: number }; 6 | submitting: boolean; 7 | onChange: ( 8 | data: Partial<{ chunkSize: number; chunkOverlap: number }> 9 | ) => void; 10 
import os
from langchain.chat_models import AzureChatOpenAI
from langchain.prompts.prompt import PromptTemplate

from ..base import ModelAdapter
from genai_core.registry import registry


class AzureGptAdapter(ModelAdapter):
    """Adapter for Azure-hosted OpenAI chat models.

    Connection settings are read from per-model environment variables of
    the form AZURE_OPENAI_API_<SETTING>__<model_id>.
    """

    def __init__(self, model_id, *args, **kwargs):
        self.model_id = model_id

        super().__init__(*args, **kwargs)

    def _env(self, name):
        # Per-model environment variable lookup.
        return os.environ.get(f"{name}__{self.model_id}")

    def get_llm(self, model_kwargs=None):
        """Build an AzureChatOpenAI client honoring the supplied kwargs.

        Raises Exception when the per-model API key is not configured.
        """
        # Fix: avoid a mutable default argument; None means "no overrides".
        model_kwargs = model_kwargs or {}

        if not self._env("AZURE_OPENAI_API_KEY"):
            raise Exception("AZURE_OPENAI_API_KEY must be set in the environment")

        params = {}
        if "streaming" in model_kwargs:
            params["streaming"] = model_kwargs["streaming"]
        if "temperature" in model_kwargs:
            params["temperature"] = model_kwargs["temperature"]
        if "maxTokens" in model_kwargs:
            params["max_tokens"] = model_kwargs["maxTokens"]

        return AzureChatOpenAI(
            openai_api_base=self._env("AZURE_OPENAI_API_BASE"),
            deployment_name=self._env("AZURE_OPENAI_API_DEPLOYMENT_NAME"),
            openai_api_key=self._env("AZURE_OPENAI_API_KEY"),
            openai_api_type=self._env("AZURE_OPENAI_API_TYPE"),
            openai_api_version=self._env("AZURE_OPENAI_API_VERSION"),
            callbacks=[self.callback_handler],
            **params,
        )


# Register the adapter. Fix: the dot is escaped so the pattern matches the
# literal provider prefix "azure.openai" — the original r"^azure.openai*"
# let "." match any character and made the trailing "i" optional.
registry.register(r"^azure\.openai", AzureGptAdapter)
app.resolve(event, context) 43 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/hooks/use-form.ts: -------------------------------------------------------------------------------- 1 | import { useCallback, useState } from "react"; 2 | import { Utils } from "../utils"; 3 | 4 | export interface UseFormProps { 5 | initialValue: T | (() => T); 6 | validate: (form: T) => Record | null; 7 | } 8 | 9 | export function useForm(props: UseFormProps) { 10 | const [initialProps] = useState(props); 11 | const [data, setData] = useState(() => { 12 | const value = Utils.isFunction(props.initialValue) 13 | ? props.initialValue() 14 | : props.initialValue; 15 | 16 | return { 17 | dirty: false, 18 | value, 19 | }; 20 | }); 21 | const [errors, setErrors] = useState>({}); 22 | 23 | const validate = useCallback(() => { 24 | const errors = initialProps.validate(data.value); 25 | setErrors(errors ?? {}); 26 | setData((current) => ({ ...current, dirty: true })); 27 | 28 | return errors === null || Object.keys(errors).length === 0; 29 | }, [data, initialProps]); 30 | 31 | const onChange = useCallback( 32 | (partial: Partial, resetDirty = false) => { 33 | setData((current) => { 34 | let dirty = current.dirty; 35 | if (resetDirty) { 36 | dirty = false; 37 | } 38 | 39 | const updatedData = { 40 | ...current, 41 | dirty, 42 | value: { ...current.value, ...partial }, 43 | }; 44 | 45 | if (dirty) { 46 | const errors = initialProps.validate(updatedData.value); 47 | setErrors(errors ?? 
import boto3
import openai
import genai_core.types
import genai_core.parameters
from botocore.config import Config


sts_client = boto3.client("sts")


def get_openai_client():
    """Configure and return the openai module, or None when no external
    OPENAI_API_KEY is stored."""
    api_key = genai_core.parameters.get_external_api_key("OPENAI_API_KEY")
    if not api_key:
        return None

    openai.api_key = api_key

    return openai


def get_sagemaker_client():
    """Return a sagemaker-runtime client with adaptive retries (15 attempts)."""
    retry_config = Config(retries={"max_attempts": 15, "mode": "adaptive"})
    return boto3.client("sagemaker-runtime", config=retry_config)


def get_bedrock_client(service_name="bedrock-runtime"):
    """Return a Bedrock client per the system config, or None when Bedrock
    is disabled.

    Honors the configured region and, when a roleArn is present, calls STS
    AssumeRole and passes the temporary credentials to the client.
    """
    config = genai_core.parameters.get_config()
    bedrock_config = config.get("bedrock", {})
    if not bedrock_config.get("enabled", False):
        return None

    client_kwargs = {"service_name": service_name}

    region_name = bedrock_config.get("region")
    if region_name:
        client_kwargs["region_name"] = region_name

    role_arn = bedrock_config.get("roleArn")
    if role_arn:
        assumed_role = sts_client.assume_role(
            RoleArn=role_arn,
            RoleSessionName="AssumedRoleSession",
        )
        credentials = assumed_role["Credentials"]
        client_kwargs.update(
            aws_access_key_id=credentials["AccessKeyId"],
            aws_secret_access_key=credentials["SecretAccessKey"],
            aws_session_token=credentials["SessionToken"],
        )

    return boto3.client(**client_kwargs)
process.env.DEFAULT_EMBEDDINGS_MODEL, 36 | default_cross_encoder_model: 37 | process.env.DEFAULT_CROSS_ENCODER_MODEL, 38 | }, 39 | }, 40 | null, 41 | 2 42 | ), 43 | "utf-8" 44 | ); 45 | }, 46 | }, 47 | react(), 48 | ], 49 | server: { 50 | port: 3000, 51 | }, 52 | }); 53 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/components/rag/workspace-delete-modal.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | Modal, 3 | Box, 4 | SpaceBetween, 5 | Button, 6 | Alert, 7 | } from "@cloudscape-design/components"; 8 | import { Workspace } from "../../API"; 9 | 10 | export interface WorkspaceDeleteModalProps { 11 | visible: boolean; 12 | workspace?: Workspace; 13 | onDelete: () => void; 14 | onDiscard: () => void; 15 | } 16 | 17 | export default function WorkspaceDeleteModal(props: WorkspaceDeleteModalProps) { 18 | return ( 19 | 26 | 27 | 30 | 37 | 38 | 39 | } 40 | > 41 | {props.workspace && ( 42 | 43 | 44 | Permanently delete workspace{" "} 45 | 46 | {props.workspace.name} 47 | 48 | ? You can't undo this action. 49 | 50 | Worksapce Id: {props.workspace.id} 51 | 52 | Proceeding with this action will delete the workspace with all its 53 | content. 
54 | 55 | 56 | )} 57 | 58 | ); 59 | } 60 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/pages/rag/dashboard/general-config.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | Container, 3 | ColumnLayout, 4 | Box, 5 | Header, 6 | } from "@cloudscape-design/components"; 7 | import { Utils } from "../../../common/utils"; 8 | 9 | export interface WorkspacesStatistics { 10 | count: number; 11 | documents: number; 12 | vectors: number; 13 | sizeInBytes: number; 14 | } 15 | 16 | export interface GeneralConfigProps { 17 | statistics: WorkspacesStatistics | null; 18 | } 19 | 20 | export default function GeneralConfig(props: GeneralConfigProps) { 21 | return ( 22 | Statistics}> 23 | 24 |
25 | Workspaces 26 |
27 | {!props.statistics ? "-" : props.statistics.count} 28 |
29 |
30 |
31 | Documents 32 |
33 | {!props.statistics ? "-" : props.statistics.documents} 34 |
35 |
36 |
37 | Vectors 38 |
39 | {" "} 40 | {!props.statistics ? "-" : props.statistics.vectors} 41 |
42 |
43 |
44 | Size 45 |
46 | {!props.statistics 47 | ? "-" 48 | : Utils.bytesToSize(props.statistics.sizeInBytes)} 49 |
50 |
51 |
52 |
from enum import Enum
from typing import Optional

from pydantic import BaseModel


class CommonError(Exception):
    """Application-level error surfaced to API callers."""

    pass


class EmbeddingsModel(BaseModel):
    # Embeddings model descriptor from the system configuration.
    provider: str
    name: str
    default: Optional[bool] = None
    dimensions: int


class CrossEncoderModel(BaseModel):
    # Cross-encoder model descriptor from the system configuration.
    provider: str
    name: str
    default: Optional[bool] = None


class Workspace(BaseModel):
    # Minimal workspace record: identity plus its backing RAG engine.
    id: str
    name: str
    engine: str


class Provider(Enum):
    BEDROCK = "bedrock"
    OPENAI = "openai"
    AZURE_OPENAI = "azure.openai"
    SAGEMAKER = "sagemaker"
    AMAZON = "amazon"
    COHERE = "cohere"


class Modality(Enum):
    TEXT = "TEXT"
    IMAGE = "IMAGE"
    EMBEDDING = "EMBEDDING"


class InferenceType(Enum):
    ON_DEMAND = "ON_DEMAND"
    PROVISIONED = "PROVISIONED"


class ModelStatus(Enum):
    ACTIVE = "ACTIVE"
    LEGACY = "LEGACY"


class ModelInterface(Enum):
    # NOTE(review): "LANGCHIAN" is a long-standing member-name typo kept for
    # backward compatibility with existing references; the value is correct.
    LANGCHIAN = "langchain"
    IDEFICS = "idefics"


class Direction(Enum):
    IN = "IN"
    OUT = "OUT"


class ChatbotMode(Enum):
    CHAIN = "chain"


class ChatbotAction(Enum):
    HEARTBEAT = "heartbeat"
    RUN = "run"
    LLM_NEW_TOKEN = "llm_new_token"
    FINAL_RESPONSE = "final_response"
    STARTED = "started"
    QUESTIONS = "questions"
    ANSWER = "answer"


class ChatbotMessageType(Enum):
    Human = "human"
    AI = "ai"


class Task(Enum):
    STORE = "store"
    RETRIEVE = "retrieve"
    SEARCH_QUERY = "search_query"
    SEARCH_DOCUMENT = "search_document"
import json
import boto3
import psycopg2
import cfnresponse
from aws_lambda_powertools import Logger
from aws_lambda_powertools.utilities.typing import LambdaContext
from pgvector.psycopg2 import register_vector

logger = Logger()
secretsmanager_client = boto3.client("secretsmanager")


@logger.inject_lambda_context(log_event=True)
def lambda_handler(event, context: LambdaContext):
    """CloudFormation custom-resource handler that installs the pgvector
    extension on Create/Update (no-op on Delete).

    Fix: the original never sent a cfnresponse on failure, so any exception
    left the stack waiting until the custom-resource timeout. Errors now
    report FAILED, and the DB connection is closed even on error.
    """
    try:
        request_type = event["RequestType"]
        resource_properties = event["ResourceProperties"]
        secret_id = resource_properties["AURORA_DB_SECRET_ID"]

        secret_response = secretsmanager_client.get_secret_value(
            SecretId=secret_id
        )
        database_secrets = json.loads(secret_response["SecretString"])

        if request_type == "Create" or request_type == "Update":
            dbconn = psycopg2.connect(
                host=database_secrets["host"],
                user=database_secrets["username"],
                password=database_secrets["password"],
                port=database_secrets["port"],
                connect_timeout=10,
            )
            try:
                dbconn.set_session(autocommit=True)
                cur = dbconn.cursor()

                cur.execute("CREATE EXTENSION IF NOT EXISTS vector;")
                register_vector(dbconn)

                # Log the registered type as a sanity check.
                cur.execute(
                    "SELECT typname FROM pg_type WHERE typname = 'vector';"
                )
                for row in cur.fetchall():
                    logger.info(f"pg_type.typname: {row}")

                cur.close()
            finally:
                dbconn.close()

            logger.info("Created vector extension")

        cfnresponse.send(event, context, cfnresponse.SUCCESS, {"ok": True})
        return {"ok": True}
    except Exception as error:
        logger.exception(error)
        cfnresponse.send(event, context, cfnresponse.FAILED, {"ok": False})
        raise
/lib/user-interface/react-app/src/pages/chatbot/playground/multi-chat-playground.tsx: -------------------------------------------------------------------------------- 1 | import BaseAppLayout from "../../../components/base-app-layout"; 2 | import MultiChat from "../../../components/chatbot/multi-chat"; 3 | import { Header, HelpPanel } from "@cloudscape-design/components"; 4 | import { Link } from "react-router-dom"; 5 | 6 | export default function MultiChatPlayground() { 7 | return ( 8 | Using the chat}> 11 |

12 | The multi-chat playground allows user to interact with up to 4 LLM 13 | and RAG workspaces combinations. 14 |

15 |

Settings

16 |

17 | You can configure additional settings for the LLM via the setting 18 | action at the bottom-right. You can change the Temperature and Top P 19 | values to be used for the answer generation. You can also enable and 20 | disable streaming mode for those models that support it (the setting 21 | is ignored if the model does not support streaming). Turning on 22 | Metadata displays additional information about the answer, such as 23 | the prompts being used to interact with the LLM and the document 24 | passages that might have been retrieved from the RAG storage. 25 |

26 |

Session history

27 |

28 | All individual conversations are saved and can be later accessed via 29 | the Session in the navigation 30 | bar. For example, if you have 3 chats, there will be 3 sessions 31 | saved in the history. 32 |

import requests
import defusedxml.ElementTree as ET
import gzip
import os

# Sitemaps 0.9 XML namespace used by both index and urlset documents.
SITEMAP_NS = "{http://www.sitemaps.org/schemas/sitemap/0.9}"


def decompress_gzip_data(response):
    """Decompress a gzip-compressed sitemap HTTP response and return the
    raw XML bytes."""
    filename = f"/tmp/{hash(response.url)}.gzip"
    with open(filename, "wb") as file:
        file.write(response.content)
    with gzip.open(filename, "rb") as f:
        sitemap_xml = f.read()
    os.remove(filename)
    return sitemap_xml


def extract_urls_from_sitemap(sitemap_url: str):
    """Return every page URL reachable from a sitemap (or sitemap index).

    Sitemap index files are followed recursively. Always returns a list;
    fetch/parse errors are logged and yield the URLs collected so far.
    """
    urls = []
    try:
        # Fix: a timeout prevents the crawler from hanging on a stuck server.
        response = requests.get(sitemap_url, timeout=30)
        if response.status_code != 200:
            print(f"Error while fetching sitemap data: {sitemap_url}")
            return []

        # Handle sitemaps served with gzip compression.
        if sitemap_url.lower().endswith("gz"):
            sitemap = decompress_gzip_data(response)
        else:
            sitemap = response.content

        root = ET.fromstring(sitemap)
        root_tag = root.tag.lower()

        if "sitemapindex" in root_tag:
            # Sitemap index: fetch each child sitemap recursively.
            for elem in root.findall(f"{SITEMAP_NS}sitemap/{SITEMAP_NS}loc"):
                urls.extend(extract_urls_from_sitemap(elem.text))
        elif "urlset" in root_tag:
            for elem in root.findall(f"{SITEMAP_NS}url/{SITEMAP_NS}loc"):
                urls.append(elem.text)
        else:
            print(f"No valid root tag found for sitemap: {sitemap_url}")
    except Exception as e:
        print(f"Error while processing sitemaps for {sitemap_url}", e)
    # Fix: the original try/except/ELSE returned None on any exception,
    # breaking callers that iterate the result. Always return a list.
    return urls
/lib/user-interface/react-app/src/pages/rag/workspaces/workspaces.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | BreadcrumbGroup, 3 | Header, 4 | HelpPanel, 5 | } from "@cloudscape-design/components"; 6 | import useOnFollow from "../../../common/hooks/use-on-follow"; 7 | import WorkspacesTable from "./workspaces-table"; 8 | import BaseAppLayout from "../../../components/base-app-layout"; 9 | import { CHATBOT_NAME } from "../../../common/constants"; 10 | import { Link } from "react-router-dom"; 11 | 12 | export default function Workspaces() { 13 | const onFollow = useOnFollow(); 14 | 15 | return ( 16 | 36 | } 37 | content={} 38 | info={ 39 | RAG Workspaces}> 40 |

41 | RAG workspaces are built on top of a{" "} 42 | RAG Engine. 43 |

44 |

45 | {" "} 46 | RAG engines can be modified at deployment time by running{" "} 47 | 55 | npm run config 56 | 57 | . 58 |

59 |
import boto3
from typing import Optional, List

# Shared Comprehend client, created once at import time.
comprehend = boto3.client("comprehend")

# AWS Comprehend language code -> PostgreSQL full-text-search configuration name.
aws_to_pg = {
    # Afrikaans closely related to Dutch. Might not be accurate. Better than nothing.
    "af": "dutch",
    "ar": "arabic",
    "bn": "hindi",
    "cs": "czech",
    "da": "danish",
    "de": "german",
    "el": "greek",
    "en": "english",
    "es": "spanish",
    "fa": "persian",
    "fi": "finnish",
    "fr": "french",
    "he": "hebrew",
    "hi": "hindi",
    "hu": "hungarian",
    "id": "indonesian",
    "it": "italian",
    "nl": "dutch",
    "no": "norwegian",
    "pl": "polish",
    "pt": "portuguese",
    "ro": "romanian",
    "ru": "russian",
    "sv": "swedish",
    "tr": "turkish",
    "vi": "vietnamese",
    "zh": "chinese",
    "zh-TW": "chinese",
}


def comprehend_language_code_to_postgres(language_code: str) -> Optional[str]:
    """Translate a Comprehend language code to a Postgres FTS language name.

    Returns None when the code has no known mapping.
    """
    return aws_to_pg.get(language_code)


def get_query_language(query: str, languages: List[str]):
    """Detect the dominant language of `query` via Amazon Comprehend.

    Returns ``[language_name, detected_languages]`` where ``language_name``
    is the Postgres name of the top detection when it maps to an entry of
    `languages`, and "english" otherwise; ``detected_languages`` lists every
    detection as ``{"code": ..., "score": ...}``.
    """
    detection = comprehend.detect_dominant_language(Text=query)
    candidates = detection["Languages"]
    detected_languages = [
        {"code": candidate["LanguageCode"], "score": candidate["Score"]}
        for candidate in candidates
    ]

    language_name = "english"
    if candidates:
        mapped = comprehend_language_code_to_postgres(candidates[0]["LanguageCode"])
        if mapped is not None and mapped in languages:
            language_name = mapped

    return [language_name, detected_languages]
import genai_core.clients
from langchain.llms import Bedrock
from langchain.prompts.prompt import PromptTemplate

from ..base import ModelAdapter
from genai_core.registry import registry


class AI21J2Adapter(ModelAdapter):
    """Model adapter for Bedrock-hosted AI21 Jurassic-2 models."""

    def __init__(self, model_id, *args, **kwargs):
        self.model_id = model_id

        super().__init__(*args, **kwargs)

    def get_llm(self, model_kwargs={}):
        """Build the Bedrock LLM client, forwarding only the kwargs the caller set."""
        bedrock = genai_core.clients.get_bedrock_client()

        params = {}
        if "temperature" in model_kwargs:
            params["temperature"] = model_kwargs["temperature"]
        if "topP" in model_kwargs:
            params["topP"] = model_kwargs["topP"]
        if "maxTokens" in model_kwargs:
            params["maxTokens"] = model_kwargs["maxTokens"]

        # NOTE(review): unlike the Cohere/Titan adapters this one does not pass
        # streaming=... — presumably J2 does not support streaming; confirm.
        return Bedrock(
            client=bedrock,
            model_id=self.model_id,
            model_kwargs=params,
            callbacks=[self.callback_handler],
        )

    def get_prompt(self):
        """Return the conversation prompt template using {chat_history} and {input}."""
        template = """Human: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

Current conversation:
{chat_history}

Question: {input}

Assistant:"""

        input_variables = ["input", "chat_history"]
        prompt_template_args = {
            "chat_history": "{chat_history}",
            "input_variables": input_variables,
            "template": template,
        }
        prompt_template = PromptTemplate(**prompt_template_args)

        return prompt_template


# Register the adapter.
# Bug fix: the previous pattern r"^bedrock.ai21.j2*" left '.' unescaped and
# used a bare '*' (zero-or-more '2'), so it matched "bedrock.ai21.j" prefixes
# too — unlike the properly escaped Cohere adapter pattern in this codebase.
registry.register(r"^bedrock\.ai21\.j2.*", AI21J2Adapter)

17 | This chat playground allows user to interact with a chosen LLM and 18 | optional RAG retriever. You can create new RAG workspaces via the{" "} 19 | Workspaces console. 20 |

21 |

Settings

22 |

23 | You can configure additional settings for the LLM via the setting 24 | action at the bottom-right. You can change the Temperature and Top P 25 | values to be used for the answer generation. You can also enable and 26 | disable streaming mode for those models that support it (the setting 27 | is ignored if the model does not support streaming). Turning on 28 | Metadata displays additional information about the answer, such as 29 | the prompts being used to interact with the LLM and the document 30 | passages that might have been retrieved from the RAG storage. 31 |

32 |

Session history

33 |

34 | All conversations are saved and can be later accessed via the{" "} 35 | Session in the navigation 36 | bar. 37 |

38 | 39 | } 40 | toolsWidth={300} 41 | content={} 42 | /> 43 | ); 44 | } 45 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/pages/chatbot/playground/playground.tsx: -------------------------------------------------------------------------------- 1 | import BaseAppLayout from "../../../components/base-app-layout"; 2 | import Chat from "../../../components/chatbot/chat"; 3 | 4 | import { Link, useParams } from "react-router-dom"; 5 | import { Header, HelpPanel } from "@cloudscape-design/components"; 6 | 7 | export default function Playground() { 8 | const { sessionId } = useParams(); 9 | 10 | return ( 11 | Using the RFP Assistant} 15 | > 16 |

17 | This chat playground allows user to interact with a chosen LLM and 18 | upload the RFP questions as excel to generate answers. You can 19 | create new RAG workspaces via the{" "} 20 | Workspaces console. 21 |

22 |

Settings

23 |

24 | You can configure additional settings for the LLM via the setting 25 | action at the bottom-right. You can change the Temperature and Top P 26 | values to be used for the answer generation. You can also enable and 27 | disable streaming mode for those models that support it (the setting 28 | is ignored if the model does not support streaming). Turning on 29 | Metadata displays additional information about the answer, such as 30 | the prompts being used to interact with the LLM and the document 31 | passages that might have been retrieved from the RAG storage. 32 |

33 |

Session history

34 |

35 | All conversations are saved and can be later accessed via the{" "} 36 | Session in the navigation bar. 37 |

38 | 39 | } 40 | toolsWidth={300} 41 | content={} 42 | /> 43 | ); 44 | } 45 | -------------------------------------------------------------------------------- /lib/model-interfaces/langchain/functions/request-handler/adapters/bedrock/cohere.py: -------------------------------------------------------------------------------- 1 | import genai_core.clients 2 | 3 | from langchain.llms import Bedrock 4 | from langchain.prompts.prompt import PromptTemplate 5 | 6 | from ..base import ModelAdapter 7 | from genai_core.registry import registry 8 | 9 | 10 | class BedrockCohereCommandAdapter(ModelAdapter): 11 | def __init__(self, model_id, *args, **kwargs): 12 | self.model_id = model_id 13 | 14 | super().__init__(*args, **kwargs) 15 | 16 | def get_llm(self, model_kwargs={}): 17 | bedrock = genai_core.clients.get_bedrock_client() 18 | 19 | params = {} 20 | if "temperature" in model_kwargs: 21 | params["temperature"] = model_kwargs["temperature"] 22 | if "maxTokens" in model_kwargs: 23 | params["max_tokens"] = model_kwargs["maxTokens"] 24 | params["return_likelihoods"] = "GENERATION" 25 | 26 | return Bedrock( 27 | client=bedrock, 28 | model_id=self.model_id, 29 | model_kwargs=params, 30 | streaming=model_kwargs.get("streaming", False), 31 | callbacks=[self.callback_handler], 32 | ) 33 | 34 | def get_prompt(self): 35 | template = """ 36 | 37 | Human: The following is a friendly conversation between a human and an AI. If the AI does not know the answer to a question, it truthfully says it does not know. 
38 | 39 | Current conversation: 40 | {chat_history} 41 | 42 | Question: {input} 43 | 44 | Assistant:""" 45 | 46 | input_variables = ["input", "chat_history"] 47 | prompt_template_args = { 48 | "chat_history": "{chat_history}", 49 | "input_variables": input_variables, 50 | "template": template, 51 | } 52 | prompt_template = PromptTemplate(**prompt_template_args) 53 | 54 | return prompt_template 55 | 56 | 57 | # Register the adapter 58 | registry.register( 59 | r"^bedrock\.cohere\.command-(text|light-text).*", BedrockCohereCommandAdapter 60 | ) 61 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/api-client/sessions-client.ts: -------------------------------------------------------------------------------- 1 | import { API } from "aws-amplify"; 2 | import { GraphQLQuery, GraphQLResult } from "@aws-amplify/api"; 3 | import { listSessions, getSession } from "../../graphql/queries"; 4 | import { deleteSession, deleteUserSessions } from "../../graphql/mutations"; 5 | import { 6 | ListSessionsQuery, 7 | GetSessionQuery, 8 | DeleteSessionMutation, 9 | DeleteUserSessionsMutation, 10 | } from "../../API"; 11 | 12 | export class SessionsClient { 13 | async getSessions( 14 | sessionType: string 15 | ): Promise>> { 16 | const result = await API.graphql>({ 17 | query: listSessions, 18 | variables: { 19 | sessionType: sessionType, 20 | }, 21 | }); 22 | return result; 23 | } 24 | 25 | async getSession( 26 | sessionId: string, 27 | sessionType: string 28 | ): Promise>> { 29 | const result = await API.graphql>({ 30 | query: getSession, 31 | variables: { 32 | id: sessionId, 33 | sessionType: sessionType, 34 | }, 35 | }); 36 | return result; 37 | } 38 | 39 | async deleteSession( 40 | sessionId: string, 41 | sessionType: string 42 | ): Promise>> { 43 | const result = await API.graphql>({ 44 | query: deleteSession, 45 | variables: { 46 | id: sessionId, 47 | sessionType: sessionType, 48 | }, 49 | }); 50 | return 
result; 51 | } 52 | 53 | async deleteSessions( 54 | sessionType: string 55 | ): Promise>> { 56 | const result = await API.graphql>({ 57 | query: deleteUserSessions, 58 | variables: { 59 | sessionType: sessionType, 60 | }, 61 | }); 62 | return result; 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /lib/chatbot-api/chatbot-dynamodb-tables/index.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from "aws-cdk-lib"; 2 | import { Construct } from "constructs"; 3 | import * as dynamodb from "aws-cdk-lib/aws-dynamodb"; 4 | 5 | export class ChatBotDynamoDBTables extends Construct { 6 | public readonly sessionsTable: dynamodb.Table; 7 | public readonly questionsTable: dynamodb.Table; 8 | public readonly bySessionIdIndex: string = "bySessionId"; 9 | 10 | constructor(scope: Construct, id: string) { 11 | super(scope, id); 12 | 13 | const sessionsTable = new dynamodb.Table(this, "SessionsTable", { 14 | partitionKey: { 15 | name: "UserId", 16 | type: dynamodb.AttributeType.STRING, 17 | }, 18 | sortKey: { 19 | name: "SessionType", 20 | type: dynamodb.AttributeType.STRING, 21 | }, 22 | billingMode: dynamodb.BillingMode.PAY_PER_REQUEST, 23 | encryption: dynamodb.TableEncryption.AWS_MANAGED, 24 | removalPolicy: cdk.RemovalPolicy.DESTROY, 25 | pointInTimeRecovery: true, 26 | }); 27 | 28 | sessionsTable.addGlobalSecondaryIndex({ 29 | indexName: this.bySessionIdIndex, 30 | partitionKey: { name: "SessionId", type: dynamodb.AttributeType.STRING }, 31 | }); 32 | 33 | this.sessionsTable = sessionsTable; 34 | 35 | const questionsTable = new dynamodb.Table(this, "QuestionsTable", { 36 | partitionKey: { 37 | name: "QuestionId", 38 | type: dynamodb.AttributeType.STRING, 39 | }, 40 | sortKey: { 41 | name: "SessionId", 42 | type: dynamodb.AttributeType.STRING, 43 | }, 44 | billingMode: dynamodb.BillingMode.PAY_PER_REQUEST, 45 | encryption: dynamodb.TableEncryption.AWS_MANAGED, 46 | 
import genai_core.clients
from langchain.prompts.prompt import PromptTemplate

from langchain.llms import Bedrock

from ..base import ModelAdapter
from genai_core.registry import registry


class BedrockTitanAdapter(ModelAdapter):
    """Model adapter for Bedrock-hosted Amazon Titan text models."""

    def __init__(self, model_id, *args, **kwargs):
        self.model_id = model_id

        super().__init__(*args, **kwargs)

    def get_llm(self, model_kwargs={}):
        """Build the Bedrock LLM client, forwarding only the kwargs the caller set."""
        bedrock = genai_core.clients.get_bedrock_client()

        params = {}
        if "temperature" in model_kwargs:
            params["temperature"] = model_kwargs["temperature"]
        if "topP" in model_kwargs:
            params["topP"] = model_kwargs["topP"]
        if "maxTokens" in model_kwargs:
            # Titan names its token limit maxTokenCount, not maxTokens.
            params["maxTokenCount"] = model_kwargs["maxTokens"]

        return Bedrock(
            client=bedrock,
            model_id=self.model_id,
            model_kwargs=params,
            streaming=model_kwargs.get("streaming", False),
            callbacks=[self.callback_handler],
        )

    def get_prompt(self):
        """Return the conversation prompt template using {chat_history} and {input}."""
        template = """Human: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

Current conversation:
{chat_history}

Question: {input}

Assistant:"""

        input_variables = ["input", "chat_history"]
        prompt_template_args = {
            "chat_history": "{chat_history}",
            "input_variables": input_variables,
            "template": template,
        }
        prompt_template = PromptTemplate(**prompt_template_args)

        return prompt_template


# Register the adapter.
# Bug fix: the previous pattern r"^bedrock.amazon.titan-t*" used a bare '*'
# (zero-or-more 't'), so it also matched any "bedrock.amazon.titan-..." id,
# including embedding models. Require the 't' and escape the dots, matching
# the titan-text family only.
registry.register(r"^bedrock\.amazon\.titan-t.*", BedrockTitanAdapter)
logger.info(f"Checking build status for Build ID: {build_id}") 40 | 41 | response = codebuild.batch_get_builds(ids=[build_id]) 42 | build = response["builds"][0] 43 | build_status = build["buildStatus"] 44 | 45 | logger.info(f"Build status: {build_status}") 46 | 47 | if build_status == "SUCCEEDED": 48 | return {"PhysicalResourceId": build_id, "IsComplete": True} 49 | 50 | if build_status in ["FAILED", "FAULT", "STOPPED", "TIMED_OUT"]: 51 | error_message = f"Build failed with status: {build_status}" 52 | logger.error(error_message) 53 | raise Exception(error_message) 54 | 55 | return {"PhysicalResourceId": build_id, "IsComplete": False} 56 | -------------------------------------------------------------------------------- /lib/chatbot-api/functions/outgoing-message-appsync/index.ts: -------------------------------------------------------------------------------- 1 | import { 2 | BatchProcessor, 3 | EventType, 4 | processPartialResponse, 5 | } from "@aws-lambda-powertools/batch"; 6 | import { Logger } from "@aws-lambda-powertools/logger"; 7 | import type { 8 | SQSEvent, 9 | SQSRecord, 10 | Context, 11 | SQSBatchResponse, 12 | } from "aws-lambda"; 13 | import { graphQlQuery } from "./graphql"; 14 | 15 | const processor = new BatchProcessor(EventType.SQS); 16 | const logger = new Logger(); 17 | 18 | const recordHandler = async (record: SQSRecord): Promise => { 19 | const payload = record.body; 20 | if (payload) { 21 | const item = JSON.parse(payload); 22 | 23 | const req = JSON.parse(item.Message); 24 | logger.debug("Processed message", req); 25 | /*** 26 | * Payload format 27 | * 28 | payload: str = record.body 29 | message: dict = json.loads(payload) 30 | detail: dict = json.loads(message["Message"]) 31 | logger.info(detail) 32 | user_id = detail["userId"] 33 | */ 34 | 35 | const query = /* GraphQL */ ` 36 | mutation Mutation { 37 | publishResponse (data: ${JSON.stringify(item.Message)}, sessionId: "${ 38 | req.data.sessionId 39 | }", userId: "${req.userId}") { 40 | 
data 41 | sessionId 42 | userId 43 | } 44 | } 45 | `; 46 | //logger.info(query); 47 | await graphQlQuery(query); 48 | //logger.info(resp); 49 | } 50 | }; 51 | 52 | export const handler = async ( 53 | event: SQSEvent, 54 | context: Context 55 | ): Promise => { 56 | logger.debug("Event", { event }); 57 | event.Records = event.Records.sort((a, b) => { 58 | try { 59 | const x: number = JSON.parse(JSON.parse(a.body).Message).data?.token 60 | ?.sequenceNumber; 61 | const y: number = JSON.parse(JSON.parse(b.body).Message).data?.token 62 | ?.sequenceNumber; 63 | return x - y; 64 | } catch { 65 | return 0; 66 | } 67 | }); 68 | return processPartialResponse(event, recordHandler, processor, { 69 | context, 70 | }); 71 | }; -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/kendra/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | import boto3 3 | import genai_core.types 4 | import genai_core.parameters 5 | 6 | DEFAULT_KENDRA_INDEX_ID = os.environ.get("DEFAULT_KENDRA_INDEX_ID", "") 7 | DEFAULT_KENDRA_INDEX_NAME = os.environ.get("DEFAULT_KENDRA_INDEX_NAME", "") 8 | 9 | sts_client = boto3.client("sts") 10 | 11 | 12 | def get_kendra_client_for_index(kendra_index_id: str): 13 | is_default = kendra_index_id == DEFAULT_KENDRA_INDEX_ID 14 | 15 | if is_default: 16 | kendra = boto3.client("kendra") 17 | return kendra 18 | 19 | config = genai_core.parameters.get_config() 20 | kendra_config = config.get("rag", {}).get("engines", {}).get("kendra", {}) 21 | external = kendra_config.get("external", {}) 22 | 23 | for kendraIndex in external: 24 | current_id = kendraIndex.get("kendraId", "") 25 | current_name = kendraIndex.get("name", "") 26 | region_name = kendraIndex.get("region") 27 | role_arn = kendraIndex.get("roleArn") 28 | 29 | if not current_id or not current_name: 30 | continue 31 | 32 | if current_id == kendra_index_id: 33 | kendra_config_data = 
{"service_name": "kendra"} 34 | if region_name: 35 | kendra_config_data["region_name"] = region_name 36 | 37 | if role_arn: 38 | assumed_role_object = sts_client.assume_role( 39 | RoleArn=role_arn, 40 | RoleSessionName="AssumedRoleSession", 41 | ) 42 | 43 | credentials = assumed_role_object["Credentials"] 44 | kendra_config_data["aws_access_key_id"] = credentials["AccessKeyId"] 45 | kendra_config_data["aws_secret_access_key"] = credentials[ 46 | "SecretAccessKey" 47 | ] 48 | kendra_config_data["aws_session_token"] = credentials["SessionToken"] 49 | 50 | kendra = boto3.client(**kendra_config_data) 51 | 52 | return kendra 53 | 54 | raise genai_core.types.CommonError(f"Could not find kendra index {kendra_index_id}") 55 | -------------------------------------------------------------------------------- /lib/chatbot-api/chatbot-s3-buckets/index.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from "aws-cdk-lib"; 2 | import { Construct } from "constructs"; 3 | import * as s3 from "aws-cdk-lib/aws-s3"; 4 | import { NagSuppressions } from "cdk-nag"; 5 | 6 | export class ChatBotS3Buckets extends Construct { 7 | public readonly filesBucket: s3.Bucket; 8 | public readonly userFeedbackBucket: s3.Bucket; 9 | 10 | constructor(scope: Construct, id: string) { 11 | super(scope, id); 12 | 13 | const logsBucket = new s3.Bucket(this, "LogsBucket", { 14 | blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, 15 | removalPolicy: cdk.RemovalPolicy.DESTROY, 16 | autoDeleteObjects: true, 17 | enforceSSL: true, 18 | }); 19 | 20 | const filesBucket = new s3.Bucket(this, "FilesBucket", { 21 | blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, 22 | removalPolicy: cdk.RemovalPolicy.DESTROY, 23 | autoDeleteObjects: true, 24 | transferAcceleration: true, 25 | enforceSSL: true, 26 | serverAccessLogsBucket: logsBucket, 27 | cors: [ 28 | { 29 | allowedHeaders: ["*"], 30 | allowedMethods: [ 31 | s3.HttpMethods.PUT, 32 | s3.HttpMethods.POST, 33 | 
s3.HttpMethods.GET, 34 | s3.HttpMethods.HEAD, 35 | ], 36 | allowedOrigins: ["*"], 37 | exposedHeaders: ["ETag"], 38 | maxAge: 3000, 39 | }, 40 | ], 41 | }); 42 | 43 | const userFeedbackBucket = new s3.Bucket(this, "UserFeedbackBucket", { 44 | blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, 45 | removalPolicy: cdk.RemovalPolicy.DESTROY, 46 | autoDeleteObjects: true, 47 | enforceSSL: true, 48 | serverAccessLogsBucket: logsBucket, 49 | }); 50 | 51 | this.filesBucket = filesBucket; 52 | this.userFeedbackBucket = userFeedbackBucket; 53 | 54 | /** 55 | * CDK NAG suppression 56 | */ 57 | NagSuppressions.addResourceSuppressions(logsBucket, [ 58 | { 59 | id: "AwsSolutions-S1", 60 | reason: "Logging bucket does not require it's own access logs.", 61 | }, 62 | ]); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /cli/aws-cron-validator.ts: -------------------------------------------------------------------------------- 1 | class AWSCronError extends Error {} 2 | 3 | import { 4 | minuteRegex, 5 | hourRegex, 6 | dayOfMonthRegex, 7 | monthRegex, 8 | dayOfWeekRegex, 9 | yearRegex, 10 | } from "./aws-cron-expressions"; 11 | 12 | export class AWSCronValidator { 13 | public static validate(expression: string): string { 14 | if (!expression.trim()) { 15 | throw new AWSCronError( 16 | `No parameters entered, this format is required in UTC: 0 20 ? * SUN-FRI *` 17 | ); 18 | } 19 | const valueCount = expression.split(" ").length; 20 | if (valueCount !== 6) { 21 | throw new AWSCronError( 22 | `Incorrect amount of parameters in '${expression}'. 6 required, ${valueCount} provided.` 23 | ); 24 | } 25 | 26 | const [minute, hour, dayOfMonth, month, dayOfWeek, year] = 27 | expression.split(" "); 28 | 29 | // special handling for Day of Month and Day of Week 30 | if ( 31 | !( 32 | (dayOfMonth === "?" && dayOfWeek !== "?") || 33 | (dayOfMonth !== "?" 
&& dayOfWeek === "?") 34 | ) 35 | ) { 36 | throw new AWSCronError( 37 | `Invalid combination of day-of-month '${dayOfMonth}' and day-of-week '${dayOfWeek}'. One must be a question mark (?)` 38 | ); 39 | } 40 | 41 | if (!new RegExp(minuteRegex()).test(minute)) { 42 | throw new AWSCronError(`Invalid minute value '${minute}'.`); 43 | } 44 | if (!new RegExp(hourRegex()).test(hour)) { 45 | throw new AWSCronError(`Invalid hour value '${hour}'.`); 46 | } 47 | if (!new RegExp(dayOfMonthRegex()).test(dayOfMonth)) { 48 | throw new AWSCronError(`Invalid day-of-month value '${dayOfMonth}'.`); 49 | } 50 | if (!new RegExp(monthRegex(), "i").test(month)) { 51 | throw new AWSCronError(`Invalid month value '${month}'.`); 52 | } 53 | if (!new RegExp(dayOfWeekRegex(), "i").test(dayOfWeek)) { 54 | throw new AWSCronError(`Invalid day-of-week value '${dayOfWeek}'.`); 55 | } 56 | if (!new RegExp(yearRegex()).test(year)) { 57 | throw new AWSCronError(`Invalid year value '${year}'.`); 58 | } 59 | 60 | return expression; 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/opensearch/create.py: -------------------------------------------------------------------------------- 1 | from .client import get_open_search_client 2 | 3 | 4 | def create_workspace_index(workspace: dict): 5 | workspace_id = workspace["workspace_id"] 6 | index_name = workspace_id.replace("-", "") 7 | embeddings_model_dimensions = workspace["embeddings_model_dimensions"] 8 | 9 | client = get_open_search_client() 10 | 11 | ef_search = 512 12 | index_body = { 13 | "settings": { 14 | "index": { 15 | "knn": True, 16 | "knn.algo_param.ef_search": ef_search, 17 | } 18 | }, 19 | "mappings": { 20 | "properties": { 21 | "content_embeddings": { 22 | "type": "knn_vector", 23 | "dimension": int(embeddings_model_dimensions), 24 | "method": { 25 | "name": "hnsw", 26 | "space_type": "l2", 27 | "engine": "nmslib", 28 | "parameters": 
import os
import json
import genai_core.types
import genai_core.clients
import genai_core.parameters
from typing import List, Optional


SAGEMAKER_RAG_MODELS_ENDPOINT = os.environ.get("SAGEMAKER_RAG_MODELS_ENDPOINT")

# Defensive truncation limits applied before invoking the endpoint.
_MAX_TEXT_LEN = 10000
_MAX_PASSAGES = 1000


def rank_passages(
    model: genai_core.types.CrossEncoderModel, input: str, passages: List[str]
):
    """Score `passages` for relevance against `input` with a cross-encoder model.

    Inputs are truncated (10k chars per text, 1000 passages max) before
    dispatching to the provider-specific implementation.

    Raises:
        genai_core.types.CommonError: if the model provider is not supported.
    """
    input = input[:_MAX_TEXT_LEN]
    passages = [passage[:_MAX_TEXT_LEN] for passage in passages[:_MAX_PASSAGES]]

    if model.provider == "sagemaker":
        return _rank_passages_sagemaker(model, input, passages)

    # Bug fix: was `genai_core.typesCommonError` (missing dot), which raised
    # AttributeError at runtime instead of the intended CommonError. Also
    # include the offending provider in the message.
    raise genai_core.types.CommonError(f"Unknown provider {model.provider}")


def get_cross_encoder_models():
    """Return configured cross-encoder models, dropping sagemaker-hosted ones
    when no SageMaker RAG endpoint is deployed."""
    config = genai_core.parameters.get_config()
    models = config["rag"]["crossEncoderModels"]

    if not SAGEMAKER_RAG_MODELS_ENDPOINT:
        models = list(filter(lambda x: x["provider"] != "sagemaker", models))

    return models


def get_cross_encoder_model(
    provider: str, name: str
) -> Optional[genai_core.types.CrossEncoderModel]:
    """Look up a configured cross-encoder model by provider and name.

    Returns None when no matching model is configured.
    """
    config = genai_core.parameters.get_config()
    models = config["rag"]["crossEncoderModels"]

    for model in models:
        if model["provider"] == provider and model["name"] == name:
            return genai_core.types.CrossEncoderModel(**model)

    return None


def _rank_passages_sagemaker(
    model: genai_core.types.CrossEncoderModel, input: str, passages: List[str]
):
    """Invoke the shared SageMaker RAG-models endpoint in cross-encoder mode."""
    client = genai_core.clients.get_sagemaker_client()

    response = client.invoke_endpoint(
        EndpointName=SAGEMAKER_RAG_MODELS_ENDPOINT,
        ContentType="application/json",
        Body=json.dumps(
            {
                "type": "cross-encoder",
                "model": model.name,
                "input": input,
                "passages": passages,
            }
        ),
    )

    ret_value = json.loads(response["Body"].read().decode())

    return ret_value
--ext ts,tsx --report-unused-disable-directives --max-warnings 0", 11 | "preview": "vite preview", 12 | "format": "npx prettier --ignore-path .gitignore --write \"**/*.+(tsx|js|ts|json)\"" 13 | }, 14 | "dependencies": { 15 | "@aws-amplify/ui-react": "^5.3.2", 16 | "@cloudscape-design/components": "^3.0.651", 17 | "@cloudscape-design/design-tokens": "^3.0.36", 18 | "@cloudscape-design/global-styles": "^1.0.27", 19 | "@fortawesome/fontawesome-svg-core": "^6.4.2", 20 | "@fortawesome/free-solid-svg-icons": "^6.4.2", 21 | "@fortawesome/react-fontawesome": "^0.2.0", 22 | "aws-amplify": "^5.3.12", 23 | "luxon": "^3.4.3", 24 | "react": "^18.2.0", 25 | "react-dom": "^18.2.0", 26 | "react-json-view-lite": "^0.9.8", 27 | "react-markdown": "^9.0.0", 28 | "react-router-dom": "^6.15.0", 29 | "react-speech-recognition": "^3.10.0", 30 | "react-textarea-autosize": "^8.5.3", 31 | "react-use-websocket": "^4.5.0", 32 | "regenerator-runtime": "^0.14.0", 33 | "remark-gfm": "^4.0.0", 34 | "uuid": "^9.0.0" 35 | }, 36 | "devDependencies": { 37 | "@types/luxon": "^3.3.2", 38 | "@types/react": "^18.2.15", 39 | "@types/react-dom": "^18.2.7", 40 | "@types/react-speech-recognition": "^3.9.2", 41 | "@types/uuid": "^9.0.3", 42 | "@types/zen-observable": "^0.8.5", 43 | "@typescript-eslint/eslint-plugin": "^6.0.0", 44 | "@typescript-eslint/parser": "^6.0.0", 45 | "@vitejs/plugin-react": "^4.0.3", 46 | "autoprefixer": "^10.4.14", 47 | "cross-env": "^7.0.3", 48 | "esbuild": "0.21.5", 49 | "eslint": "^8.45.0", 50 | "eslint-plugin-react-hooks": "^4.6.0", 51 | "eslint-plugin-react-refresh": "^0.4.3", 52 | "postcss": "^8.4.27", 53 | "sass": "^1.65.1", 54 | "typescript": "^5.0.2", 55 | "vite": "^4.5.3", 56 | "zen-observable": "^0.10.0" 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /bin/config.ts: -------------------------------------------------------------------------------- 1 | import { SupportedRegion, SystemConfig } from "../lib/shared/types"; 2 | 
import { existsSync, readFileSync } from "fs"; 3 | 4 | export function getConfig(): SystemConfig { 5 | if (existsSync("./bin/config.json")) { 6 | return JSON.parse(readFileSync("./bin/config.json").toString("utf8")); 7 | } 8 | // Default config 9 | return { 10 | prefix: "", 11 | /* vpc: { 12 | vpcId: "vpc-00000000000000000", 13 | createVpcEndpoints: true, 14 | },*/ 15 | privateWebsite: false, 16 | companyName: "AnyCompany", 17 | certificate: "", 18 | cfGeoRestrictEnable: false, 19 | cfGeoRestrictList: [], 20 | bedrock: { 21 | enabled: true, 22 | region: SupportedRegion.US_EAST_1, 23 | }, 24 | llms: { 25 | // sagemaker: [SupportedSageMakerModels.FalconLite] 26 | sagemaker: [], 27 | }, 28 | rag: { 29 | enabled: true, 30 | engines: { 31 | aurora: { 32 | enabled: false, 33 | }, 34 | opensearch: { 35 | enabled: true, 36 | }, 37 | kendra: { 38 | enabled: false, 39 | createIndex: false, 40 | enterprise: false, 41 | }, 42 | }, 43 | embeddingsModels: [ 44 | { 45 | provider: "bedrock", 46 | name: "amazon.titan-embed-text-v1", 47 | dimensions: 1536, 48 | }, 49 | //Support for inputImage is not yet implemented for amazon.titan-embed-image-v1 50 | { 51 | provider: "bedrock", 52 | name: "amazon.titan-embed-image-v1", 53 | dimensions: 1024, 54 | }, 55 | { 56 | provider: "bedrock", 57 | name: "cohere.embed-english-v3", 58 | dimensions: 1024, 59 | }, 60 | { 61 | provider: "bedrock", 62 | name: "cohere.embed-multilingual-v3", 63 | dimensions: 1024, 64 | default: true, 65 | }, 66 | ], 67 | crossEncoderModels: [ 68 | { 69 | provider: "sagemaker", 70 | name: "cross-encoder/ms-marco-MiniLM-L-12-v2", 71 | default: true, 72 | }, 73 | ], 74 | }, 75 | }; 76 | } 77 | 78 | export const config: SystemConfig = getConfig(); 79 | -------------------------------------------------------------------------------- /lib/shared/layers/python-sdk/python/genai_core/kendra/data_sync.py: -------------------------------------------------------------------------------- 1 | import os 2 | import 
genai_core.types
import genai_core.workspaces

from .client import get_kendra_client_for_index

DEFAULT_KENDRA_S3_DATA_SOURCE_ID = os.environ.get("DEFAULT_KENDRA_S3_DATA_SOURCE_ID")


def _get_kendra_workspace(workspace_id: str) -> dict:
    """Fetches a workspace and validates that it is Kendra-backed.

    Raises CommonError when the workspace does not exist or does not use
    the Kendra engine.
    """
    workspace = genai_core.workspaces.get_workspace(workspace_id=workspace_id)

    if not workspace:
        raise genai_core.types.CommonError(f"Workspace {workspace_id} not found")

    if workspace["engine"] != "kendra":
        raise genai_core.types.CommonError(
            f"Workspace {workspace_id} is not a kendra workspace"
        )

    return workspace


def start_kendra_data_sync(workspace_id: str):
    """Starts an S3 data-source sync job for the workspace's Kendra index."""
    workspace = _get_kendra_workspace(workspace_id)

    # External indexes are managed outside this solution and cannot be synced here.
    if workspace["kendra_index_external"]:
        raise genai_core.types.CommonError(
            f"Workspace {workspace_id} is an external kendra workspace"
        )

    if not DEFAULT_KENDRA_S3_DATA_SOURCE_ID:
        # Fail fast with a clear message instead of a Kendra API validation error.
        raise genai_core.types.CommonError(
            "DEFAULT_KENDRA_S3_DATA_SOURCE_ID is not configured"
        )

    kendra_index_id = workspace["kendra_index_id"]
    kendra = get_kendra_client_for_index(kendra_index_id)

    response = kendra.start_data_source_sync_job(
        Id=DEFAULT_KENDRA_S3_DATA_SOURCE_ID, IndexId=kendra_index_id
    )

    print(response)


def kendra_is_syncing(workspace_id: str) -> bool:
    """Returns True when a sync job for the default S3 data source is running."""
    workspace = _get_kendra_workspace(workspace_id)

    # External indexes are not synced through this data source.
    if workspace["kendra_index_external"]:
        return False

    kendra_index_id = workspace["kendra_index_id"]
    kendra = get_kendra_client_for_index(kendra_index_id)

    response = kendra.list_data_source_sync_jobs(
        IndexId=kendra_index_id, Id=DEFAULT_KENDRA_S3_DATA_SOURCE_ID, MaxResults=5
    )

    # A job counts as running while it is syncing or indexing.
    return any(
        item["Status"] in ("SYNCING", "SYNCING_INDEXING")
        for item in response["History"]
    )
-------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/helpers/embeddings-model-helper.ts: -------------------------------------------------------------------------------- 1 | import { SelectProps } from "@cloudscape-design/components"; 2 | import { EmbeddingModel } from "../../API"; 3 | 4 | export abstract class EmbeddingsModelHelper { 5 | static getSelectOption(model?: string): SelectProps.Option | null { 6 | if (!model) return null; 7 | const [, dimensions, name] = model.split("::") ?? []; 8 | if (!name) return null; 9 | 10 | return { 11 | label: `${name} (${dimensions})`, 12 | value: model, 13 | }; 14 | } 15 | 16 | static parseValue(value?: string) { 17 | const retValue = { 18 | provider: "", 19 | dimensions: 0, 20 | name: "", 21 | }; 22 | 23 | if (!value) return retValue; 24 | const [provider, dimensionsStr, name] = value.split("::") ?? []; 25 | let dimensions = parseInt(dimensionsStr); 26 | if (isNaN(dimensions)) dimensions = 0; 27 | 28 | return { 29 | provider, 30 | dimensions, 31 | name, 32 | }; 33 | } 34 | 35 | static getSelectOptions(embeddingsModels: EmbeddingModel[]) { 36 | const modelsMap = new Map(); 37 | embeddingsModels.forEach((model) => { 38 | let items = modelsMap.get(model.provider); 39 | if (!items) { 40 | items = []; 41 | modelsMap.set(model.provider, [model]); 42 | } else { 43 | modelsMap.set(model.provider, [...items, model]); 44 | } 45 | }); 46 | 47 | const keys = [...modelsMap.keys()]; 48 | keys.sort((a, b) => a.localeCompare(b)); 49 | 50 | const options: SelectProps.OptionGroup[] = keys.map((key) => { 51 | const items = modelsMap.get(key); 52 | items?.sort((a, b) => a.name.localeCompare(b.name)); 53 | 54 | let label = key; 55 | if (label === "sagemaker") label = "SageMaker"; 56 | else if (label === "bedrock") label = "Bedrock"; 57 | else if (label === "openai") label = "OpenAI"; 58 | 59 | return { 60 | label, 61 | options: 62 | items?.map((item) => ({ 63 | label: 
`${item.name} (${item.dimensions})`, 64 | value: `${item.provider}::${item.dimensions}::${item.name}`, 65 | })) ?? [], 66 | }; 67 | }); 68 | 69 | return options; 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /cli/aws-cron-expressions.ts: -------------------------------------------------------------------------------- 1 | export const minuteExp = `(0?[0-9]|[1-5][0-9])`; // [0]0-59 2 | export const hourExp = `(0?[0-9]|1[0-9]|2[0-3])`; // [0]0-23 3 | export const dayOfMonthExp = `(0?[1-9]|[1-2][0-9]|3[0-1])`; // [0]1-31 4 | export const monthExp = `(0?[1-9]|1[0-2]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)`; // [0]1-12 or JAN-DEC 5 | export const dayOfWeekExp = `([1-7]|SUN|MON|TUE|WED|THU|FRI|SAT)`; // 1-7 or SAT-SUN 6 | export const yearExp = `((19[8-9][0-9])|(2[0-1][0-9][0-9]))`; // 1980-2199 7 | export const numbers = `([0-9]*[1-9][0-9]*)`; // whole numbers greater than 0 8 | 9 | export function dayOfWeekHash(): string { 10 | return `(${dayOfWeekExp}#[1-5])`; // add hash expression to enable supported use case 11 | } 12 | 13 | function rangeRegex(values: string): string { 14 | return `(${values}|(\\*\\-${values})|(${values}\\-${values})|(${values}\\-\\*))`; 15 | } 16 | 17 | function listRangeRegex(values: string): string { 18 | const range = rangeRegex(values); 19 | return `(${range}(\\,${range})*)`; 20 | } 21 | 22 | function slashRegex(values: string): string { 23 | const range = rangeRegex(values); 24 | return `((\\*|${range}|${values})\\/${numbers})`; 25 | } 26 | 27 | function listSlashRegex(values: string): string { 28 | const slash = slashRegex(values); 29 | const slashOrRange = `(${slash}|${rangeRegex(values)})`; 30 | return `(${slashOrRange}(\\,${slashOrRange})*)`; 31 | } 32 | 33 | function commonRegex(values: string): string { 34 | return `(${listRangeRegex(values)}|\\*|${listSlashRegex(values)})`; 35 | } 36 | 37 | export function minuteRegex(): string { 38 | return `^(${commonRegex(minuteExp)})$`; 
import genai_core.clients

# from langchain.llms import Bedrock (pending https://github.com/langchain-ai/langchain/issues/13316)
from .base import Bedrock

from langchain.prompts.prompt import PromptTemplate


from ..shared.meta.llama2_chat import (
    Llama2ChatPromptTemplate,
    Llama2ChatQAPromptTemplate,
    Llama2ChatCondensedQAPromptTemplate,
)
from ..shared.meta.llama2_chat import Llama2ConversationBufferMemory

from ..base import ModelAdapter
from genai_core.registry import registry


class BedrockMetaLLama2ChatAdapter(ModelAdapter):
    """Adapter wiring Bedrock-hosted Llama 2 chat models into the
    request-handler pipeline (prompts, memory, LLM construction)."""

    def __init__(self, model_id, *args, **kwargs):
        self.model_id = model_id

        super().__init__(*args, **kwargs)

    def get_memory(self, output_key=None, return_messages=False):
        """Returns a Llama2-formatted conversation buffer bound to this
        adapter's chat history."""
        return Llama2ConversationBufferMemory(
            memory_key="chat_history",
            chat_memory=self.chat_history,
            return_messages=return_messages,
            output_key=output_key,
        )

    def get_llm(self, model_kwargs=None):
        """Builds the Bedrock LLM client.

        Supported model_kwargs: temperature, topP (-> top_p),
        maxTokens (-> max_gen_len), streaming.
        """
        # Avoid a shared mutable default argument; None is the sentinel.
        model_kwargs = model_kwargs or {}
        bedrock = genai_core.clients.get_bedrock_client()

        params = {}
        if "temperature" in model_kwargs:
            params["temperature"] = model_kwargs["temperature"]
        if "topP" in model_kwargs:
            params["top_p"] = model_kwargs["topP"]
        if "maxTokens" in model_kwargs:
            params["max_gen_len"] = model_kwargs["maxTokens"]

        return Bedrock(
            client=bedrock,
            model_id=self.model_id,
            model_kwargs=params,
            streaming=model_kwargs.get("streaming", False),
            callbacks=[self.callback_handler],
        )

    def get_prompt(self):
        return Llama2ChatPromptTemplate

    def get_qa_prompt(self):
        return Llama2ChatQAPromptTemplate

    def get_condense_question_prompt(self):
        return Llama2ChatCondensedQAPromptTemplate


# Register the adapter for all Bedrock Llama 2 chat model ids.
registry.register(
    r"^bedrock.meta.llama2-.*-chat.*",
    BedrockMetaLLama2ChatAdapter,
)
"huggingface-pytorch-tgi-inference:2.0.0-tgi0.6.0-gpu-py39-cu118-ubuntu20.04"; 19 | static readonly HF_PYTORCH_LLM_TGI_INFERENCE_0_8_2 = 20 | "huggingface-pytorch-tgi-inference:2.0.0-tgi0.8.2-gpu-py39-cu118-ubuntu20.04"; 21 | static readonly HF_PYTORCH_LLM_TGI_INFERENCE_0_9_3 = 22 | "huggingface-pytorch-tgi-inference:2.0.1-tgi0.9.3-gpu-py39-cu118-ubuntu20.04"; 23 | static readonly HF_PYTORCH_LLM_TGI_INFERENCE_1_0_3 = 24 | "huggingface-pytorch-tgi-inference:2.0.1-tgi1.0.3-gpu-py39-cu118-ubuntu20.04"; 25 | static readonly HF_PYTORCH_LLM_TGI_INFERENCE_1_1_0 = 26 | "huggingface-pytorch-tgi-inference:2.0.1-tgi1.1.0-gpu-py39-cu118-ubuntu20.04"; 27 | static readonly HF_PYTORCH_LLM_TGI_INFERENCE_1_3_3 = 28 | "huggingface-pytorch-tgi-inference:2.1.1-tgi1.3.3-gpu-py310-cu121-ubuntu20.04"; 29 | static readonly HF_PYTORCH_LLM_TGI_INFERENCE_LATEST = 30 | ContainerImages.HF_PYTORCH_LLM_TGI_INFERENCE_1_1_0; 31 | /* 32 | DJL_INFERENCE_DEEPSPEED 33 | https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/image_uri_config/djl-deepspeed.json 34 | */ 35 | static readonly DJL_INFERENCE_DEEPSPEED_0_8_3 = 36 | "djl-inference:0.22.1-deepspeed0.8.3-cu118"; 37 | static readonly DJL_INFERENCE_DEEPSPEED_0_9_2 = 38 | "djl-inference:0.22.1-deepspeed0.9.2-cu118"; 39 | static readonly DJL_INFERENCE_DEEPSPEED_LATEST = 40 | ContainerImages.DJL_INFERENCE_DEEPSPEED_0_9_2; 41 | } 42 | -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/pages/rag/create-workspace/cross-encoder-selector-field.tsx: -------------------------------------------------------------------------------- 1 | import { useContext, useEffect, useState } from "react"; 2 | import { ApiClient } from "../../../common/api-client/api-client"; 3 | import { LoadingStatus } from "../../../common/types"; 4 | import { AppContext } from "../../../common/app-context"; 5 | import { OptionsHelper } from "../../../common/helpers/options-helper"; 6 | import { Select, 
SelectProps } from "@cloudscape-design/components"; 7 | import { CrossEncoderData } from "../../../API"; 8 | import { Utils } from "../../../common/utils"; 9 | 10 | interface CrossEncoderSelectorProps { 11 | submitting: boolean; 12 | onChange: (data: Partial<{ crossEncoderModel: SelectProps.Option }>) => void; 13 | selectedModel: SelectProps.Option | null; 14 | errors: Record; 15 | } 16 | 17 | export function CrossEncoderSelectorField(props: CrossEncoderSelectorProps) { 18 | const appContext = useContext(AppContext); 19 | const [crossEncoderModelsStatus, setCrossEncoderModelsStatus] = 20 | useState("loading"); 21 | const [crossEncoderModels, setCrossEncoderModels] = useState< 22 | CrossEncoderData[] 23 | >([]); 24 | 25 | useEffect(() => { 26 | if (!appContext) return; 27 | 28 | (async () => { 29 | const apiClient = new ApiClient(appContext); 30 | try { 31 | const result = await apiClient.crossEncoders.getModels(); 32 | 33 | setCrossEncoderModels(result.data?.listCrossEncoders!); 34 | setCrossEncoderModelsStatus("finished"); 35 | } catch (error) { 36 | console.error(Utils.getErrorMessage(error)); 37 | setCrossEncoderModels([]); 38 | setCrossEncoderModelsStatus("error"); 39 | } 40 | })(); 41 | }, [appContext]); 42 | 43 | const crossEncoderModelOptions = 44 | OptionsHelper.getSelectOptionGroups(crossEncoderModels); 45 | 46 | return ( 47 | 59 | props.onChange({ embeddingsModel: selectedOption }) 60 | } 61 | /> 62 | 63 | ); 64 | } 65 | -------------------------------------------------------------------------------- /lib/sagemaker-model/deploy-package-model.ts: -------------------------------------------------------------------------------- 1 | import * as iam from "aws-cdk-lib/aws-iam"; 2 | import * as sagemaker from "aws-cdk-lib/aws-sagemaker"; 3 | import { Construct } from "constructs"; 4 | 5 | import { SageMakerModelProps, ModelPackageConfig } from "./types"; 6 | import { NagSuppressions } from "cdk-nag"; 7 | 8 | export function deployPackageModel( 9 | scope: 
Construct, 10 | props: SageMakerModelProps, 11 | modelConfig: ModelPackageConfig 12 | ) { 13 | const { region } = props; 14 | const { 15 | modelId, 16 | instanceType, 17 | containerStartupHealthCheckTimeoutInSeconds = 900, 18 | } = modelConfig; 19 | 20 | const executionRole = new iam.Role(scope, "SageMakerExecutionRole", { 21 | assumedBy: new iam.ServicePrincipal("sagemaker.amazonaws.com"), 22 | managedPolicies: [ 23 | iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSageMakerFullAccess"), 24 | ], 25 | }); 26 | 27 | const modelPackageMapping = modelConfig.packages(scope); 28 | const modelPackageName = modelPackageMapping.findInMap(region, "arn"); 29 | 30 | const model = new sagemaker.CfnModel(scope, "Model", { 31 | executionRoleArn: executionRole.roleArn, 32 | enableNetworkIsolation: true, 33 | primaryContainer: { 34 | modelPackageName, 35 | }, 36 | }); 37 | 38 | const endpointConfig = new sagemaker.CfnEndpointConfig( 39 | scope, 40 | "EndpointConfig", 41 | { 42 | productionVariants: [ 43 | { 44 | instanceType, 45 | initialVariantWeight: 1, 46 | initialInstanceCount: 1, 47 | variantName: "AllTraffic", 48 | modelName: model.getAtt("ModelName").toString(), 49 | containerStartupHealthCheckTimeoutInSeconds, 50 | }, 51 | ], 52 | } 53 | ); 54 | 55 | endpointConfig.addDependency(model); 56 | 57 | const endpoint = new sagemaker.CfnEndpoint(scope, modelId, { 58 | endpointConfigName: endpointConfig.getAtt("EndpointConfigName").toString(), 59 | endpointName: modelId.split("/").join("-").split(".").join("-"), 60 | }); 61 | 62 | endpoint.addDependency(endpointConfig); 63 | 64 | /** 65 | * CDK NAG suppression 66 | */ 67 | NagSuppressions.addResourceSuppressions(executionRole, [ 68 | { 69 | id: "AwsSolutions-IAM4", 70 | reason: "Gives user ability to deploy and delete endpoints from the UI.", 71 | }, 72 | { 73 | id: "AwsSolutions-IAM5", 74 | reason: "Gives user ability to deploy and delete endpoints from the UI.", 75 | }, 76 | ]); 77 | 78 | return { model, endpoint }; 79 | } 80 
| -------------------------------------------------------------------------------- /lib/user-interface/react-app/src/common/helpers/options-helper.ts: -------------------------------------------------------------------------------- 1 | import { SelectProps } from "@cloudscape-design/components"; 2 | 3 | export abstract class OptionsHelper { 4 | static getSelectOption(model?: string): SelectProps.Option | null { 5 | if (!model) return null; 6 | const [, name] = model.split("::") ?? []; 7 | if (!name) return null; 8 | 9 | return { 10 | label: name, 11 | value: model, 12 | }; 13 | } 14 | 15 | static parseValue(value?: string) { 16 | const retValue = { 17 | provider: "", 18 | name: "", 19 | }; 20 | 21 | try { 22 | if (!value) return retValue; 23 | const [provider, name] = value.split("::") ?? []; 24 | 25 | return { 26 | provider, 27 | name, 28 | }; 29 | } catch (error) { 30 | console.error(error); 31 | return retValue; 32 | } 33 | } 34 | 35 | static getSelectOptionGroups( 36 | data: T[] 37 | ) { 38 | const modelsMap = new Map(); 39 | data.forEach((item) => { 40 | let items = modelsMap.get(item.provider); 41 | if (!items) { 42 | items = []; 43 | modelsMap.set(item.provider, [item]); 44 | } else { 45 | modelsMap.set(item.provider, [...items, item]); 46 | } 47 | }); 48 | 49 | const keys = [...modelsMap.keys()]; 50 | keys.sort((a, b) => a.localeCompare(b)); 51 | 52 | const options: SelectProps.OptionGroup[] = keys.map((key) => { 53 | const items = modelsMap.get(key); 54 | items?.sort((a, b) => a.name.localeCompare(b.name)); 55 | 56 | return { 57 | label: this.getProviderLabel(key), 58 | options: 59 | items?.map((item) => ({ 60 | label: item.name, 61 | value: `${item.provider}::${item.name}`, 62 | })) ?? 
import os
import shutil
import subprocess
from pathlib import Path

import boto3
from huggingface_hub import snapshot_download

# Build-time script: downloads one or more Hugging Face model snapshots,
# bundles them with the local inference-code folder, compresses the result
# with pigz, and uploads the archive to S3.

s3_client = boto3.client("s3")

local_model_folder = os.getenv("LOCAL_MODEL_FOLDER", "./model")
bucket = os.getenv("BUILD_BUCKET", "")
model_ids = os.getenv("MODEL_ID", "")
models_list = [val.strip() for val in model_ids.split(",")]
models_num = len(models_list)

print(f"Model ID: {model_ids}", flush=True)
print(f"Bucket: {bucket}", flush=True)

# Start from a clean output folder.
out_folder = Path("out")
if out_folder.exists():
    shutil.rmtree(str(out_folder))
out_folder.mkdir(exist_ok=True)

print(f"Creating new code folder: {out_folder}/code", flush=True)
model_code_folder = Path(os.path.join(out_folder, "code"))
model_code_folder.mkdir(exist_ok=True)

print(f"Copying contents from {local_model_folder} to {model_code_folder}", flush=True)
shutil.copytree(local_model_folder, str(model_code_folder), dirs_exist_ok=True)

for model_id in models_list:
    # A single model goes straight into out/; with multiple models each one
    # gets a subfolder named after the last path segment of its id.
    if models_num == 1:
        model_folder = out_folder
    else:
        model_folder = Path(out_folder, model_id.split("/")[-1])
        if model_folder.exists():
            shutil.rmtree(str(model_folder))
        model_folder.mkdir(exist_ok=True)

    print(f"Model folder: {model_folder}", flush=True)
    print(
        f"Downloading model snapshot for: {model_id} into {model_folder}",
        flush=True,
    )

    snapshot_download(
        model_id, local_dir=str(model_folder), local_dir_use_symlinks=False
    )

    print(f"Model snapshot downloaded to: {model_folder}", flush=True)


print(f"Compressing the out folder: {out_folder}", flush=True)

current_folder = os.getcwd()
print(f"Current folder: {current_folder}")
os.chdir(str(out_folder))

print(f"Compressing the model folder: {out_folder}")
# pigz parallelizes gzip compression across cores.
command = "tar -cf model.tar.gz --use-compress-program=pigz *"
print(f"Running command: {command}")
subprocess.run(command, shell=True, check=True)
print(f"Model folder compressed: {out_folder}")
print(f"Moving back to: {current_folder}")
os.chdir(current_folder)

print(f"Uploading the model to S3 bucket: {bucket}")
# upload_file expects a filename string; also dropped the stray f-prefix on
# the constant object key (it had no placeholders).
s3_client.upload_file(
    str(out_folder.joinpath("model.tar.gz")), bucket, "out/model.tar.gz"
)
model_data = f"s3://{bucket}/out/model.tar.gz"

print(f"Model archive uploaded to: {model_data}")
elements."); 13 | } 14 | 15 | let dotProduct = 0; 16 | let magnitudeA = 0; 17 | let magnitudeB = 0; 18 | 19 | for (let i = 0; i < vecA.length; i++) { 20 | dotProduct += vecA[i] * vecB[i]; 21 | magnitudeA += vecA[i] * vecA[i]; 22 | magnitudeB += vecB[i] * vecB[i]; 23 | } 24 | 25 | magnitudeA = Math.sqrt(magnitudeA); 26 | magnitudeB = Math.sqrt(magnitudeB); 27 | 28 | const retValue = dotProduct / (magnitudeA * magnitudeB); 29 | 30 | return retValue; 31 | } 32 | 33 | static euclideanDistance(vecA: number[], vecB: number[]) { 34 | if (vecA.length !== vecB.length) { 35 | throw new Error("Both vectors must have the same number of elements."); 36 | } 37 | 38 | let sum = 0; 39 | for (let i = 0; i < vecA.length; i++) { 40 | const difference = vecA[i] - vecB[i]; 41 | sum += difference * difference; 42 | } 43 | 44 | return Math.sqrt(sum); 45 | } 46 | 47 | static innerProduct(vecA: number[], vecB: number[]) { 48 | if (vecA.length !== vecB.length) { 49 | throw new Error("Both vectors must have the same number of elements."); 50 | } 51 | 52 | let sum = 0; 53 | for (let i = 0; i < vecA.length; i++) { 54 | sum += vecA[i] * vecB[i]; 55 | } 56 | 57 | return sum; 58 | } 59 | 60 | static matrices(vectors: number[][]) { 61 | const cosineSimilarity = vectors.map((vecA) => { 62 | return vectors.map((vecB) => { 63 | return this.cosineSimilarity(vecA, vecB); 64 | }); 65 | }); 66 | 67 | const cosineDistance = vectors.map((vecA) => { 68 | return vectors.map((vecB) => { 69 | return 1 - this.cosineSimilarity(vecA, vecB); 70 | }); 71 | }); 72 | 73 | const innerProduct = vectors.map((vecA) => { 74 | return vectors.map((vecB) => { 75 | return this.innerProduct(vecA, vecB); 76 | }); 77 | }); 78 | 79 | const l2 = vectors.map((vecA) => { 80 | return vectors.map((vecB) => { 81 | return this.euclideanDistance(vecA, vecB); 82 | }); 83 | }); 84 | 85 | return { 86 | cosineSimilarity, 87 | cosineDistance, 88 | innerProduct, 89 | l2, 90 | }; 91 | } 92 | } 93 | 
import os
import boto3
from .client import get_open_search_client
import genai_core.utils.delete_files_with_prefix


PROCESSING_BUCKET_NAME = os.environ["PROCESSING_BUCKET_NAME"]
UPLOAD_BUCKET_NAME = os.environ["UPLOAD_BUCKET_NAME"]
WORKSPACES_TABLE_NAME = os.environ["WORKSPACES_TABLE_NAME"]
# NOTE(review): read with .get() so it may be None; dynamodb.Table(None)
# would fail below — confirm the variable is always set for this Lambda.
DOCUMENTS_TABLE_NAME = os.environ.get("DOCUMENTS_TABLE_NAME")

WORKSPACE_OBJECT_TYPE = "workspace"

dynamodb = boto3.resource("dynamodb")


def _delete_workspace_documents(documents_table, workspace_id: str) -> int:
    """Deletes every document row for the workspace; returns the count."""
    items_to_delete = []
    last_evaluated_key = None
    while True:
        query_args = {
            "KeyConditionExpression": boto3.dynamodb.conditions.Key(
                "workspace_id"
            ).eq(workspace_id)
        }

        if last_evaluated_key:
            query_args["ExclusiveStartKey"] = last_evaluated_key

        response = documents_table.query(**query_args)
        items_to_delete.extend(response["Items"])

        last_evaluated_key = response.get("LastEvaluatedKey")
        if not last_evaluated_key:
            break

    # batch_writer automatically buffers and flushes deletes in 25-item
    # batches, so no manual chunking is needed.
    with documents_table.batch_writer() as batch:
        for item in items_to_delete:
            batch.delete_item(
                Key={
                    "workspace_id": item["workspace_id"],
                    "document_id": item["document_id"],
                }
            )

    return len(items_to_delete)


def delete_open_search_workspace(workspace: dict):
    """Deletes an OpenSearch workspace: its S3 files, its index, its
    document rows, and finally the workspace row itself."""
    workspace_id = workspace["workspace_id"]
    # Index name is the workspace id with dashes stripped.
    index_name = workspace_id.replace("-", "")

    genai_core.utils.delete_files_with_prefix.delete_files_with_prefix(
        UPLOAD_BUCKET_NAME, workspace_id
    )
    genai_core.utils.delete_files_with_prefix.delete_files_with_prefix(
        PROCESSING_BUCKET_NAME, workspace_id
    )

    client = get_open_search_client()
    if client.indices.exists(index_name):
        client.indices.delete(index=index_name)
        print(f"Index {index_name} deleted.")

    workspaces_table = dynamodb.Table(WORKSPACES_TABLE_NAME)
    documents_table = dynamodb.Table(DOCUMENTS_TABLE_NAME)

    deleted_count = _delete_workspace_documents(documents_table, workspace_id)
    print(f"Deleted {deleted_count} items.")

    response = workspaces_table.delete_item(
        Key={"workspace_id": workspace_id, "object_type": WORKSPACE_OBJECT_TYPE},
    )

    print(f"Delete Item succeeded: {response}")
import os
import boto3
import genai_core.utils.delete_files_with_prefix
from psycopg2 import sql
from genai_core.aurora.connection import AuroraConnection

PROCESSING_BUCKET_NAME = os.environ["PROCESSING_BUCKET_NAME"]
UPLOAD_BUCKET_NAME = os.environ["UPLOAD_BUCKET_NAME"]
WORKSPACES_TABLE_NAME = os.environ["WORKSPACES_TABLE_NAME"]
# NOTE(review): read with .get() so it may be None; dynamodb.Table(None)
# would fail below — confirm the variable is always set for this Lambda.
DOCUMENTS_TABLE_NAME = os.environ.get("DOCUMENTS_TABLE_NAME")

WORKSPACE_OBJECT_TYPE = "workspace"

dynamodb = boto3.resource("dynamodb")


def delete_aurora_workspace(workspace: dict):
    """Deletes an Aurora workspace: its S3 files, its vector table, its
    document rows, and finally the workspace row itself."""
    workspace_id = workspace["workspace_id"]
    genai_core.utils.delete_files_with_prefix.delete_files_with_prefix(
        UPLOAD_BUCKET_NAME, workspace_id
    )
    genai_core.utils.delete_files_with_prefix.delete_files_with_prefix(
        PROCESSING_BUCKET_NAME, workspace_id
    )

    # Table name is the workspace id without dashes; sql.Identifier quotes
    # it safely (no string interpolation into the SQL).
    table_name = sql.Identifier(workspace_id.replace("-", ""))
    # NOTE(review): autocommit=False with no explicit commit — presumably
    # AuroraConnection commits on clean exit; confirm in connection.py.
    with AuroraConnection(autocommit=False) as cursor:
        cursor.execute(
            sql.SQL("DROP TABLE IF EXISTS {table};").format(table=table_name)
        )

    workspaces_table = dynamodb.Table(WORKSPACES_TABLE_NAME)
    documents_table = dynamodb.Table(DOCUMENTS_TABLE_NAME)

    # Collect every document row for the workspace (paginated query).
    items_to_delete = []
    last_evaluated_key = None
    while True:
        query_args = {
            "KeyConditionExpression": boto3.dynamodb.conditions.Key(
                "workspace_id"
            ).eq(workspace_id)
        }

        if last_evaluated_key:
            query_args["ExclusiveStartKey"] = last_evaluated_key

        response = documents_table.query(**query_args)
        items_to_delete.extend(response["Items"])

        last_evaluated_key = response.get("LastEvaluatedKey")
        if not last_evaluated_key:
            break

    # batch_writer automatically buffers and flushes deletes in 25-item
    # batches, so no manual chunking is needed.
    with documents_table.batch_writer() as batch:
        for item in items_to_delete:
            batch.delete_item(
                Key={
                    "workspace_id": item["workspace_id"],
                    "document_id": item["document_id"],
                }
            )
    print(f"Deleted {len(items_to_delete)} items.")

    response = workspaces_table.delete_item(
        Key={"workspace_id": workspace_id, "object_type": WORKSPACE_OBJECT_TYPE},
    )

    print(f"Delete Item succeeded: {response}")
"763104351884" }, 27 | "ap-northeast-2": { account: "763104351884" }, 28 | "ap-northeast-3": { account: "364406365360" }, 29 | "ap-south-1": { account: "763104351884" }, 30 | "ap-south-2": { account: "772153158452" }, 31 | "ap-southeast-1": { account: "763104351884" }, 32 | "ap-southeast-2": { account: "763104351884" }, 33 | "ap-southeast-3": { account: "907027046896" }, 34 | "ap-southeast-4": { account: "457447274322" }, 35 | "ca-central-1": { account: "763104351884" }, 36 | "cn-north-1": { account: "727897471807" }, 37 | "cn-northwest-1": { account: "727897471807" }, 38 | "eu-central-1": { account: "763104351884" }, 39 | "eu-central-2": { account: "380420809688" }, 40 | "eu-north-1": { account: "763104351884" }, 41 | "eu-west-1": { account: "763104351884" }, 42 | "eu-west-2": { account: "763104351884" }, 43 | "eu-west-3": { account: "763104351884" }, 44 | "eu-south-1": { account: "692866216735" }, 45 | "eu-south-2": { account: "503227376785" }, 46 | "me-south-1": { account: "217643126080" }, 47 | "me-central-1": { account: "914824155844" }, 48 | "sa-east-1": { account: "763104351884" }, 49 | "us-east-1": { account: "763104351884" }, 50 | "us-east-2": { account: "763104351884" }, 51 | "us-gov-east-1": { account: "446045086412" }, 52 | "us-gov-west-1": { account: "442386744353" }, 53 | "us-iso-east-1": { account: "886529160074" }, 54 | "us-isob-east-1": { account: "094389454867" }, 55 | "us-west-1": { account: "763104351884" }, 56 | "us-west-2": { account: "763104351884" }, 57 | }, 58 | }); 59 | 60 | const account = mapping.findInMap(region, "account"); 61 | 62 | this.mapping = mapping; 63 | this.account = account; 64 | } 65 | } 66 | --------------------------------------------------------------------------------