├── .gitignore ├── LICENSE ├── README.md ├── bedrock-chat-with-pdf ├── Admin │ ├── Dockerfile │ ├── admin.py │ └── requirements.txt ├── Bedrock-ChatWithPdf.png ├── README.md └── User │ ├── Dockerfile │ ├── app.py │ └── requirements.txt ├── bedrock-with-serverless-text-summarization ├── Bedrock-Text-Summarization-Serverless-App.png ├── Dockerfile ├── README.md ├── app.py └── requirements.txt ├── customer-service-demo-with-chatbison ├── Dockerfile ├── README.md ├── app.py └── requirements.txt └── youtube-demos ├── all_fm.json └── query_llama.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | api_keys/ 7 | api_keys/* 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | cover/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | .pybuilder/ 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # poetry 101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 102 | # This is especially recommended for binary packages to ensure reproducibility, and is more 103 | # commonly ignored for libraries. 104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 105 | #poetry.lock 106 | 107 | # pdm 108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 109 | #pdm.lock 110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 111 | # in version control. 112 | # https://pdm.fming.dev/#use-with-ide 113 | .pdm.toml 114 | 115 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | 134 | # Spyder project settings 135 | .spyderproject 136 | .spyproject 137 | 138 | # Rope project settings 139 | .ropeproject 140 | 141 | # mkdocs documentation 142 | /site 143 | 144 | # mypy 145 | .mypy_cache/ 146 | .dmypy.json 147 | dmypy.json 148 | 149 | # Pyre type checker 150 | .pyre/ 151 | 152 | # pytype static type analyzer 153 | .pytype/ 154 | 155 | # Cython debug symbols 156 | cython_debug/ 157 | 158 | # PyCharm 159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 161 | # and can be added to the global gitignore or merged into this file. For a more nuclear 162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 163 | #.idea/ 164 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 mycloudtutorials 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Generative AI Demos 2 | 3 | This is the code repository for the Generative AI demo code and instructions. I will create basic to advanced demos for generative AI chatbots using Amazon Bedrock, Google Vertex AI, Langchain, Streamlit, etc. 4 | 5 | I am also planning to create a demo on how to build serverless inference using API Gateway, Lambda, and Bedrock. 6 | 7 | 8 | ## About Me: 9 | My name is Girish Jaju. I have been doing IT consulting for over 20 years. For the last several years I have been working with AWS as a cloud architect, DevOps engineer, and developer (wearing many hats). 10 | 11 | For any consulting opportunities, please reach out to me at girish@jajusoft.com.
12 | 13 | ## Connect with me: 14 | LinkedIn: https://www.linkedin.com/in/girishjaju/ 15 | 16 | -------------------------------------------------------------------------------- /bedrock-chat-with-pdf/Admin/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11 2 | EXPOSE 8083 3 | WORKDIR /app 4 | COPY requirements.txt ./ 5 | RUN pip install -r requirements.txt 6 | COPY . ./ 7 | ENTRYPOINT [ "streamlit", "run", "admin.py", "--server.port=8083", "--server.address=0.0.0.0" ] -------------------------------------------------------------------------------- /bedrock-chat-with-pdf/Admin/admin.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import streamlit as st 3 | import os 4 | import uuid 5 | 6 | ## s3_client 7 | s3_client = boto3.client("s3") 8 | BUCKET_NAME = os.getenv("BUCKET_NAME") 9 | 10 | ## Bedrock embeddings 11 | from langchain_community.embeddings import BedrockEmbeddings 12 | 13 | ## Text Splitter 14 | from langchain.text_splitter import RecursiveCharacterTextSplitter 15 | 16 | ## Pdf Loader 17 | from langchain_community.document_loaders import PyPDFLoader 18 | 19 | ## FAISS vector store 20 | from langchain_community.vectorstores import FAISS 21 | 22 | bedrock_client = boto3.client(service_name="bedrock-runtime") 23 | bedrock_embeddings = BedrockEmbeddings(model_id="amazon.titan-embed-text-v1", client=bedrock_client) 24 | 25 | def get_unique_id(): 26 | return str(uuid.uuid4()) 27 | 28 | 29 | ## Split the pages / text into chunks 30 | def split_text(pages, chunk_size, chunk_overlap): 31 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) 32 | docs = text_splitter.split_documents(pages) 33 | return docs 34 | 35 | ## create vector store and upload it to S3 36 | def create_vector_store(request_id, documents): 37 | vectorstore_faiss=FAISS.from_documents(documents, bedrock_embeddings) 38 | file_name=f"{request_id}.bin" 39 | folder_path="/tmp/" 40 | vectorstore_faiss.save_local(index_name=file_name, folder_path=folder_path) 41 | 42 | ## upload to S3 43 | s3_client.upload_file(Filename=folder_path + "/" + file_name + ".faiss", Bucket=BUCKET_NAME, Key="my_faiss.faiss") 44 | s3_client.upload_file(Filename=folder_path + "/" + file_name + ".pkl", Bucket=BUCKET_NAME, Key="my_faiss.pkl") 45 | 46 | return True 47 | 48 | ## main method 49 | def main(): 50 | st.write("This is the Admin site for the Chat with PDF demo") 51 | uploaded_file = st.file_uploader("Choose a file", "pdf") 52 | if uploaded_file is not None: 53 | request_id = get_unique_id() 54 | st.write(f"Request Id: {request_id}") 55 | saved_file_name = f"{request_id}.pdf" 56 | with open(saved_file_name, mode="wb") as w: 57 | w.write(uploaded_file.getvalue()) 58 | 59 | loader = PyPDFLoader(saved_file_name) 60 | pages = loader.load_and_split() 61 | 62 | st.write(f"Total Pages: {len(pages)}") 63 | 64 | ## Split Text 65 | split_docs = split_text(pages, 1000, 200) 66 | st.write(f"Split docs length: {len(split_docs)}") 67 | st.write("===================") 68 | st.write(split_docs[0]) 69 | st.write("===================") 70 | st.write(split_docs[1]) 71 | 72 | st.write("Creating the Vector Store") 73 | result = create_vector_store(request_id, split_docs) 74 | 75 | if result: 76 | st.write("Hurray!! PDF processed successfully") 77 | else: 78 | st.write("Error!!
Please check logs.") 79 | 80 | 81 | 82 | if __name__ == "__main__": 83 | main() -------------------------------------------------------------------------------- /bedrock-chat-with-pdf/Admin/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit 2 | pypdf 3 | langchain 4 | faiss-cpu 5 | boto3 -------------------------------------------------------------------------------- /bedrock-chat-with-pdf/Bedrock-ChatWithPdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mycloudtutorials/generative-ai-demos/7807880dc50353db55ca00b8f5791683467858b5/bedrock-chat-with-pdf/Bedrock-ChatWithPdf.png -------------------------------------------------------------------------------- /bedrock-chat-with-pdf/README.md: -------------------------------------------------------------------------------- 1 | # Chat With PDF - Generative AI Application 2 | ## Built Using Amazon Bedrock, Langchain, Python, Docker, Amazon S3 3 | ## Models used: 4 | Amazon Titan Embedding G1 - Text 5 | Anthropic Claude 2.1 6 | 7 | ## Introduction 8 | In this video we will build a CHATBOT like application with AWS Amazon Bedrock, docker, python, Langchain, and Streamlit. We will use Retrieval-Augmented generation concept to provide context to the Large Language model along with user query to generate response from our Knowledgebase. 9 | 10 | In this hands-on tutorial, we will demonstrate the following: 11 | - Architecture of the applications 12 | - Build 2 applications (ADMIN and USER) and create DOCKER images 13 | 14 | 15 | ## Architecture 16 | ![image info](./Bedrock-ChatWithPdf.png) 17 | 18 | ## ADMIN Application: 19 | - Build Admin Web application where AdminUser can upload the pdf. 20 | - The PDF text is split into chunks 21 | - Using the Amazon Titan Embedding Model, create the vector representation of the chunks 22 | - Using FAISS, save the vector index locally 23 | - Upload the index to Amazon S3 bucket (You can use other vector stores like OpenSearch, Pinecone, PgVector etc., but for this demo, I chose cost effective S3) 24 | 25 | ### Docker Commands: 26 | 27 | Build Docker Image: 28 | `docker build -t pdf-reader-admin .` 29 | 30 | Run ADMIN application: 31 | `docker run -e BUCKET_NAME= -v ~/.aws:/root/.aws -p 8083:8083 -it pdf-reader-admin` 32 | 33 | 34 | 35 | ## USER Application: 36 | - Build User Web application where users can query / chat with the pdf. 37 | - At the application start, download the index files from S3 to build local FAISS index (vector store) 38 | - Langchain's RetrievalQA, does the following: 39 | - Convert the User's query to vector embedding using Amazon Titan Embedding Model (Make sure to use the same model that was used for creating the chunk's embedding on the Admin side) 40 | - Do similarity search to the FAISS index and retrieve 5 relevant documents pertaining to the user query to build the context 41 | - Using Prompt template, provide the question and context to the Large Language Model. We are using Claude model from Anthropic. 42 | - Display the LLM's response to the user. 43 | 44 | ### Docker Commands: 45 | 46 | Build Docker Image: 47 | `docker build -t pdf-reader-client .` 48 | 49 | Run ADMIN application: 50 | `docker run -e BUCKET_NAME= -v ~/.aws:/root/.aws -p 8084:8084 -it pdf-reader-client` 51 | 52 | 53 | #### Note: The docker volume mount is only needed in local. If you are running the container in ECS, or EKS, the iam role is used. 
54 | 55 | 56 | ## Youtube 57 | I have created a YouTube video for this tutorial with step-by-step hands-on coding. 58 | 59 | [![Chat With PDF - Generative AI Application](https://i9.ytimg.com/vi/KFibP7KnDVM/mqdefault.jpg?v=66342224&sqp=CKzU0LEG&rs=AOn4CLASIjZrAdMHdLjZjWOnwM4a7gvQnA)](https://www.youtube.com/watch?v=KFibP7KnDVM) 60 | -------------------------------------------------------------------------------- /bedrock-chat-with-pdf/User/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11 2 | EXPOSE 8084 3 | WORKDIR /app 4 | COPY requirements.txt ./ 5 | RUN pip install -r requirements.txt 6 | COPY . ./ 7 | ENTRYPOINT [ "streamlit", "run", "app.py", "--server.port=8084", "--server.address=0.0.0.0" ] -------------------------------------------------------------------------------- /bedrock-chat-with-pdf/User/app.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import streamlit as st 3 | import os 4 | import uuid 5 | 6 | ## s3_client 7 | s3_client = boto3.client("s3") 8 | BUCKET_NAME = os.getenv("BUCKET_NAME") 9 | 10 | ## Bedrock 11 | from langchain_community.embeddings import BedrockEmbeddings 12 | from langchain.llms.bedrock import Bedrock 13 | 14 | ## prompt and chain 15 | from langchain.prompts import PromptTemplate 16 | from langchain.chains import RetrievalQA 17 | 18 | ## Text Splitter 19 | from langchain.text_splitter import RecursiveCharacterTextSplitter 20 | 21 | ## Pdf Loader 22 | from langchain_community.document_loaders import PyPDFLoader 23 | 24 | ## import FAISS 25 | from langchain_community.vectorstores import FAISS 26 | 27 | bedrock_client = boto3.client(service_name="bedrock-runtime") 28 | bedrock_embeddings = BedrockEmbeddings(model_id="amazon.titan-embed-text-v1", client=bedrock_client) 29 | 30 | folder_path="/tmp/" 31 | 32 | def get_unique_id(): 33 | return str(uuid.uuid4()) 34 | 35 | ## load index 36 | def load_index(): 37 | s3_client.download_file(Bucket=BUCKET_NAME, Key="my_faiss.faiss", Filename=f"{folder_path}my_faiss.faiss") 38 | s3_client.download_file(Bucket=BUCKET_NAME, Key="my_faiss.pkl", Filename=f"{folder_path}my_faiss.pkl") 39 | 40 | def get_llm(): 41 | llm=Bedrock(model_id="anthropic.claude-v2:1", client=bedrock_client, 42 | model_kwargs={'max_tokens_to_sample': 512}) 43 | return llm 44 | 45 | # get_response() 46 | def get_response(llm, vectorstore, question): 47 | ## create prompt / template 48 | prompt_template = """ 49 | 50 | Human: Please use the given context to provide a concise answer to the question. 51 | If you don't know the answer, just say that you don't know; don't try to make up an answer.
52 | 53 | {context} 54 | 55 | 56 | Question: {question} 57 | 58 | Assistant:""" 59 | 60 | PROMPT = PromptTemplate( 61 | template=prompt_template, input_variables=["context", "question"] 62 | ) 63 | 64 | qa = RetrievalQA.from_chain_type( 65 | llm=llm, 66 | chain_type="stuff", 67 | retriever=vectorstore.as_retriever( 68 | search_type="similarity", search_kwargs={"k": 5} 69 | ), 70 | return_source_documents=True, 71 | chain_type_kwargs={"prompt": PROMPT} 72 | ) 73 | answer=qa({"query":question}) 74 | return answer['result'] 75 | 76 | 77 | ## main method 78 | def main(): 79 | st.header("This is the client site for the Chat with PDF demo using Bedrock, RAG, etc.") 80 | 81 | load_index() 82 | 83 | dir_list = os.listdir(folder_path) 84 | st.write(f"Files and Directories in {folder_path}") 85 | st.write(dir_list) 86 | 87 | ## create index 88 | faiss_index = FAISS.load_local( 89 | index_name="my_faiss", 90 | folder_path = folder_path, 91 | embeddings=bedrock_embeddings, 92 | allow_dangerous_deserialization=True 93 | ) 94 | 95 | st.write("INDEX IS READY") 96 | question = st.text_input("Please ask your question") 97 | if st.button("Ask Question"): 98 | with st.spinner("Querying..."): 99 | 100 | llm = get_llm() 101 | 102 | # get_response 103 | st.write(get_response(llm, faiss_index, question)) 104 | st.success("Done") 105 | 106 | if __name__ == "__main__": 107 | main() -------------------------------------------------------------------------------- /bedrock-chat-with-pdf/User/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit 2 | langchain 3 | faiss-cpu 4 | boto3 -------------------------------------------------------------------------------- /bedrock-with-serverless-text-summarization/Bedrock-Text-Summarization-Serverless-App.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mycloudtutorials/generative-ai-demos/7807880dc50353db55ca00b8f5791683467858b5/bedrock-with-serverless-text-summarization/Bedrock-Text-Summarization-Serverless-App.png -------------------------------------------------------------------------------- /bedrock-with-serverless-text-summarization/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11 2 | EXPOSE 8082 3 | WORKDIR /app 4 | COPY requirements.txt ./ 5 | RUN pip install -r requirements.txt 6 | COPY . ./ 7 | ENTRYPOINT [ "streamlit", "run", "app.py", "--server.port=8082", "--server.address=0.0.0.0" ] 8 | -------------------------------------------------------------------------------- /bedrock-with-serverless-text-summarization/README.md: -------------------------------------------------------------------------------- 1 | # Text Summarization - Generative AI Application [Hands-On] 2 | ## Built Using Amazon Bedrock - Cohere FM, Lambda, Python, Docker, Streamlit 3 | ## Models used: 4 | Cohere FM (Foundation Model) 5 | 6 | ## Introduction 7 | In this video, we will implement a text summarization application. This application has both a backend and a frontend. 8 | 9 | ## Architecture 10 | ![image info](./Bedrock-Text-Summarization-Serverless-App.png) 11 | 12 | ## Backend Application: 13 | The backend application is built using serverless technologies like AWS Lambda and is exposed as a Lambda Function URL. 14 | The Python Lambda function integrates with Bedrock to generate the response from the LLM, which in this case is a Cohere foundation model; a sketch of such a handler is shown below.
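The Lambda source itself is not included in this repo, so the following is only a minimal sketch of what the handler could look like. It assumes the Function URL forwards the frontend's JSON payload and uses the on-demand `cohere.command-text-v14` model (listed in `youtube-demos/all_fm.json`); the inference parameter values are illustrative.

```python
import json
import boto3

bedrock_client = boto3.client(service_name="bedrock-runtime")

def lambda_handler(event, context):
    # The frontend posts {"prompt": "Please summarize the following text.\n ..."}
    payload = json.loads(event["body"])

    request = {
        "prompt": payload["prompt"],
        "max_tokens": 400,       # illustrative values
        "temperature": 0.3,
    }

    # Invoke the Cohere Command model through Bedrock
    response = bedrock_client.invoke_model(
        body=json.dumps(request),
        modelId="cohere.command-text-v14",
    )
    model_response = json.loads(response["body"].read())

    # Cohere responses carry the generated text under "generations"
    summary = model_response["generations"][0]["text"]
    return {"statusCode": 200, "body": summary}
```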
15 | 16 | ## Frontend Application 17 | The frontend is implemented using Python, Docker and Streamlit. An API call is made to the Lambda Function URL with the user's entered text. 18 | The response is displayed to the user as streaming text. 19 | 20 | ### Docker Commands: 21 | 22 | Build Docker Image: 23 | `docker build -t serverless-text-summarization-frontend .` 24 | 25 | Run Frontend application: 26 | `docker run -p 8082:8082 -it serverless-text-summarization-frontend` 27 | 28 | 29 | ## Youtube 30 | I have created a YouTube video for this tutorial with step-by-step hands-on coding. 31 | 32 | [![Text Summarization - Generative AI Application - With AWS Bedrock and Serverless Lambda](https://i9.ytimg.com/vi/DIt2Hwy-FTM/mqdefault.jpg?v=664d1aae&sqp=CKyDtrIG&rs=AOn4CLDA-_EQ3-Ksn5bh__pB-IdDpuBdKg)](https://www.youtube.com/watch?v=DIt2Hwy-FTM) 33 | -------------------------------------------------------------------------------- /bedrock-with-serverless-text-summarization/app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import time 3 | import requests 4 | 5 | api_endpoint = "https://xum2r4r3zyuyllk4wx3vayakxq0pgeah.lambda-url.us-west-2.on.aws/" 6 | 7 | 8 | # Takes the user text, creates a prompt, and invokes the foundation model via the Lambda Function URL 9 | 10 | def get_response(text): 11 | payload = { 12 | "prompt": f"Please summarize the following text.\n {text}" 13 | } 14 | 15 | response = requests.post(api_endpoint, json = payload) 16 | print(f"response status:: {response.status_code}") 17 | response_text = response.text 18 | 19 | # Yield word by word for streaming-like behavior 20 | for word in response_text.split(): 21 | yield word + " " 22 | time.sleep(0.1) 23 | 24 | 25 | def main(): 26 | st.set_page_config("Text Summarization Demo") 27 | st.header("AWS Bedrock integration with Serverless Lambda - Cohere model") 28 | 29 | text = st.text_area("Write text to summarize") 30 | 31 | if st.button("Summarize It"): 32 | with st.spinner("processing..."): 33 | ## call the Lambda Function URL 34 | st.write(get_response(text)) 35 | 36 | 37 | if __name__ == "__main__": 38 | main() -------------------------------------------------------------------------------- /bedrock-with-serverless-text-summarization/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit 2 | requests -------------------------------------------------------------------------------- /customer-service-demo-with-chatbison/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11 2 | EXPOSE 8083 3 | WORKDIR /app 4 | COPY requirements.txt ./ 5 | RUN pip install -r requirements.txt 6 | COPY . ./ 7 | ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8083", "--server.address=0.0.0.0" ] -------------------------------------------------------------------------------- /customer-service-demo-with-chatbison/README.md: -------------------------------------------------------------------------------- 1 | # Simple Customer Service Agent Demo - Generative AI Application - Hands On Coding 2 | ## Built Using Google Vertex AI, Langchain, Python, Docker 3 | ## Models used: 4 | Chat-Bison 5 | 6 | ## Introduction 7 | This video shows how to build a simple customer service agent that helps customers with item returns. 8 | The model is provided context, so it asks the customer whether the item was purchased within the last 15 days and is unused.
9 | It makes the decision based on the customer's responses to those questions. 10 | 11 | The video also shows how to enhance the user experience by streaming responses from the Large Language Model (LLM). 12 | 13 | ## What will you learn: 14 | - How to create a key for a Service Account in Google Cloud Platform 15 | - Build a Docker image with the needed Python packages 16 | - How to mount the key into the Docker container at runtime, so you don't accidentally share your key file 17 | - Build the Streamlit application with Langchain, Python and the Google Cloud AI Platform SDK 18 | 19 | ### Docker Commands: 20 | 21 | Build Docker Image: 22 | `docker build -t simple-customer-service-demo .` 23 | 24 | Run the application: 25 | `docker run -v :/root/key -p 8083:8083 -it simple-customer-service-demo` 26 | 27 | 28 | #### Note: The Docker volume mount is only needed locally. If you are running the container in Cloud Run, GKE, ECS, or EKS, the platform's IAM role / service account is used. Please check the appropriate cloud provider's documentation. 29 | 30 | 31 | ## Youtube 32 | I have created a YouTube video for this tutorial with step-by-step hands-on coding. 33 | 34 | [![Simple Customer Service Agent - Generative AI Application With Chat Model](https://i9.ytimg.com/vi/McGWCwSCjd8/mqdefault.jpg?v=664644ff&sqp=CPCJmbIG&rs=AOn4CLDT5WJAKuPIaOjQfMn0sbKa-ZKduQ)](https://www.youtube.com/watch?v=McGWCwSCjd8) 35 | 36 | -------------------------------------------------------------------------------- /customer-service-demo-with-chatbison/app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | import time 4 | 5 | from vertexai.language_models import ChatModel 6 | from langchain_core.messages import HumanMessage, AIMessage 7 | 8 | os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = '/root/key/key.json' 9 | model_id = "chat-bison@002" 10 | 11 | context = ''' 12 | You are a helpful order return assistant. 13 | Customers will use you to return their items. Items can only be returned if they were purchased within the last 15 days and are unused. 14 | Make sure to confirm that the item is BOTH unused and was purchased within the last 15 days. Please ask the customer about both rules. 15 | If BOTH the above conditions are met, then show a message with the return address [1st main street, new york, NY 10001], otherwise reject the return with a friendly message. 16 | Do not worry about Order Numbers or Product Details. 17 | ''' 18 | 19 | chat_model = ChatModel.from_pretrained(model_id) 20 | chat = chat_model.start_chat(context=context) 21 | 22 | parameters = { 23 | "temperature": 0.0, 24 | "max_output_tokens": 256, 25 | "top_p": 0.95, 26 | "top_k": 40 27 | } 28 | 29 | def response_generator(prompt, parameters, messages): 30 | history = '' 31 | for message in messages: 32 | if isinstance(message, HumanMessage): 33 | history = f"{history}\nUser: {message.content}" 34 | 35 | if isinstance(message, AIMessage): 36 | history = f"{history}\nAssistant: {message.content}" 37 | 38 | 39 | 40 | response = chat.send_message(f"{history}\n{prompt}", **parameters) 41 | response_text = response.text 42 | 43 | for word in response_text.split(): 44 | yield word + " " 45 | time.sleep(0.05) 46 | 47 | def reset_chat(): 48 | st.session_state.messages = [] 49 | 50 | def main(): 51 | st.title("Customer Service Agent Demo") 52 | st.write("Hello, welcome to the return center.
I am here to help you.") 53 | 54 | if "messages" not in st.session_state: 55 | st.session_state.messages = [] 56 | 57 | 58 | for message in st.session_state.messages: 59 | if isinstance(message, HumanMessage): 60 | with st.chat_message("Human"): 61 | st.markdown(message.content) 62 | 63 | if isinstance(message, AIMessage): 64 | with st.chat_message("AI"): 65 | st.markdown(message.content) 66 | 67 | 68 | if prompt:= st.chat_input("Hello, how can I help you?"): 69 | st.session_state.messages.append(HumanMessage(prompt)) 70 | with st.chat_message("Human"): 71 | st.markdown(prompt) 72 | 73 | with st.chat_message("AI"): 74 | response = st.write_stream(response_generator(prompt, parameters, st.session_state.messages)) 75 | 76 | st.session_state.messages.append(AIMessage(response)) 77 | 78 | st.button("Reset Chat", on_click=reset_chat) 79 | 80 | if __name__ == "__main__": 81 | main() 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | -------------------------------------------------------------------------------- /customer-service-demo-with-chatbison/requirements.txt: -------------------------------------------------------------------------------- 1 | google-cloud-aiplatform 2 | langchain 3 | streamlit -------------------------------------------------------------------------------- /youtube-demos/all_fm.json: -------------------------------------------------------------------------------- 1 | { 2 | "modelSummaries": [ 3 | { 4 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-tg1-large", 5 | "modelId": "amazon.titan-tg1-large", 6 | "modelName": "Titan Text Large", 7 | "providerName": "Amazon", 8 | "inputModalities": [ 9 | "TEXT" 10 | ], 11 | "outputModalities": [ 12 | "TEXT" 13 | ], 14 | "responseStreamingSupported": true, 15 | "customizationsSupported": [], 16 | "inferenceTypesSupported": [ 17 | "ON_DEMAND" 18 | ], 19 | "modelLifecycle": { 20 | "status": "ACTIVE" 21 | } 22 | }, 23 | { 24 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-embed-g1-text-02", 25 | "modelId": "amazon.titan-embed-g1-text-02", 26 | "modelName": "Titan Text Embeddings v2", 27 | "providerName": "Amazon", 28 | "inputModalities": [ 29 | "TEXT" 30 | ], 31 | "outputModalities": [ 32 | "EMBEDDING" 33 | ], 34 | "customizationsSupported": [], 35 | "inferenceTypesSupported": [ 36 | "ON_DEMAND" 37 | ], 38 | "modelLifecycle": { 39 | "status": "ACTIVE" 40 | } 41 | }, 42 | { 43 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-text-lite-v1:0:4k", 44 | "modelId": "amazon.titan-text-lite-v1:0:4k", 45 | "modelName": "Titan Text G1 - Lite", 46 | "providerName": "Amazon", 47 | "inputModalities": [ 48 | "TEXT" 49 | ], 50 | "outputModalities": [ 51 | "TEXT" 52 | ], 53 | "responseStreamingSupported": true, 54 | "customizationsSupported": [ 55 | "FINE_TUNING", 56 | "CONTINUED_PRE_TRAINING" 57 | ], 58 | "inferenceTypesSupported": [ 59 | "PROVISIONED" 60 | ], 61 | "modelLifecycle": { 62 | "status": "ACTIVE" 63 | } 64 | }, 65 | { 66 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-text-lite-v1", 67 | "modelId": "amazon.titan-text-lite-v1", 68 | "modelName": "Titan Text G1 - Lite", 69 | "providerName": "Amazon", 70 | "inputModalities": [ 71 | "TEXT" 72 | ], 73 | "outputModalities": [ 74 | "TEXT" 75 | ], 76 | "responseStreamingSupported": true, 77 | "customizationsSupported": [], 78 | "inferenceTypesSupported": [ 79 | "ON_DEMAND" 80 | ], 81 | "modelLifecycle": { 82 | "status": "ACTIVE" 83 | } 84 | }, 85 | { 86 | "modelArn": 
"arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-text-express-v1:0:8k", 87 | "modelId": "amazon.titan-text-express-v1:0:8k", 88 | "modelName": "Titan Text G1 - Express", 89 | "providerName": "Amazon", 90 | "inputModalities": [ 91 | "TEXT" 92 | ], 93 | "outputModalities": [ 94 | "TEXT" 95 | ], 96 | "responseStreamingSupported": true, 97 | "customizationsSupported": [ 98 | "FINE_TUNING", 99 | "CONTINUED_PRE_TRAINING" 100 | ], 101 | "inferenceTypesSupported": [ 102 | "PROVISIONED" 103 | ], 104 | "modelLifecycle": { 105 | "status": "ACTIVE" 106 | } 107 | }, 108 | { 109 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-text-express-v1", 110 | "modelId": "amazon.titan-text-express-v1", 111 | "modelName": "Titan Text G1 - Express", 112 | "providerName": "Amazon", 113 | "inputModalities": [ 114 | "TEXT" 115 | ], 116 | "outputModalities": [ 117 | "TEXT" 118 | ], 119 | "responseStreamingSupported": true, 120 | "customizationsSupported": [], 121 | "inferenceTypesSupported": [ 122 | "ON_DEMAND" 123 | ], 124 | "modelLifecycle": { 125 | "status": "ACTIVE" 126 | } 127 | }, 128 | { 129 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-embed-text-v1:2:8k", 130 | "modelId": "amazon.titan-embed-text-v1:2:8k", 131 | "modelName": "Titan Embeddings G1 - Text", 132 | "providerName": "Amazon", 133 | "inputModalities": [ 134 | "TEXT" 135 | ], 136 | "outputModalities": [ 137 | "EMBEDDING" 138 | ], 139 | "responseStreamingSupported": false, 140 | "customizationsSupported": [], 141 | "inferenceTypesSupported": [ 142 | "PROVISIONED" 143 | ], 144 | "modelLifecycle": { 145 | "status": "ACTIVE" 146 | } 147 | }, 148 | { 149 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-embed-text-v1", 150 | "modelId": "amazon.titan-embed-text-v1", 151 | "modelName": "Titan Embeddings G1 - Text", 152 | "providerName": "Amazon", 153 | "inputModalities": [ 154 | "TEXT" 155 | ], 156 | "outputModalities": [ 157 | "EMBEDDING" 158 | ], 159 | "responseStreamingSupported": false, 160 | "customizationsSupported": [], 161 | "inferenceTypesSupported": [ 162 | "ON_DEMAND" 163 | ], 164 | "modelLifecycle": { 165 | "status": "ACTIVE" 166 | } 167 | }, 168 | { 169 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-embed-image-v1:0", 170 | "modelId": "amazon.titan-embed-image-v1:0", 171 | "modelName": "Titan Multimodal Embeddings G1", 172 | "providerName": "Amazon", 173 | "inputModalities": [ 174 | "TEXT", 175 | "IMAGE" 176 | ], 177 | "outputModalities": [ 178 | "EMBEDDING" 179 | ], 180 | "customizationsSupported": [ 181 | "FINE_TUNING" 182 | ], 183 | "inferenceTypesSupported": [ 184 | "PROVISIONED" 185 | ], 186 | "modelLifecycle": { 187 | "status": "ACTIVE" 188 | } 189 | }, 190 | { 191 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-embed-image-v1", 192 | "modelId": "amazon.titan-embed-image-v1", 193 | "modelName": "Titan Multimodal Embeddings G1", 194 | "providerName": "Amazon", 195 | "inputModalities": [ 196 | "TEXT", 197 | "IMAGE" 198 | ], 199 | "outputModalities": [ 200 | "EMBEDDING" 201 | ], 202 | "customizationsSupported": [], 203 | "inferenceTypesSupported": [ 204 | "ON_DEMAND" 205 | ], 206 | "modelLifecycle": { 207 | "status": "ACTIVE" 208 | } 209 | }, 210 | { 211 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-image-generator-v1:0", 212 | "modelId": "amazon.titan-image-generator-v1:0", 213 | "modelName": "Titan Image Generator G1", 214 | "providerName": "Amazon", 215 | "inputModalities": [ 216 | 
"TEXT", 217 | "IMAGE" 218 | ], 219 | "outputModalities": [ 220 | "IMAGE" 221 | ], 222 | "customizationsSupported": [ 223 | "FINE_TUNING" 224 | ], 225 | "inferenceTypesSupported": [ 226 | "PROVISIONED" 227 | ], 228 | "modelLifecycle": { 229 | "status": "ACTIVE" 230 | } 231 | }, 232 | { 233 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-image-generator-v1", 234 | "modelId": "amazon.titan-image-generator-v1", 235 | "modelName": "Titan Image Generator G1", 236 | "providerName": "Amazon", 237 | "inputModalities": [ 238 | "TEXT", 239 | "IMAGE" 240 | ], 241 | "outputModalities": [ 242 | "IMAGE" 243 | ], 244 | "customizationsSupported": [], 245 | "inferenceTypesSupported": [ 246 | "ON_DEMAND" 247 | ], 248 | "modelLifecycle": { 249 | "status": "ACTIVE" 250 | } 251 | }, 252 | { 253 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/stability.stable-diffusion-xl", 254 | "modelId": "stability.stable-diffusion-xl", 255 | "modelName": "SDXL 0.8", 256 | "providerName": "Stability AI", 257 | "inputModalities": [ 258 | "TEXT", 259 | "IMAGE" 260 | ], 261 | "outputModalities": [ 262 | "IMAGE" 263 | ], 264 | "customizationsSupported": [], 265 | "inferenceTypesSupported": [ 266 | "ON_DEMAND" 267 | ], 268 | "modelLifecycle": { 269 | "status": "LEGACY" 270 | } 271 | }, 272 | { 273 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/stability.stable-diffusion-xl-v0", 274 | "modelId": "stability.stable-diffusion-xl-v0", 275 | "modelName": "SDXL 0.8", 276 | "providerName": "Stability AI", 277 | "inputModalities": [ 278 | "TEXT", 279 | "IMAGE" 280 | ], 281 | "outputModalities": [ 282 | "IMAGE" 283 | ], 284 | "customizationsSupported": [], 285 | "inferenceTypesSupported": [ 286 | "ON_DEMAND" 287 | ], 288 | "modelLifecycle": { 289 | "status": "LEGACY" 290 | } 291 | }, 292 | { 293 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/stability.stable-diffusion-xl-v1:0", 294 | "modelId": "stability.stable-diffusion-xl-v1:0", 295 | "modelName": "SDXL 1.0", 296 | "providerName": "Stability AI", 297 | "inputModalities": [ 298 | "TEXT", 299 | "IMAGE" 300 | ], 301 | "outputModalities": [ 302 | "IMAGE" 303 | ], 304 | "customizationsSupported": [], 305 | "inferenceTypesSupported": [ 306 | "PROVISIONED" 307 | ], 308 | "modelLifecycle": { 309 | "status": "ACTIVE" 310 | } 311 | }, 312 | { 313 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/stability.stable-diffusion-xl-v1", 314 | "modelId": "stability.stable-diffusion-xl-v1", 315 | "modelName": "SDXL 1.0", 316 | "providerName": "Stability AI", 317 | "inputModalities": [ 318 | "TEXT", 319 | "IMAGE" 320 | ], 321 | "outputModalities": [ 322 | "IMAGE" 323 | ], 324 | "customizationsSupported": [], 325 | "inferenceTypesSupported": [ 326 | "ON_DEMAND" 327 | ], 328 | "modelLifecycle": { 329 | "status": "ACTIVE" 330 | } 331 | }, 332 | { 333 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/ai21.j2-grande-instruct", 334 | "modelId": "ai21.j2-grande-instruct", 335 | "modelName": "J2 Grande Instruct", 336 | "providerName": "AI21 Labs", 337 | "inputModalities": [ 338 | "TEXT" 339 | ], 340 | "outputModalities": [ 341 | "TEXT" 342 | ], 343 | "responseStreamingSupported": false, 344 | "customizationsSupported": [], 345 | "inferenceTypesSupported": [ 346 | "ON_DEMAND" 347 | ], 348 | "modelLifecycle": { 349 | "status": "ACTIVE" 350 | } 351 | }, 352 | { 353 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/ai21.j2-jumbo-instruct", 354 | "modelId": "ai21.j2-jumbo-instruct", 355 | "modelName": "J2 Jumbo Instruct", 356 | 
"providerName": "AI21 Labs", 357 | "inputModalities": [ 358 | "TEXT" 359 | ], 360 | "outputModalities": [ 361 | "TEXT" 362 | ], 363 | "responseStreamingSupported": false, 364 | "customizationsSupported": [], 365 | "inferenceTypesSupported": [ 366 | "ON_DEMAND" 367 | ], 368 | "modelLifecycle": { 369 | "status": "ACTIVE" 370 | } 371 | }, 372 | { 373 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/ai21.j2-mid", 374 | "modelId": "ai21.j2-mid", 375 | "modelName": "Jurassic-2 Mid", 376 | "providerName": "AI21 Labs", 377 | "inputModalities": [ 378 | "TEXT" 379 | ], 380 | "outputModalities": [ 381 | "TEXT" 382 | ], 383 | "responseStreamingSupported": false, 384 | "customizationsSupported": [], 385 | "inferenceTypesSupported": [ 386 | "ON_DEMAND" 387 | ], 388 | "modelLifecycle": { 389 | "status": "ACTIVE" 390 | } 391 | }, 392 | { 393 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/ai21.j2-mid-v1", 394 | "modelId": "ai21.j2-mid-v1", 395 | "modelName": "Jurassic-2 Mid", 396 | "providerName": "AI21 Labs", 397 | "inputModalities": [ 398 | "TEXT" 399 | ], 400 | "outputModalities": [ 401 | "TEXT" 402 | ], 403 | "responseStreamingSupported": false, 404 | "customizationsSupported": [], 405 | "inferenceTypesSupported": [ 406 | "ON_DEMAND" 407 | ], 408 | "modelLifecycle": { 409 | "status": "ACTIVE" 410 | } 411 | }, 412 | { 413 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/ai21.j2-ultra", 414 | "modelId": "ai21.j2-ultra", 415 | "modelName": "Jurassic-2 Ultra", 416 | "providerName": "AI21 Labs", 417 | "inputModalities": [ 418 | "TEXT" 419 | ], 420 | "outputModalities": [ 421 | "TEXT" 422 | ], 423 | "responseStreamingSupported": false, 424 | "customizationsSupported": [], 425 | "inferenceTypesSupported": [ 426 | "ON_DEMAND" 427 | ], 428 | "modelLifecycle": { 429 | "status": "ACTIVE" 430 | } 431 | }, 432 | { 433 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/ai21.j2-ultra-v1", 434 | "modelId": "ai21.j2-ultra-v1", 435 | "modelName": "Jurassic-2 Ultra", 436 | "providerName": "AI21 Labs", 437 | "inputModalities": [ 438 | "TEXT" 439 | ], 440 | "outputModalities": [ 441 | "TEXT" 442 | ], 443 | "responseStreamingSupported": false, 444 | "customizationsSupported": [], 445 | "inferenceTypesSupported": [ 446 | "ON_DEMAND" 447 | ], 448 | "modelLifecycle": { 449 | "status": "ACTIVE" 450 | } 451 | }, 452 | { 453 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-instant-v1:2:100k", 454 | "modelId": "anthropic.claude-instant-v1:2:100k", 455 | "modelName": "Claude Instant", 456 | "providerName": "Anthropic", 457 | "inputModalities": [ 458 | "TEXT" 459 | ], 460 | "outputModalities": [ 461 | "TEXT" 462 | ], 463 | "responseStreamingSupported": true, 464 | "customizationsSupported": [], 465 | "inferenceTypesSupported": [ 466 | "PROVISIONED" 467 | ], 468 | "modelLifecycle": { 469 | "status": "ACTIVE" 470 | } 471 | }, 472 | { 473 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-instant-v1", 474 | "modelId": "anthropic.claude-instant-v1", 475 | "modelName": "Claude Instant", 476 | "providerName": "Anthropic", 477 | "inputModalities": [ 478 | "TEXT" 479 | ], 480 | "outputModalities": [ 481 | "TEXT" 482 | ], 483 | "responseStreamingSupported": true, 484 | "customizationsSupported": [], 485 | "inferenceTypesSupported": [ 486 | "ON_DEMAND" 487 | ], 488 | "modelLifecycle": { 489 | "status": "ACTIVE" 490 | } 491 | }, 492 | { 493 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-v2:0:18k", 494 | "modelId": 
"anthropic.claude-v2:0:18k", 495 | "modelName": "Claude", 496 | "providerName": "Anthropic", 497 | "inputModalities": [ 498 | "TEXT" 499 | ], 500 | "outputModalities": [ 501 | "TEXT" 502 | ], 503 | "responseStreamingSupported": true, 504 | "customizationsSupported": [], 505 | "inferenceTypesSupported": [ 506 | "PROVISIONED" 507 | ], 508 | "modelLifecycle": { 509 | "status": "ACTIVE" 510 | } 511 | }, 512 | { 513 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-v2:0:100k", 514 | "modelId": "anthropic.claude-v2:0:100k", 515 | "modelName": "Claude", 516 | "providerName": "Anthropic", 517 | "inputModalities": [ 518 | "TEXT" 519 | ], 520 | "outputModalities": [ 521 | "TEXT" 522 | ], 523 | "responseStreamingSupported": true, 524 | "customizationsSupported": [], 525 | "inferenceTypesSupported": [ 526 | "PROVISIONED" 527 | ], 528 | "modelLifecycle": { 529 | "status": "ACTIVE" 530 | } 531 | }, 532 | { 533 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-v2:1:18k", 534 | "modelId": "anthropic.claude-v2:1:18k", 535 | "modelName": "Claude", 536 | "providerName": "Anthropic", 537 | "inputModalities": [ 538 | "TEXT" 539 | ], 540 | "outputModalities": [ 541 | "TEXT" 542 | ], 543 | "responseStreamingSupported": true, 544 | "customizationsSupported": [], 545 | "inferenceTypesSupported": [ 546 | "PROVISIONED" 547 | ], 548 | "modelLifecycle": { 549 | "status": "ACTIVE" 550 | } 551 | }, 552 | { 553 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-v2:1:200k", 554 | "modelId": "anthropic.claude-v2:1:200k", 555 | "modelName": "Claude", 556 | "providerName": "Anthropic", 557 | "inputModalities": [ 558 | "TEXT" 559 | ], 560 | "outputModalities": [ 561 | "TEXT" 562 | ], 563 | "responseStreamingSupported": true, 564 | "customizationsSupported": [], 565 | "inferenceTypesSupported": [ 566 | "PROVISIONED" 567 | ], 568 | "modelLifecycle": { 569 | "status": "ACTIVE" 570 | } 571 | }, 572 | { 573 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-v2:1", 574 | "modelId": "anthropic.claude-v2:1", 575 | "modelName": "Claude", 576 | "providerName": "Anthropic", 577 | "inputModalities": [ 578 | "TEXT" 579 | ], 580 | "outputModalities": [ 581 | "TEXT" 582 | ], 583 | "responseStreamingSupported": true, 584 | "customizationsSupported": [], 585 | "inferenceTypesSupported": [ 586 | "ON_DEMAND" 587 | ], 588 | "modelLifecycle": { 589 | "status": "ACTIVE" 590 | } 591 | }, 592 | { 593 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-v2", 594 | "modelId": "anthropic.claude-v2", 595 | "modelName": "Claude", 596 | "providerName": "Anthropic", 597 | "inputModalities": [ 598 | "TEXT" 599 | ], 600 | "outputModalities": [ 601 | "TEXT" 602 | ], 603 | "responseStreamingSupported": true, 604 | "customizationsSupported": [], 605 | "inferenceTypesSupported": [ 606 | "ON_DEMAND" 607 | ], 608 | "modelLifecycle": { 609 | "status": "ACTIVE" 610 | } 611 | }, 612 | { 613 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0:28k", 614 | "modelId": "anthropic.claude-3-sonnet-20240229-v1:0:28k", 615 | "modelName": "Claude 3 Sonnet", 616 | "providerName": "Anthropic", 617 | "inputModalities": [ 618 | "TEXT", 619 | "IMAGE" 620 | ], 621 | "outputModalities": [ 622 | "TEXT" 623 | ], 624 | "responseStreamingSupported": true, 625 | "customizationsSupported": [], 626 | "inferenceTypesSupported": [ 627 | "PROVISIONED" 628 | ], 629 | "modelLifecycle": { 630 | "status": "ACTIVE" 631 | } 632 | 
}, 633 | { 634 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0:200k", 635 | "modelId": "anthropic.claude-3-sonnet-20240229-v1:0:200k", 636 | "modelName": "Claude 3 Sonnet", 637 | "providerName": "Anthropic", 638 | "inputModalities": [ 639 | "TEXT", 640 | "IMAGE" 641 | ], 642 | "outputModalities": [ 643 | "TEXT" 644 | ], 645 | "responseStreamingSupported": true, 646 | "customizationsSupported": [], 647 | "inferenceTypesSupported": [ 648 | "PROVISIONED" 649 | ], 650 | "modelLifecycle": { 651 | "status": "ACTIVE" 652 | } 653 | }, 654 | { 655 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0", 656 | "modelId": "anthropic.claude-3-sonnet-20240229-v1:0", 657 | "modelName": "Claude 3 Sonnet", 658 | "providerName": "Anthropic", 659 | "inputModalities": [ 660 | "TEXT", 661 | "IMAGE" 662 | ], 663 | "outputModalities": [ 664 | "TEXT" 665 | ], 666 | "responseStreamingSupported": true, 667 | "customizationsSupported": [], 668 | "inferenceTypesSupported": [ 669 | "ON_DEMAND" 670 | ], 671 | "modelLifecycle": { 672 | "status": "ACTIVE" 673 | } 674 | }, 675 | { 676 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-haiku-20240307-v1:0:48k", 677 | "modelId": "anthropic.claude-3-haiku-20240307-v1:0:48k", 678 | "modelName": "Claude 3 Haiku", 679 | "providerName": "Anthropic", 680 | "inputModalities": [ 681 | "TEXT", 682 | "IMAGE" 683 | ], 684 | "outputModalities": [ 685 | "TEXT" 686 | ], 687 | "responseStreamingSupported": true, 688 | "customizationsSupported": [], 689 | "inferenceTypesSupported": [ 690 | "PROVISIONED" 691 | ], 692 | "modelLifecycle": { 693 | "status": "ACTIVE" 694 | } 695 | }, 696 | { 697 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-haiku-20240307-v1:0:200k", 698 | "modelId": "anthropic.claude-3-haiku-20240307-v1:0:200k", 699 | "modelName": "Claude 3 Haiku", 700 | "providerName": "Anthropic", 701 | "inputModalities": [ 702 | "TEXT", 703 | "IMAGE" 704 | ], 705 | "outputModalities": [ 706 | "TEXT" 707 | ], 708 | "responseStreamingSupported": true, 709 | "customizationsSupported": [], 710 | "inferenceTypesSupported": [ 711 | "PROVISIONED" 712 | ], 713 | "modelLifecycle": { 714 | "status": "ACTIVE" 715 | } 716 | }, 717 | { 718 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-haiku-20240307-v1:0", 719 | "modelId": "anthropic.claude-3-haiku-20240307-v1:0", 720 | "modelName": "Claude 3 Haiku", 721 | "providerName": "Anthropic", 722 | "inputModalities": [ 723 | "TEXT", 724 | "IMAGE" 725 | ], 726 | "outputModalities": [ 727 | "TEXT" 728 | ], 729 | "responseStreamingSupported": true, 730 | "customizationsSupported": [], 731 | "inferenceTypesSupported": [ 732 | "ON_DEMAND" 733 | ], 734 | "modelLifecycle": { 735 | "status": "ACTIVE" 736 | } 737 | }, 738 | { 739 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-opus-20240229-v1:0", 740 | "modelId": "anthropic.claude-3-opus-20240229-v1:0", 741 | "modelName": "Claude 3 Opus", 742 | "providerName": "Anthropic", 743 | "inputModalities": [ 744 | "TEXT", 745 | "IMAGE" 746 | ], 747 | "outputModalities": [ 748 | "TEXT" 749 | ], 750 | "responseStreamingSupported": true, 751 | "customizationsSupported": [], 752 | "inferenceTypesSupported": [ 753 | "ON_DEMAND" 754 | ], 755 | "modelLifecycle": { 756 | "status": "ACTIVE" 757 | } 758 | }, 759 | { 760 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/cohere.command-text-v14:7:4k", 761 | "modelId": 
"cohere.command-text-v14:7:4k", 762 | "modelName": "Command", 763 | "providerName": "Cohere", 764 | "inputModalities": [ 765 | "TEXT" 766 | ], 767 | "outputModalities": [ 768 | "TEXT" 769 | ], 770 | "responseStreamingSupported": true, 771 | "customizationsSupported": [ 772 | "FINE_TUNING" 773 | ], 774 | "inferenceTypesSupported": [ 775 | "PROVISIONED" 776 | ], 777 | "modelLifecycle": { 778 | "status": "ACTIVE" 779 | } 780 | }, 781 | { 782 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/cohere.command-text-v14", 783 | "modelId": "cohere.command-text-v14", 784 | "modelName": "Command", 785 | "providerName": "Cohere", 786 | "inputModalities": [ 787 | "TEXT" 788 | ], 789 | "outputModalities": [ 790 | "TEXT" 791 | ], 792 | "responseStreamingSupported": true, 793 | "customizationsSupported": [], 794 | "inferenceTypesSupported": [ 795 | "ON_DEMAND" 796 | ], 797 | "modelLifecycle": { 798 | "status": "ACTIVE" 799 | } 800 | }, 801 | { 802 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/cohere.command-light-text-v14:7:4k", 803 | "modelId": "cohere.command-light-text-v14:7:4k", 804 | "modelName": "Command Light", 805 | "providerName": "Cohere", 806 | "inputModalities": [ 807 | "TEXT" 808 | ], 809 | "outputModalities": [ 810 | "TEXT" 811 | ], 812 | "responseStreamingSupported": true, 813 | "customizationsSupported": [ 814 | "FINE_TUNING" 815 | ], 816 | "inferenceTypesSupported": [ 817 | "PROVISIONED" 818 | ], 819 | "modelLifecycle": { 820 | "status": "ACTIVE" 821 | } 822 | }, 823 | { 824 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/cohere.command-light-text-v14", 825 | "modelId": "cohere.command-light-text-v14", 826 | "modelName": "Command Light", 827 | "providerName": "Cohere", 828 | "inputModalities": [ 829 | "TEXT" 830 | ], 831 | "outputModalities": [ 832 | "TEXT" 833 | ], 834 | "responseStreamingSupported": true, 835 | "customizationsSupported": [], 836 | "inferenceTypesSupported": [ 837 | "ON_DEMAND" 838 | ], 839 | "modelLifecycle": { 840 | "status": "ACTIVE" 841 | } 842 | }, 843 | { 844 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/cohere.embed-english-v3:0:512", 845 | "modelId": "cohere.embed-english-v3:0:512", 846 | "modelName": "Embed English", 847 | "providerName": "Cohere", 848 | "inputModalities": [ 849 | "TEXT" 850 | ], 851 | "outputModalities": [ 852 | "EMBEDDING" 853 | ], 854 | "responseStreamingSupported": false, 855 | "customizationsSupported": [], 856 | "inferenceTypesSupported": [ 857 | "PROVISIONED" 858 | ], 859 | "modelLifecycle": { 860 | "status": "ACTIVE" 861 | } 862 | }, 863 | { 864 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/cohere.embed-english-v3", 865 | "modelId": "cohere.embed-english-v3", 866 | "modelName": "Embed English", 867 | "providerName": "Cohere", 868 | "inputModalities": [ 869 | "TEXT" 870 | ], 871 | "outputModalities": [ 872 | "EMBEDDING" 873 | ], 874 | "responseStreamingSupported": false, 875 | "customizationsSupported": [], 876 | "inferenceTypesSupported": [ 877 | "ON_DEMAND" 878 | ], 879 | "modelLifecycle": { 880 | "status": "ACTIVE" 881 | } 882 | }, 883 | { 884 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/cohere.embed-multilingual-v3:0:512", 885 | "modelId": "cohere.embed-multilingual-v3:0:512", 886 | "modelName": "Embed Multilingual", 887 | "providerName": "Cohere", 888 | "inputModalities": [ 889 | "TEXT" 890 | ], 891 | "outputModalities": [ 892 | "EMBEDDING" 893 | ], 894 | "responseStreamingSupported": false, 895 | "customizationsSupported": [], 896 | "inferenceTypesSupported": [ 
897 | "PROVISIONED" 898 | ], 899 | "modelLifecycle": { 900 | "status": "ACTIVE" 901 | } 902 | }, 903 | { 904 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/cohere.embed-multilingual-v3", 905 | "modelId": "cohere.embed-multilingual-v3", 906 | "modelName": "Embed Multilingual", 907 | "providerName": "Cohere", 908 | "inputModalities": [ 909 | "TEXT" 910 | ], 911 | "outputModalities": [ 912 | "EMBEDDING" 913 | ], 914 | "responseStreamingSupported": false, 915 | "customizationsSupported": [], 916 | "inferenceTypesSupported": [ 917 | "ON_DEMAND" 918 | ], 919 | "modelLifecycle": { 920 | "status": "ACTIVE" 921 | } 922 | }, 923 | { 924 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama2-13b-chat-v1:0:4k", 925 | "modelId": "meta.llama2-13b-chat-v1:0:4k", 926 | "modelName": "Llama 2 Chat 13B", 927 | "providerName": "Meta", 928 | "inputModalities": [ 929 | "TEXT" 930 | ], 931 | "outputModalities": [ 932 | "TEXT" 933 | ], 934 | "responseStreamingSupported": true, 935 | "customizationsSupported": [], 936 | "inferenceTypesSupported": [ 937 | "PROVISIONED" 938 | ], 939 | "modelLifecycle": { 940 | "status": "ACTIVE" 941 | } 942 | }, 943 | { 944 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama2-13b-chat-v1", 945 | "modelId": "meta.llama2-13b-chat-v1", 946 | "modelName": "Llama 2 Chat 13B", 947 | "providerName": "Meta", 948 | "inputModalities": [ 949 | "TEXT" 950 | ], 951 | "outputModalities": [ 952 | "TEXT" 953 | ], 954 | "responseStreamingSupported": true, 955 | "customizationsSupported": [], 956 | "inferenceTypesSupported": [ 957 | "ON_DEMAND" 958 | ], 959 | "modelLifecycle": { 960 | "status": "ACTIVE" 961 | } 962 | }, 963 | { 964 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama2-70b-chat-v1:0:4k", 965 | "modelId": "meta.llama2-70b-chat-v1:0:4k", 966 | "modelName": "Llama 2 Chat 70B", 967 | "providerName": "Meta", 968 | "inputModalities": [ 969 | "TEXT" 970 | ], 971 | "outputModalities": [ 972 | "TEXT" 973 | ], 974 | "responseStreamingSupported": true, 975 | "customizationsSupported": [], 976 | "inferenceTypesSupported": [], 977 | "modelLifecycle": { 978 | "status": "ACTIVE" 979 | } 980 | }, 981 | { 982 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama2-70b-chat-v1", 983 | "modelId": "meta.llama2-70b-chat-v1", 984 | "modelName": "Llama 2 Chat 70B", 985 | "providerName": "Meta", 986 | "inputModalities": [ 987 | "TEXT" 988 | ], 989 | "outputModalities": [ 990 | "TEXT" 991 | ], 992 | "responseStreamingSupported": true, 993 | "customizationsSupported": [], 994 | "inferenceTypesSupported": [ 995 | "ON_DEMAND" 996 | ], 997 | "modelLifecycle": { 998 | "status": "ACTIVE" 999 | } 1000 | }, 1001 | { 1002 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama2-13b-v1:0:4k", 1003 | "modelId": "meta.llama2-13b-v1:0:4k", 1004 | "modelName": "Llama 2 13B", 1005 | "providerName": "Meta", 1006 | "inputModalities": [ 1007 | "TEXT" 1008 | ], 1009 | "outputModalities": [ 1010 | "TEXT" 1011 | ], 1012 | "responseStreamingSupported": true, 1013 | "customizationsSupported": [ 1014 | "FINE_TUNING" 1015 | ], 1016 | "inferenceTypesSupported": [], 1017 | "modelLifecycle": { 1018 | "status": "ACTIVE" 1019 | } 1020 | }, 1021 | { 1022 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama2-13b-v1", 1023 | "modelId": "meta.llama2-13b-v1", 1024 | "modelName": "Llama 2 13B", 1025 | "providerName": "Meta", 1026 | "inputModalities": [ 1027 | "TEXT" 1028 | ], 1029 | "outputModalities": [ 1030 | "TEXT" 1031 | ], 1032 | 
"responseStreamingSupported": true, 1033 | "customizationsSupported": [], 1034 | "inferenceTypesSupported": [], 1035 | "modelLifecycle": { 1036 | "status": "ACTIVE" 1037 | } 1038 | }, 1039 | { 1040 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama2-70b-v1:0:4k", 1041 | "modelId": "meta.llama2-70b-v1:0:4k", 1042 | "modelName": "Llama 2 70B", 1043 | "providerName": "Meta", 1044 | "inputModalities": [ 1045 | "TEXT" 1046 | ], 1047 | "outputModalities": [ 1048 | "TEXT" 1049 | ], 1050 | "responseStreamingSupported": true, 1051 | "customizationsSupported": [ 1052 | "FINE_TUNING" 1053 | ], 1054 | "inferenceTypesSupported": [], 1055 | "modelLifecycle": { 1056 | "status": "ACTIVE" 1057 | } 1058 | }, 1059 | { 1060 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama2-70b-v1", 1061 | "modelId": "meta.llama2-70b-v1", 1062 | "modelName": "Llama 2 70B", 1063 | "providerName": "Meta", 1064 | "inputModalities": [ 1065 | "TEXT" 1066 | ], 1067 | "outputModalities": [ 1068 | "TEXT" 1069 | ], 1070 | "responseStreamingSupported": true, 1071 | "customizationsSupported": [], 1072 | "inferenceTypesSupported": [], 1073 | "modelLifecycle": { 1074 | "status": "ACTIVE" 1075 | } 1076 | }, 1077 | { 1078 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama3-8b-instruct-v1:0", 1079 | "modelId": "meta.llama3-8b-instruct-v1:0", 1080 | "modelName": "Llama 3 8B Instruct", 1081 | "providerName": "Meta", 1082 | "inputModalities": [ 1083 | "TEXT" 1084 | ], 1085 | "outputModalities": [ 1086 | "TEXT" 1087 | ], 1088 | "responseStreamingSupported": true, 1089 | "customizationsSupported": [], 1090 | "inferenceTypesSupported": [ 1091 | "ON_DEMAND" 1092 | ], 1093 | "modelLifecycle": { 1094 | "status": "ACTIVE" 1095 | } 1096 | }, 1097 | { 1098 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/meta.llama3-70b-instruct-v1:0", 1099 | "modelId": "meta.llama3-70b-instruct-v1:0", 1100 | "modelName": "Llama 3 70B Instruct", 1101 | "providerName": "Meta", 1102 | "inputModalities": [ 1103 | "TEXT" 1104 | ], 1105 | "outputModalities": [ 1106 | "TEXT" 1107 | ], 1108 | "responseStreamingSupported": true, 1109 | "customizationsSupported": [], 1110 | "inferenceTypesSupported": [ 1111 | "ON_DEMAND" 1112 | ], 1113 | "modelLifecycle": { 1114 | "status": "ACTIVE" 1115 | } 1116 | }, 1117 | { 1118 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/mistral.mistral-7b-instruct-v0:2", 1119 | "modelId": "mistral.mistral-7b-instruct-v0:2", 1120 | "modelName": "Mistral 7B Instruct", 1121 | "providerName": "Mistral AI", 1122 | "inputModalities": [ 1123 | "TEXT" 1124 | ], 1125 | "outputModalities": [ 1126 | "TEXT" 1127 | ], 1128 | "responseStreamingSupported": true, 1129 | "customizationsSupported": [], 1130 | "inferenceTypesSupported": [ 1131 | "ON_DEMAND" 1132 | ], 1133 | "modelLifecycle": { 1134 | "status": "ACTIVE" 1135 | } 1136 | }, 1137 | { 1138 | "modelArn": "arn:aws:bedrock:us-west-2::foundation-model/mistral.mixtral-8x7b-instruct-v0:1", 1139 | "modelId": "mistral.mixtral-8x7b-instruct-v0:1", 1140 | "modelName": "Mixtral 8x7B Instruct", 1141 | "providerName": "Mistral AI", 1142 | "inputModalities": [ 1143 | "TEXT" 1144 | ], 1145 | "outputModalities": [ 1146 | "TEXT" 1147 | ], 1148 | "responseStreamingSupported": true, 1149 | "customizationsSupported": [], 1150 | "inferenceTypesSupported": [ 1151 | "ON_DEMAND" 1152 | ], 1153 | "modelLifecycle": { 1154 | "status": "ACTIVE" 1155 | } 1156 | }, 1157 | { 1158 | "modelArn": 
"arn:aws:bedrock:us-west-2::foundation-model/mistral.mistral-large-2402-v1:0", 1159 | "modelId": "mistral.mistral-large-2402-v1:0", 1160 | "modelName": "Mistral Large", 1161 | "providerName": "Mistral AI", 1162 | "inputModalities": [ 1163 | "TEXT" 1164 | ], 1165 | "outputModalities": [ 1166 | "TEXT" 1167 | ], 1168 | "responseStreamingSupported": true, 1169 | "customizationsSupported": [], 1170 | "inferenceTypesSupported": [ 1171 | "ON_DEMAND" 1172 | ], 1173 | "modelLifecycle": { 1174 | "status": "ACTIVE" 1175 | } 1176 | } 1177 | ] 1178 | } 1179 | -------------------------------------------------------------------------------- /youtube-demos/query_llama.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | 4 | client = boto3.client(service_name = "bedrock-runtime") 5 | 6 | model_id = "meta.llama2-13b-chat-v1" 7 | #model_id = "meta.llama3-8b-instruct-v1:0" 8 | 9 | user_message = "What is the capital of Norway?" 10 | # user_message = "Write me a fairytale story in 500 words." 11 | 12 | prompt = f"[INST] {user_message} [/INST]" 13 | 14 | request = { 15 | "prompt": prompt, 16 | # Optional inference parameters: 17 | "max_gen_len": 100, 18 | "temperature": 0.5, 19 | "top_p": 0.9, 20 | } 21 | 22 | response = client.invoke_model(body=json.dumps(request), modelId=model_id) 23 | 24 | # Decode the native response body. 25 | model_response = json.loads(response["body"].read()) 26 | 27 | print(model_response) 28 | 29 | print("-------------") 30 | 31 | # Extract and print the generated text. 32 | response_text = model_response["generation"] 33 | print(response_text) --------------------------------------------------------------------------------