├── requirements.txt
├── claude_3_examples
│   ├── cat_image.png
│   ├── README.md
│   ├── message_api.py
│   └── image_api_st.py
├── CODE_OF_CONDUCT.md
├── LICENSE
├── chat_bedrock_st.py
├── rag_example.py
├── README.md
├── sd_sample_st.py
├── CONTRIBUTING.md
└── text_examples.py
/requirements.txt:
--------------------------------------------------------------------------------
boto3
langchain
langchain-community
streamlit
pillow
faiss-cpu
--------------------------------------------------------------------------------
/claude_3_examples/cat_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/build-on-aws/amazon-bedrock-quick-start/HEAD/claude_3_examples/cat_image.png
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.
--------------------------------------------------------------------------------
/claude_3_examples/README.md:
--------------------------------------------------------------------------------
# Getting Started with Claude 3 on Amazon Bedrock

This repository contains code samples for using Claude 3 on Amazon Bedrock.

## Getting Started

To get a local copy up and running, follow these simple steps.

### Prerequisites
* Python 3.9 or higher
* pip
* [Model Access in Amazon Bedrock](https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/modelaccess)

### Text Examples

Run this Python script to see different text-based applications like text summarization, sentiment analysis, and Q&A:

```bash
python message_api.py
```

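`message_api.py` talks to Claude 3 through the Bedrock Messages API. The core of the call looks roughly like this (a condensed sketch of the code in `message_api.py`, with a placeholder prompt):

```python
import json

import boto3

bedrock_runtime = boto3.client("bedrock-runtime", region_name="us-west-2")

# Claude 3 models on Bedrock use the Messages API request format
body = json.dumps({
    "anthropic_version": "bedrock-2023-05-31",
    "max_tokens": 4096,
    "messages": [
        {"role": "user", "content": [{"type": "text", "text": "Summarize Amazon Bedrock in one sentence."}]}
    ],
})

response = bedrock_runtime.invoke_model(
    body=body, modelId="anthropic.claude-3-sonnet-20240229-v1:0"
)
response_body = json.loads(response["body"].read())
print(response_body["content"][0]["text"])
```
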
### Image Generation

To generate images using Stable Diffusion or Amazon Titan and have Claude 3 caption the image, run the following command:

```bash
streamlit run image_api_st.py
```

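Claude 3 is multimodal, so `image_api_st.py` obtains the caption by sending the generated image to Claude as a base64 image block alongside a text instruction. A condensed sketch of that request, using the sample `cat_image.png` in this folder:

```python
import base64
import json

import boto3

bedrock_runtime = boto3.client("bedrock-runtime", region_name="us-west-2")

# Base64-encode a PNG to send to Claude (cat_image.png ships with this folder)
with open("cat_image.png", "rb") as f:
    base64_string = base64.b64encode(f.read()).decode("utf-8")

body = json.dumps({
    "anthropic_version": "bedrock-2023-05-31",
    "max_tokens": 4096,
    "messages": [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "source": {
                        "type": "base64",
                        "media_type": "image/png",
                        "data": base64_string,
                    },
                },
                {"type": "text", "text": "Provide a caption for this image"},
            ],
        }
    ],
})

response = bedrock_runtime.invoke_model(
    body=body, modelId="anthropic.claude-3-sonnet-20240229-v1:0"
)
print(json.loads(response["body"].read())["content"][0]["text"])
```
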
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT No Attribution

Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/chat_bedrock_st.py:
--------------------------------------------------------------------------------
import time

import boto3
import streamlit as st
from langchain.chains import ConversationChain
from langchain_community.llms import Bedrock
from langchain.memory import ConversationBufferMemory

st.title("ChatBedrock")

# Set up the Bedrock runtime client
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name="us-east-1",
)


@st.cache_resource
def load_llm():
    llm = Bedrock(client=bedrock_runtime, model_id="anthropic.claude-v2")
    llm.model_kwargs = {"temperature": 0.7, "max_tokens_to_sample": 2048}

    model = ConversationChain(llm=llm, verbose=True, memory=ConversationBufferMemory())

    return model


model = load_llm()

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        result = model.predict(input=prompt)

        # Simulate a response stream with a small delay between words
        for chunk in result.split(" "):  # fix for https://github.com/streamlit/streamlit/issues/868
            full_response += chunk + " "
            if chunk.endswith("\n"):
                full_response += " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")

        message_placeholder.markdown(full_response)

    st.session_state.messages.append({"role": "assistant", "content": full_response})
--------------------------------------------------------------------------------
/rag_example.py:
--------------------------------------------------------------------------------
import json

import boto3
from langchain_community.embeddings import BedrockEmbeddings
from langchain_community.vectorstores import FAISS

# Set up the Bedrock runtime client
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name="us-east-1",
)

sentences = [
    # Pets
    "Your dog is so cute.",
    "How cute your dog is!",
    "You have such a cute dog!",
    # Cities in the US
    "New York City is the place where I work.",
    "I work in New York City.",
    # Color
    "What color do you like the most?",
    "What is your favourite color?",
]


def claude_prompt_format(prompt: str) -> str:
    # Add the Human/Assistant headers Claude v2 expects to the start and end of the prompt
    return "\n\nHuman: " + prompt + "\n\nAssistant:"


# Call Claude model
def call_claude(prompt):
    prompt_config = {
        "prompt": claude_prompt_format(prompt),
        "max_tokens_to_sample": 4096,
        "temperature": 0.5,
        "top_k": 250,
        "top_p": 0.5,
        "stop_sequences": [],
    }

    body = json.dumps(prompt_config)

    modelId = "anthropic.claude-v2"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("completion")
    return results


def rag_setup(query):
    embeddings = BedrockEmbeddings(
        client=bedrock_runtime,
        model_id="amazon.titan-embed-text-v1",
    )
    local_vector_store = FAISS.from_texts(sentences, embeddings)

    docs = local_vector_store.similarity_search(query)
    context = ""

    # Concatenate the retrieved sentences, one per line, as context
    for doc in docs:
        context += doc.page_content + "\n"

    prompt = f"""Use the following pieces of context to answer the question at the end.

{context}

Question: {query}
Answer:"""

    return call_claude(prompt)


query = "What type of pet do I have?"
print(query)
print(rag_setup(query))
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Quickly build Generative AI applications with Amazon Bedrock

This repository contains code samples for building diverse AI applications using Amazon Bedrock's foundation models. Learn how to accelerate projects in image and text generation and beyond.

## Getting Started

To get a local copy up and running, follow these simple steps.

### Prerequisites
* Python 3.9 or higher
* pip
* [Model Access in Amazon Bedrock](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/modelaccess)

### Installation

Clone the repo:

```bash
git clone https://github.com/build-on-aws/amazon-bedrock-quick-start.git
```

Install required packages:

```bash
pip install -r requirements.txt
```

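All of these samples create a Bedrock runtime client with boto3, so AWS credentials for an account with Bedrock model access must be available locally (for example via environment variables or a named profile). One way to set them up, assuming the AWS CLI is installed:

```bash
aws configure
```
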
## Usage

This repository contains various code samples demonstrating how to build AI applications using Amazon Bedrock's foundation models. Here's how to use each:

### Image Generation

To generate images using Stable Diffusion, run the following command:

```bash
streamlit run sd_sample_st.py
```

This will launch a Streamlit app where you can enter text prompts to generate corresponding images.

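Under the hood, the app makes a single `InvokeModel` call to the Stable Diffusion XL model and decodes the base64 image in the response. A condensed sketch of `sd_sample_st.py`'s core call, with a placeholder prompt:

```python
import base64
import io
import json

import boto3
from PIL import Image

bedrock_runtime = boto3.client("bedrock-runtime", region_name="us-east-1")

body = json.dumps({
    "text_prompts": [{"text": "a photo of a cat"}],  # placeholder prompt
    "cfg_scale": 10,
    "seed": 0,
    "steps": 50,
})
response = bedrock_runtime.invoke_model(body=body, modelId="stability.stable-diffusion-xl")
response_body = json.loads(response["body"].read())

# The model returns the image as a base64 string
image = Image.open(io.BytesIO(base64.b64decode(response_body["artifacts"][0]["base64"])))
image.save("cat.png")
```
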
### Text Examples

Run this Python script to see different text-based applications like text summarization, code generation, and Q&A:

```bash
python text_examples.py
```

This script will output results for each of these applications, showcasing the versatility of foundation models in text-based tasks.

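Each task is routed to a different model (AI21 Jurassic-2 for summarization, Claude for code generation, Cohere Command for Q&A), and each model family expects a different request body. For example, Claude v2 requires the prompt to be wrapped in Human/Assistant markers, condensed here from `text_examples.py`:

```python
import json

import boto3

bedrock_runtime = boto3.client("bedrock-runtime", region_name="us-east-1")

# Claude v2 expects the prompt wrapped in "\n\nHuman:" / "\n\nAssistant:" markers
body = json.dumps({
    "prompt": "\n\nHuman: Write a Python function that uploads a file to Amazon S3\n\nAssistant:",
    "max_tokens_to_sample": 4096,
    "temperature": 0.5,
})

response = bedrock_runtime.invoke_model(body=body, modelId="anthropic.claude-v2")
print(json.loads(response["body"].read())["completion"])
```
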
### Chatbot

To interact with a chatbot built using Amazon Bedrock, LangChain, and Streamlit, run:

```bash
streamlit run chat_bedrock_st.py
```

This launches a Streamlit app where you can hold a conversation with the chatbot and experience AI-powered conversational capabilities firsthand.

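The chatbot's memory comes from LangChain rather than the model itself: `chat_bedrock_st.py` wraps the Bedrock LLM in a `ConversationChain` with `ConversationBufferMemory`, so every turn is sent along with the prior exchanges. The core wiring, condensed from the script:

```python
import boto3
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import Bedrock

bedrock_runtime = boto3.client("bedrock-runtime", region_name="us-east-1")

llm = Bedrock(client=bedrock_runtime, model_id="anthropic.claude-v2")
llm.model_kwargs = {"temperature": 0.7, "max_tokens_to_sample": 2048}

# ConversationBufferMemory replays earlier turns into each new prompt,
# which is what gives the chatbot its memory of the conversation
chain = ConversationChain(llm=llm, memory=ConversationBufferMemory())
print(chain.predict(input="Hello!"))
```
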
### RAG Example

To see how Retrieval Augmented Generation (RAG) works with LangChain, execute:

```bash
python rag_example.py
```

This will demonstrate how RAG augments foundation models by retrieving relevant external data and incorporating it into the generated content.

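In `rag_example.py`, retrieval is a FAISS similarity search over a small in-memory corpus embedded with Amazon Titan; the best matches are stuffed into the prompt as context before calling Claude. Condensed from the script, with a toy two-sentence corpus:

```python
import boto3
from langchain_community.embeddings import BedrockEmbeddings
from langchain_community.vectorstores import FAISS

bedrock_runtime = boto3.client("bedrock-runtime", region_name="us-east-1")

# Embed a toy corpus with Titan and index it in a local FAISS store
sentences = ["Your dog is so cute.", "I work in New York City."]
embeddings = BedrockEmbeddings(client=bedrock_runtime, model_id="amazon.titan-embed-text-v1")
vector_store = FAISS.from_texts(sentences, embeddings)

# Retrieve the sentences most similar to the query ...
query = "What type of pet do I have?"
docs = vector_store.similarity_search(query)
context = "\n".join(doc.page_content for doc in docs)

# ... and hand them to the model as context for the answer
prompt = f"""Use the following pieces of context to answer the question at the end.

{context}

Question: {query}
Answer:"""
```

The assembled prompt is then sent to Claude, exactly as in the script's `call_claude` helper.
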
## Security

See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.

## License

This library is licensed under the MIT-0 License. See the LICENSE file.
--------------------------------------------------------------------------------
/sd_sample_st.py:
--------------------------------------------------------------------------------
import base64
import io
import json

import boto3
import streamlit as st
from PIL import Image


st.title("Building with Bedrock")  # Title of the application
st.subheader("Stable Diffusion Demo")


# List of Stable Diffusion Preset Styles
sd_presets = [
    "None",
    "3d-model",
    "analog-film",
    "anime",
    "cinematic",
    "comic-book",
    "digital-art",
    "enhance",
    "fantasy-art",
    "isometric",
    "line-art",
    "low-poly",
    "modeling-compound",
    "neon-punk",
    "origami",
    "photographic",
    "pixel-art",
    "tile-texture",
]

# Set up the Bedrock runtime client
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name="us-east-1",
)


# Bedrock API call to Stable Diffusion
def generate_image(text, style):
    """
    Purpose:
        Uses Bedrock API to generate an Image
    Args/Requests:
        text: Prompt
        style: style for image
    Return:
        image: base64 string of image
    """
    body = {
        "text_prompts": [{"text": text}],
        "cfg_scale": 10,
        "seed": 0,
        "steps": 50,
        "style_preset": style,
    }

    if style == "None":
        del body["style_preset"]

    body = json.dumps(body)

    modelId = "stability.stable-diffusion-xl"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("artifacts")[0].get("base64")
    return results


# Turn base64 string to image with PIL
def base64_to_pil(base64_string):
    """
    Purpose:
        Turn base64 string to image with PIL
    Args/Requests:
        base64_string: base64 string of image
    Return:
        image: PIL image
    """
    imgdata = base64.b64decode(base64_string)
    image = Image.open(io.BytesIO(imgdata))
    return image


# Select box for styles
style = st.selectbox("Select Style", sd_presets)
# Text input for the prompt
prompt = st.text_input("Enter prompt")


# Generate image from prompt
if st.button("Generate Image"):
    image = base64_to_pil(generate_image(prompt, style))
    st.image(image)
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
# Contributing Guidelines

Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
documentation, we greatly value feedback and contributions from our community.

Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
information to effectively respond to your bug report or contribution.


## Reporting Bugs/Feature Requests

We welcome you to use the GitHub issue tracker to report bugs or suggest features.

When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:

* A reproducible test case or series of steps
* The version of our code being used
* Any modifications you've made relevant to the bug
* Anything unusual about your environment or deployment


## Contributing via Pull Requests
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:

1. You are working against the latest source on the *main* branch.
2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
3. You open an issue to discuss any significant work - we would hate for your time to be wasted.

To send us a pull request, please:

1. Fork the repository.
2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
3. Ensure local tests pass.
4. Commit to your fork using clear commit messages.
5. Send us a pull request, answering any default questions in the pull request interface.
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.

GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).


## Finding contributions to work on
Looking at the existing issues is a great way to find something to work on. Since our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.


## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.


## Security issue notifications
If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.


## Licensing

See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
--------------------------------------------------------------------------------
/claude_3_examples/message_api.py:
--------------------------------------------------------------------------------
import boto3
import json
import time

# Set up the Bedrock runtime client
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name="us-west-2",
)


def call_claude_sonet(prompt):
    prompt_config = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": 4096,
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                ],
            }
        ],
    }

    body = json.dumps(prompt_config)

    modelId = "anthropic.claude-3-sonnet-20240229-v1:0"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("content")[0].get("text")
    return results


def summarize_text(text):
    """
    Function to summarize text using a generative AI model.
    """
    prompt = f"Summarize the following text in 50 words or less: {text}"
    result = call_claude_sonet(prompt)
    return result


def sentiment_analysis(text):
    """
    Function to return a JSON object of sentiment from a given text.
    """
    prompt = f"Given the following text, return only a valid JSON object of sentiment analysis. text: {text} "
    result = call_claude_sonet(prompt)
    return result


def perform_qa(question, text):
    """
    Function to perform a Q&A operation based on the provided text.
    """
    prompt = f"Given the following text, answer the question. If the answer is not in the text, say you do not know: {question} text: {text} "
    result = call_claude_sonet(prompt)
    return result


if __name__ == "__main__":
    # Sample text for summarization
    text = "Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. Using Amazon Bedrock, you can easily experiment with and evaluate top FMs for your use case, privately customize them with your data using techniques such as fine-tuning and Retrieval Augmented Generation (RAG), and build agents that execute tasks using your enterprise systems and data sources. Since Amazon Bedrock is serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy generative AI capabilities into your applications using the AWS services you are already familiar with."

    print("\n=== Summarization Example ===")
    summary = summarize_text(text)
    print(f"Summary:\n {summary}")
    time.sleep(2)

    print("\n=== Sentiment Analysis Example ===")
    sentiment_analysis_json = sentiment_analysis(text)
    print(f"{sentiment_analysis_json}")
    time.sleep(2)

    print("\n=== Q&A Example ===")

    q1 = "How many companies have models in Amazon Bedrock?"
    print(q1)
    answer = perform_qa(q1, text)
    print(f"Answer: {answer}")
    time.sleep(2)

    q2 = "Can Amazon Bedrock support RAG?"
    print(q2)
    answer = perform_qa(q2, text)
    print(f"Answer: {answer}")
    time.sleep(2)

    # The answer to this question is not in the sample text, so it exercises the "do not know" path
    q3 = "When was Amazon Bedrock announced?"
    print(q3)
    answer = perform_qa(q3, text)
    print(f"Answer: {answer}")
--------------------------------------------------------------------------------
/text_examples.py:
--------------------------------------------------------------------------------
import boto3
import json

# Set up the Bedrock runtime client
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name="us-east-1",
)


def claude_prompt_format(prompt: str) -> str:
    # Add the Human/Assistant headers Claude expects to the start and end of the prompt
    return "\n\nHuman: " + prompt + "\n\nAssistant:"


# Call AI21 Labs model
def run_mid(prompt):
    prompt_config = {
        "prompt": prompt,
        "maxTokens": 5147,
        "temperature": 0.7,
        "stopSequences": [],
    }

    body = json.dumps(prompt_config)

    modelId = "ai21.j2-mid"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("completions")[0].get("data").get("text")
    return results


# Call Claude model
def call_claude(prompt):
    prompt_config = {
        "prompt": claude_prompt_format(prompt),
        "max_tokens_to_sample": 4096,
        "temperature": 0.5,
        "top_k": 250,
        "top_p": 0.5,
        "stop_sequences": [],
    }

    body = json.dumps(prompt_config)

    modelId = "anthropic.claude-v2"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("completion")
    return results


# Call Cohere model
def call_cohere(prompt):
    prompt_config = {
        "prompt": prompt,
        "max_tokens": 2048,
        "temperature": 0.7,
        # "return_likelihood": "GENERATION"
    }

    body = json.dumps(prompt_config)

    modelId = "cohere.command-text-v14"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("generations")[0].get("text")
    return results


def summarize_text(text):
    """
    Function to summarize text using a generative AI model (AI21 Jurassic-2 Mid).
    """
    prompt = f"Summarize the following text: {text}"
    result = run_mid(prompt)
    return result


def generate_code():
    """
    Function to generate Python code for uploading a file to Amazon S3 (Claude).
    """
    prompt = "Write a Python function that uploads a file to Amazon S3"
    result = call_claude(prompt)
    return result


def perform_qa(text):
    """
    Function to perform a Q&A operation based on the provided text (Cohere Command).
    """
    prompt = (
        f"How many models does Amazon Bedrock support given the following text: {text}"
    )
    result = call_cohere(prompt)
    return result


if __name__ == "__main__":
    # Sample text for summarization
    text = "This April, we announced Amazon Bedrock as part of a set of new tools for building with generative AI on AWS. Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies, including AI21 Labs, Anthropic, Cohere, Stability AI, and Amazon, along with a broad set of capabilities to build generative AI applications, simplifying development while maintaining privacy and security. Today, I'm happy to announce that Amazon Bedrock is now generally available! I'm also excited to share that Meta's Llama 2 13B and 70B parameter models will soon be available on Amazon Bedrock."

    print("\n=== Summarization Example ===")
    summary = summarize_text(text)
    print(f"Summary: {summary}")

    print("\n=== Code Generation Example ===")
    code_snippet = generate_code()
    print(f"Generated Code:\n{code_snippet}")

    print("\n=== Q&A Example ===")
    answer = perform_qa(text)
    print(f"Answer: {answer}")
--------------------------------------------------------------------------------
/claude_3_examples/image_api_st.py:
--------------------------------------------------------------------------------
import streamlit as st
import boto3
import json
import base64
import io
from PIL import Image


st.title("Building with Bedrock")  # Title of the application
st.subheader("Image Generation Demo")

REGION = "us-west-2"

# List of Stable Diffusion Preset Styles
sd_presets = [
    "None",
    "3d-model",
    "analog-film",
    "anime",
    "cinematic",
    "comic-book",
    "digital-art",
    "enhance",
    "fantasy-art",
    "isometric",
    "line-art",
    "low-poly",
    "modeling-compound",
    "neon-punk",
    "origami",
    "photographic",
    "pixel-art",
    "tile-texture",
]

# Define bedrock
bedrock_runtime = boto3.client(
    service_name="bedrock-runtime",
    region_name=REGION,
)


def call_claude_sonet(base64_string):
    prompt_config = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": 4096,
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": "image/png",
                            "data": base64_string,
                        },
                    },
                    {"type": "text", "text": "Provide a caption for this image"},
                ],
            }
        ],
    }

    body = json.dumps(prompt_config)

    modelId = "anthropic.claude-3-sonnet-20240229-v1:0"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("content")[0].get("text")
    return results


# Bedrock API call to Stable Diffusion
def generate_image_sd(text, style):
    """
    Purpose:
        Uses Bedrock API to generate an Image
    Args/Requests:
        text: Prompt
        style: style for image
    Return:
        image: base64 string of image
    """
    body = {
        "text_prompts": [{"text": text}],
        "cfg_scale": 10,
        "seed": 0,
        "steps": 50,
        "style_preset": style,
    }

    if style == "None":
        del body["style_preset"]

    body = json.dumps(body)

    modelId = "stability.stable-diffusion-xl"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("artifacts")[0].get("base64")
    return results


def convert_base64_to_image(base64_string):
    img_data = base64.b64decode(base64_string)
    img = Image.open(io.BytesIO(img_data))
    return img


def generate_image_titan(text):
    """
    Purpose:
        Uses Bedrock API to generate an Image using Titan
    Args/Requests:
        text: Prompt
    Return:
        image: base64 string of image
    """
    body = {
        "textToImageParams": {"text": text},
        "taskType": "TEXT_IMAGE",
        "imageGenerationConfig": {
            "cfgScale": 10,
            "seed": 0,
            "quality": "standard",
            "width": 512,
            "height": 512,
            "numberOfImages": 1,
        },
    }

    body = json.dumps(body)

    modelId = "amazon.titan-image-generator-v1"
    accept = "application/json"
    contentType = "application/json"

    response = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )
    response_body = json.loads(response.get("body").read())

    results = response_body.get("images")[0]
    return results


model = st.selectbox("Select model", ["Stable Diffusion", "Amazon Titan"])

if model == "Stable Diffusion":
    style = st.selectbox("Select style", sd_presets)
    prompt = st.text_input("Enter prompt")

    if st.button("Generate"):
        results = generate_image_sd(prompt, style)

        # Use Claude to describe the image
        desc_image = call_claude_sonet(results)

        img = convert_base64_to_image(results)
        st.image(img, caption=desc_image)

elif model == "Amazon Titan":
    prompt = st.text_input("Enter prompt")

    if st.button("Generate"):
        results = generate_image_titan(prompt)
        # Use Claude to describe the image
        desc_image = call_claude_sonet(results)

        img = convert_base64_to_image(results)
        st.image(img, caption=desc_image)
--------------------------------------------------------------------------------