├── .gitignore ├── src ├── image_examples │ ├── images │ │ ├── generated_image.png │ │ ├── inpainted_image.png │ │ ├── generated_cannyedge.png │ │ ├── generated_segmentation.png │ │ ├── inpainted_image_mask_prompt.png │ │ └── outpainted_image_mask_prompt.png │ ├── amazon_nova_canvas_cannyedge.py │ ├── amazon_nova_canvas_segmentation.py │ ├── amazon_nova_canvas_image_generation.py │ ├── amazon_nova_canvas_inpainting_mask_prompt.py │ └── amazon_nova_canvas_outpainting_mask_prompt.py └── text_examples │ ├── amazon_nova_invoke_model.py │ ├── amazon_nova_retrieve_generate.py │ ├── amazon_nova_invoke_model_response_stream.py │ ├── amazon_nova_converse_stream.py │ └── amazon_nova_converse.py ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md └── CONTRIBUTING.md /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .git/ -------------------------------------------------------------------------------- /src/image_examples/images/generated_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/allthings-bedrock-nova-models/main/src/image_examples/images/generated_image.png -------------------------------------------------------------------------------- /src/image_examples/images/inpainted_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/allthings-bedrock-nova-models/main/src/image_examples/images/inpainted_image.png -------------------------------------------------------------------------------- /src/image_examples/images/generated_cannyedge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/allthings-bedrock-nova-models/main/src/image_examples/images/generated_cannyedge.png -------------------------------------------------------------------------------- /src/image_examples/images/generated_segmentation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/allthings-bedrock-nova-models/main/src/image_examples/images/generated_segmentation.png -------------------------------------------------------------------------------- /src/image_examples/images/inpainted_image_mask_prompt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/allthings-bedrock-nova-models/main/src/image_examples/images/inpainted_image_mask_prompt.png -------------------------------------------------------------------------------- /src/image_examples/images/outpainted_image_mask_prompt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/allthings-bedrock-nova-models/main/src/image_examples/images/outpainted_image_mask_prompt.png -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 
5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 17 | 18 | -------------------------------------------------------------------------------- /src/text_examples/amazon_nova_invoke_model.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | 4 | def invoke_nova_model(): 5 | bedrock_runtime = boto3.client( 6 | service_name='bedrock-runtime', 7 | region_name='us-east-1' 8 | ) 9 | 10 | # Define the messages for the conversation with content as an array of objects 11 | messages = [ 12 | { 13 | "role": "user", 14 | "content": [{"text": "What are the key features of Amazon S3?"}] 15 | } 16 | ] 17 | 18 | # Prepare the request body with only supported parameters 19 | request_body = { 20 | "messages": messages 21 | } 22 | 23 | try: 24 | # Invoke the model 25 | response = bedrock_runtime.invoke_model( 26 | modelId='us.amazon.nova-lite-v1:0', 27 | body=json.dumps(request_body) 28 | ) 29 | 30 | response_body = json.loads(response['body'].read()) 31 | print("Full Response:", json.dumps(response_body, indent=2)) 32 | 33 | except Exception as e: 34 | print(f"Error invoking model: {str(e)}") 35 | 36 | if __name__ == "__main__": 37 | invoke_nova_model() 38 | -------------------------------------------------------------------------------- /src/image_examples/amazon_nova_canvas_cannyedge.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import base64 3 | import json 4 | from PIL import Image 5 | import io 6 | 7 | def nova_canvas_canny_edge_example(): 8 | try: 9 | client = boto3.client('bedrock-runtime', region_name='us-east-1') 10 | with open('./images/generated_image.png', 'rb') as image_file: 11 | condition_image = base64.b64encode(image_file.read()).decode('utf-8') 12 | 13 | request_body = { 14 | "taskType": "TEXT_IMAGE", 15 | "textToImageParams": { 16 | "text": "3d animated film style cat", 17 | "conditionImage": condition_image, 18 | "controlMode": "CANNY_EDGE", 19 | "controlStrength": 0.7 20 | }, 21 | "imageGenerationConfig": { 22 | "numberOfImages": 1, 23 | "quality": "standard", 24 | "cfgScale": 8.0 25 | } 26 | } 27 | 28 | response = client.invoke_model( 29 | modelId="amazon.nova-canvas-v1:0", 30 | body=json.dumps(request_body) 31 | ) 32 | 33 | response_body = json.loads(response['body'].read()) 34 | image_data = base64.b64decode(response_body['images'][0]) 35 | 
image = Image.open(io.BytesIO(image_data)) 36 | image.save('./images/generated_cannyedge.png') 37 | print("Generated image saved as: ./images/generated_cannyedge.png") 38 | 39 | except Exception as e: 40 | print(f"Error: {str(e)}") 41 | 42 | if __name__ == "__main__": 43 | nova_canvas_canny_edge_example() 44 | -------------------------------------------------------------------------------- /src/image_examples/amazon_nova_canvas_segmentation.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import base64 3 | import json 4 | from PIL import Image 5 | import io 6 | 7 | def nova_canvas_segmentation_example(): 8 | try: 9 | client = boto3.client('bedrock-runtime', region_name='us-east-1') 10 | 11 | with open('./images/generated_image.png', 'rb') as image_file: 12 | condition_image = base64.b64encode(image_file.read()).decode('utf-8') 13 | 14 | request_body = { 15 | "taskType": "TEXT_IMAGE", 16 | "textToImageParams": { 17 | "text": "3d animated film style cat", 18 | "conditionImage": condition_image, 19 | "controlMode": "SEGMENTATION", 20 | "controlStrength": 0.7 21 | }, 22 | "imageGenerationConfig": { 23 | "numberOfImages": 1, 24 | "quality": "standard", 25 | "cfgScale": 8.0 26 | } 27 | } 28 | 29 | response = client.invoke_model( 30 | modelId="amazon.nova-canvas-v1:0", 31 | body=json.dumps(request_body) 32 | ) 33 | 34 | response_body = json.loads(response['body'].read()) 35 | image_data = base64.b64decode(response_body['images'][0]) 36 | image = Image.open(io.BytesIO(image_data)) 37 | image.save('./images/generated_segmentation.png') 38 | print("Generated image saved as: ./images/generated_segmentation.png") 39 | 40 | except Exception as e: 41 | print(f"Error: {str(e)}") 42 | 43 | if __name__ == "__main__": 44 | nova_canvas_segmentation_example() 45 | -------------------------------------------------------------------------------- /src/image_examples/amazon_nova_canvas_image_generation.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | import base64 4 | import random 5 | import os 6 | 7 | def generate_image(prompt): 8 | client = boto3.client('bedrock-runtime', region_name='us-east-1') 9 | model_id = 'amazon.nova-canvas-v1:0' 10 | 11 | request_body = { 12 | "taskType": "TEXT_IMAGE", 13 | "textToImageParams": { 14 | "text": prompt 15 | }, 16 | "imageGenerationConfig": { 17 | "numberOfImages": 1, 18 | "quality": "standard", 19 | "height": 512, 20 | "width": 512, 21 | "cfgScale": 8.0, 22 | "seed": random.randint(1, 2147483647), 23 | } 24 | } 25 | 26 | try: 27 | # Create images directory if it doesn't exist 28 | images_dir = './images' 29 | os.makedirs(images_dir, exist_ok=True) 30 | 31 | response = client.invoke_model( 32 | modelId=model_id, 33 | body=json.dumps(request_body) 34 | ) 35 | 36 | response_body = json.loads(response['body'].read()) 37 | base64_image = response_body['images'][0] 38 | image_path = os.path.join(images_dir, 'generated_image.png') 39 | with open(image_path, 'wb') as f: 40 | f.write(base64.b64decode(base64_image)) 41 | 42 | print(f"Image generated successfully and saved as: {image_path}") 43 | 44 | except Exception as e: 45 | print(f"Error generating image: {str(e)}") 46 | 47 | def main(): 48 | prompt = "A beautiful cat sitting on a bench in New York City" 49 | generate_image(prompt) 50 | 51 | if __name__ == "__main__": 52 | main() 53 | -------------------------------------------------------------------------------- 
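Note: the conditioning examples (amazon_nova_canvas_cannyedge.py, amazon_nova_canvas_segmentation.py) and the inpainting/outpainting examples all read ./images/generated_image.png as their source image, so amazon_nova_canvas_image_generation.py needs to run first from within src/image_examples/. A minimal sketch of chaining the two steps is shown below; importing the sample scripts as modules is an assumption for illustration only, and such a driver script is not part of this repository:

```python
import os

# Hypothetical driver script, assumed to live in src/image_examples/ next to the samples.
from amazon_nova_canvas_image_generation import generate_image
from amazon_nova_canvas_cannyedge import nova_canvas_canny_edge_example

# Generate the base image only if an earlier run has not already produced it.
if not os.path.exists('./images/generated_image.png'):
    generate_image("A beautiful cat sitting on a bench in New York City")

# Re-style the base image while preserving its edge structure (CANNY_EDGE conditioning).
nova_canvas_canny_edge_example()
```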
/src/text_examples/amazon_nova_retrieve_generate.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | import os 4 | 5 | def retrieve_and_generate(user_query): 6 | bedrock_agent = boto3.client('bedrock-agent-runtime', 7 | region_name='us-east-1') 8 | 9 | try: 10 | # Call the retrieve_and_generate API 11 | response = bedrock_agent.retrieve_and_generate( 12 | input={ 13 | 'text': user_query 14 | }, 15 | retrieveAndGenerateConfiguration={ 16 | 'type': 'KNOWLEDGE_BASE', 17 | 'knowledgeBaseConfiguration': { 18 | # Replace with your knowledge base ID 19 | 'knowledgeBaseId': 'XYZ-123', 20 | # Using Nova Pro model 21 | 'modelArn': 'arn:aws:bedrock:us-east-1::foundation-model/amazon.nova-pro-v1:0' 22 | } 23 | } 24 | ) 25 | 26 | # Extract the generated text from the response 27 | if 'output' in response and 'text' in response['output']: 28 | return response['output']['text'] 29 | else: 30 | return "No response generated" 31 | 32 | except Exception as e: 33 | print(f"Error occurred: {str(e)}") 34 | return f"Error: {str(e)}" 35 | 36 | def main(): 37 | sample_queries = [ 38 | "A relevant question about the information from your data source?" 39 | ] 40 | 41 | for query in sample_queries: 42 | print("\nQuery:", query) 43 | print("-" * 50) 44 | response = retrieve_and_generate(query) 45 | print("Response:", response) 46 | print("-" * 50) 47 | 48 | if __name__ == "__main__": 49 | main() 50 | -------------------------------------------------------------------------------- /src/text_examples/amazon_nova_invoke_model_response_stream.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | from datetime import datetime 4 | 5 | # Create a Bedrock Runtime client 6 | client = boto3.client("bedrock-runtime", region_name="us-east-1") 7 | 8 | # Nova Lite model ID 9 | NOVA_MODEL_ID = "us.amazon.nova-lite-v1:0" 10 | 11 | 12 | system_prompts = [ 13 | { 14 | "text": "You are a helpful AI assistant that provides clear and concise responses." 15 | } 16 | ] 17 | 18 | messages = [ 19 | { 20 | "role": "user", 21 | "content": [ 22 | { 23 | "text": "What are the benefits of cloud computing?" 
24 | } 25 | ] 26 | } 27 | ] 28 | 29 | inference_config = { 30 | "max_new_tokens": 500, 31 | "top_p": 0.9, 32 | "top_k": 20, 33 | "temperature": 0.7 34 | } 35 | 36 | request_body = { 37 | "schemaVersion": "messages-v1", 38 | "messages": messages, 39 | "system": system_prompts, 40 | "inferenceConfig": inference_config 41 | } 42 | 43 | try: 44 | start_time = datetime.now() 45 | print("Sending request...") 46 | response = client.invoke_model_with_response_stream( 47 | modelId=NOVA_MODEL_ID, 48 | body=json.dumps(request_body) 49 | ) 50 | 51 | # Get request ID for tracking 52 | request_id = response.get("ResponseMetadata", {}).get("RequestId") 53 | print(f"Request ID: {request_id}") 54 | print("Response:") 55 | 56 | stream = response.get("body") 57 | if stream: 58 | for event in stream: 59 | chunk = event.get("chunk") 60 | if chunk: 61 | chunk_data = json.loads(chunk.get("bytes").decode()) 62 | content_block = chunk_data.get("contentBlockDelta") 63 | if content_block: 64 | # Print the text from the response 65 | print(content_block.get("delta", {}).get("text", ""), end="") 66 | else: 67 | print("No response stream received") 68 | 69 | except Exception as e: 70 | print(f"Error: {str(e)}") 71 | -------------------------------------------------------------------------------- /src/text_examples/amazon_nova_converse_stream.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | 4 | def converse_nova_stream(): 5 | client = boto3.client('bedrock-runtime', region_name='us-east-1') 6 | model_id = "amazon.nova-pro-v1:0" 7 | conversation = [ 8 | { 9 | "role": "user", 10 | "content": [ 11 | { 12 | "text": "Explain briefly the benefits of Amazon S3" 13 | } 14 | ] 15 | } 16 | ] 17 | 18 | try: 19 | # Call converse_stream API 20 | streaming_response = client.converse_stream( 21 | modelId=model_id, 22 | messages=conversation, 23 | inferenceConfig={ 24 | "maxTokens": 512, 25 | "temperature": 0.5, 26 | "topP": 0.9 27 | } 28 | ) 29 | 30 | # Process the streaming response 31 | print("\nStreaming response:") 32 | stream = streaming_response.get('stream') 33 | if stream: 34 | for event in stream: 35 | if 'messageStart' in event: 36 | print(f"\nRole: {event['messageStart']['role']}") 37 | 38 | if 'contentBlockDelta' in event: 39 | print(event['contentBlockDelta']['delta']['text'], end="", flush=True) 40 | 41 | if 'messageStop' in event: 42 | print(f"\nStop reason: {event['messageStop']['stopReason']}") 43 | 44 | if 'metadata' in event: 45 | metadata = event['metadata'] 46 | if 'usage' in metadata: 47 | print("\nToken usage:") 48 | print(f"Input tokens: {metadata['usage']['inputTokens']}") 49 | print(f"Output tokens: {metadata['usage']['outputTokens']}") 50 | print(f"Total tokens: {metadata['usage']['totalTokens']}") 51 | 52 | print("\n") 53 | 54 | except Exception as e: 55 | print(f"Error: {str(e)}") 56 | 57 | if __name__ == "__main__": 58 | converse_nova_stream() 59 | -------------------------------------------------------------------------------- /src/image_examples/amazon_nova_canvas_inpainting_mask_prompt.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | import base64 4 | from PIL import Image 5 | import io 6 | import random 7 | 8 | def inpaint_image(input_image_path, prompt, mask_prompt): 9 | client = boto3.client('bedrock-runtime', region_name='us-east-1') 10 | model_id = 'amazon.nova-canvas-v1:0' 11 | 12 | with open(input_image_path, "rb") as image_file: 13 | input_image = 
base64.b64encode(image_file.read()).decode('utf8') 14 | 15 | request_body = { 16 | "taskType": "INPAINTING", 17 | "inPaintingParams": { 18 | "text": prompt, 19 | "image": input_image, 20 | "maskPrompt": mask_prompt 21 | }, 22 | "imageGenerationConfig": { 23 | "numberOfImages": 1, 24 | "quality": "standard", 25 | "height": 512, 26 | "width": 512, 27 | "cfgScale": 8.0, 28 | "seed": random.randint(1, 2147483647), 29 | } 30 | } 31 | 32 | try: 33 | response = client.invoke_model( 34 | modelId=model_id, 35 | body=json.dumps(request_body) 36 | ) 37 | 38 | response_body = json.loads(response['body'].read()) 39 | base64_image = response_body['images'][0] 40 | 41 | image_bytes = base64.b64decode(base64_image) 42 | image = Image.open(io.BytesIO(image_bytes)) 43 | 44 | # Save the inpainted image in the images directory 45 | output_path = './images/inpainted_image_mask_prompt.png' 46 | image.save(output_path) 47 | print(f"Image inpainted successfully and saved as: {output_path}") 48 | 49 | except Exception as e: 50 | print(f"Error during inpainting: {str(e)}") 51 | 52 | def main(): 53 | # Example usage 54 | input_image_path = "./images/generated_image.png" # Path to your source image 55 | prompt = "dog with a cool hat" # Describe what you want to add/modify 56 | mask_prompt = "cat" # Specify what part of the image to modify 57 | 58 | inpaint_image(input_image_path, prompt, mask_prompt) 59 | 60 | if __name__ == "__main__": 61 | main() 62 | -------------------------------------------------------------------------------- /src/image_examples/amazon_nova_canvas_outpainting_mask_prompt.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | import os 4 | import boto3 5 | import random 6 | from PIL import Image 7 | import io 8 | 9 | def outpaint_with_mask_prompt(input_image_path, prompt, mask_prompt): 10 | client = boto3.client('bedrock-runtime', region_name='us-east-1') 11 | with open(input_image_path, "rb") as image_file: 12 | input_image = base64.b64encode(image_file.read()).decode('utf-8') 13 | 14 | request_body = { 15 | "taskType": "OUTPAINTING", 16 | "outPaintingParams": { 17 | "text": prompt, 18 | "image": input_image, 19 | "maskPrompt": mask_prompt, 20 | "outPaintingMode": "PRECISE", 21 | }, 22 | "imageGenerationConfig": { 23 | "numberOfImages": 1, 24 | "quality": "standard", 25 | "height": 1024, 26 | "width": 1024, 27 | "cfgScale": 8.0, 28 | "seed": random.randint(1, 2147483647), 29 | } 30 | } 31 | 32 | try: 33 | response = client.invoke_model( 34 | modelId="amazon.nova-canvas-v1:0", 35 | body=json.dumps(request_body), 36 | contentType="application/json", 37 | accept="application/json" 38 | ) 39 | response_body = json.loads(response.get("body").read()) 40 | 41 | if "images" in response_body and len(response_body["images"]) > 0: 42 | base64_image = response_body["images"][0] 43 | image_bytes = base64.b64decode(base64_image) 44 | 45 | output_path = './images/outpainted_image_mask_prompt.png' 46 | with open(output_path, "wb") as f: 47 | f.write(image_bytes) 48 | 49 | print(f"Generated outpainted image saved to: {output_path}") 50 | return output_path 51 | 52 | else: 53 | print("No image was generated in the response") 54 | return None 55 | 56 | except Exception as e: 57 | print(f"Error generating image: {str(e)}") 58 | return None 59 | 60 | if __name__ == "__main__": 61 | input_image_path = "./images/generated_image.png" 62 | mask_prompt = "cat" # Areas to keep unchanged 63 | prompt = "a beautiful garden in the background" 64 | result = 
outpaint_with_mask_prompt(input_image_path, prompt, mask_prompt) 65 | --------------------------------------------------------------------------------
/README.md: -------------------------------------------------------------------------------- 1 | # Amazon Nova Models Examples 2 | 3 | This repository provides a collection of Python examples demonstrating how to interact with the Amazon Nova models through Amazon Bedrock. 4 | 5 | ## Repository Structure 6 | ``` 7 | . 8 | ├── src/ 9 | │ ├── image_examples/ # Image manipulation examples using Nova Canvas 10 | │ │ ├── amazon_nova_canvas_cannyedge.py # Image Conditioning - Canny Edge mode 11 | │ │ ├── amazon_nova_canvas_image_generation.py # Basic image generation from text 12 | │ │ ├── amazon_nova_canvas_inpainting_mask_prompt.py # Image inpainting with mask prompt 13 | │ │ ├── amazon_nova_canvas_outpainting_mask_prompt.py # Image outpainting with mask prompt 14 | │ │ └── amazon_nova_canvas_segmentation.py # Image Conditioning - Segmentation mode 15 | │ └── text_examples/ # Text generation and conversation examples 16 | │ ├── amazon_nova_converse_stream.py # Streaming conversation interface 17 | │ ├── amazon_nova_converse.py # Basic conversation implementation 18 | │ ├── amazon_nova_invoke_model_response_stream.py # Streaming model responses 19 | │ ├── amazon_nova_invoke_model.py # Direct model invocation 20 | │ └── amazon_nova_retrieve_generate.py # Knowledge base integration 21 | ``` 22 | 23 | ## Usage Instructions 24 | ### Prerequisites 25 | - Python 3.7 or higher 26 | - An AWS account with Amazon Bedrock access 27 | - AWS CLI configured with appropriate credentials 28 | - Required Python packages: 29 | - boto3 30 | - Pillow (PIL) 31 | - json (included in the Python standard library) 32 | - base64 (included in the Python standard library) 33 | 34 | ### Installation 35 | ```bash 36 | # Clone the repository 37 | git clone https://github.com/aws-samples/allthings-bedrock-nova-models.git 38 | cd allthings-bedrock-nova-models 39 | 40 | # Install required packages 41 | pip install boto3 Pillow 42 | 43 | # Configure AWS credentials if not already done 44 | aws configure 45 | ``` 46 | 47 | ### Troubleshooting 48 | Common Issues and Solutions: 49 | 50 | 1. AWS Credentials Error 51 | ``` 52 | Ensure AWS credentials are properly configured 53 | ``` 54 | 55 | 2. Model Access Issues 56 | ``` 57 | Verify that your AWS Region is correct and that model access has been granted for the Amazon Nova models in the Amazon Bedrock console. 58 | ``` 59 | 60 | 3. Image Generation Failures 61 | - Check if the images directory exists 62 | - Ensure proper file permissions in the output directory 63 | 64 | ## Security 65 | 66 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 67 | 68 | ## License 69 | 70 | This library is licensed under the MIT-0 License. See the LICENSE file. 71 | 72 | ## Disclaimer 73 | 74 | You should not use this Content in your production accounts, or on production or other critical data. You are responsible for testing, securing, and optimizing content, such as sample code, as appropriate for production-grade use based on your specific quality control practices and standards. 75 | 76 | --------------------------------------------------------------------------------
/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community.
5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute to. As our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 | -------------------------------------------------------------------------------- /src/text_examples/amazon_nova_converse.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | import logging 4 | from typing import Optional, Dict, Any 5 | from pathlib import Path 6 | 7 | class NovaConverse: 8 | def __init__(self, model_id: str = "amazon.nova-pro-v1:0", region: str = "us-east-1"): 9 | self.bedrock = boto3.client("bedrock-runtime", region_name=region) 10 | self.model_id = model_id 11 | self.logger = logging.getLogger(__name__) 12 | 13 | logging.basicConfig( 14 | level=logging.INFO, 15 | format='%(asctime)s - %(levelname)s - %(message)s' 16 | ) 17 | 18 | def create_message(self, 19 | text: str, 20 | image_path: Optional[str] = None, 21 | role: str = "user") -> Dict[str, Any]: 22 | """Create a message for the conversation.""" 23 | content = [] 24 | 25 | if text: 26 | content.append({"text": text}) 27 | 28 | return { 29 | "role": role, 30 | "content": content 31 | } 32 | 33 | def converse(self, 34 | messages: list, 35 | temperature: float = 0.7, 36 | top_p: float = 0.9, 37 | max_tokens: int = 2048, 38 | stop_sequences: list = None, 39 | stream: bool = False) -> Dict[str, Any]: 40 | try: 41 | inference_config = { 42 | "temperature": temperature, 43 | "topP": top_p, 44 | "maxTokens": max_tokens 45 | } 46 | 47 | if stop_sequences: 48 | inference_config["stopSequences"] = stop_sequences 49 | 50 | request_params = { 51 | "modelId": self.model_id, 52 | "messages": messages, 53 | "inferenceConfig": inference_config 54 | } 55 | 56 | 57 | response = self.bedrock.converse(**request_params) 58 | 59 | return response 60 | 61 | except Exception as e: 62 | self.logger.error(f"Error in conversation: {str(e)}") 63 | raise 64 | 65 | def text_conversation_example(): 66 | """Example of a text-based conversation.""" 67 | nova = NovaConverse() 68 | 69 | messages = [ 70 | nova.create_message( 71 | "What are the main differences between supervised and unsupervised learning?" 72 | ) 73 | ] 74 | 75 | response = nova.converse(messages) 76 | 77 | if response and "output" in response: 78 | assistant_message = response["output"]["message"] 79 | print("\nAssistant's response:") 80 | print(assistant_message["content"][0]["text"]) 81 | 82 | messages.append(assistant_message) 83 | messages.append( 84 | nova.create_message( 85 | "Can you provide a specific example of each?" 86 | ) 87 | ) 88 | 89 | follow_up_response = nova.converse(messages) 90 | if follow_up_response and "output" in follow_up_response: 91 | print("\nFollow-up response:") 92 | print(follow_up_response["output"]["message"]["content"][0]["text"]) 93 | 94 | usage = follow_up_response.get("usage", {}) 95 | print("\nToken Usage:") 96 | print(f"Input tokens: {usage.get('inputTokens', 0)}") 97 | print(f"Output tokens: {usage.get('outputTokens', 0)}") 98 | 99 | def main(): 100 | try: 101 | print("Running text conversation example...") 102 | text_conversation_example() 103 | 104 | except Exception as e: 105 | logging.error(f"Error in main: {str(e)}") 106 | 107 | if __name__ == "__main__": 108 | main() 109 | --------------------------------------------------------------------------------
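The Troubleshooting section of the README above names credentials and model access as the most common failure modes. The sketch below is a small preflight check in the same style as the samples, assuming us-east-1 as in the examples; the model IDs probed are the base IDs used by the scripts in this repository, and the script itself is illustrative rather than part of the repository:

```python
import boto3

# Confirm that the configured AWS credentials resolve to an identity.
sts = boto3.client('sts', region_name='us-east-1')
print("Caller identity:", sts.get_caller_identity()['Arn'])

# Confirm that the Amazon Nova models used by the samples are offered in this Region.
# Model access still has to be granted separately in the Amazon Bedrock console.
bedrock = boto3.client('bedrock', region_name='us-east-1')
listed = {m['modelId'] for m in bedrock.list_foundation_models(byProvider='Amazon')['modelSummaries']}
for model_id in ('amazon.nova-lite-v1:0', 'amazon.nova-pro-v1:0', 'amazon.nova-canvas-v1:0'):
    print(f"{model_id}: {'listed' if model_id in listed else 'NOT listed'}")
```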