├── .gitignore
├── AmazonBedrock
│   ├── utils
│   │   ├── __init__.py
│   │   └── hints.py
│   ├── requirements.txt
│   ├── LICENSE
│   ├── anthropic
│   │   ├── 10_4_Appendix_Search_and_Retrieval.ipynb
│   │   ├── 00_Tutorial_How-To.ipynb
│   │   ├── 03_Assigning_Roles_Role_Prompting.ipynb
│   │   ├── 02_Being_Clear_and_Direct.ipynb
│   │   ├── 10_3_Appendix_Empirical_Performance_Evaluations.ipynb
│   │   └── 01_Basic_Prompt_Structure.ipynb
│   ├── boto3
│   │   ├── 10_4_Appendix_Search_and_Retrieval.ipynb
│   │   ├── 00_Tutorial_How-To.ipynb
│   │   ├── 03_Assigning_Roles_Role_Prompting.ipynb
│   │   ├── 02_Being_Clear_and_Direct.ipynb
│   │   ├── 10_3_Appendix_Empirical_Performance_Eval.ipynb
│   │   └── 01_Basic_Prompt_Structure.ipynb
│   ├── cloudformation
│   │   └── workshop-v1-final-cfn.yml
│   ├── README.md
│   └── CONTRIBUTING.md
├── Anthropic 1P
│   ├── 10.3_Appendix_Search & Retrieval.ipynb
│   ├── 00_Tutorial_How-To.ipynb
│   ├── 03_Assigning_Roles_Role_Prompting.ipynb
│   ├── 02_Being_Clear_and_Direct.ipynb
│   ├── hints.py
│   └── 01_Basic_Prompt_Structure.ipynb
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
3 |
--------------------------------------------------------------------------------
/AmazonBedrock/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/AmazonBedrock/requirements.txt:
--------------------------------------------------------------------------------
1 | awscli==1.32.74
2 | boto3==1.34.74
3 | botocore==1.34.74
4 | anthropic==0.21.3
5 | pickleshare==0.7.5
6 |
--------------------------------------------------------------------------------
/AmazonBedrock/LICENSE:
--------------------------------------------------------------------------------
1 | MIT No Attribution
2 |
3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so.
10 |
11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
17 |
18 |
--------------------------------------------------------------------------------
/Anthropic 1P/10.3_Appendix_Search & Retrieval.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Appendix 10.3: Search & Retrieval\n",
8 | "\n",
9 | "Did you know you can use Claude to **search through Wikipedia for you**? Claude can find and retrieve articles, at which point you can also use Claude to summarize and synthesize them, write novel content from what it found, and much more. And not just Wikipedia! You can also search over your own docs, whether stored as plain text or embedded in a vector datastore.\n",
10 | "\n",
11 | "See our [RAG cookbook examples](https://github.com/anthropics/anthropic-cookbook/blob/main/third_party/Wikipedia/wikipedia-search-cookbook.ipynb) to learn how to supplement Claude's knowledge and improve the accuracy and relevance of Claude's responses with data retrieved from vector databases, Wikipedia, the internet, and more. There, you can also learn about how to use certain [embeddings](https://docs.anthropic.com/claude/docs/embeddings) and vector database tools.\n",
12 | "\n",
13 | "If you are interested in learning about advanced RAG architectures using Claude, check out our [Claude 3 technical presentation slides on RAG architectures](https://docs.google.com/presentation/d/1zxkSI7lLUBrZycA-_znwqu8DDyVhHLkQGScvzaZrUns/edit#slide=id.g2c736259dac_63_782)."
14 | ]
15 | }
16 | ],
17 | "metadata": {
18 | "language_info": {
19 | "name": "python"
20 | }
21 | },
22 | "nbformat": 4,
23 | "nbformat_minor": 2
24 | }
25 |
--------------------------------------------------------------------------------
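The appendix above links out to the RAG cookbook rather than including code. Below is a minimal sketch of the retrieve-then-summarize pattern it describes, assuming the third-party `wikipedia` package (`pip install wikipedia`) and the `API_KEY`/`MODEL_NAME` setup from the tutorial's how-to notebook; see the linked cookbook for the full approach.

```python
import anthropic
import wikipedia  # third-party package, assumed installed: pip install wikipedia

client = anthropic.Anthropic(api_key=API_KEY)  # API_KEY as stored in the how-to notebook

def summarize_wikipedia(query: str) -> str:
    # Retrieve the top-matching article for the query
    title = wikipedia.search(query)[0]
    article = wikipedia.page(title, auto_suggest=False).content[:5000]  # truncate for brevity

    # Ask Claude to synthesize the retrieved text
    message = client.messages.create(
        model=MODEL_NAME,  # MODEL_NAME as stored in the how-to notebook
        max_tokens=1000,
        messages=[{"role": "user", "content": f"Summarize this article in three sentences:\n\n{article}"}],
    )
    return message.content[0].text

print(summarize_wikipedia("prompt engineering"))
```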
/AmazonBedrock/anthropic/10_4_Appendix_Search_and_Retrieval.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Appendix 10.4: Search & Retrieval\n",
8 | "\n",
9 | "Did you know you can use Claude to **search through Wikipedia for you**? Claude can find and retrieve articles, at which point you can also use Claude to summarize and synthesize them, write novel content from what it found, and much more. And not just Wikipedia! You can also search over your own docs, whether stored as plain text or embedded in a vector datastore.\n",
10 | "\n",
11 | "See our [RAG cookbook examples](https://github.com/anthropics/anthropic-cookbook/blob/main/third_party/Wikipedia/wikipedia-search-cookbook.ipynb) to learn how to supplement Claude's knowledge and improve the accuracy and relevance of Claude's responses with data retrieved from vector databases, Wikipedia, the internet, and more. There, you can also learn about how to use certain [embeddings](https://docs.anthropic.com/claude/docs/embeddings) and vector database tools.\n",
12 | "\n",
13 | "If you are interested in learning about advanced RAG architectures using Claude, check out our [Claude 3 technical presentation slides on RAG architectures](https://docs.google.com/presentation/d/1zxkSI7lLUBrZycA-_znwqu8DDyVhHLkQGScvzaZrUns/edit#slide=id.g2c736259dac_63_782)."
14 | ]
15 | }
16 | ],
17 | "metadata": {
18 | "language_info": {
19 | "name": "python"
20 | }
21 | },
22 | "nbformat": 4,
23 | "nbformat_minor": 2
24 | }
25 |
--------------------------------------------------------------------------------
/AmazonBedrock/boto3/10_4_Appendix_Search_and_Retrieval.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Appendix 10.4: Search & Retrieval\n",
8 | "\n",
9 | "Did you know you can use Claude to **search through Wikipedia for you**? Claude can find and retrieve articles, at which point you can also use Claude to summarize and synthesize them, write novel content from what it found, and much more. And not just Wikipedia! You can also search over your own docs, whether stored as plain text or embedded in a vector datastore.\n",
10 | "\n",
11 | "See our [RAG cookbook examples](https://github.com/anthropics/anthropic-cookbook/blob/main/third_party/Wikipedia/wikipedia-search-cookbook.ipynb) to learn how to supplement Claude's knowledge and improve the accuracy and relevance of Claude's responses with data retrieved from vector databases, Wikipedia, the internet, and more. There, you can also learn about how to use certain [embeddings](https://docs.anthropic.com/claude/docs/embeddings) and vector database tools.\n",
12 | "\n",
13 | "If you are interested in learning about advanced RAG architectures using Claude, check out our [Claude 3 technical presentation slides on RAG architectures](https://docs.google.com/presentation/d/1zxkSI7lLUBrZycA-_znwqu8DDyVhHLkQGScvzaZrUns/edit#slide=id.g2c736259dac_63_782)."
14 | ]
15 | }
16 | ],
17 | "metadata": {
18 | "language_info": {
19 | "name": "python"
20 | }
21 | },
22 | "nbformat": 4,
23 | "nbformat_minor": 2
24 | }
25 |
--------------------------------------------------------------------------------
/AmazonBedrock/cloudformation/workshop-v1-final-cfn.yml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'CloudFormation template to create a Jupyter notebook in SageMaker with an execution role and Anthropic Prompt Eng. Repo'
3 |
4 | Parameters:
5 | NotebookName:
6 | Type: String
7 | Default: 'PromptEngWithAnthropicNotebook'
8 | DefaultRepoUrl:
9 | Type: String
10 | Default: 'https://github.com/aws-samples/prompt-engineering-with-anthropic-claude-v-3.git'
11 |
12 | Resources:
13 | SageMakerExecutionRole:
14 | Type: AWS::IAM::Role
15 | Properties:
16 | AssumeRolePolicyDocument:
17 | Version: '2012-10-17'
18 | Statement:
19 | - Effect: Allow
20 | Principal:
21 | Service:
22 | - sagemaker.amazonaws.com
23 | Action:
24 | - sts:AssumeRole
25 | ManagedPolicyArns:
26 | - arn:aws:iam::aws:policy/AmazonSageMakerFullAccess
27 | - arn:aws:iam::aws:policy/AmazonBedrockFullAccess
28 |
29 | KmsKey:
30 | Type: AWS::KMS::Key
31 | Properties:
32 | Description: 'KMS key for SageMaker notebook'
33 | KeyPolicy:
34 | Version: '2012-10-17'
35 | Statement:
36 | - Effect: Allow
37 | Principal:
38 | AWS: !Sub 'arn:aws:iam::${AWS::AccountId}:root'
39 | Action: 'kms:*'
40 | Resource: '*'
41 | EnableKeyRotation: true
42 |
43 | KmsKeyAlias:
44 | Type: AWS::KMS::Alias
45 | Properties:
46 | AliasName: !Sub 'alias/${NotebookName}-kms-key'
47 | TargetKeyId: !Ref KmsKey
48 |
49 | SageMakerNotebookInstance:
50 | Type: AWS::SageMaker::NotebookInstance
51 | Properties:
52 | InstanceType: ml.t3.large
53 | NotebookInstanceName: !Ref NotebookName
54 | RoleArn: !GetAtt SageMakerExecutionRole.Arn
55 | DefaultCodeRepository: !Ref DefaultRepoUrl
56 | KmsKeyId: !GetAtt KmsKey.Arn
57 |
58 | Outputs:
59 | NotebookInstanceName:
60 | Description: The name of the created SageMaker Notebook Instance
61 | Value: !Ref SageMakerNotebookInstance
62 | ExecutionRoleArn:
63 | Description: The ARN of the created SageMaker Execution Role
64 | Value: !GetAtt SageMakerExecutionRole.Arn
65 | KmsKeyArn:
66 | Description: The ARN of the created KMS Key for the notebook
67 | Value: !GetAtt KmsKey.Arn
68 |
--------------------------------------------------------------------------------
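The template above provisions a SageMaker notebook instance preloaded with this repository, plus a KMS key and an execution role with SageMaker and Bedrock access. One way to deploy it, sketched with boto3 (the stack name here is illustrative; `CAPABILITY_IAM` is required because the template creates an IAM role):

```python
import boto3

cfn = boto3.client("cloudformation")

with open("workshop-v1-final-cfn.yml") as f:
    template_body = f.read()

# Create the stack and block until provisioning finishes
cfn.create_stack(
    StackName="prompt-eng-workshop",  # illustrative name
    TemplateBody=template_body,
    Capabilities=["CAPABILITY_IAM"],  # the template creates an IAM role
)
cfn.get_waiter("stack_create_complete").wait(StackName="prompt-eng-workshop")
print("Notebook instance ready")
```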
/AmazonBedrock/README.md:
--------------------------------------------------------------------------------
1 | # Welcome to Anthropic's Prompt Engineering Interactive Tutorial - Bedrock Edition
2 |
3 | ## Course introduction and goals
4 |
5 | This course is intended to provide you with a comprehensive step-by-step understanding of how to engineer optimal prompts for Claude, using Amazon Bedrock.
6 |
7 | **After completing this course, you will be able to**:
8 | - Master the basic structure of a good prompt
9 | - Recognize common failure modes and learn the '80/20' techniques to address them
10 | - Understand Claude's strengths and weaknesses
11 | - Build strong prompts from scratch for common use cases
12 |
13 | ## Course structure and content
14 |
15 | This course is structured to allow you many chances to practice writing and troubleshooting prompts yourself. The course is broken up into **9 chapters with accompanying exercises**, as well as an appendix of even more advanced methods. It is intended for you to **work through the course in chapter order**.
16 |
17 | **Each lesson has an "Example Playground" area** at the bottom where you are free to experiment with the examples in the lesson and see for yourself how changing prompts can change Claude's responses. There is also an [answer key](https://docs.google.com/spreadsheets/d/1jIxjzUWG-6xBVIa2ay6yDpLyeuOh_hR_ZB75a47KX_E/edit?usp=sharing). While this answer key is structured for 1P API requests, the solutions are the same.
18 |
19 | Note: This tutorial uses our smallest, fastest, and cheapest model, Claude 3 Haiku. Anthropic has [two other models](https://docs.anthropic.com/claude/docs/models-overview), Claude 3 Sonnet and Claude 3 Opus, which are more intelligent than Haiku, with Opus being the most intelligent.
20 |
21 | When you are ready to begin, go to `01_Basic_Prompt_Structure` to proceed.
22 |
23 | ## Table of Contents
24 |
25 | Each chapter consists of a lesson and a set of exercises.
26 |
27 | ### Beginner
28 | - **Chapter 1:** Basic Prompt Structure
29 |
30 | - **Chapter 2:** Being Clear and Direct
31 |
32 | - **Chapter 3:** Assigning Roles
33 |
34 | ### Intermediate
35 | - **Chapter 4:** Separating Data from Instructions
36 |
37 | - **Chapter 5:** Formatting Output & Speaking for Claude
38 |
39 | - **Chapter 6:** Precognition (Thinking Step by Step)
40 |
41 | - **Chapter 7:** Using Examples
42 |
43 | ### Advanced
44 | - **Chapter 8:** Avoiding Hallucinations
45 |
46 | - **Chapter 9:** Building Complex Prompts (Industry Use Cases)
47 | - Complex Prompts from Scratch - Chatbot
48 | - Complex Prompts for Legal Services
49 | - **Exercise:** Complex Prompts for Financial Services
50 | - **Exercise:** Complex Prompts for Coding
51 | - Congratulations & Next Steps
52 |
53 | - **Appendix:** Beyond Standard Prompting
54 | - Chaining Prompts
55 | - Tool Use
56 | - Empirical Performance Evaluations
57 | - Search & Retrieval
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Welcome to Anthropic's Prompt Engineering Interactive Tutorial
2 |
3 | ## Course introduction and goals
4 |
5 | This course is intended to provide you with a comprehensive step-by-step understanding of how to engineer optimal prompts for Claude.
6 |
7 | **After completing this course, you will be able to**:
8 | - Master the basic structure of a good prompt
9 | - Recognize common failure modes and learn the '80/20' techniques to address them
10 | - Understand Claude's strengths and weaknesses
11 | - Build strong prompts from scratch for common use cases
12 |
13 | ## Course structure and content
14 |
15 | This course is structured to allow you many chances to practice writing and troubleshooting prompts yourself. The course is broken up into **9 chapters with accompanying exercises**, as well as an appendix of even more advanced methods. It is intended for you to **work through the course in chapter order**.
16 |
17 | **Each lesson has an "Example Playground" area** at the bottom where you are free to experiment with the examples in the lesson and see for yourself how changing prompts can change Claude's responses. There is also an [answer key](https://docs.google.com/spreadsheets/d/1jIxjzUWG-6xBVIa2ay6yDpLyeuOh_hR_ZB75a47KX_E/edit?usp=sharing).
18 |
19 | Note: This tutorial uses our smallest, fastest, and cheapest model, Claude 3 Haiku. Anthropic has [two other models](https://docs.anthropic.com/claude/docs/models-overview), Claude 3 Sonnet and Claude 3 Opus, which are more intelligent than Haiku, with Opus being the most intelligent.
20 |
21 | *This tutorial also exists on [Google Sheets using Anthropic's Claude for Sheets extension](https://docs.google.com/spreadsheets/d/19jzLgRruG9kjUQNKtCg1ZjdD6l6weA6qRXG5zLIAhC8/edit?usp=sharing). We recommend using that version as it is more user-friendly.*
22 |
23 | When you are ready to begin, go to `01_Basic_Prompt_Structure` to proceed.
24 |
25 | ## Table of Contents
26 |
27 | Each chapter consists of a lesson and a set of exercises.
28 |
29 | ### Beginner
30 | - **Chapter 1:** Basic Prompt Structure
31 |
32 | - **Chapter 2:** Being Clear and Direct
33 |
34 | - **Chapter 3:** Assigning Roles
35 |
36 | ### Intermediate
37 | - **Chapter 4:** Separating Data from Instructions
38 |
39 | - **Chapter 5:** Formatting Output & Speaking for Claude
40 |
41 | - **Chapter 6:** Precognition (Thinking Step by Step)
42 |
43 | - **Chapter 7:** Using Examples
44 |
45 | ### Advanced
46 | - **Chapter 8:** Avoiding Hallucinations
47 |
48 | - **Chapter 9:** Building Complex Prompts (Industry Use Cases)
49 | - Complex Prompts from Scratch - Chatbot
50 | - Complex Prompts for Legal Services
51 | - **Exercise:** Complex Prompts for Financial Services
52 | - **Exercise:** Complex Prompts for Coding
53 | - Congratulations & Next Steps
54 |
55 | - **Appendix:** Beyond Standard Prompting
56 | - Chaining Prompts
57 | - Tool Use
58 | - Search & Retrieval
--------------------------------------------------------------------------------
/AmazonBedrock/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
4 | documentation, we greatly value feedback and contributions from our community.
5 |
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
7 | information to effectively respond to your bug report or contribution.
8 |
9 |
10 | ## Reporting Bugs/Feature Requests
11 |
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features.
13 |
14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
16 |
17 | * A reproducible test case or series of steps
18 | * The version of our code being used
19 | * Any modifications you've made relevant to the bug
20 | * Anything unusual about your environment or deployment
21 |
22 |
23 | ## Contributing via Pull Requests
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
25 |
26 | 1. You are working against the latest source on the *main* branch.
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
29 |
30 | To send us a pull request, please:
31 |
32 | 1. Fork the repository.
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
34 | 3. Ensure local tests pass.
35 | 4. Commit to your fork using clear commit messages.
36 | 5. Send us a pull request, answering any default questions in the pull request interface.
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
38 |
39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
41 |
42 |
43 | ## Finding contributions to work on
44 | Looking at the existing issues is a great way to find something to contribute to. As our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
45 |
46 |
47 | ## Code of Conduct
48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
50 | opensource-codeofconduct@amazon.com with any additional questions or comments.
51 |
52 |
53 | ## Security issue notifications
54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
55 |
56 |
57 | ## Licensing
58 |
59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 |
--------------------------------------------------------------------------------
/Anthropic 1P/00_Tutorial_How-To.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Tutorial How-To\n",
8 | "\n",
9 | "This tutorial **requires an API key** for interaction. If you don't have an API key, you can sign up for one via the [Anthropic Console](https://console.anthropic.com/) or view our [static tutorial answer key](https://docs.google.com/spreadsheets/u/0/d/1jIxjzUWG-6xBVIa2ay6yDpLyeuOh_hR_ZB75a47KX_E/edit) instead."
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "## How to get started\n",
17 | "\n",
18 | "1. Clone this repository to your local machine.\n",
19 | "\n",
20 | "2. Install the required dependencies by running the following command:\n",
21 | " "
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "!pip install anthropic"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {},
36 | "source": [
37 | "3. Set up your API key and model name. Replace `\"your_api_key_here\"` with your actual Anthropic API key."
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "metadata": {},
44 | "outputs": [],
45 | "source": [
46 | "API_KEY = \"your_api_key_here\"\n",
47 | "MODEL_NAME = \"claude-3-haiku-20240307\"\n",
48 | "\n",
49 | "# Stores the API_KEY & MODEL_NAME variables for use across notebooks within the IPython store\n",
50 | "%store API_KEY\n",
51 | "%store MODEL_NAME"
52 | ]
53 | },
54 | {
55 | "cell_type": "markdown",
56 | "metadata": {},
57 | "source": [
58 | "4. Run the notebook cells in order, following the instructions provided."
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "---\n",
66 | "\n",
67 | "## Usage Notes & Tips 💡\n",
68 | "\n",
69 | "- This course uses Claude 3 Haiku with temperature 0. We will talk more about temperature later in the course. For now, it's enough to understand that these settings yield more deterministic results. All prompt engineering techniques in this course also apply to previous generation legacy Claude models such as Claude 2 and Claude Instant 1.2.\n",
70 | "\n",
71 | "- You can use `Shift + Enter` to execute the cell and move to the next one.\n",
72 | "\n",
73 | "- When you reach the bottom of a tutorial page, navigate to the next numbered file in the folder, or to the next numbered folder if you're finished with the content within that chapter file.\n",
74 | "\n",
75 | "### The Anthropic SDK & the Messages API\n",
76 | "We will be using the [Anthropic python SDK](https://docs.anthropic.com/claude/reference/client-sdks) and the [Messages API](https://docs.anthropic.com/claude/reference/messages_post) throughout this tutorial. \n",
77 | "\n",
78 | "Below is an example of what running a prompt will look like in this tutorial. First, we create `get_completion`, which is a helper function that sends a prompt to Claude and returns Claude's generated response. Run that cell now."
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "import anthropic\n",
88 | "\n",
89 | "client = anthropic.Anthropic(api_key=API_KEY)\n",
90 | "\n",
91 | "def get_completion(prompt: str):\n",
92 | " message = client.messages.create(\n",
93 | " model=MODEL_NAME,\n",
94 | " max_tokens=2000,\n",
95 | " temperature=0.0,\n",
96 | " messages=[\n",
97 | " {\"role\": \"user\", \"content\": prompt}\n",
98 | " ]\n",
99 | " )\n",
100 | " return message.content[0].text"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "Now we will write out an example prompt for Claude and print Claude's output by running our `get_completion` helper function. Running the cell below will print out a response from Claude beneath it.\n",
108 | "\n",
109 | "Feel free to play around with the prompt string to elicit different responses from Claude."
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "# Prompt\n",
119 | "prompt = \"Hello, Claude!\"\n",
120 | "\n",
121 | "# Get Claude's response\n",
122 | "print(get_completion(prompt))"
123 | ]
124 | },
125 | {
126 | "cell_type": "markdown",
127 | "metadata": {},
128 | "source": [
129 | "The `API_KEY` and `MODEL_NAME` variables defined earlier will be used throughout the tutorial. Just make sure to run the cells for each tutorial page from top to bottom."
130 | ]
131 | }
132 | ],
133 | "metadata": {
134 | "kernelspec": {
135 | "display_name": "py310",
136 | "language": "python",
137 | "name": "python3"
138 | },
139 | "language_info": {
140 | "codemirror_mode": {
141 | "name": "ipython",
142 | "version": 3
143 | },
144 | "file_extension": ".py",
145 | "mimetype": "text/x-python",
146 | "name": "python",
147 | "nbconvert_exporter": "python",
148 | "pygments_lexer": "ipython3",
149 | "version": "3.10.14"
150 | }
151 | },
152 | "nbformat": 4,
153 | "nbformat_minor": 2
154 | }
155 |
--------------------------------------------------------------------------------
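The `%store` magics above persist `API_KEY` and `MODEL_NAME` in the IPython store so that every later notebook can reload them; the setup cells in subsequent chapters (see Chapter 3 below) retrieve them like so:

```python
# Run at the top of a later lesson notebook to reload the shared variables
%store -r API_KEY
%store -r MODEL_NAME
```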
/AmazonBedrock/anthropic/00_Tutorial_How-To.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Tutorial How-To\n",
8 | "\n",
9 | "This tutorial requires this initial notebook to be run first so that the requirements and environment variables are stored for all notebooks in the workshop."
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "## How to get started\n",
17 | "\n",
18 | "1. Clone this repository to your local machine.\n",
19 | "\n",
20 | "2. Install the required dependencies by running the following command:\n",
21 | " "
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 2,
27 | "metadata": {},
28 | "outputs": [
29 | {
30 | "name": "stdout",
31 | "output_type": "stream",
32 | "text": [
33 | "Note: you may need to restart the kernel to use updated packages.\n",
34 | "Note: you may need to restart the kernel to use updated packages.\n"
35 | ]
36 | }
37 | ],
38 | "source": [
39 | "%pip install -qU pip\n",
40 | "%pip install -qr ../requirements.txt"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "3. Restart the kernel after installing dependencies"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "# restart kernel\n",
57 | "from IPython.core.display import HTML\n",
58 | "HTML(\"<script>Jupyter.notebook.kernel.restart()</script>\")"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "---\n",
66 | "\n",
67 | "## Usage Notes & Tips 💡\n",
68 | "\n",
69 | "- This course uses Claude 3 Haiku with temperature 0. We will talk more about temperature later in the course. For now, it's enough to understand that these settings yield more deterministic results. All prompt engineering techniques in this course also apply to previous generation legacy Claude models such as Claude 2 and Claude Instant 1.2.\n",
70 | "\n",
71 | "- You can use `Shift + Enter` to execute the cell and move to the next one.\n",
72 | "\n",
73 | "- When you reach the bottom of a tutorial page, navigate to the next numbered file in the folder, or to the next numbered folder if you're finished with the content within that chapter file.\n",
74 | "\n",
75 | "### The Anthropic SDK & the Messages API\n",
76 | "We will be using the [Anthropic python SDK](https://docs.anthropic.com/claude/reference/claude-on-amazon-bedrock) and the [Messages API](https://docs.anthropic.com/claude/reference/messages_post) throughout this tutorial.\n",
77 | "\n",
78 | "Below is an example of what running a prompt will look like in this tutorial."
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "metadata": {},
84 | "source": [
85 | "First, we set and store the model name and region."
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "import boto3\n",
95 | "session = boto3.Session() # create a boto3 session to dynamically get and set the region name\n",
96 | "AWS_REGION = session.region_name\n",
97 | "print(\"AWS Region:\", AWS_REGION)\n",
98 | "MODEL_NAME = \"anthropic.claude-3-haiku-20240307-v1:0\"\n",
99 | "\n",
100 | "%store MODEL_NAME\n",
101 | "%store AWS_REGION"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "Then, we create `get_completion`, which is a helper function that sends a prompt to Claude and returns Claude's generated response. Run that cell now."
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": [
117 | "from anthropic import AnthropicBedrock\n",
118 | "\n",
119 | "client = AnthropicBedrock(aws_region=AWS_REGION)\n",
120 | "\n",
121 | "def get_completion(prompt, system=''):\n",
122 | " message = client.messages.create(\n",
123 | " model=MODEL_NAME,\n",
124 | " max_tokens=2000,\n",
125 | " temperature=0.0,\n",
126 | " messages=[\n",
127 | " {\"role\": \"user\", \"content\": prompt}\n",
128 | " ],\n",
129 | " system=system\n",
130 | " )\n",
131 | " return message.content[0].text"
132 | ]
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {},
137 | "source": [
138 | "Now we will write out an example prompt for Claude and print Claude's output by running our `get_completion` helper function. Running the cell below will print out a response from Claude beneath it.\n",
139 | "\n",
140 | "Feel free to play around with the prompt string to elicit different responses from Claude."
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 | "# Prompt\n",
150 | "prompt = \"Hello, Claude!\"\n",
151 | "\n",
152 | "# Get Claude's response\n",
153 | "print(get_completion(prompt))"
154 | ]
155 | },
156 | {
157 | "cell_type": "markdown",
158 | "metadata": {},
159 | "source": [
160 | "The `MODEL_NAME` and `AWS_REGION` variables defined earlier will be used throughout the tutorial. Just make sure to run the cells for each tutorial page from top to bottom."
161 | ]
162 | }
163 | ],
164 | "metadata": {
165 | "kernelspec": {
166 | "display_name": "py310",
167 | "language": "python",
168 | "name": "python3"
169 | },
170 | "language_info": {
171 | "codemirror_mode": {
172 | "name": "ipython",
173 | "version": 3
174 | },
175 | "file_extension": ".py",
176 | "mimetype": "text/x-python",
177 | "name": "python",
178 | "nbconvert_exporter": "python",
179 | "pygments_lexer": "ipython3",
180 | "version": "3.11.5"
181 | }
182 | },
183 | "nbformat": 4,
184 | "nbformat_minor": 2
185 | }
186 |
--------------------------------------------------------------------------------
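The `get_completion` helper above sends a single user turn. The same `client.messages.create` call also accepts alternating user/assistant turns, so a multi-turn exchange looks like the following sketch (the conversation content is illustrative):

```python
from anthropic import AnthropicBedrock

client = AnthropicBedrock(aws_region=AWS_REGION)  # AWS_REGION as stored above

# Pass prior turns back in as alternating user/assistant messages
message = client.messages.create(
    model=MODEL_NAME,  # the Bedrock model ID stored above
    max_tokens=2000,
    temperature=0.0,
    messages=[
        {"role": "user", "content": "Hello, Claude!"},
        {"role": "assistant", "content": "Hello! How can I help you today?"},
        {"role": "user", "content": "Can you say that again, but as a pirate?"},
    ],
)
print(message.content[0].text)
```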
/AmazonBedrock/boto3/00_Tutorial_How-To.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Tutorial How-To\n",
8 | "\n",
9 | "This tutorial requires this initial notebook to be run first so that the requirements and environment variables are stored for all notebooks in the workshop."
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "## How to get started\n",
17 | "\n",
18 | "1. Clone this repository to your local machine.\n",
19 | "\n",
20 | "2. Install the required dependencies by running the following command:\n",
21 | " "
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | "> ⚠️ **Please ignore error messages related to pip's dependency resolver.**"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": null,
34 | "metadata": {},
35 | "outputs": [],
36 | "source": [
37 | "%pip install -qU pip\n",
38 | "%pip install -qr ../requirements.txt"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {},
44 | "source": [
45 | "3. Restart the kernel after installing dependencies"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "# restart kernel\n",
55 | "from IPython.core.display import HTML\n",
56 | "HTML(\"<script>Jupyter.notebook.kernel.restart()</script>\")"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "4. Run the notebook cells in order, following the instructions provided."
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "---\n",
71 | "\n",
72 | "## Usage Notes & Tips 💡\n",
73 | "\n",
74 | "- This course uses Claude 3 Haiku with temperature 0. We will talk more about temperature later in the course. For now, it's enough to understand that these settings yield more deterministic results. All prompt engineering techniques in this course also apply to previous generation legacy Claude models such as Claude 2 and Claude Instant 1.2.\n",
75 | "\n",
76 | "- You can use `Shift + Enter` to execute the cell and move to the next one.\n",
77 | "\n",
78 | "- When you reach the bottom of a tutorial page, navigate to the next numbered file in the folder, or to the next numbered folder if you're finished with the content within that chapter file.\n",
79 | "\n",
80 | "### Boto3 & the Messages API\n",
81 | "We will be using the [AWS SDK for Python (boto3)](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) to call Claude on Amazon Bedrock, with request bodies in the [Messages API](https://docs.anthropic.com/claude/reference/messages_post) format, throughout this tutorial. \n",
82 | "\n",
83 | "Below is an example of what running a prompt will look like in this tutorial."
84 | ]
85 | },
86 | {
87 | "cell_type": "markdown",
88 | "metadata": {},
89 | "source": [
90 | "First, we set and store the model name and region."
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "import boto3\n",
100 | "session = boto3.Session() # create a boto3 session to dynamically get and set the region name\n",
101 | "AWS_REGION = session.region_name\n",
102 | "print(\"AWS Region:\", AWS_REGION)\n",
103 | "MODEL_NAME = \"anthropic.claude-3-haiku-20240307-v1:0\"\n",
104 | "\n",
105 | "%store MODEL_NAME\n",
106 | "%store AWS_REGION"
107 | ]
108 | },
109 | {
110 | "cell_type": "markdown",
111 | "metadata": {},
112 | "source": [
113 | "Then, we create `get_completion`, which is a helper function that sends a prompt to Claude and returns Claude's generated response. Run that cell now."
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "metadata": {},
120 | "outputs": [],
121 | "source": [
122 | "import boto3\n",
123 | "import json\n",
124 | "\n",
125 | "bedrock = boto3.client('bedrock-runtime', region_name=AWS_REGION)\n",
126 | "\n",
127 | "def get_completion(prompt):\n",
128 | " body = json.dumps(\n",
129 | " {\n",
130 | " \"anthropic_version\": \"bedrock-2023-05-31\",\n",
131 | " \"max_tokens\": 2000,\n",
132 | " \"messages\": [{\"role\": \"user\", \"content\": prompt}],\n",
133 | " \"temperature\": 0.0,\n",
134 | " \"top_p\": 1,\n",
135 | " \"system\": ''\n",
136 | " }\n",
137 | " )\n",
138 | " response = bedrock.invoke_model(body=body, modelId=MODEL_NAME)\n",
139 | " response_body = json.loads(response.get('body').read())\n",
140 | "\n",
141 | " return response_body.get('content')[0].get('text')"
142 | ]
143 | },
144 | {
145 | "cell_type": "markdown",
146 | "metadata": {},
147 | "source": [
148 | "Now we will write out an example prompt for Claude and print Claude's output by running our `get_completion` helper function. Running the cell below will print out a response from Claude beneath it.\n",
149 | "\n",
150 | "Feel free to play around with the prompt string to elicit different responses from Claude."
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": null,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "# Prompt\n",
160 | "prompt = \"Hello, Claude!\"\n",
161 | "\n",
162 | "# Get Claude's response\n",
163 | "print(get_completion(prompt))"
164 | ]
165 | },
166 | {
167 | "cell_type": "markdown",
168 | "metadata": {},
169 | "source": [
170 | "The `MODEL_NAME` and `AWS_REGION` variables defined earlier will be used throughout the tutorial. Just make sure to run the cells for each tutorial page from top to bottom."
171 | ]
172 | }
173 | ],
174 | "metadata": {
175 | "kernelspec": {
176 | "display_name": "py310",
177 | "language": "python",
178 | "name": "python3"
179 | },
180 | "language_info": {
181 | "codemirror_mode": {
182 | "name": "ipython",
183 | "version": 3
184 | },
185 | "file_extension": ".py",
186 | "mimetype": "text/x-python",
187 | "name": "python",
188 | "nbconvert_exporter": "python",
189 | "pygments_lexer": "ipython3",
190 | "version": "3.12.0"
191 | }
192 | },
193 | "nbformat": 4,
194 | "nbformat_minor": 2
195 | }
196 |
--------------------------------------------------------------------------------
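Note that, unlike the `AnthropicBedrock` helper in the sibling `anthropic` notebook, the boto3 `get_completion` above hardcodes an empty system prompt. A sketch of a variant that exposes it, mirroring the `get_completion(prompt, system='')` signature used elsewhere in the workshop:

```python
import boto3
import json

bedrock = boto3.client("bedrock-runtime", region_name=AWS_REGION)  # AWS_REGION as stored above

def get_completion(prompt, system=""):
    body = json.dumps(
        {
            "anthropic_version": "bedrock-2023-05-31",  # version string Bedrock expects for Claude models
            "max_tokens": 2000,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.0,
            "top_p": 1,
            "system": system,  # caller-supplied instead of hardcoded
        }
    )
    response = bedrock.invoke_model(body=body, modelId=MODEL_NAME)
    response_body = json.loads(response.get("body").read())
    return response_body.get("content")[0].get("text")
```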
/Anthropic 1P/03_Assigning_Roles_Role_Prompting.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 3: Assigning Roles (Role Prompting)\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "!pip install anthropic\n",
25 | "\n",
26 | "# Import python's built-in regular expression library\n",
27 | "import re\n",
28 | "import anthropic\n",
29 | "\n",
30 | "# Retrieve the API_KEY & MODEL_NAME variables from the IPython store\n",
31 | "%store -r API_KEY\n",
32 | "%store -r MODEL_NAME\n",
33 | "\n",
34 | "client = anthropic.Anthropic(api_key=API_KEY)\n",
35 | "\n",
36 | "def get_completion(prompt: str, system_prompt=\"\"):\n",
37 | " message = client.messages.create(\n",
38 | " model=MODEL_NAME,\n",
39 | " max_tokens=2000,\n",
40 | " temperature=0.0,\n",
41 | " system=system_prompt,\n",
42 | " messages=[\n",
43 | " {\"role\": \"user\", \"content\": prompt}\n",
44 | " ]\n",
45 | " )\n",
46 | " return message.content[0].text"
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 | "---\n",
54 | "\n",
55 | "## Lesson\n",
56 | "\n",
57 | "Continuing on the theme of Claude having no context aside from what you say, it's sometimes important to **prompt Claude to inhabit a specific role (including all necessary context)**. This is also known as role prompting. The more detailed the role context, the better.\n",
58 | "\n",
59 | "**Priming Claude with a role can improve Claude's performance** in a variety of fields, from writing to coding to summarizing. It's like how humans can sometimes be helped when told to \"think like a ______\". Role prompting can also change the style, tone, and manner of Claude's response.\n",
60 | "\n",
61 | "**Note:** Role prompting can happen either in the system prompt or as part of the User message turn."
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "metadata": {},
67 | "source": [
68 | "### Examples\n",
69 | "\n",
70 | "In the example below, we see that without role prompting, Claude provides a **straightforward and non-stylized answer** when asked to give a single sentence perspective on skateboarding.\n",
71 | "\n",
72 | "However, when we prime Claude to inhabit the role of a cat, Claude's perspective changes, and thus **Claude's response tone, style, and content adapt to the new role**. \n",
73 | "\n",
74 | "**Note:** A bonus technique you can use is to **provide Claude context on its intended audience**. Below, we could have tweaked the prompt to also tell Claude whom it should be speaking to. \"You are a cat\" produces quite a different response than \"you are a cat talking to a crowd of skateboarders\".\n",
75 | "\n",
76 | "Here is the prompt without role prompting in the system prompt:"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {},
83 | "outputs": [],
84 | "source": [
85 | "# Prompt\n",
86 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
87 | "\n",
88 | "# Print Claude's response\n",
89 | "print(get_completion(PROMPT))"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {},
95 | "source": [
96 | "Here is the same user question, except with role prompting."
97 | ]
98 | },
99 | {
100 | "cell_type": "code",
101 | "execution_count": null,
102 | "metadata": {},
103 | "outputs": [],
104 | "source": [
105 | "# System prompt\n",
106 | "SYSTEM_PROMPT = \"You are a cat.\"\n",
107 | "\n",
108 | "# Prompt\n",
109 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
110 | "\n",
111 | "# Print Claude's response\n",
112 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": [
119 | "You can use role prompting as a way to get Claude to emulate certain styles in writing, speak in a certain voice, or guide the complexity of its answers. **Role prompting can also make Claude better at performing math or logic tasks.**\n",
120 | "\n",
121 | "For example, in the example below, there is a definitive correct answer, which is yes. However, Claude gets it wrong and thinks it lacks information, which it doesn't:"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": null,
127 | "metadata": {},
128 | "outputs": [],
129 | "source": [
130 | "# Prompt\n",
131 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
132 | "\n",
133 | "# Print Claude's response\n",
134 | "print(get_completion(PROMPT))"
135 | ]
136 | },
137 | {
138 | "cell_type": "markdown",
139 | "metadata": {},
140 | "source": [
141 | "Now, what if we **prime Claude to act as a logic bot**? How will that change Claude's answer? \n",
142 | "\n",
143 | "It turns out that with this new role assignment, Claude gets it right (although notably not for all the right reasons)."
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": null,
149 | "metadata": {},
150 | "outputs": [],
151 | "source": [
152 | "# System prompt\n",
153 | "SYSTEM_PROMPT = \"You are a logic bot designed to answer complex logic problems.\"\n",
154 | "\n",
155 | "# Prompt\n",
156 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
157 | "\n",
158 | "# Print Claude's response\n",
159 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
160 | ]
161 | },
162 | {
163 | "cell_type": "markdown",
164 | "metadata": {},
165 | "source": [
166 | "**Note:** What you'll learn throughout this course is that there are **many prompt engineering techniques you can use to derive similar results**. Which techniques you use is up to you and your preference! We encourage you to **experiment to find your own prompt engineering style**.\n",
167 | "\n",
168 | "If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {},
174 | "source": [
175 | "---\n",
176 | "\n",
177 | "## Exercises\n",
178 | "- [Exercise 3.1 - Math Correction](#exercise-31---math-correction)"
179 | ]
180 | },
181 | {
182 | "cell_type": "markdown",
183 | "metadata": {},
184 | "source": [
185 | "### Exercise 3.1 - Math Correction\n",
186 | "In some instances, **Claude may struggle with mathematics**, even simple mathematics. Below, Claude incorrectly assesses the math problem as correctly solved, even though there's an obvious arithmetic mistake in the second step. Note that Claude actually catches the mistake when going through the solution step by step, but doesn't follow through to conclude that the overall solution is wrong.\n",
187 | "\n",
188 | "Modify the `PROMPT` and / or the `SYSTEM_PROMPT` to make Claude grade the solution as `incorrectly` solved, rather than correctly solved. \n"
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": null,
194 | "metadata": {},
195 | "outputs": [],
196 | "source": [
197 | "# System prompt - if you don't want to use a system prompt, you can leave this variable set to an empty string\n",
198 | "SYSTEM_PROMPT = \"\"\n",
199 | "\n",
200 | "# Prompt\n",
201 | "PROMPT = \"\"\"Is this equation solved correctly below?\n",
202 | "\n",
203 | "2x - 3 = 9\n",
204 | "2x = 6\n",
205 | "x = 3\"\"\"\n",
206 | "\n",
207 | "# Get Claude's response\n",
208 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
209 | "\n",
210 | "# Function to grade exercise correctness\n",
211 | "def grade_exercise(text):\n",
212 | " if \"incorrect\" in text.lower() or \"not correct\" in text.lower():\n",
213 | " return True\n",
214 | " else:\n",
215 | " return False\n",
216 | "\n",
217 | "# Print Claude's response and the corresponding grade\n",
218 | "print(response)\n",
219 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
220 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
221 | ]
222 | },
223 | {
224 | "cell_type": "markdown",
225 | "metadata": {},
226 | "source": [
227 | "❓ If you want a hint, run the cell below!"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": null,
233 | "metadata": {},
234 | "outputs": [],
235 | "source": [
236 | "from hints import exercise_3_1_hint; print(exercise_3_1_hint)"
237 | ]
238 | },
239 | {
240 | "cell_type": "markdown",
241 | "metadata": {},
242 | "source": [
243 | "### Congrats!\n",
244 | "\n",
245 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
246 | ]
247 | },
248 | {
249 | "cell_type": "markdown",
250 | "metadata": {},
251 | "source": [
252 | "---\n",
253 | "\n",
254 | "## Example Playground\n",
255 | "\n",
256 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how your changes may affect Claude's responses."
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": [
265 | "# Prompt\n",
266 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
267 | "\n",
268 | "# Print Claude's response\n",
269 | "print(get_completion(PROMPT))"
270 | ]
271 | },
272 | {
273 | "cell_type": "code",
274 | "execution_count": null,
275 | "metadata": {},
276 | "outputs": [],
277 | "source": [
278 | "# System prompt\n",
279 | "SYSTEM_PROMPT = \"You are a cat.\"\n",
280 | "\n",
281 | "# Prompt\n",
282 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
283 | "\n",
284 | "# Print Claude's response\n",
285 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
286 | ]
287 | },
288 | {
289 | "cell_type": "code",
290 | "execution_count": null,
291 | "metadata": {},
292 | "outputs": [],
293 | "source": [
294 | "# Prompt\n",
295 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
296 | "\n",
297 | "# Print Claude's response\n",
298 | "print(get_completion(PROMPT))"
299 | ]
300 | },
301 | {
302 | "cell_type": "code",
303 | "execution_count": null,
304 | "metadata": {},
305 | "outputs": [],
306 | "source": [
307 | "# System prompt\n",
308 | "SYSTEM_PROMPT = \"You are a logic bot designed to answer complex logic problems.\"\n",
309 | "\n",
310 | "# Prompt\n",
311 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
312 | "\n",
313 | "# Print Claude's response\n",
314 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
315 | ]
316 | }
317 | ],
318 | "metadata": {
319 | "language_info": {
320 | "name": "python"
321 | }
322 | },
323 | "nbformat": 4,
324 | "nbformat_minor": 2
325 | }
326 |
--------------------------------------------------------------------------------
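The lesson above notes that role prompting can live either in the system prompt or in the user turn, but every cell in the notebook uses the system-prompt form. A sketch of the user-turn variant, reusing the notebook's `get_completion` helper:

```python
# Role assignment embedded directly in the user message instead of the system prompt
PROMPT = """You are a logic bot designed to answer complex logic problems.

Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, \
and we don't know if Anne is married. Is a married person looking at an unmarried person?"""

print(get_completion(PROMPT))
```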
/AmazonBedrock/anthropic/03_Assigning_Roles_Role_Prompting.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 3: Assigning Roles (Role Prompting)\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "%pip install anthropic --quiet\n",
25 | "\n",
26 | "# Import the hints module from the utils package\n",
27 | "import os\n",
28 | "import sys\n",
29 | "module_path = \"..\"\n",
30 | "sys.path.append(os.path.abspath(module_path))\n",
31 | "from utils import hints\n",
32 | "\n",
33 | "# Import python's built-in regular expression library\n",
34 | "import re\n",
35 | "from anthropic import AnthropicBedrock\n",
36 | "\n",
37 | "%store -r MODEL_NAME\n",
38 | "%store -r AWS_REGION\n",
39 | "\n",
40 | "client = AnthropicBedrock(aws_region=AWS_REGION)\n",
41 | "\n",
42 | "def get_completion(prompt, system=''):\n",
43 | " message = client.messages.create(\n",
44 | " model=MODEL_NAME,\n",
45 | " max_tokens=2000,\n",
46 | " temperature=0.0,\n",
47 | " messages=[\n",
48 | " {\"role\": \"user\", \"content\": prompt}\n",
49 | " ],\n",
50 | " system=system\n",
51 | " )\n",
52 | " return message.content[0].text"
53 | ]
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "metadata": {},
58 | "source": [
59 | "---\n",
60 | "\n",
61 | "## Lesson\n",
62 | "\n",
63 | "Continuing on the theme of Claude having no context aside from what you say, it's sometimes important to **prompt Claude to inhabit a specific role (including all necessary context)**. This is also known as role prompting. The more detailed the role context, the better.\n",
64 | "\n",
65 | "**Priming Claude with a role can improve Claude's performance** in a variety of fields, from writing to coding to summarizing. It's like how humans can sometimes be helped when told to \"think like a ______\". Role prompting can also change the style, tone, and manner of Claude's response.\n",
66 | "\n",
67 | "**Note:** Role prompting can happen either in the system prompt or as part of the User message turn."
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "### Examples\n",
75 | "\n",
76 | "In the example below, we see that without role prompting, Claude provides a **straightforward and non-stylized answer** when asked to give a single sentence perspective on skateboarding.\n",
77 | "\n",
78 | "However, when we prime Claude to inhabit the role of a cat, Claude's perspective changes, and thus **Claude's response tone, style, and content adapt to the new role**. \n",
79 | "\n",
80 | "**Note:** A bonus technique you can use is to **provide Claude context on its intended audience**. Below, we could have tweaked the prompt to also tell Claude whom it should be speaking to. \"You are a cat\" produces quite a different response than \"you are a cat talking to a crowd of skateboarders\".\n",
81 | "\n",
82 | "Here is the prompt without role prompting in the system prompt:"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "metadata": {},
89 | "outputs": [],
90 | "source": [
91 | "# Prompt\n",
92 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
93 | "\n",
94 | "# Print Claude's response\n",
95 | "print(get_completion(PROMPT))"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "Here is the same user question, except with role prompting."
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": null,
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "# System prompt\n",
112 | "SYSTEM_PROMPT = \"You are a cat.\"\n",
113 | "\n",
114 | "# Prompt\n",
115 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
116 | "\n",
117 | "# Print Claude's response\n",
118 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "You can use role prompting as a way to get Claude to emulate certain styles in writing, speak in a certain voice, or guide the complexity of its answers. **Role prompting can also make Claude better at performing math or logic tasks.**\n",
126 | "\n",
127 | "For example, in the example below, there is a definitive correct answer, which is yes. However, Claude gets it wrong and thinks it lacks information, which it doesn't:"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": null,
133 | "metadata": {},
134 | "outputs": [],
135 | "source": [
136 | "# Prompt\n",
137 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
138 | "\n",
139 | "# Print Claude's response\n",
140 | "print(get_completion(PROMPT))"
141 | ]
142 | },
143 | {
144 | "cell_type": "markdown",
145 | "metadata": {},
146 | "source": [
147 | "Now, what if we **prime Claude to act as a logic bot**? How will that change Claude's answer? \n",
148 | "\n",
149 | "It turns out that with this new role assignment, Claude gets it right (although notably not for all the right reasons)."
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "metadata": {},
156 | "outputs": [],
157 | "source": [
158 | "# System prompt\n",
159 | "SYSTEM_PROMPT = \"You are a logic bot designed to answer complex logic problems.\"\n",
160 | "\n",
161 | "# Prompt\n",
162 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
163 | "\n",
164 | "# Print Claude's response\n",
165 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "metadata": {},
171 | "source": [
172 | "**Note:** What you'll learn throughout this course is that there are **many prompt engineering techniques you can use to derive similar results**. Which techniques you use is up to you and your preference! We encourage you to **experiment to find your own prompt engineering style**.\n",
173 | "\n",
174 | "If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
175 | ]
176 | },
177 | {
178 | "cell_type": "markdown",
179 | "metadata": {},
180 | "source": [
181 | "---\n",
182 | "\n",
183 | "## Exercises\n",
184 | "- [Exercise 3.1 - Math Correction](#exercise-31---math-correction)"
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "### Exercise 3.1 - Math Correction\n",
192 | "In some instances, **Claude may struggle with mathematics**, even simple mathematics. Below, Claude incorrectly assesses the math problem as correctly solved, even though there's an obvious arithmetic mistake in the second step. Note that Claude actually catches the mistake when going through the solution step by step, but doesn't follow through to conclude that the overall solution is wrong.\n",
193 | "\n",
194 | "Modify the `PROMPT` and / or the `SYSTEM_PROMPT` to make Claude grade the solution as `incorrectly` solved, rather than correctly solved. \n"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": null,
200 | "metadata": {},
201 | "outputs": [],
202 | "source": [
203 | "# System prompt - if you don't want to use a system prompt, you can leave this variable set to an empty string\n",
204 | "SYSTEM_PROMPT = \"\"\n",
205 | "\n",
206 | "# Prompt\n",
207 | "PROMPT = \"\"\"Is this equation solved correctly below?\n",
208 | "\n",
209 | "2x - 3 = 9\n",
210 | "2x = 6\n",
211 | "x = 3\"\"\"\n",
212 | "\n",
213 | "# Get Claude's response\n",
214 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
215 | "\n",
216 | "# Function to grade exercise correctness\n",
217 | "def grade_exercise(text):\n",
218 | " if \"incorrect\" in text or \"not correct\" in text.lower():\n",
219 | " return True\n",
220 | " else:\n",
221 | " return False\n",
222 | "\n",
223 | "# Print Claude's response and the corresponding grade\n",
224 | "print(response)\n",
225 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
226 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
227 | ]
228 | },
229 | {
230 | "cell_type": "markdown",
231 | "metadata": {},
232 | "source": [
233 | "❓ If you want a hint, run the cell below!"
234 | ]
235 | },
236 | {
237 | "cell_type": "code",
238 | "execution_count": null,
239 | "metadata": {},
240 | "outputs": [],
241 | "source": [
242 | "print(hints.exercise_3_1_hint)"
243 | ]
244 | },
245 | {
246 | "cell_type": "markdown",
247 | "metadata": {},
248 | "source": [
249 | "### Congrats!\n",
250 | "\n",
251 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
252 | ]
253 | },
254 | {
255 | "cell_type": "markdown",
256 | "metadata": {},
257 | "source": [
258 | "---\n",
259 | "\n",
260 | "## Example Playground\n",
261 | "\n",
262 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how it may affect Claude's responses."
263 | ]
264 | },
265 | {
266 | "cell_type": "code",
267 | "execution_count": null,
268 | "metadata": {},
269 | "outputs": [],
270 | "source": [
271 | "# Prompt\n",
272 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
273 | "\n",
274 | "# Print Claude's response\n",
275 | "print(get_completion(PROMPT))"
276 | ]
277 | },
278 | {
279 | "cell_type": "code",
280 | "execution_count": null,
281 | "metadata": {},
282 | "outputs": [],
283 | "source": [
284 | "# System prompt\n",
285 | "SYSTEM_PROMPT = \"You are a cat.\"\n",
286 | "\n",
287 | "# Prompt\n",
288 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
289 | "\n",
290 | "# Print Claude's response\n",
291 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": null,
297 | "metadata": {},
298 | "outputs": [],
299 | "source": [
300 | "# Prompt\n",
301 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
302 | "\n",
303 | "# Print Claude's response\n",
304 | "print(get_completion(PROMPT))"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "execution_count": null,
310 | "metadata": {},
311 | "outputs": [],
312 | "source": [
313 | "# System prompt\n",
314 | "SYSTEM_PROMPT = \"You are a logic bot designed to answer complex logic problems.\"\n",
315 | "\n",
316 | "# Prompt\n",
317 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
318 | "\n",
319 | "# Print Claude's response\n",
320 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
321 | ]
322 | }
323 | ],
324 | "metadata": {
325 | "language_info": {
326 | "name": "python"
327 | }
328 | },
329 | "nbformat": 4,
330 | "nbformat_minor": 2
331 | }
332 |
--------------------------------------------------------------------------------
/AmazonBedrock/boto3/03_Assigning_Roles_Role_Prompting.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 3: Assigning Roles (Role Prompting)\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "# Import python's built-in regular expression library\n",
25 | "import re\n",
26 | "import boto3\n",
27 | "import json\n",
28 | "\n",
29 | "# Import the hints module from the utils package\n",
30 | "import os\n",
31 | "import sys\n",
32 | "module_path = \"..\"\n",
33 | "sys.path.append(os.path.abspath(module_path))\n",
34 | "from utils import hints\n",
35 | "\n",
36 | "# Retrieve the MODEL_NAME variable from the IPython store\n",
37 | "%store -r MODEL_NAME\n",
38 | "%store -r AWS_REGION\n",
39 | "\n",
40 | "client = boto3.client('bedrock-runtime',region_name=AWS_REGION)\n",
41 | "\n",
42 | "def get_completion(prompt,system=''):\n",
43 | " body = json.dumps(\n",
44 | " {\n",
45 | " \"anthropic_version\": '',\n",
46 | " \"max_tokens\": 2000,\n",
47 | " \"messages\": [{\"role\": \"user\", \"content\": prompt}],\n",
48 | " \"temperature\": 0.0,\n",
49 | " \"top_p\": 1,\n",
50 | " \"system\": system\n",
51 | " }\n",
52 | " )\n",
53 | " response = client.invoke_model(body=body, modelId=MODEL_NAME)\n",
54 | " response_body = json.loads(response.get('body').read())\n",
55 | "\n",
56 | " return response_body.get('content')[0].get('text')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "---\n",
64 | "\n",
65 | "## Lesson\n",
66 | "\n",
67 | "Continuing on the theme of Claude having no context aside from what you say, it's sometimes important to **prompt Claude to inhabit a specific role (including all necessary context)**. This is also known as role prompting. The more detail to the role context, the better.\n",
68 | "\n",
69 | "**Priming Claude with a role can improve Claude's performance** in a variety of fields, from writing to coding to summarizing. It's like how humans can sometimes be helped when told to \"think like a ______\". Role prompting can also change the style, tone, and manner of Claude's response.\n",
70 | "\n",
71 | "**Note:** Role prompting can happen either in the system prompt or as part of the User message turn."
72 | ]
73 | },
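74 |   {
75 |    "cell_type": "markdown",
76 |    "metadata": {},
77 |    "source": [
78 |     "As a minimal sketch of the second option, the cell below places the role directly in the User message instead of the system prompt, reusing the `get_completion` helper from the setup cell. The phrasing is illustrative only."
79 |    ]
80 |   },
81 |   {
82 |    "cell_type": "code",
83 |    "execution_count": null,
84 |    "metadata": {},
85 |    "outputs": [],
86 |    "source": [
87 |     "# Role assigned in the User turn rather than the system prompt (illustrative sketch)\n",
88 |     "PROMPT = \"You are a cat. In one sentence, what do you think about skateboarding?\"\n",
89 |     "\n",
90 |     "# Print Claude's response\n",
91 |     "print(get_completion(PROMPT))"
92 |    ]
93 |   },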
74 | {
75 | "cell_type": "markdown",
76 | "metadata": {},
77 | "source": [
78 | "### Examples\n",
79 | "\n",
80 | "In the example below, we see that without role prompting, Claude provides a **straightforward and non-stylized answer** when asked to give a single sentence perspective on skateboarding.\n",
81 | "\n",
82 | "However, when we prime Claude to inhabit the role of a cat, Claude's perspective changes, and thus **Claude's response tone, style, content adapts to the new role**. \n",
83 | "\n",
84 | "**Note:** A bonus technique you can use is to **provide Claude context on its intended audience**. Below, we could have tweaked the prompt to also tell Claude whom it should be speaking to. \"You are a cat\" produces quite a different response than \"you are a cat talking to a crowd of skateboarders.\n",
85 | "\n",
86 | "Here is the prompt without role prompting in the system prompt:"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "# Prompt\n",
96 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
97 | "\n",
98 | "# Print Claude's response\n",
99 | "print(get_completion(PROMPT))"
100 | ]
101 | },
102 | {
103 | "cell_type": "markdown",
104 | "metadata": {},
105 | "source": [
106 | "Here is the same user question, except with role prompting."
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "# System prompt\n",
116 | "SYSTEM_PROMPT = \"You are a cat.\"\n",
117 | "\n",
118 | "# Prompt\n",
119 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
120 | "\n",
121 | "# Print Claude's response\n",
122 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
123 | ]
124 | },
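125 |   {
126 |    "cell_type": "markdown",
127 |    "metadata": {},
128 |    "source": [
129 |     "And here is a sketch of the audience variation from the note above; the exact system prompt wording is just one possibility."
130 |    ]
131 |   },
132 |   {
133 |    "cell_type": "code",
134 |    "execution_count": null,
135 |    "metadata": {},
136 |    "outputs": [],
137 |    "source": [
138 |     "# System prompt with an intended audience added to the role (illustrative sketch)\n",
139 |     "SYSTEM_PROMPT = \"You are a cat talking to a crowd of skateboarders.\"\n",
140 |     "\n",
141 |     "# Prompt\n",
142 |     "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
143 |     "\n",
144 |     "# Print Claude's response\n",
145 |     "print(get_completion(PROMPT, SYSTEM_PROMPT))"
146 |    ]
147 |   },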
125 | {
126 | "cell_type": "markdown",
127 | "metadata": {},
128 | "source": [
129 | "You can use role prompting as a way to get Claude to emulate certain styles in writing, speak in a certain voice, or guide the complexity of its answers. **Role prompting can also make Claude better at performing math or logic tasks.**\n",
130 | "\n",
131 | "For example, in the example below, there is a definitive correct answer, which is yes. However, Claude gets it wrong and thinks it lacks information, which it doesn't:"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "metadata": {},
138 | "outputs": [],
139 | "source": [
140 | "# Prompt\n",
141 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
142 | "\n",
143 | "# Print Claude's response\n",
144 | "print(get_completion(PROMPT))"
145 | ]
146 | },
147 | {
148 | "cell_type": "markdown",
149 | "metadata": {},
150 | "source": [
151 | "Now, what if we **prime Claude to act as a logic bot**? How will that change Claude's answer? \n",
152 | "\n",
153 | "It turns out that with this new role assignment, Claude gets it right. (Although notably not for all the right reasons)"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "# System prompt\n",
163 | "SYSTEM_PROMPT = \"You are a logic bot designed to answer complex logic problems.\"\n",
164 | "\n",
165 | "# Prompt\n",
166 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
167 | "\n",
168 | "# Print Claude's response\n",
169 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "metadata": {},
175 | "source": [
176 | "**Note:** What you'll learn throughout this course is that there are **many prompt engineering techniques you can use to derive similar results**. Which techniques you use is up to you and your preference! We encourage you to **experiment to find your own prompt engineering style**.\n",
177 | "\n",
178 | "If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
179 | ]
180 | },
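181 |   {
182 |    "cell_type": "markdown",
183 |    "metadata": {},
184 |    "source": [
185 |     "As one concrete illustration of that point, here is a minimal sketch of an alternative to role prompting for the same puzzle: asking Claude directly in the user turn to reason through the cases. The exact wording is only one possibility, and results may vary by model."
186 |    ]
187 |   },
188 |   {
189 |    "cell_type": "code",
190 |    "execution_count": null,
191 |    "metadata": {},
192 |    "outputs": [],
193 |    "source": [
194 |     "# Alternative technique (illustrative sketch): ask for case-by-case reasoning instead of assigning a role\n",
195 |     "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Consider both possible cases for Anne, then answer: is a married person looking at an unmarried person?\"\n",
196 |     "\n",
197 |     "# Print Claude's response\n",
198 |     "print(get_completion(PROMPT))"
199 |    ]
200 |   },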
181 | {
182 | "cell_type": "markdown",
183 | "metadata": {},
184 | "source": [
185 | "---\n",
186 | "\n",
187 | "## Exercises\n",
188 | "- [Exercise 3.1 - Math Correction](#exercise-31---math-correction)"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "metadata": {},
194 | "source": [
195 | "### Exercise 3.1 - Math Correction\n",
196 | "In some instances, **Claude may struggle with mathematics**, even simple mathematics. Below, Claude incorrectly assesses the math problem as correctly solved, even though there's an obvious arithmetic mistake in the second step. Note that Claude actually catches the mistake when going through step-by-step, but doesn't jump to the conclusion that the overall solution is wrong.\n",
197 | "\n",
198 | "Modify the `PROMPT` and / or the `SYSTEM_PROMPT` to make Claude grade the solution as `incorrectly` solved, rather than correctly solved. \n"
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": null,
204 | "metadata": {},
205 | "outputs": [],
206 | "source": [
207 | "# System prompt - if you don't want to use a system prompt, you can leave this variable set to an empty string\n",
208 | "SYSTEM_PROMPT = \"\"\n",
209 | "\n",
210 | "# Prompt\n",
211 | "PROMPT = \"\"\"Is this equation solved correctly below?\n",
212 | "\n",
213 | "2x - 3 = 9\n",
214 | "2x = 6\n",
215 | "x = 3\"\"\"\n",
216 | "\n",
217 | "# Get Claude's response\n",
218 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
219 | "\n",
220 | "# Function to grade exercise correctness\n",
221 | "def grade_exercise(text):\n",
222 | " if \"incorrect\" in text or \"not correct\" in text.lower():\n",
223 | " return True\n",
224 | " else:\n",
225 | " return False\n",
226 | "\n",
227 | "# Print Claude's response and the corresponding grade\n",
228 | "print(response)\n",
229 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
230 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
231 | ]
232 | },
233 | {
234 | "cell_type": "markdown",
235 | "metadata": {},
236 | "source": [
237 | "❓ If you want a hint, run the cell below!"
238 | ]
239 | },
240 | {
241 | "cell_type": "code",
242 | "execution_count": null,
243 | "metadata": {},
244 | "outputs": [],
245 | "source": [
246 | "print(hints.exercise_3_1_hint)"
247 | ]
248 | },
249 | {
250 | "cell_type": "markdown",
251 | "metadata": {},
252 | "source": [
253 | "### Congrats!\n",
254 | "\n",
255 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "metadata": {},
261 | "source": [
262 | "---\n",
263 | "\n",
264 | "## Example Playground\n",
265 | "\n",
266 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how it may affect Claude's responses."
267 | ]
268 | },
269 | {
270 | "cell_type": "code",
271 | "execution_count": null,
272 | "metadata": {},
273 | "outputs": [],
274 | "source": [
275 | "# Prompt\n",
276 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
277 | "\n",
278 | "# Print Claude's response\n",
279 | "print(get_completion(PROMPT))"
280 | ]
281 | },
282 | {
283 | "cell_type": "code",
284 | "execution_count": null,
285 | "metadata": {},
286 | "outputs": [],
287 | "source": [
288 | "# System prompt\n",
289 | "SYSTEM_PROMPT = \"You are a cat.\"\n",
290 | "\n",
291 | "# Prompt\n",
292 | "PROMPT = \"In one sentence, what do you think about skateboarding?\"\n",
293 | "\n",
294 | "# Print Claude's response\n",
295 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
296 | ]
297 | },
298 | {
299 | "cell_type": "code",
300 | "execution_count": null,
301 | "metadata": {},
302 | "outputs": [],
303 | "source": [
304 | "# Prompt\n",
305 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
306 | "\n",
307 | "# Print Claude's response\n",
308 | "print(get_completion(PROMPT))"
309 | ]
310 | },
311 | {
312 | "cell_type": "code",
313 | "execution_count": null,
314 | "metadata": {},
315 | "outputs": [],
316 | "source": [
317 | "# System prompt\n",
318 | "SYSTEM_PROMPT = \"You are a logic bot designed to answer complex logic problems.\"\n",
319 | "\n",
320 | "# Prompt\n",
321 | "PROMPT = \"Jack is looking at Anne. Anne is looking at George. Jack is married, George is not, and we don’t know if Anne is married. Is a married person looking at an unmarried person?\"\n",
322 | "\n",
323 | "# Print Claude's response\n",
324 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
325 | ]
326 | }
327 | ],
328 | "metadata": {
329 | "language_info": {
330 | "name": "python"
331 | }
332 | },
333 | "nbformat": 4,
334 | "nbformat_minor": 2
335 | }
336 |
--------------------------------------------------------------------------------
/AmazonBedrock/anthropic/02_Being_Clear_and_Direct.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 2: Being Clear and Direct\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "%pip install anthropic --quiet\n",
25 | "\n",
26 | "# Import the hints module from the utils package\n",
27 | "import os\n",
28 | "import sys\n",
29 | "module_path = \"..\"\n",
30 | "sys.path.append(os.path.abspath(module_path))\n",
31 | "from utils import hints\n",
32 | "\n",
33 | "# Import python's built-in regular expression library\n",
34 | "import re\n",
35 | "from anthropic import AnthropicBedrock\n",
36 | "\n",
37 | "%store -r MODEL_NAME\n",
38 | "%store -r AWS_REGION\n",
39 | "\n",
40 | "client = AnthropicBedrock(aws_region=AWS_REGION)\n",
41 | "\n",
42 | "def get_completion(prompt, system=''):\n",
43 | " message = client.messages.create(\n",
44 | " model=MODEL_NAME,\n",
45 | " max_tokens=2000,\n",
46 | " temperature=0.0,\n",
47 | " messages=[\n",
48 | " {\"role\": \"user\", \"content\": prompt}\n",
49 | " ],\n",
50 | " system=system\n",
51 | " )\n",
52 | " return message.content[0].text"
53 | ]
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "metadata": {},
58 | "source": [
59 | "---\n",
60 | "\n",
61 | "## Lesson\n",
62 | "\n",
63 | "**Claude responds best to clear and direct instructions.**\n",
64 | "\n",
65 | "Think of Claude like any other human that is new to the job. **Claude has no context** on what to do aside from what you literally tell it. Just as when you instruct a human for the first time on a task, the more you explain exactly what you want in a straightforward manner to Claude, the better and more accurate Claude's response will be.\"\t\t\t\t\n",
66 | "\t\t\t\t\n",
67 | "When in doubt, follow the **Golden Rule of Clear Prompting**:\n",
68 | "- Show your prompt to a colleague or friend and have them follow the instructions themselves to see if they can produce the result you want. If they're confused, Claude's confused.\t\t\t\t"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "### Examples\n",
76 | "\n",
77 | "Let's take a task like writing poetry. (Ignore any syllable mismatch - LLMs aren't great at counting syllables yet.)"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {},
84 | "outputs": [],
85 | "source": [
86 | "# Prompt\n",
87 | "PROMPT = \"Write a haiku about robots.\"\n",
88 | "\n",
89 | "# Print Claude's response\n",
90 | "print(get_completion(PROMPT))"
91 | ]
92 | },
93 | {
94 | "cell_type": "markdown",
95 | "metadata": {},
96 | "source": [
97 | "This haiku is nice enough, but users may want Claude to go directly into the poem without the \"Here is a haiku\" preamble.\n",
98 | "\n",
99 | "How do we achieve that? We **ask for it**!"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": null,
105 | "metadata": {},
106 | "outputs": [],
107 | "source": [
108 | "# Prompt\n",
109 | "PROMPT = \"Write a haiku about robots. Skip the preamble; go straight into the poem.\"\n",
110 | "\n",
111 | "# Print Claude's response\n",
112 | "print(get_completion(PROMPT))"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": [
119 | "Here's another example. Let's ask Claude who's the best basketball player of all time. You can see below that while Claude lists a few names, **it doesn't respond with a definitive \"best\"**."
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": null,
125 | "metadata": {},
126 | "outputs": [],
127 | "source": [
128 | "# Prompt\n",
129 | "PROMPT = \"Who is the best basketball player of all time?\"\n",
130 | "\n",
131 | "# Print Claude's response\n",
132 | "print(get_completion(PROMPT))"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "metadata": {},
138 | "source": [
139 | "Can we get Claude to make up its mind and decide on a best player? Yes! Just ask!"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "# Prompt\n",
149 | "PROMPT = \"Who is the best basketball player of all time? Yes, there are differing opinions, but if you absolutely had to pick one player, who would it be?\"\n",
150 | "\n",
151 | "# Print Claude's response\n",
152 | "print(get_completion(PROMPT))"
153 | ]
154 | },
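155 |   {
156 |    "cell_type": "markdown",
157 |    "metadata": {},
158 |    "source": [
159 |     "Being direct extends to output format too. As a minimal sketch (the wording is just one option), you can spell out exactly how the answer should be structured:"
160 |    ]
161 |   },
162 |   {
163 |    "cell_type": "code",
164 |    "execution_count": null,
165 |    "metadata": {},
166 |    "outputs": [],
167 |    "source": [
168 |     "# Prompt - being explicit about the desired output format (illustrative sketch)\n",
169 |     "PROMPT = \"Who is the best basketball player of all time? Pick exactly one player and answer in a single sentence that begins with that player's name.\"\n",
170 |     "\n",
171 |     "# Print Claude's response\n",
172 |     "print(get_completion(PROMPT))"
173 |    ]
174 |   },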
155 | {
156 | "cell_type": "markdown",
157 | "metadata": {},
158 | "source": [
159 | "If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
160 | ]
161 | },
162 | {
163 | "cell_type": "markdown",
164 | "metadata": {},
165 | "source": [
166 | "---\n",
167 | "\n",
168 | "## Exercises\n",
169 | "- [Exercise 2.1 - Spanish](#exercise-21---spanish)\n",
170 | "- [Exercise 2.2 - One Player Only](#exercise-22---one-player-only)\n",
171 | "- [Exercise 2.3 - Write a Story](#exercise-23---write-a-story)"
172 | ]
173 | },
174 | {
175 | "cell_type": "markdown",
176 | "metadata": {},
177 | "source": [
178 | "### Exercise 2.1 - Spanish\n",
179 | "Modify the `SYSTEM_PROMPT` to make Claude output its answer in Spanish."
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "metadata": {},
186 | "outputs": [],
187 | "source": [
188 | "# System prompt - this is the only field you should chnage\n",
189 | "SYSTEM_PROMPT = \"[Replace this text]\"\n",
190 | "\n",
191 | "# Prompt\n",
192 | "PROMPT = \"Hello Claude, how are you?\"\n",
193 | "\n",
194 | "# Get Claude's response\n",
195 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
196 | "\n",
197 | "# Function to grade exercise correctness\n",
198 | "def grade_exercise(text):\n",
199 | " return \"hola\" in text.lower()\n",
200 | "\n",
201 | "# Print Claude's response and the corresponding grade\n",
202 | "print(response)\n",
203 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
204 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
205 | ]
206 | },
207 | {
208 | "cell_type": "markdown",
209 | "metadata": {},
210 | "source": [
211 | "❓ If you want a hint, run the cell below!"
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": null,
217 | "metadata": {},
218 | "outputs": [],
219 | "source": [
220 | "print(hints.exercise_2_1_hint)"
221 | ]
222 | },
223 | {
224 | "cell_type": "markdown",
225 | "metadata": {},
226 | "source": [
227 | "### Exercise 2.2 - One Player Only\n",
228 | "\n",
229 | "Modify the `PROMPT` so that Claude doesn't equivocate at all and responds with **ONLY** the name of one specific player, with **no other words or punctuation**. "
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": null,
235 | "metadata": {},
236 | "outputs": [],
237 | "source": [
238 | "# Prompt - this is the only field you should change\n",
239 | "PROMPT = \"[Replace this text]\"\n",
240 | "\n",
241 | "# Get Claude's response\n",
242 | "response = get_completion(PROMPT)\n",
243 | "\n",
244 | "# Function to grade exercise correctness\n",
245 | "def grade_exercise(text):\n",
246 | " return text == \"Michael Jordan\"\n",
247 | "\n",
248 | "# Print Claude's response and the corresponding grade\n",
249 | "print(response)\n",
250 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
251 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
252 | ]
253 | },
254 | {
255 | "cell_type": "markdown",
256 | "metadata": {},
257 | "source": [
258 | "❓ If you want a hint, run the cell below!"
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "execution_count": null,
264 | "metadata": {},
265 | "outputs": [],
266 | "source": [
267 | "print(hints.exercise_2_2_hint)"
268 | ]
269 | },
270 | {
271 | "cell_type": "markdown",
272 | "metadata": {},
273 | "source": [
274 | "### Exercise 2.3 - Write a Story\n",
275 | "\n",
276 | "Modify the `PROMPT` so that Claude responds with as long a response as you can muster. If your answer is **over 800 words**, Claude's response will be graded as correct."
277 | ]
278 | },
279 | {
280 | "cell_type": "code",
281 | "execution_count": null,
282 | "metadata": {},
283 | "outputs": [],
284 | "source": [
285 | "# Prompt - this is the only field you should change\n",
286 | "PROMPT = \"[Replace this text]\"\n",
287 | "\n",
288 | "# Get Claude's response\n",
289 | "response = get_completion(PROMPT)\n",
290 | "\n",
291 | "# Function to grade exercise correctness\n",
292 | "def grade_exercise(text):\n",
293 | " trimmed = text.strip()\n",
294 | " words = len(trimmed.split())\n",
295 | " return words >= 800\n",
296 | "\n",
297 | "# Print Claude's response and the corresponding grade\n",
298 | "print(response)\n",
299 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
300 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
301 | ]
302 | },
303 | {
304 | "cell_type": "markdown",
305 | "metadata": {},
306 | "source": [
307 | "❓ If you want a hint, run the cell below!"
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "execution_count": null,
313 | "metadata": {},
314 | "outputs": [],
315 | "source": [
316 | "print(hints.exercise_2_3_hint)"
317 | ]
318 | },
319 | {
320 | "cell_type": "markdown",
321 | "metadata": {},
322 | "source": [
323 | "### Congrats!\n",
324 | "\n",
325 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
326 | ]
327 | },
328 | {
329 | "cell_type": "markdown",
330 | "metadata": {},
331 | "source": [
332 | "---\n",
333 | "\n",
334 | "## Example Playground\n",
335 | "\n",
336 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how it may affect Claude's responses."
337 | ]
338 | },
339 | {
340 | "cell_type": "code",
341 | "execution_count": null,
342 | "metadata": {},
343 | "outputs": [],
344 | "source": [
345 | "# Prompt\n",
346 | "PROMPT = \"Write a haiku about robots.\"\n",
347 | "\n",
348 | "# Print Claude's response\n",
349 | "print(get_completion(PROMPT))"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": null,
355 | "metadata": {},
356 | "outputs": [],
357 | "source": [
358 | "# Prompt\n",
359 | "PROMPT = \"Write a haiku about robots. Skip the preamble; go straight into the poem.\"\n",
360 | "\n",
361 | "# Print Claude's response\n",
362 | "print(get_completion(PROMPT))"
363 | ]
364 | },
365 | {
366 | "cell_type": "code",
367 | "execution_count": null,
368 | "metadata": {},
369 | "outputs": [],
370 | "source": [
371 | "# Prompt\n",
372 | "PROMPT = \"Who is the best basketball player of all time?\"\n",
373 | "\n",
374 | "# Print Claude's response\n",
375 | "print(get_completion(PROMPT))"
376 | ]
377 | },
378 | {
379 | "cell_type": "code",
380 | "execution_count": null,
381 | "metadata": {},
382 | "outputs": [],
383 | "source": [
384 | "# Prompt\n",
385 | "PROMPT = \"Who is the best basketball player of all time? Yes, there are differing opinions, but if you absolutely had to pick one player, who would it be?\"\n",
386 | "\n",
387 | "# Print Claude's response\n",
388 | "print(get_completion(PROMPT))"
389 | ]
390 | }
391 | ],
392 | "metadata": {
393 | "language_info": {
394 | "name": "python"
395 | }
396 | },
397 | "nbformat": 4,
398 | "nbformat_minor": 2
399 | }
400 |
--------------------------------------------------------------------------------
/Anthropic 1P/02_Being_Clear_and_Direct.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 2: Being Clear and Direct\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "!pip install anthropic\n",
25 | "\n",
26 | "# Import python's built-in regular expression library\n",
27 | "import re\n",
28 | "import anthropic\n",
29 | "\n",
30 | "# Retrieve the API_KEY & MODEL_NAME variables from the IPython store\n",
31 | "%store -r API_KEY\n",
32 | "%store -r MODEL_NAME\n",
33 | "\n",
34 | "client = anthropic.Anthropic(api_key=API_KEY)\n",
35 | "\n",
36 | "# Note that we changed max_tokens to 4K just for this lesson to allow for longer completions in the exercises\n",
37 | "def get_completion(prompt: str, system_prompt=\"\"):\n",
38 | " message = client.messages.create(\n",
39 | " model=MODEL_NAME,\n",
40 | " max_tokens=4000,\n",
41 | " temperature=0.0,\n",
42 | " system=system_prompt,\n",
43 | " messages=[\n",
44 | " {\"role\": \"user\", \"content\": prompt}\n",
45 | " ]\n",
46 | " )\n",
47 | " return message.content[0].text"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "metadata": {},
53 | "source": [
54 | "---\n",
55 | "\n",
56 | "## Lesson\n",
57 | "\n",
58 | "**Claude responds best to clear and direct instructions.**\n",
59 | "\n",
60 | "Think of Claude like any other human that is new to the job. **Claude has no context** on what to do aside from what you literally tell it. Just as when you instruct a human for the first time on a task, the more you explain exactly what you want in a straightforward manner to Claude, the better and more accurate Claude's response will be.\"\t\t\t\t\n",
61 | "\t\t\t\t\n",
62 | "When in doubt, follow the **Golden Rule of Clear Prompting**:\n",
63 | "- Show your prompt to a colleague or friend and have them follow the instructions themselves to see if they can produce the result you want. If they're confused, Claude's confused.\t\t\t\t"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "### Examples\n",
71 | "\n",
72 | "Let's take a task like writing poetry. (Ignore any syllable mismatch - LLMs aren't great at counting syllables yet.)"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": null,
78 | "metadata": {},
79 | "outputs": [],
80 | "source": [
81 | "# Prompt\n",
82 | "PROMPT = \"Write a haiku about robots.\"\n",
83 | "\n",
84 | "# Print Claude's response\n",
85 | "print(get_completion(PROMPT))"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {},
91 | "source": [
92 | "This haiku is nice enough, but users may want Claude to go directly into the poem without the \"Here is a haiku\" preamble.\n",
93 | "\n",
94 | "How do we achieve that? We **ask for it**!"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "# Prompt\n",
104 | "PROMPT = \"Write a haiku about robots. Skip the preamble; go straight into the poem.\"\n",
105 | "\n",
106 | "# Print Claude's response\n",
107 | "print(get_completion(PROMPT))"
108 | ]
109 | },
110 | {
111 | "cell_type": "markdown",
112 | "metadata": {},
113 | "source": [
114 | "Here's another example. Let's ask Claude who's the best basketball player of all time. You can see below that while Claude lists a few names, **it doesn't respond with a definitive \"best\"**."
115 | ]
116 | },
117 | {
118 | "cell_type": "code",
119 | "execution_count": null,
120 | "metadata": {},
121 | "outputs": [],
122 | "source": [
123 | "# Prompt\n",
124 | "PROMPT = \"Who is the best basketball player of all time?\"\n",
125 | "\n",
126 | "# Print Claude's response\n",
127 | "print(get_completion(PROMPT))"
128 | ]
129 | },
130 | {
131 | "cell_type": "markdown",
132 | "metadata": {},
133 | "source": [
134 | "Can we get Claude to make up its mind and decide on a best player? Yes! Just ask!"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": null,
140 | "metadata": {},
141 | "outputs": [],
142 | "source": [
143 | "# Prompt\n",
144 | "PROMPT = \"Who is the best basketball player of all time? Yes, there are differing opinions, but if you absolutely had to pick one player, who would it be?\"\n",
145 | "\n",
146 | "# Print Claude's response\n",
147 | "print(get_completion(PROMPT))"
148 | ]
149 | },
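150 |   {
151 |    "cell_type": "markdown",
152 |    "metadata": {},
153 |    "source": [
154 |     "Being direct extends to output format too. As a minimal sketch (the wording is just one option), you can spell out exactly how the answer should be structured:"
155 |    ]
156 |   },
157 |   {
158 |    "cell_type": "code",
159 |    "execution_count": null,
160 |    "metadata": {},
161 |    "outputs": [],
162 |    "source": [
163 |     "# Prompt - being explicit about the desired output format (illustrative sketch)\n",
164 |     "PROMPT = \"Who is the best basketball player of all time? Pick exactly one player and answer in a single sentence that begins with that player's name.\"\n",
165 |     "\n",
166 |     "# Print Claude's response\n",
167 |     "print(get_completion(PROMPT))"
168 |    ]
169 |   },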
150 | {
151 | "cell_type": "markdown",
152 | "metadata": {},
153 | "source": [
154 | "If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
155 | ]
156 | },
157 | {
158 | "cell_type": "markdown",
159 | "metadata": {},
160 | "source": [
161 | "---\n",
162 | "\n",
163 | "## Exercises\n",
164 | "- [Exercise 2.1 - Spanish](#exercise-21---spanish)\n",
165 | "- [Exercise 2.2 - One Player Only](#exercise-22---one-player-only)\n",
166 | "- [Exercise 2.3 - Write a Story](#exercise-23---write-a-story)"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "### Exercise 2.1 - Spanish\n",
174 | "Modify the `SYSTEM_PROMPT` to make Claude output its answer in Spanish."
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": null,
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "# System prompt - this is the only field you should chnage\n",
184 | "SYSTEM_PROMPT = \"[Replace this text]\"\n",
185 | "\n",
186 | "# Prompt\n",
187 | "PROMPT = \"Hello Claude, how are you?\"\n",
188 | "\n",
189 | "# Get Claude's response\n",
190 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
191 | "\n",
192 | "# Function to grade exercise correctness\n",
193 | "def grade_exercise(text):\n",
194 | " return \"hola\" in text.lower()\n",
195 | "\n",
196 | "# Print Claude's response and the corresponding grade\n",
197 | "print(response)\n",
198 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
199 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "❓ If you want a hint, run the cell below!"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": null,
212 | "metadata": {},
213 | "outputs": [],
214 | "source": [
215 | "from hints import exercise_2_1_hint; print(exercise_2_1_hint)"
216 | ]
217 | },
218 | {
219 | "cell_type": "markdown",
220 | "metadata": {},
221 | "source": [
222 | "### Exercise 2.2 - One Player Only\n",
223 | "\n",
224 | "Modify the `PROMPT` so that Claude doesn't equivocate at all and responds with **ONLY** the name of one specific player, with **no other words or punctuation**. "
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": null,
230 | "metadata": {},
231 | "outputs": [],
232 | "source": [
233 | "# Prompt - this is the only field you should change\n",
234 | "PROMPT = \"[Replace this text]\"\n",
235 | "\n",
236 | "# Get Claude's response\n",
237 | "response = get_completion(PROMPT)\n",
238 | "\n",
239 | "# Function to grade exercise correctness\n",
240 | "def grade_exercise(text):\n",
241 | " return text == \"Michael Jordan\"\n",
242 | "\n",
243 | "# Print Claude's response and the corresponding grade\n",
244 | "print(response)\n",
245 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
246 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
247 | ]
248 | },
249 | {
250 | "cell_type": "markdown",
251 | "metadata": {},
252 | "source": [
253 | "❓ If you want a hint, run the cell below!"
254 | ]
255 | },
256 | {
257 | "cell_type": "code",
258 | "execution_count": null,
259 | "metadata": {},
260 | "outputs": [],
261 | "source": [
262 | "from hints import exercise_2_2_hint; print(exercise_2_2_hint)"
263 | ]
264 | },
265 | {
266 | "cell_type": "markdown",
267 | "metadata": {},
268 | "source": [
269 | "### Exercise 2.3 - Write a Story\n",
270 | "\n",
271 | "Modify the `PROMPT` so that Claude responds with as long a response as you can muster. If your answer is **over 800 words**, Claude's response will be graded as correct."
272 | ]
273 | },
274 | {
275 | "cell_type": "code",
276 | "execution_count": null,
277 | "metadata": {},
278 | "outputs": [],
279 | "source": [
280 | "# Prompt - this is the only field you should change\n",
281 | "PROMPT = \"[Replace this text]\"\n",
282 | "\n",
283 | "# Get Claude's response\n",
284 | "response = get_completion(PROMPT)\n",
285 | "\n",
286 | "# Function to grade exercise correctness\n",
287 | "def grade_exercise(text):\n",
288 | " trimmed = text.strip()\n",
289 | " words = len(trimmed.split())\n",
290 | " return words >= 800\n",
291 | "\n",
292 | "# Print Claude's response and the corresponding grade\n",
293 | "print(response)\n",
294 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
295 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
296 | ]
297 | },
298 | {
299 | "cell_type": "markdown",
300 | "metadata": {},
301 | "source": [
302 | "❓ If you want a hint, run the cell below!"
303 | ]
304 | },
305 | {
306 | "cell_type": "code",
307 | "execution_count": null,
308 | "metadata": {},
309 | "outputs": [],
310 | "source": [
311 | "from hints import exercise_2_3_hint; print(exercise_2_3_hint)"
312 | ]
313 | },
314 | {
315 | "cell_type": "markdown",
316 | "metadata": {},
317 | "source": [
318 | "### Congrats!\n",
319 | "\n",
320 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
321 | ]
322 | },
323 | {
324 | "cell_type": "markdown",
325 | "metadata": {},
326 | "source": [
327 | "---\n",
328 | "\n",
329 | "## Example Playground\n",
330 | "\n",
331 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how it may affect Claude's responses."
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "execution_count": null,
337 | "metadata": {},
338 | "outputs": [],
339 | "source": [
340 | "# Prompt\n",
341 | "PROMPT = \"Write a haiku about robots.\"\n",
342 | "\n",
343 | "# Print Claude's response\n",
344 | "print(get_completion(PROMPT))"
345 | ]
346 | },
347 | {
348 | "cell_type": "code",
349 | "execution_count": null,
350 | "metadata": {},
351 | "outputs": [],
352 | "source": [
353 | "# Prompt\n",
354 | "PROMPT = \"Write a haiku about robots. Skip the preamble; go straight into the poem.\"\n",
355 | "\n",
356 | "# Print Claude's response\n",
357 | "print(get_completion(PROMPT))"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": null,
363 | "metadata": {},
364 | "outputs": [],
365 | "source": [
366 | "# Prompt\n",
367 | "PROMPT = \"Who is the best basketball player of all time?\"\n",
368 | "\n",
369 | "# Print Claude's response\n",
370 | "print(get_completion(PROMPT))"
371 | ]
372 | },
373 | {
374 | "cell_type": "code",
375 | "execution_count": null,
376 | "metadata": {},
377 | "outputs": [],
378 | "source": [
379 | "# Prompt\n",
380 | "PROMPT = \"Who is the best basketball player of all time? Yes, there are differing opinions, but if you absolutely had to pick one player, who would it be?\"\n",
381 | "\n",
382 | "# Print Claude's response\n",
383 | "print(get_completion(PROMPT))"
384 | ]
385 | }
386 | ],
387 | "metadata": {
388 | "language_info": {
389 | "name": "python"
390 | }
391 | },
392 | "nbformat": 4,
393 | "nbformat_minor": 2
394 | }
395 |
--------------------------------------------------------------------------------
/AmazonBedrock/boto3/02_Being_Clear_and_Direct.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 2: Being Clear and Direct\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "# Import python's built-in regular expression library\n",
25 | "import re\n",
26 | "import boto3\n",
27 | "import json\n",
28 | "\n",
29 | "# Import the hints module from the utils package\n",
30 | "import os\n",
31 | "import sys\n",
32 | "module_path = \"..\"\n",
33 | "sys.path.append(os.path.abspath(module_path))\n",
34 | "from utils import hints\n",
35 | "\n",
36 | "# Retrieve the MODEL_NAME variable from the IPython store\n",
37 | "%store -r MODEL_NAME\n",
38 | "%store -r AWS_REGION\n",
39 | "\n",
40 | "client = boto3.client('bedrock-runtime',region_name=AWS_REGION)\n",
41 | "\n",
42 | "def get_completion(prompt,system=''):\n",
43 | " body = json.dumps(\n",
44 | " {\n",
45 | " \"anthropic_version\": '',\n",
46 | " \"max_tokens\": 2000,\n",
47 | " \"messages\": [{\"role\": \"user\", \"content\": prompt}],\n",
48 | " \"temperature\": 0.0,\n",
49 | " \"top_p\": 1,\n",
50 | " \"system\": system\n",
51 | " }\n",
52 | " )\n",
53 | " response = client.invoke_model(body=body, modelId=MODEL_NAME)\n",
54 | " response_body = json.loads(response.get('body').read())\n",
55 | "\n",
56 | " return response_body.get('content')[0].get('text')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "---\n",
64 | "\n",
65 | "## Lesson\n",
66 | "\n",
67 | "**Claude responds best to clear and direct instructions.**\n",
68 | "\n",
69 | "Think of Claude like any other human that is new to the job. **Claude has no context** on what to do aside from what you literally tell it. Just as when you instruct a human for the first time on a task, the more you explain exactly what you want in a straightforward manner to Claude, the better and more accurate Claude's response will be.\"\t\t\t\t\n",
70 | "\t\t\t\t\n",
71 | "When in doubt, follow the **Golden Rule of Clear Prompting**:\n",
72 | "- Show your prompt to a colleague or friend and have them follow the instructions themselves to see if they can produce the result you want. If they're confused, Claude's confused.\t\t\t\t"
73 | ]
74 | },
75 | {
76 | "cell_type": "markdown",
77 | "metadata": {},
78 | "source": [
79 | "### Examples\n",
80 | "\n",
81 | "Let's take a task like writing poetry. (Ignore any syllable mismatch - LLMs aren't great at counting syllables yet.)"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": null,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": [
90 | "# Prompt\n",
91 | "PROMPT = \"Write a haiku about robots.\"\n",
92 | "\n",
93 | "# Print Claude's response\n",
94 | "print(get_completion(PROMPT))"
95 | ]
96 | },
97 | {
98 | "cell_type": "markdown",
99 | "metadata": {},
100 | "source": [
101 | "This haiku is nice enough, but users may want Claude to go directly into the poem without the \"Here is a haiku\" preamble.\n",
102 | "\n",
103 | "How do we achieve that? We **ask for it**!"
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": null,
109 | "metadata": {},
110 | "outputs": [],
111 | "source": [
112 | "# Prompt\n",
113 | "PROMPT = \"Write a haiku about robots. Skip the preamble; go straight into the poem.\"\n",
114 | "\n",
115 | "# Print Claude's response\n",
116 | "print(get_completion(PROMPT))"
117 | ]
118 | },
119 | {
120 | "cell_type": "markdown",
121 | "metadata": {},
122 | "source": [
123 | "Here's another example. Let's ask Claude who's the best basketball player of all time. You can see below that while Claude lists a few names, **it doesn't respond with a definitive \"best\"**."
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "metadata": {},
130 | "outputs": [],
131 | "source": [
132 | "# Prompt\n",
133 | "PROMPT = \"Who is the best basketball player of all time?\"\n",
134 | "\n",
135 | "# Print Claude's response\n",
136 | "print(get_completion(PROMPT))"
137 | ]
138 | },
139 | {
140 | "cell_type": "markdown",
141 | "metadata": {},
142 | "source": [
143 | "Can we get Claude to make up its mind and decide on a best player? Yes! Just ask!"
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": null,
149 | "metadata": {},
150 | "outputs": [],
151 | "source": [
152 | "# Prompt\n",
153 | "PROMPT = \"Who is the best basketball player of all time? Yes, there are differing opinions, but if you absolutely had to pick one player, who would it be?\"\n",
154 | "\n",
155 | "# Print Claude's response\n",
156 | "print(get_completion(PROMPT))"
157 | ]
158 | },
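159 |   {
160 |    "cell_type": "markdown",
161 |    "metadata": {},
162 |    "source": [
163 |     "Being direct extends to output format too. As a minimal sketch (the wording is just one option), you can spell out exactly how the answer should be structured:"
164 |    ]
165 |   },
166 |   {
167 |    "cell_type": "code",
168 |    "execution_count": null,
169 |    "metadata": {},
170 |    "outputs": [],
171 |    "source": [
172 |     "# Prompt - being explicit about the desired output format (illustrative sketch)\n",
173 |     "PROMPT = \"Who is the best basketball player of all time? Pick exactly one player and answer in a single sentence that begins with that player's name.\"\n",
174 |     "\n",
175 |     "# Print Claude's response\n",
176 |     "print(get_completion(PROMPT))"
177 |    ]
178 |   },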
159 | {
160 | "cell_type": "markdown",
161 | "metadata": {},
162 | "source": [
163 | "If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
164 | ]
165 | },
166 | {
167 | "cell_type": "markdown",
168 | "metadata": {},
169 | "source": [
170 | "---\n",
171 | "\n",
172 | "## Exercises\n",
173 | "- [Exercise 2.1 - Spanish](#exercise-21---spanish)\n",
174 | "- [Exercise 2.2 - One Player Only](#exercise-22---one-player-only)\n",
175 | "- [Exercise 2.3 - Write a Story](#exercise-23---write-a-story)"
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "metadata": {},
181 | "source": [
182 | "### Exercise 2.1 - Spanish\n",
183 | "Modify the `SYSTEM_PROMPT` to make Claude output its answer in Spanish."
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "metadata": {},
190 | "outputs": [],
191 | "source": [
192 | "# System prompt - this is the only field you should chnage\n",
193 | "SYSTEM_PROMPT = \"[Replace this text]\"\n",
194 | "\n",
195 | "# Prompt\n",
196 | "PROMPT = \"Hello Claude, how are you?\"\n",
197 | "\n",
198 | "# Get Claude's response\n",
199 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
200 | "\n",
201 | "# Function to grade exercise correctness\n",
202 | "def grade_exercise(text):\n",
203 | " return \"hola\" in text.lower()\n",
204 | "\n",
205 | "# Print Claude's response and the corresponding grade\n",
206 | "print(response)\n",
207 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
208 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
209 | ]
210 | },
211 | {
212 | "cell_type": "markdown",
213 | "metadata": {},
214 | "source": [
215 | "❓ If you want a hint, run the cell below!"
216 | ]
217 | },
218 | {
219 | "cell_type": "code",
220 | "execution_count": null,
221 | "metadata": {},
222 | "outputs": [],
223 | "source": [
224 | "print(hints.exercise_2_1_hint)"
225 | ]
226 | },
227 | {
228 | "cell_type": "markdown",
229 | "metadata": {},
230 | "source": [
231 | "### Exercise 2.2 - One Player Only\n",
232 | "\n",
233 | "Modify the `PROMPT` so that Claude doesn't equivocate at all and responds with **ONLY** the name of one specific player, with **no other words or punctuation**. "
234 | ]
235 | },
236 | {
237 | "cell_type": "code",
238 | "execution_count": null,
239 | "metadata": {},
240 | "outputs": [],
241 | "source": [
242 | "# Prompt - this is the only field you should change\n",
243 | "PROMPT = \"[Replace this text]\"\n",
244 | "\n",
245 | "# Get Claude's response\n",
246 | "response = get_completion(PROMPT)\n",
247 | "\n",
248 | "# Function to grade exercise correctness\n",
249 | "def grade_exercise(text):\n",
250 | " return text == \"Michael Jordan\"\n",
251 | "\n",
252 | "# Print Claude's response and the corresponding grade\n",
253 | "print(response)\n",
254 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
255 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "metadata": {},
261 | "source": [
262 | "❓ If you want a hint, run the cell below!"
263 | ]
264 | },
265 | {
266 | "cell_type": "code",
267 | "execution_count": null,
268 | "metadata": {},
269 | "outputs": [],
270 | "source": [
271 | "print(hints.exercise_2_2_hint)"
272 | ]
273 | },
274 | {
275 | "cell_type": "markdown",
276 | "metadata": {},
277 | "source": [
278 | "### Exercise 2.3 - Write a Story\n",
279 | "\n",
280 | "Modify the `PROMPT` so that Claude responds with as long a response as you can muster. If your answer is **over 800 words**, Claude's response will be graded as correct."
281 | ]
282 | },
283 | {
284 | "cell_type": "code",
285 | "execution_count": null,
286 | "metadata": {},
287 | "outputs": [],
288 | "source": [
289 | "# Prompt - this is the only field you should change\n",
290 | "PROMPT = \"[Replace this text]\"\n",
291 | "\n",
292 | "# Get Claude's response\n",
293 | "response = get_completion(PROMPT)\n",
294 | "\n",
295 | "# Function to grade exercise correctness\n",
296 | "def grade_exercise(text):\n",
297 | " trimmed = text.strip()\n",
298 | " words = len(trimmed.split())\n",
299 | " return words >= 800\n",
300 | "\n",
301 | "# Print Claude's response and the corresponding grade\n",
302 | "print(response)\n",
303 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
304 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
305 | ]
306 | },
307 | {
308 | "cell_type": "markdown",
309 | "metadata": {},
310 | "source": [
311 | "❓ If you want a hint, run the cell below!"
312 | ]
313 | },
314 | {
315 | "cell_type": "code",
316 | "execution_count": null,
317 | "metadata": {},
318 | "outputs": [],
319 | "source": [
320 | "print(hints.exercise_2_3_hint)"
321 | ]
322 | },
323 | {
324 | "cell_type": "markdown",
325 | "metadata": {},
326 | "source": [
327 | "### Congrats!\n",
328 | "\n",
329 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
330 | ]
331 | },
332 | {
333 | "cell_type": "markdown",
334 | "metadata": {},
335 | "source": [
336 | "---\n",
337 | "\n",
338 | "## Example Playground\n",
339 | "\n",
340 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how it may affect Claude's responses."
341 | ]
342 | },
343 | {
344 | "cell_type": "code",
345 | "execution_count": null,
346 | "metadata": {},
347 | "outputs": [],
348 | "source": [
349 | "# Prompt\n",
350 | "PROMPT = \"Write a haiku about robots.\"\n",
351 | "\n",
352 | "# Print Claude's response\n",
353 | "print(get_completion(PROMPT))"
354 | ]
355 | },
356 | {
357 | "cell_type": "code",
358 | "execution_count": null,
359 | "metadata": {},
360 | "outputs": [],
361 | "source": [
362 | "# Prompt\n",
363 | "PROMPT = \"Write a haiku about robots. Skip the preamble; go straight into the poem.\"\n",
364 | "\n",
365 | "# Print Claude's response\n",
366 | "print(get_completion(PROMPT))"
367 | ]
368 | },
369 | {
370 | "cell_type": "code",
371 | "execution_count": null,
372 | "metadata": {},
373 | "outputs": [],
374 | "source": [
375 | "# Prompt\n",
376 | "PROMPT = \"Who is the best basketball player of all time?\"\n",
377 | "\n",
378 | "# Print Claude's response\n",
379 | "print(get_completion(PROMPT))"
380 | ]
381 | },
382 | {
383 | "cell_type": "code",
384 | "execution_count": null,
385 | "metadata": {},
386 | "outputs": [],
387 | "source": [
388 | "# Prompt\n",
389 | "PROMPT = \"Who is the best basketball player of all time? Yes, there are differing opinions, but if you absolutely had to pick one player, who would it be?\"\n",
390 | "\n",
391 | "# Print Claude's response\n",
392 | "print(get_completion(PROMPT))"
393 | ]
394 | }
395 | ],
396 | "metadata": {
397 | "language_info": {
398 | "name": "python"
399 | }
400 | },
401 | "nbformat": 4,
402 | "nbformat_minor": 2
403 | }
404 |
--------------------------------------------------------------------------------
/Anthropic 1P/hints.py:
--------------------------------------------------------------------------------
1 | exercise_1_1_hint = """The grading function in this exercise is looking for an answer that contains the exact Arabic numerals "1", "2", and "3".
2 | You can often get Claude to do what you want simply by asking."""
3 |
4 | exercise_1_2_hint = """The grading function in this exercise is looking for answers that contain "soo" or "giggles".
5 | There are many ways to solve this, just by asking!"""
6 |
7 | exercise_2_1_hint = """The grading function in this exercise is looking for any answer that includes the word "hola".
8 | Ask Claude to reply in Spanish like you would when speaking with a human. It's that simple!"""
9 |
10 | exercise_2_2_hint = """The grading function in this exercise is looking for EXACTLY "Michael Jordan".
11 | How would you ask another human to do this? Reply with no other words? Reply with only the name and nothing else? There are several ways to approach this answer."""
12 |
13 | exercise_2_3_hint = """The grading function in this cell is looking for a response that is equal to or greater than 800 words.
14 | Because LLMs aren't great at counting words yet, you may have to overshoot your target."""
15 |
16 | exercise_3_1_hint = """The grading function in this exercise is looking for an answer that includes the words "incorrect" or "not correct".
17 | Give Claude a role that might make Claude better at solving math problems!"""
18 |
19 | exercise_4_1_hint = """The grading function in this exercise is looking for a solution that includes the words "haiku" and "pig".
20 | Don't forget to include the exact phrase "{TOPIC}" wherever you want the topic to be substituted in. Changing the "TOPIC" variable value should make Claude write a haiku about a different topic."""
21 |
22 | exercise_4_2_hint = """The grading function in this exercise is looking for a response that includes the word "brown".
23 | If you surround "{QUESTION}" in XML tags, how does that change Claude's response?"""
24 |
25 | exercise_4_3_hint = """The grading function in this exercise is looking for a response that includes the word "brown".
26 | Try removing one word or section of characters at a time, starting with the parts that make the least sense. Doing this one word at a time will also help you see just how much Claude can or can't parse and understand."""
27 |
28 | exercise_5_1_hint = """The grading function for this exercise is looking for a response that includes the word "Warrior".
29 | Write more words in Claude's voice to steer Claude to act the way you want it to. For instance, instead of "Stephen Curry is the best because," you could write "Stephen Curry is the best and here are three reasons why. 1:"""
30 |
31 | exercise_5_2_hint = """The grading function looks for a response of over 5 lines in length that includes the words "cat" and "<haiku>".
32 | Start simple. Currently, the prompt asks Claude for one haiku. You can change that and ask for two (or even more). Then if you run into formatting issues, change your prompt to fix that after you've already gotten Claude to write more than one haiku."""
33 |
34 | exercise_5_3_hint = """The grading function in this exercise is looking for a response that contains the words "tail", "cat", and "<haiku>".
35 | It's helpful to break this exercise down to several steps.
36 | 1. Modify the initial prompt template so that Claude writes two poems.
37 | 2. Give Claude indicators as to what the poems will be about, but instead of writing in the subjects directly (e.g., dog, cat, etc.), replace those subjects with the keywords "{ANIMAL1}" and "{ANIMAL2}".
38 | 3. Run the prompt and make sure that the full prompt with variable substitutions has all the words correctly substituted. If not, check to make sure your {bracket} tags are spelled correctly and formatted correctly with single moustache brackets."""
39 |
40 | exercise_6_1_hint = """The grading function in this exercise is looking for the correct categorization letter + the closing parenthesis and the first letter of the name of the category, such as "C) B" or "B) B" etc.
41 | Let's take this exercise step by step:
42 | 1. How will Claude know what categories you want to use? Tell it! Include the four categories you want directly in the prompt. Be sure to include the parenthetical letters as well for easy classification. Feel free to use XML tags to organize your prompt and make clear to Claude where the categories begin and end.
43 | 2. Try to cut down on superfluous text so that Claude immediately answers with the classification and ONLY the classification. There are several ways to do this, from speaking for Claude (providing anything from the beginning of the sentence to a single open parenthesis so that Claude knows you want the parenthetical letter as the first part of the answer) to telling Claude that you want the classification and only the classification, skipping the preamble.
44 | Refer to Chapters 2 and 5 if you want a refresher on these techniques.
45 | 3. Claude may still be incorrectly categorizing or not including the names of the categories when it answers. Fix this by telling Claude to include the full category name in its answer.
46 | 4. Be sure that you still have {email} somewhere in your prompt template so that we can properly substitute in emails for Claude to evaluate."""
47 |
48 | exercise_6_1_solution = """
49 | USER TURN
50 | Please classify this email into the following categories: {email}
51 |
52 | Do not include any extra words except the category.
53 |
54 | <categories>
55 | (A) Pre-sale question
56 | (B) Broken or defective item
57 | (C) Billing question
58 | (D) Other (please explain)
59 | </categories>
60 |
61 | ASSISTANT TURN
62 | (
63 | """
64 |
65 | exercise_6_2_hint = """The grading function in this exercise is looking for only the correct letter wrapped in <answer> tags, such as "<answer>B</answer>". The correct categorization letters are the same as in the above exercise.
66 | Sometimes the simplest way to go about this is to give Claude an example of how you want its output to look. Just don't forget to wrap your example in <example> tags! And don't forget that if you prefill Claude's response with anything, Claude won't actually output that as part of its response."""
67 |
68 | exercise_7_1_hint = """You're going to have to write some example emails and classify them for Claude (with the exact formatting you want). There are multiple ways to do this. Here are some guidelines below.
69 | 1. Try to have at least two example emails. Claude doesn't need an example for all categories, and the examples don't have to be long. It's more helpful to have examples for whatever you think the trickier categories are (which you were asked to think about at the bottom of Chapter 6 Exercise 1). XML tags will help you separate out your examples from the rest of your prompt, although doing so isn't strictly necessary.
70 | 2. Make sure your example answer formatting is exactly the format you want Claude to use, so Claude can emulate the format as well. This format should make it so that Claude's answer ends in the letter of the category. Wherever you put the {email} placeholder, make sure that it's formatted exactly like your example emails.
71 | 3. Make sure you still have the categories listed within the prompt itself, otherwise Claude won't know what categories to reference, as well as {email} as a placeholder for substitution."""
72 |
73 | exercise_7_1_solution = """
74 | USER TURN
75 | Please classify emails into the following categories, and do not include explanations:
76 | <categories>
77 | (A) Pre-sale question
78 | (B) Broken or defective item
79 | (C) Billing question
80 | (D) Other (please explain)
81 | </categories>
82 |
83 | Here are a few examples of correct answer formatting:
84 | <examples>
85 | Q: How much does it cost to buy a Mixmaster4000?
86 | A: The correct category is: A
87 |
88 | Q: My Mixmaster won't turn on.
89 | A: The correct category is: B
90 |
91 | Q: Please remove me from your mailing list.
92 | A: The correct category is: D
93 | </examples>
94 |
95 | Here is the email for you to categorize: {email}
96 |
97 | ASSISTANT TURN
98 | The correct category is:
99 | """
100 | exercise_8_1_hint = """The grading function in this exercise is looking for a response that contains the phrase "I do not", "I don't", or "Unfortunately".
101 | What should Claude do if it doesn't know the answer?"""
102 |
103 | exercise_8_2_hint = """The grading function in this exercise is looking for a response that contains the phrase "49-fold".
104 | Make Claude show its work and thought process first by extracting relevant quotes and seeing whether or not the quotes provide sufficient evidence. Refer back to the Chapter 8 Lesson if you want a refresher."""
105 |
106 | exercise_9_1_solution = """
107 | You are a master tax accountant. Your task is to answer user questions using any provided reference documentation.
108 |
109 | Here is the material you should use to answer the user's question:
110 | <docs>
111 | {TAX_CODE}
112 | </docs>
113 |
114 | Here is an example of how to respond:
115 | <example>
116 | <question>
117 | What defines a "qualified" employee?
118 | </question>
119 | <quotes>
120 | For purposes of this subsection—
121 | (A)In general
122 | The term "qualified employee" means any individual who—
123 | (i)is not an excluded employee, and
124 | (ii)agrees in the election made under this subsection to meet such requirements as are determined by the Secretary to be necessary to ensure that the withholding requirements of the corporation under chapter 24 with respect to the qualified stock are met.
125 | </quotes>
126 | <answer>According to the provided documentation, a "qualified employee" is defined as an individual who:
127 |
128 | 1. Is not an "excluded employee" as defined in the documentation.
129 | 2. Agrees to meet the requirements determined by the Secretary to ensure the corporation's withholding requirements under Chapter 24 are met with respect to the qualified stock.</answer>
130 | </example>
131 |
132 | First, gather quotes in <quotes></quotes> tags that are relevant to answering the user's question. If there are no quotes, write "no relevant quotes found".
133 |
134 | Then insert two paragraph breaks before answering the user question within <answer></answer> tags. Only answer the user's question if you are confident that the quotes in <quotes> tags support your answer. If not, tell the user that you unfortunately do not have enough information to answer the user's question.
135 |
136 | Here is the user question: {QUESTION}
137 | """
138 |
139 | exercise_9_2_solution = """
140 | You are Codebot, a helpful AI assistant who finds issues with code and suggests possible improvements.
141 |
142 | Act as a Socratic tutor who helps the user learn.
143 |
144 | You will be given some code from a user. Please do the following:
145 | 1. Identify any issues in the code. Put each issue inside separate <issue> tags.
146 | 2. Invite the user to write a revised version of the code to fix the issue.
147 |
148 | Here's an example:
149 |
150 | <example>
151 | <code>
152 | def calculate_circle_area(radius):
153 |     return (3.14 * radius) ** 2
154 | </code>
155 |
156 | <issue>
157 | 3.14 is being squared when it's actually only the radius that should be squared.
158 | </issue>
159 | <response>
160 | That's almost right, but there's an issue related to order of operations. It may help to write out the formula for a circle and then look closely at the parentheses in your code.
161 | </response>
162 | </example>
163 |
164 | Here is the code you are to analyze:
165 |
166 | <code>
167 | {CODE}
168 | </code>
169 |
170 | Find the relevant issues and write the Socratic tutor-style response. Do not give the user too much help! Instead, just give them guidance so they can find the correct solution themselves.
171 |
172 | Put each issue in <issue> tags and put your final response in <response> tags.
173 | """
174 |
175 | exercise_10_2_1_solution = """system_prompt = system_prompt_tools_general_explanation + \"""Here are the functions available in JSONSchema format:
176 |
177 | <tools>
178 |
179 | <tool_description>
180 | <tool_name>get_user</tool_name>
181 | <description>
182 | Retrieves a user from the database by their user ID.
183 | </description>
184 | <parameters>
185 | <parameter>
186 | <name>user_id</name>
187 | <type>int</type>
188 | <description>The ID of the user to retrieve.</description>
189 | </parameter>
190 | </parameters>
191 | </tool_description>
192 |
193 | <tool_description>
194 | <tool_name>get_product</tool_name>
195 | <description>
196 | Retrieves a product from the database by its product ID.
197 | </description>
198 | <parameters>
199 | <parameter>
200 | <name>product_id</name>
201 | <type>int</type>
202 | <description>The ID of the product to retrieve.</description>
203 | </parameter>
204 | </parameters>
205 | </tool_description>
206 |
207 | <tool_description>
208 | <tool_name>add_user</tool_name>
209 | <description>
210 | Adds a new user to the database.
211 | </description>
212 | <parameters>
213 | <parameter>
214 | <name>name</name>
215 | <type>str</type>
216 | <description>The name of the user.</description>
217 | </parameter>
218 | <parameter>
219 | <name>email</name>
220 | <type>str</type>
221 | <description>The email address of the user.</description>
222 | </parameter>
223 | </parameters>
224 | </tool_description>
225 |
226 | <tool_description>
227 | <tool_name>add_product</tool_name>
228 | <description>
229 | Adds a new product to the database.
230 | </description>
231 | <parameters>
232 | <parameter>
233 | <name>name</name>
234 | <type>str</type>
235 | <description>The name of the product.</description>
236 | </parameter>
237 | <parameter>
238 | <name>price</name>
239 | <type>float</type>
240 | <description>The price of the product.</description>
241 | </parameter>
242 | </parameters>
243 | </tool_description>
244 |
245 | </tools>
246 | """
--------------------------------------------------------------------------------
/AmazonBedrock/utils/hints.py:
--------------------------------------------------------------------------------
1 | exercise_1_1_hint = """The grading function in this exercise is looking for an answer that contains the exact Arabic numerals "1", "2", and "3".
2 | You can often get Claude to do what you want simply by asking."""
3 |
4 | exercise_1_2_hint = """The grading function in this exercise is looking for answers that contain "soo" or "giggles".
5 | There are many ways to solve this, just by asking!"""
6 |
7 | exercise_2_1_hint = """The grading function in this exercise is looking for any answer that includes the word "hola".
8 | Ask Claude to reply in Spanish like you would when speaking with a human. It's that simple!"""
9 |
10 | exercise_2_2_hint = """The grading function in this exercise is looking for EXACTLY "Michael Jordan".
11 | How would you ask another human to do this? Reply with no other words? Reply with only the name and nothing else? There are several ways to approach this answer."""
12 |
13 | exercise_2_3_hint = """The grading function in this cell is looking for a response that is equal to or greater than 800 words.
14 | Because LLMs aren't great at counting words yet, you may have to overshoot your target."""
15 |
16 | exercise_3_1_hint = """The grading function in this exercise is looking for an answer that includes the words "incorrect" or "not correct".
17 | Give Claude a role that might make Claude better at solving math problems!"""
18 |
19 | exercise_4_1_hint = """The grading function in this exercise is looking for a solution that includes the words "haiku" and "pig".
20 | Don't forget to include the exact phrase "{TOPIC}" wherever you want the topic to be substituted in. Changing the "TOPIC" variable value should make Claude write a haiku about a different topic."""
21 |
22 | exercise_4_2_hint = """The grading function in this exercise is looking for a response that includes the word "brown".
23 | If you surround "{QUESTION}" in XML tags, how does that change Claude's response?"""
24 |
25 | exercise_4_3_hint = """The grading function in this exercise is looking for a response that includes the word "brown".
26 | Try removing one word or section of characters at a time, starting with the parts that make the least sense. Doing this one word at a time will also help you see just how much Claude can or can't parse and understand."""
27 |
28 | exercise_5_1_hint = """The grading function for this exercise is looking for a response that includes the word "Warrior".
29 | Write more words in Claude's voice to steer Claude to act the way you want it to. For instance, instead of "Stephen Curry is the best because," you could write "Stephen Curry is the best and here are three reasons why. 1:"""
30 |
31 | exercise_5_2_hint = """The grading function looks for a response of over 5 lines in length that includes the words "cat" and "<haiku>".
32 | Start simple. Currently, the prompt asks Claude for one haiku. You can change that and ask for two (or even more). Then if you run into formatting issues, change your prompt to fix that after you've already gotten Claude to write more than one haiku."""
33 |
34 | exercise_5_3_hint = """The grading function in this exercise is looking for a response that contains the words "tail", "cat", and "<haiku>".
35 | It's helpful to break this exercise down to several steps.
36 | 1. Modify the initial prompt template so that Claude writes two poems.
37 | 2. Give Claude indicators as to what the poems will be about, but instead of writing in the subjects directly (e.g., dog, cat, etc.), replace those subjects with the keywords "{ANIMAL1}" and "{ANIMAL2}".
38 | 3. Run the prompt and make sure that the full prompt with variable substitutions has all the words correctly substituted. If not, check to make sure your {bracket} tags are spelled correctly and formatted correctly with single moustache brackets."""
39 |
40 | exercise_6_1_hint = """The grading function in this exercise is looking for the correct categorization letter plus the closing parenthesis, followed by the first letter of the category name, such as "C) B" or "B) B" etc.
41 | Let's take this exercise step by step:
42 | 1. How will Claude know what categories you want to use? Tell it! Include the four categories you want directly in the prompt. Be sure to include the parenthetical letters as well for easy classification. Feel free to use XML tags to organize your prompt and make clear to Claude where the categories begin and end.
43 | 2. Try to cut down on superfluous text so that Claude immediately answers with the classification and ONLY the classification. There are several ways to do this, from speaking for Claude (providing anything from the beginning of the sentence to a single open parenthesis so that Claude knows you want the parenthetical letter as the first part of the answer) to telling Claude that you want the classification and only the classification, skipping the preamble.
44 | Refer to Chapters 2 and 5 if you want a refresher on these techniques.
45 | 3. Claude may still be incorrectly categorizing or not including the names of the categories when it answers. Fix this by telling Claude to include the full category name in its answer.
46 | 4. Be sure that you still have {email} somewhere in your prompt template so that we can properly substitute in emails for Claude to evaluate."""
47 |
48 | exercise_6_1_solution = """
49 | USER TURN
50 | Please classify this email into the following categories: {email}
51 |
52 | Do not include any extra words except the category.
53 |
54 | <categories>
55 | (A) Pre-sale question
56 | (B) Broken or defective item
57 | (C) Billing question
58 | (D) Other (please explain)
59 | </categories>
60 |
61 | ASSISTANT TURN
62 | (
63 | """
64 |
65 | exercise_6_2_hint = """The grading function in this exercise is looking for only the correct letter wrapped in <answer> tags, such as "<answer>B</answer>". The correct categorization letters are the same as in the above exercise.
66 | Sometimes the simplest way to go about this is to give Claude an example of how you want its output to look. Just don't forget to wrap your example in <example> tags! And don't forget that if you prefill Claude's response with anything, Claude won't actually output that as part of its response."""
67 |
68 | exercise_7_1_hint = """You're going to have to write some example emails and classify them for Claude (with the exact formatting you want). There are multiple ways to do this. Here are some guidelines below.
69 | 1. Try to have at least two example emails. Claude doesn't need an example for all categories, and the examples don't have to be long. It's more helpful to have examples for whatever you think the trickier categories are (which you were asked to think about at the bottom of Chapter 6 Exercise 1). XML tags will help you separate out your examples from the rest of your prompt, although doing so isn't strictly necessary.
70 | 2. Make sure your example answer formatting is exactly the format you want Claude to use, so Claude can emulate the format as well. This format should make it so that Claude's answer ends in the letter of the category. Wherever you put the {email} placeholder, make sure that it's formatted exactly like your example emails.
71 | 3. Make sure you still have the categories listed within the prompt itself, otherwise Claude won't know what categories to reference, as well as {email} as a placeholder for substitution."""
72 |
73 | exercise_7_1_solution = """
74 | USER TURN
75 | Please classify emails into the following categories, and do not include explanations:
76 | <categories>
77 | (A) Pre-sale question
78 | (B) Broken or defective item
79 | (C) Billing question
80 | (D) Other (please explain)
81 | </categories>
82 |
83 | Here are a few examples of correct answer formatting:
84 | <examples>
85 | Q: How much does it cost to buy a Mixmaster4000?
86 | A: The correct category is: A
87 |
88 | Q: My Mixmaster won't turn on.
89 | A: The correct category is: B
90 |
91 | Q: Please remove me from your mailing list.
92 | A: The correct category is: D
93 | </examples>
94 |
95 | Here is the email for you to categorize: {email}
96 |
97 | ASSISTANT TURN
98 | The correct category is:
99 | """
100 | exercise_8_1_hint = """The grading function in this exercise is looking for a response that contains the phrase "I do not", "I don't", or "Unfortunately".
101 | What should Claude do if it doesn't know the answer?"""
102 |
103 | exercise_8_2_hint = """The grading function in this exercise is looking for a response that contains the phrase "49-fold".
104 | Make Claude show its work and thought process first by extracting relevant quotes and seeing whether or not the quotes provide sufficient evidence. Refer back to the Chapter 8 Lesson if you want a refresher."""
105 |
106 | exercise_9_1_solution = """
107 | You are a master tax accountant. Your task is to answer user questions using any provided reference documentation.
108 |
109 | Here is the material you should use to answer the user's question:
110 | <docs>
111 | {TAX_CODE}
112 | </docs>
113 |
114 | Here is an example of how to respond:
115 | <example>
116 | <question>
117 | What defines a "qualified" employee?
118 | </question>
119 | <quotes>
120 | For purposes of this subsection—
121 | (A)In general
122 | The term "qualified employee" means any individual who—
123 | (i)is not an excluded employee, and
124 | (ii)agrees in the election made under this subsection to meet such requirements as are determined by the Secretary to be necessary to ensure that the withholding requirements of the corporation under chapter 24 with respect to the qualified stock are met.
125 | </quotes>
126 | <answer>According to the provided documentation, a "qualified employee" is defined as an individual who:
127 |
128 | 1. Is not an "excluded employee" as defined in the documentation.
129 | 2. Agrees to meet the requirements determined by the Secretary to ensure the corporation's withholding requirements under Chapter 24 are met with respect to the qualified stock.</answer>
130 | </example>
131 |
132 | First, gather quotes in <quotes></quotes> tags that are relevant to answering the user's question. If there are no quotes, write "no relevant quotes found".
133 |
134 | Then insert two paragraph breaks before answering the user question within <answer></answer> tags. Only answer the user's question if you are confident that the quotes in <quotes> tags support your answer. If not, tell the user that you unfortunately do not have enough information to answer the user's question.
135 |
136 | Here is the user question: {QUESTION}
137 | """
138 |
139 | exercise_9_2_solution = """
140 | You are Codebot, a helpful AI assistant who finds issues with code and suggests possible improvements.
141 |
142 | Act as a Socratic tutor who helps the user learn.
143 |
144 | You will be given some code from a user. Please do the following:
145 | 1. Identify any issues in the code. Put each issue inside separate <issue> tags.
146 | 2. Invite the user to write a revised version of the code to fix the issue.
147 |
148 | Here's an example:
149 |
150 | <example>
151 | <code>
152 | def calculate_circle_area(radius):
153 |     return (3.14 * radius) ** 2
154 | </code>
155 |
156 | <issue>
157 | 3.14 is being squared when it's actually only the radius that should be squared.
158 | </issue>
159 | <response>
160 | That's almost right, but there's an issue related to order of operations. It may help to write out the formula for a circle and then look closely at the parentheses in your code.
161 | </response>
162 | </example>
163 |
164 | Here is the code you are to analyze:
165 |
166 | <code>
167 | {CODE}
168 | </code>
169 |
170 | Find the relevant issues and write the Socratic tutor-style response. Do not give the user too much help! Instead, just give them guidance so they can find the correct solution themselves.
171 |
172 | Put each issue in <issue> tags and put your final response in <response> tags.
173 | """
174 |
175 | exercise_10_2_1_solution = """system_prompt = system_prompt_tools_general_explanation + \"""Here are the functions available in JSONSchema format:
176 |
177 | <tools>
178 |
179 | <tool_description>
180 | <tool_name>get_user</tool_name>
181 | <description>
182 | Retrieves a user from the database by their user ID.
183 | </description>
184 | <parameters>
185 | <parameter>
186 | <name>user_id</name>
187 | <type>int</type>
188 | <description>The ID of the user to retrieve.</description>
189 | </parameter>
190 | </parameters>
191 | </tool_description>
192 |
193 | <tool_description>
194 | <tool_name>get_product</tool_name>
195 | <description>
196 | Retrieves a product from the database by its product ID.
197 | </description>
198 | <parameters>
199 | <parameter>
200 | <name>product_id</name>
201 | <type>int</type>
202 | <description>The ID of the product to retrieve.</description>
203 | </parameter>
204 | </parameters>
205 | </tool_description>
206 |
207 | <tool_description>
208 | <tool_name>add_user</tool_name>
209 | <description>
210 | Adds a new user to the database.
211 | </description>
212 | <parameters>
213 | <parameter>
214 | <name>name</name>
215 | <type>str</type>
216 | <description>The name of the user.</description>
217 | </parameter>
218 | <parameter>
219 | <name>email</name>
220 | <type>str</type>
221 | <description>The email address of the user.</description>
222 | </parameter>
223 | </parameters>
224 | </tool_description>
225 |
226 | <tool_description>
227 | <tool_name>add_product</tool_name>
228 | <description>
229 | Adds a new product to the database.
230 | </description>
231 | <parameters>
232 | <parameter>
233 | <name>name</name>
234 | <type>str</type>
235 | <description>The name of the product.</description>
236 | </parameter>
237 | <parameter>
238 | <name>price</name>
239 | <type>float</type>
240 | <description>The price of the product.</description>
241 | </parameter>
242 | </parameters>
243 | </tool_description>
244 |
245 | </tools>
246 | """
--------------------------------------------------------------------------------
/AmazonBedrock/boto3/10_3_Appendix_Empirical_Performance_Eval.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Evaluating AI Models: Code, Human, and Model-Based Grading\n",
8 | "\n",
9 | "In this notebook, we'll delve into a trio of widely-used techniques for assessing the effectiveness of AI models, like Claude v3:\n",
10 | "\n",
11 | "1. Code-based grading\n",
12 | "2. Human grading\n",
13 | "3. Model-based grading\n",
14 | "\n",
15 | "We'll illustrate each approach through examples and examine their respective advantages and limitations, when gauging AI performance."
16 | ]
17 | },
18 | {
19 | "cell_type": "markdown",
20 | "metadata": {},
21 | "source": [
22 | "## Code-Based Grading Example: Sentiment Analysis\n",
23 | "\n",
24 | "In this example, we'll evaluate Claude's ability to classify the sentiment of movie reviews as positive or negative. We can use code to check if the model's output matches the expected sentiment."
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": null,
30 | "metadata": {
31 | "tags": []
32 | },
33 | "outputs": [],
34 | "source": [
35 | "# Import python's built-in regular expression library\n",
36 | "import re\n",
37 | "\n",
38 | "# Import boto3 and json\n",
39 | "import boto3\n",
40 | "import json\n",
41 | "\n",
42 | "# Store the model name and AWS region for later use\n",
43 | "MODEL_NAME = \"anthropic.claude-3-haiku-20240307-v1:0\"\n",
44 | "AWS_REGION = \"us-west-2\"\n",
45 | "\n",
46 | "%store MODEL_NAME\n",
47 | "%store AWS_REGION"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {
54 | "tags": []
55 | },
56 | "outputs": [],
57 | "source": [
58 | "# Function to build the input prompt for sentiment analysis\n",
59 | "def build_input_prompt(review):\n",
60 | " user_content = f\"\"\"Classify the sentiment of the following movie review as either 'positive' or 'negative' provide only one of those two choices:\n",
61 | " {review}\"\"\"\n",
62 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
63 | "\n",
64 | "# Define the evaluation data\n",
65 | "eval = [\n",
66 | " {\n",
67 | " \"review\": \"This movie was amazing! The acting was superb and the plot kept me engaged from start to finish.\",\n",
68 | " \"golden_answer\": \"positive\"\n",
69 | " },\n",
70 | " {\n",
71 | " \"review\": \"I was thoroughly disappointed by this film. The pacing was slow and the characters were one-dimensional.\",\n",
72 | " \"golden_answer\": \"negative\"\n",
73 | " }\n",
74 | "]\n",
75 | "\n",
76 | "# Function to get completions from the model\n",
77 | "client = boto3.client('bedrock-runtime',region_name=AWS_REGION)\n",
78 | "\n",
79 | "def get_completion(messages):\n",
80 | " body = json.dumps(\n",
81 | " {\n",
82 | " \"anthropic_version\": '',\n",
83 | " \"max_tokens\": 2000,\n",
84 | " \"messages\": messages,\n",
85 | " \"temperature\": 0.5,\n",
86 | " \"top_p\": 1,\n",
87 | " }\n",
88 | " )\n",
89 | " response = client.invoke_model(body=body, modelId=MODEL_NAME)\n",
90 | " response_body = json.loads(response.get('body').read())\n",
91 | "\n",
92 | " return response_body.get('content')[0].get('text')\n",
93 | "\n",
94 | "# Get completions for each input\n",
95 | "outputs = [get_completion(build_input_prompt(item[\"review\"])) for item in eval]\n",
96 | "\n",
97 | "# Print the outputs and golden answers\n",
98 | "for output, question in zip(outputs, eval):\n",
99 | " print(f\"Review: {question['review']}\\nGolden Answer: {question['golden_answer']}\\nOutput: {output}\\n\")\n",
100 | "\n",
101 | "# Function to grade the completions\n",
102 | "def grade_completion(output, golden_answer):\n",
103 | " return output.lower() == golden_answer.lower()\n",
104 | "\n",
105 | "# Grade the completions and print the accuracy\n",
106 | "grades = [grade_completion(output, item[\"golden_answer\"]) for output, item in zip(outputs, eval)]\n",
107 | "print(f\"Accuracy: {sum(grades) / len(grades) * 100}%\")"
108 | ]
109 | },
110 | {
111 | "cell_type": "markdown",
112 | "metadata": {},
113 | "source": [
114 | "## Human Grading Example: Essay Scoring\n",
115 | "\n",
116 | "Some tasks, like scoring essays, are difficult to evaluate with code alone. In this case, we can provide guidelines for human graders to assess the model's output."
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": null,
122 | "metadata": {
123 | "tags": []
124 | },
125 | "outputs": [],
126 | "source": [
127 | "# Function to build the input prompt for essay generation\n",
128 | "def build_input_prompt(topic):\n",
129 | " user_content = f\"\"\"Write a short essay discussing the following topic:\n",
130 | " {topic}\"\"\"\n",
131 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
132 | "\n",
133 | "# Define the evaluation data\n",
134 | "eval = [\n",
135 | " {\n",
136 | " \"topic\": \"The importance of education in personal development and societal progress\",\n",
137 | " \"golden_answer\": \"A high-scoring essay should have a clear thesis, well-structured paragraphs, and persuasive examples discussing how education contributes to individual growth and broader societal advancement.\"\n",
138 | " }\n",
139 | "]\n",
140 | "\n",
141 | "# Get completions for each input\n",
142 | "outputs = [get_completion(build_input_prompt(item[\"topic\"])) for item in eval]\n",
143 | "\n",
144 | "# Print the outputs and golden answers\n",
145 | "for output, item in zip(outputs, eval):\n",
146 | " print(f\"Topic: {item['topic']}\\n\\nGrading Rubric:\\n {item['golden_answer']}\\n\\nModel Output:\\n{output}\\n\")"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "## Model-Based Grading Examples\n",
154 | "\n",
155 | "We can use Claude to grade its own outputs by providing the model's response and a grading rubric. This allows us to automate the evaluation of tasks that would typically require human judgment."
156 | ]
157 | },
158 | {
159 | "cell_type": "markdown",
160 | "metadata": {},
161 | "source": [
162 | "### Example 1: Summarization\n",
163 | "\n",
164 | "In this example, we'll use Claude to assess the quality of a summary it generated. This can be useful when you need to evaluate the model's ability to capture key information from a longer text concisely and accurately. By providing a rubric that outlines the essential points that should be covered, we can automate the grading process and quickly assess the model's performance on summarization tasks."
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": null,
170 | "metadata": {
171 | "tags": []
172 | },
173 | "outputs": [],
174 | "source": [
175 | "# Function to build the input prompt for summarization\n",
176 | "def build_input_prompt(text):\n",
177 | " user_content = f\"\"\"Please summarize the main points of the following text:\n",
178 | " {text}\"\"\"\n",
179 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
180 | "\n",
181 | "# Function to build the grader prompt for assessing summary quality\n",
182 | "def build_grader_prompt(output, rubric):\n",
183 | " user_content = f\"\"\"Assess the quality of the following summary based on this rubric:\n",
184 | " {rubric}\n",
185 | " {output}\n",
186 | " Provide a score from 1-5, where 1 is poor and 5 is excellent.\"\"\"\n",
187 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
188 | "\n",
189 | "# Define the evaluation data\n",
190 | "eval = [\n",
191 | " {\n",
192 | " \"text\": \"The Magna Carta, signed in 1215, was a pivotal document in English history. It limited the powers of the monarchy and established the principle that everyone, including the king, was subject to the law. This laid the foundation for constitutional governance and the rule of law in England and influenced legal systems worldwide.\",\n",
193 | " \"golden_answer\": \"A high-quality summary should concisely capture the key points: 1) The Magna Carta's significance in English history, 2) Its role in limiting monarchical power, 3) Establishing the principle of rule of law, and 4) Its influence on legal systems around the world.\"\n",
194 | " }\n",
195 | "]\n",
196 | "\n",
197 | "# Get completions for each input\n",
198 | "outputs = [get_completion(build_input_prompt(item[\"text\"])) for item in eval]\n",
199 | "\n",
200 | "# Grade the completions\n",
201 | "grades = [get_completion(build_grader_prompt(output, item[\"golden_answer\"])) for output, item in zip(outputs, eval)]\n",
202 | "\n",
203 | "# Print the summary quality score\n",
204 | "print(f\"Summary quality score: {grades[0]}\")"
205 | ]
206 | },
207 | {
208 | "cell_type": "markdown",
209 | "metadata": {},
210 | "source": [
211 | "### Example 2: Fact-Checking\n",
212 | "\n",
213 | "In this example, we'll use Claude to fact-check a claim and then assess the accuracy of its fact-checking. This can be useful when you need to evaluate the model's ability to distinguish between accurate and inaccurate information. By providing a rubric that outlines the key points that should be covered in a correct fact-check, we can automate the grading process and quickly assess the model's performance on fact-checking tasks."
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": null,
219 | "metadata": {
220 | "tags": []
221 | },
222 | "outputs": [],
223 | "source": [
224 | "# Function to build the input prompt for fact-checking\n",
225 | "def build_input_prompt(claim):\n",
226 | " user_content = f\"\"\"Determine if the following claim is true or false:\n",
227 | " {claim}\"\"\"\n",
228 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
229 | "\n",
230 | "# Function to build the grader prompt for assessing fact-check accuracy\n",
231 | "def build_grader_prompt(output, rubric):\n",
232 | " user_content = f\"\"\"Based on the following rubric, assess whether the fact-check is correct:\n",
233 | " {rubric}\n",
234 | " {output}\"\"\"\n",
235 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
236 | "\n",
237 | "# Define the evaluation data\n",
238 | "eval = [\n",
239 | " {\n",
240 | " \"claim\": \"The Great Wall of China is visible from space.\",\n",
241 | " \"golden_answer\": \"A correct fact-check should state that this claim is false. While the Great Wall is an impressive structure, it is not visible from space with the naked eye.\"\n",
242 | " }\n",
243 | "]\n",
244 | "\n",
245 | "# Get completions for each input\n",
246 | "outputs = [get_completion(build_input_prompt(item[\"claim\"])) for item in eval]\n",
247 | "\n",
248 | "grades = []\n",
249 | "for output, item in zip(outputs, eval):\n",
250 | " # Print the claim, fact-check, and rubric\n",
251 | " print(f\"Claim: {item['claim']}\\n\")\n",
252 | " print(f\"Fact-check: {output}]\\n\")\n",
253 | " print(f\"Rubric: {item['golden_answer']}\\n\")\n",
254 | " \n",
255 | " # Grade the fact-check\n",
256 | " grader_prompt = build_grader_prompt(output, item[\"golden_answer\"])\n",
257 | " grade = get_completion(grader_prompt)\n",
258 | " grades.append(\"correct\" in grade.lower())\n",
259 | "\n",
260 | "# Print the fact-checking accuracy\n",
261 | "accuracy = sum(grades) / len(grades)\n",
262 | "print(f\"Fact-checking accuracy: {accuracy * 100}%\")"
263 | ]
264 | },
265 | {
266 | "cell_type": "markdown",
267 | "metadata": {},
268 | "source": [
269 | "### Example 3: Tone Analysis\n",
270 | "\n",
271 | "In this example, we'll use Claude to analyze the tone of a given text and then assess the accuracy of its analysis. This can be useful when you need to evaluate the model's ability to identify and interpret the emotional content and attitudes expressed in a piece of text. By providing a rubric that outlines the key aspects of tone that should be identified, we can automate the grading process and quickly assess the model's performance on tone analysis tasks."
272 | ]
273 | },
274 | {
275 | "cell_type": "code",
276 | "execution_count": null,
277 | "metadata": {
278 | "tags": []
279 | },
280 | "outputs": [],
281 | "source": [
282 | "# Function to build the input prompt for tone analysis\n",
283 | "def build_input_prompt(text):\n",
284 | " user_content = f\"\"\"Analyze the tone of the following text:\n",
285 | " {text}\"\"\"\n",
286 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
287 | "\n",
288 | "# Function to build the grader prompt for assessing tone analysis accuracy\n",
289 | "def build_grader_prompt(output, rubric):\n",
290 | " user_content = f\"\"\"Assess the accuracy of the following tone analysis based on this rubric:\n",
291 | " {rubric}\n",
292 | " {output}\"\"\"\n",
293 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
294 | "\n",
295 | "# Define the evaluation data\n",
296 | "eval = [\n",
297 | " {\n",
298 | " \"text\": \"I can't believe they canceled the event at the last minute. This is completely unacceptable and unprofessional!\",\n",
299 | " \"golden_answer\": \"The tone analysis should identify the text as expressing frustration, anger, and disappointment. Key words like 'can't believe', 'last minute', 'unacceptable', and 'unprofessional' indicate strong negative emotions.\"\n",
300 | " }\n",
301 | "]\n",
302 | "\n",
303 | "# Get completions for each input\n",
304 | "outputs = [get_completion(build_input_prompt(item[\"text\"])) for item in eval]\n",
305 | "\n",
306 | "# Grade the completions\n",
307 | "grades = [get_completion(build_grader_prompt(output, item[\"golden_answer\"])) for output, item in zip(outputs, eval)]\n",
308 | "\n",
309 | "# Print the tone analysis quality\n",
310 | "print(f\"Tone analysis quality: {grades[0]}\")"
311 | ]
312 | },
313 | {
314 | "cell_type": "markdown",
315 | "metadata": {},
316 | "source": [
317 | "These examples demonstrate how code-based, human, and model-based grading can be used to evaluate AI models like Claude on various tasks. The choice of evaluation method depends on the nature of the task and the resources available. Model-based grading offers a promising approach for automating the assessment of complex tasks that would otherwise require human judgment."
318 | ]
319 | }
320 | ],
321 | "metadata": {
322 | "kernelspec": {
323 | "display_name": "conda_pytorch_p310",
324 | "language": "python",
325 | "name": "conda_pytorch_p310"
326 | },
327 | "language_info": {
328 | "codemirror_mode": {
329 | "name": "ipython",
330 | "version": 3
331 | },
332 | "file_extension": ".py",
333 | "mimetype": "text/x-python",
334 | "name": "python",
335 | "nbconvert_exporter": "python",
336 | "pygments_lexer": "ipython3",
337 | "version": "3.10.13"
338 | }
339 | },
340 | "nbformat": 4,
341 | "nbformat_minor": 4
342 | }
343 |
--------------------------------------------------------------------------------
/AmazonBedrock/anthropic/10_3_Appendix_Empirical_Performance_Evaluations.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Evaluating AI Models: Code, Human, and Model-Based Grading\n",
8 | "\n",
9 | "In this notebook, we'll delve into a trio of widely-used techniques for assessing the effectiveness of AI models, like Claude v3:\n",
10 | "\n",
11 | "1. Code-based grading\n",
12 | "2. Human grading\n",
13 | "3. Model-based grading\n",
14 | "\n",
15 | "We'll illustrate each approach through examples and examine their respective advantages and limitations, when gauging AI performance."
16 | ]
17 | },
18 | {
19 | "cell_type": "markdown",
20 | "metadata": {},
21 | "source": [
22 | "## Code-Based Grading Example: Sentiment Analysis\n",
23 | "\n",
24 | "In this example, we'll evaluate Claude's ability to classify the sentiment of movie reviews as positive or negative. We can use code to check if the model's output matches the expected sentiment."
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": null,
30 | "metadata": {
31 | "tags": []
32 | },
33 | "outputs": [],
34 | "source": [
35 | "# Store the model name and AWS region for later use\n",
36 | "MODEL_NAME = \"anthropic.claude-3-haiku-20240307-v1:0\"\n",
37 | "AWS_REGION = \"us-west-2\"\n",
38 | "\n",
39 | "%store MODEL_NAME\n",
40 | "%store AWS_REGION"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {
47 | "tags": []
48 | },
49 | "outputs": [],
50 | "source": [
51 | "# Install the Anthropic package\n",
52 | "%pip install anthropic --quiet"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "metadata": {
59 | "tags": []
60 | },
61 | "outputs": [],
62 | "source": [
63 | "# Import the AnthropicBedrock class and create a client instance\n",
64 | "from anthropic import AnthropicBedrock\n",
65 | "\n",
66 | "client = AnthropicBedrock(aws_region=AWS_REGION)"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "metadata": {
73 | "tags": []
74 | },
75 | "outputs": [],
76 | "source": [
77 | "# Function to build the input prompt for sentiment analysis\n",
78 | "def build_input_prompt(review):\n",
79 | " user_content = f\"\"\"Classify the sentiment of the following movie review as either 'positive' or 'negative' provide only one of those two choices:\n",
80 | " {review}\"\"\"\n",
81 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
82 | "\n",
83 | "# Define the evaluation data\n",
84 | "eval = [\n",
85 | " {\n",
86 | " \"review\": \"This movie was amazing! The acting was superb and the plot kept me engaged from start to finish.\",\n",
87 | " \"golden_answer\": \"positive\"\n",
88 | " },\n",
89 | " {\n",
90 | " \"review\": \"I was thoroughly disappointed by this film. The pacing was slow and the characters were one-dimensional.\",\n",
91 | " \"golden_answer\": \"negative\"\n",
92 | " }\n",
93 | "]\n",
94 | "\n",
95 | "# Function to get completions from the model\n",
96 | "def get_completion(messages):\n",
97 | " message = client.messages.create(\n",
98 | " model=MODEL_NAME,\n",
99 | " max_tokens=2000,\n",
100 | " temperature=0.0,\n",
101 | " messages=messages\n",
102 | " )\n",
103 | " return message.content[0].text\n",
104 | "\n",
105 | "# Get completions for each input\n",
106 | "outputs = [get_completion(build_input_prompt(item[\"review\"])) for item in eval]\n",
107 | "\n",
108 | "# Print the outputs and golden answers\n",
109 | "for output, question in zip(outputs, eval):\n",
110 | " print(f\"Review: {question['review']}\\nGolden Answer: {question['golden_answer']}\\nOutput: {output}\\n\")\n",
111 | "\n",
112 | "# Function to grade the completions\n",
113 | "def grade_completion(output, golden_answer):\n",
114 | " return output.lower() == golden_answer.lower()\n",
115 | "\n",
116 | "# Grade the completions and print the accuracy\n",
117 | "grades = [grade_completion(output, item[\"golden_answer\"]) for output, item in zip(outputs, eval)]\n",
118 | "print(f\"Accuracy: {sum(grades) / len(grades) * 100}%\")"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "## Human Grading Example: Essay Scoring\n",
126 | "\n",
127 | "Some tasks, like scoring essays, are difficult to evaluate with code alone. In this case, we can provide guidelines for human graders to assess the model's output."
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": null,
133 | "metadata": {
134 | "tags": []
135 | },
136 | "outputs": [],
137 | "source": [
138 | "# Function to build the input prompt for essay generation\n",
139 | "def build_input_prompt(topic):\n",
140 | " user_content = f\"\"\"Write a short essay discussing the following topic:\n",
141 | " {topic}\"\"\"\n",
142 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
143 | "\n",
144 | "# Define the evaluation data\n",
145 | "eval = [\n",
146 | " {\n",
147 | " \"topic\": \"The importance of education in personal development and societal progress\",\n",
148 | " \"golden_answer\": \"A high-scoring essay should have a clear thesis, well-structured paragraphs, and persuasive examples discussing how education contributes to individual growth and broader societal advancement.\"\n",
149 | " }\n",
150 | "]\n",
151 | "\n",
152 | "# Get completions for each input\n",
153 | "outputs = [get_completion(build_input_prompt(item[\"topic\"])) for item in eval]\n",
154 | "\n",
155 | "# Print the outputs and golden answers\n",
156 | "for output, item in zip(outputs, eval):\n",
157 | " print(f\"Topic: {item['topic']}\\n\\nGrading Rubric:\\n {item['golden_answer']}\\n\\nModel Output:\\n{output}\\n\")"
158 | ]
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {},
163 | "source": [
164 | "## Model-Based Grading Examples\n",
165 | "\n",
166 | "We can use Claude to grade its own outputs by providing the model's response and a grading rubric. This allows us to automate the evaluation of tasks that would typically require human judgment."
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "### Example 1: Summarization\n",
174 | "\n",
175 | "In this example, we'll use Claude to assess the quality of a summary it generated. This can be useful when you need to evaluate the model's ability to capture key information from a longer text concisely and accurately. By providing a rubric that outlines the essential points that should be covered, we can automate the grading process and quickly assess the model's performance on summarization tasks."
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": null,
181 | "metadata": {
182 | "tags": []
183 | },
184 | "outputs": [],
185 | "source": [
186 | "# Function to build the input prompt for summarization\n",
187 | "def build_input_prompt(text):\n",
188 | " user_content = f\"\"\"Please summarize the main points of the following text:\n",
189 | " {text}\"\"\"\n",
190 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
191 | "\n",
192 | "# Function to build the grader prompt for assessing summary quality\n",
193 | "def build_grader_prompt(output, rubric):\n",
194 | " user_content = f\"\"\"Assess the quality of the following summary based on this rubric:\n",
195 | " {rubric}\n",
196 | " {output}\n",
197 | " Provide a score from 1-5, where 1 is poor and 5 is excellent.\"\"\"\n",
198 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
199 | "\n",
200 | "# Define the evaluation data\n",
201 | "eval = [\n",
202 | " {\n",
203 | " \"text\": \"The Magna Carta, signed in 1215, was a pivotal document in English history. It limited the powers of the monarchy and established the principle that everyone, including the king, was subject to the law. This laid the foundation for constitutional governance and the rule of law in England and influenced legal systems worldwide.\",\n",
204 | " \"golden_answer\": \"A high-quality summary should concisely capture the key points: 1) The Magna Carta's significance in English history, 2) Its role in limiting monarchical power, 3) Establishing the principle of rule of law, and 4) Its influence on legal systems around the world.\"\n",
205 | " }\n",
206 | "]\n",
207 | "\n",
208 | "# Get completions for each input\n",
209 | "outputs = [get_completion(build_input_prompt(item[\"text\"])) for item in eval]\n",
210 | "\n",
211 | "# Grade the completions\n",
212 | "grades = [get_completion(build_grader_prompt(output, item[\"golden_answer\"])) for output, item in zip(outputs, eval)]\n",
213 | "\n",
214 | "# Print the summary quality score\n",
215 | "print(f\"Summary quality score: {grades[0]}\")"
216 | ]
217 | },
218 | {
219 | "cell_type": "markdown",
220 | "metadata": {},
221 | "source": [
222 | "### Example 2: Fact-Checking\n",
223 | "\n",
224 | "In this example, we'll use Claude to fact-check a claim and then assess the accuracy of its fact-checking. This can be useful when you need to evaluate the model's ability to distinguish between accurate and inaccurate information. By providing a rubric that outlines the key points that should be covered in a correct fact-check, we can automate the grading process and quickly assess the model's performance on fact-checking tasks."
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": null,
230 | "metadata": {
231 | "tags": []
232 | },
233 | "outputs": [],
234 | "source": [
235 | "# Function to build the input prompt for fact-checking\n",
236 | "def build_input_prompt(claim):\n",
237 | " user_content = f\"\"\"Determine if the following claim is true or false:\n",
238 | " {claim}\"\"\"\n",
239 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
240 | "\n",
241 | "# Function to build the grader prompt for assessing fact-check accuracy\n",
242 | "def build_grader_prompt(output, rubric):\n",
243 | " user_content = f\"\"\"Based on the following rubric, assess whether the fact-check is correct:\n",
244 | " {rubric}\n",
245 | " {output}\"\"\"\n",
246 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
247 | "\n",
248 | "# Define the evaluation data\n",
249 | "eval = [\n",
250 | " {\n",
251 | " \"claim\": \"The Great Wall of China is visible from space.\",\n",
252 | " \"golden_answer\": \"A correct fact-check should state that this claim is false. While the Great Wall is an impressive structure, it is not visible from space with the naked eye.\"\n",
253 | " }\n",
254 | "]\n",
255 | "\n",
256 | "# Get completions for each input\n",
257 | "outputs = [get_completion(build_input_prompt(item[\"claim\"])) for item in eval]\n",
258 | "\n",
259 | "grades = []\n",
260 | "for output, item in zip(outputs, eval):\n",
261 | " # Print the claim, fact-check, and rubric\n",
262 | " print(f\"Claim: {item['claim']}\\n\")\n",
263 | " print(f\"Fact-check: {output}]\\n\")\n",
264 | " print(f\"Rubric: {item['golden_answer']}\\n\")\n",
265 | " \n",
266 | " # Grade the fact-check\n",
267 | " grader_prompt = build_grader_prompt(output, item[\"golden_answer\"])\n",
268 | " grade = get_completion(grader_prompt)\n",
269 | " grades.append(\"correct\" in grade.lower())\n",
270 | "\n",
271 | "# Print the fact-checking accuracy\n",
272 | "accuracy = sum(grades) / len(grades)\n",
273 | "print(f\"Fact-checking accuracy: {accuracy * 100}%\")"
274 | ]
275 | },
276 | {
277 | "cell_type": "markdown",
278 | "metadata": {},
279 | "source": [
280 | "### Example 3: Tone Analysis\n",
281 | "\n",
282 | "In this example, we'll use Claude to analyze the tone of a given text and then assess the accuracy of its analysis. This can be useful when you need to evaluate the model's ability to identify and interpret the emotional content and attitudes expressed in a piece of text. By providing a rubric that outlines the key aspects of tone that should be identified, we can automate the grading process and quickly assess the model's performance on tone analysis tasks."
283 | ]
284 | },
285 | {
286 | "cell_type": "code",
287 | "execution_count": null,
288 | "metadata": {
289 | "tags": []
290 | },
291 | "outputs": [],
292 | "source": [
293 | "# Function to build the input prompt for tone analysis\n",
294 | "def build_input_prompt(text):\n",
295 | " user_content = f\"\"\"Analyze the tone of the following text:\n",
296 | " {text}\"\"\"\n",
297 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
298 | "\n",
299 | "# Function to build the grader prompt for assessing tone analysis accuracy\n",
300 | "def build_grader_prompt(output, rubric):\n",
301 | " user_content = f\"\"\"Assess the accuracy of the following tone analysis based on this rubric:\n",
302 | " {rubric}\n",
303 | " {output}\"\"\"\n",
304 | " return [{\"role\": \"user\", \"content\": user_content}]\n",
305 | "\n",
306 | "# Define the evaluation data\n",
307 | "eval = [\n",
308 | " {\n",
309 | " \"text\": \"I can't believe they canceled the event at the last minute. This is completely unacceptable and unprofessional!\",\n",
310 | " \"golden_answer\": \"The tone analysis should identify the text as expressing frustration, anger, and disappointment. Key words like 'can't believe', 'last minute', 'unacceptable', and 'unprofessional' indicate strong negative emotions.\"\n",
311 | " }\n",
312 | "]\n",
313 | "\n",
314 | "# Get completions for each input\n",
315 | "outputs = [get_completion(build_input_prompt(item[\"text\"])) for item in eval]\n",
316 | "\n",
317 | "# Grade the completions\n",
318 | "grades = [get_completion(build_grader_prompt(output, item[\"golden_answer\"])) for output, item in zip(outputs, eval)]\n",
319 | "\n",
320 | "# Print the tone analysis quality\n",
321 | "print(f\"Tone analysis quality: {grades[0]}\")"
322 | ]
323 | },
324 | {
325 | "cell_type": "markdown",
326 | "metadata": {},
327 | "source": [
328 | "These examples demonstrate how code-based, human, and model-based grading can be used to evaluate AI models like Claude on various tasks. The choice of evaluation method depends on the nature of the task and the resources available. Model-based grading offers a promising approach for automating the assessment of complex tasks that would otherwise require human judgment."
329 | ]
330 | }
331 | ],
332 | "metadata": {
333 | "kernelspec": {
334 | "display_name": "conda_pytorch_p310",
335 | "language": "python",
336 | "name": "conda_pytorch_p310"
337 | },
338 | "language_info": {
339 | "codemirror_mode": {
340 | "name": "ipython",
341 | "version": 3
342 | },
343 | "file_extension": ".py",
344 | "mimetype": "text/x-python",
345 | "name": "python",
346 | "nbconvert_exporter": "python",
347 | "pygments_lexer": "ipython3",
348 | "version": "3.10.13"
349 | }
350 | },
351 | "nbformat": 4,
352 | "nbformat_minor": 4
353 | }
354 |
--------------------------------------------------------------------------------
/Anthropic 1P/01_Basic_Prompt_Structure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 1: Basic Prompt Structure\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "!pip install anthropic\n",
25 | "\n",
26 | "# Import python's built-in regular expression library\n",
27 | "import re\n",
28 | "import anthropic\n",
29 | "\n",
30 | "# Retrieve the API_KEY & MODEL_NAME variables from the IPython store\n",
31 | "%store -r API_KEY\n",
32 | "%store -r MODEL_NAME\n",
33 | "\n",
34 | "client = anthropic.Anthropic(api_key=API_KEY)\n",
35 | "\n",
36 | "def get_completion(prompt: str, system_prompt=\"\"):\n",
37 | " message = client.messages.create(\n",
38 | " model=MODEL_NAME,\n",
39 | " max_tokens=2000,\n",
40 | " temperature=0.0,\n",
41 | " system=system_prompt,\n",
42 | " messages=[\n",
43 | " {\"role\": \"user\", \"content\": prompt}\n",
44 | " ]\n",
45 | " )\n",
46 | " return message.content[0].text"
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 | "---\n",
54 | "\n",
55 | "## Lesson\n",
56 | "\n",
57 | "Anthropic offers two APIs, the legacy [Text Completions API](https://docs.anthropic.com/claude/reference/complete_post) and the current [Messages API](https://docs.anthropic.com/claude/reference/messages_post). For this tutorial, we will be exclusively using the Messages API.\n",
58 | "\n",
59 | "At minimum, a call to Claude using the Messages API requires the following parameters:\n",
60 | "- `model`: the [API model name](https://docs.anthropic.com/claude/docs/models-overview#model-recommendations) of the model that you intend to call\n",
61 | "\n",
62 | "- `max_tokens`: the maximum number of tokens to generate before stopping. Note that Claude may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. Furthermore, this is a *hard* stop, meaning that it may cause Claude to stop generating mid-word or mid-sentence.\n",
63 | "\n",
64 | "- `messages`: an array of input messages. Our models are trained to operate on alternating `user` and `assistant` conversational turns. When creating a new `Message`, you specify the prior conversational turns with the messages parameter, and the model then generates the next `Message` in the conversation.\n",
65 | " - Each input message must be an object with a `role` and `content`. You can specify a single `user`-role message, or you can include multiple `user` and `assistant` messages (they must alternate, if so). The first message must always use the user `role`.\n",
66 | "\n",
67 | "There are also optional parameters, such as:\n",
68 | "- `system`: the system prompt - more on this below.\n",
69 | " \n",
70 | "- `temperature`: the degree of variability in Claude's response. For these lessons and exercises, we have set `temperature` to 0.\n",
71 | "\n",
72 | "For a complete list of all API parameters, visit our [API documentation](https://docs.anthropic.com/claude/reference/messages_post)."
73 | ]
74 | },
75 | {
76 | "cell_type": "markdown",
77 | "metadata": {},
78 | "source": [
79 | "### Examples\n",
80 | "\n",
81 | "Let's take a look at how Claude responds to some correctly-formatted prompts. For each of the following cells, run the cell (`shift+enter`), and Claude's response will appear below the block."
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": null,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": [
90 | "# Prompt\n",
91 | "PROMPT = \"Hi Claude, how are you?\"\n",
92 | "\n",
93 | "# Print Claude's response\n",
94 | "print(get_completion(PROMPT))"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "# Prompt\n",
104 | "PROMPT = \"Can you tell me the color of the ocean?\"\n",
105 | "\n",
106 | "# Print Claude's response\n",
107 | "print(get_completion(PROMPT))"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "metadata": {},
114 | "outputs": [],
115 | "source": [
116 | "# Prompt\n",
117 | "PROMPT = \"What year was Celine Dion born in?\"\n",
118 | "\n",
119 | "# Print Claude's response\n",
120 | "print(get_completion(PROMPT))"
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "metadata": {},
126 | "source": [
127 | "Now let's take a look at some prompts that do not include the correct Messages API formatting. For these malformatted prompts, the Messages API returns an error.\n",
128 | "\n",
129 | "First, we have an example of a Messages API call that lacks `role` and `content` fields in the `messages` array."
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "# Get Claude's response\n",
139 | "response = client.messages.create(\n",
140 | " model=MODEL_NAME,\n",
141 | " max_tokens=2000,\n",
142 | " temperature=0.0,\n",
143 | " messages=[\n",
144 | " {\"Hi Claude, how are you?\"}\n",
145 | " ]\n",
146 | " )\n",
147 | "\n",
148 | "# Print Claude's response\n",
149 | "print(response[0].text)"
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "metadata": {},
155 | "source": [
156 | "Here's a prompt that fails to alternate between the `user` and `assistant` roles."
157 | ]
158 | },
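159 | {
160 | "cell_type": "markdown",
161 | "metadata": {},
162 | "source": [
163 | "> ⚠️ **Warning:** Due to the lack of alternation between `user` and `assistant` roles, the API will return an error. This is expected behavior."
164 | ]
165 | },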
159 | {
160 | "cell_type": "code",
161 | "execution_count": null,
162 | "metadata": {},
163 | "outputs": [],
164 | "source": [
165 | "# Get Claude's response\n",
166 | "response = client.messages.create(\n",
167 | " model=MODEL_NAME,\n",
168 | " max_tokens=2000,\n",
169 | " temperature=0.0,\n",
170 | " messages=[\n",
171 | " {\"role\": \"user\", \"content\": \"What year was Celine Dion born in?\"},\n",
172 | " {\"role\": \"user\", \"content\": \"Also, can you tell me some other facts about her?\"}\n",
173 | " ]\n",
174 | " )\n",
175 | "\n",
176 | "# Print Claude's response\n",
177 | "print(response[0].text)"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "`user` and `assistant` messages **MUST alternate**, and messages **MUST start with a `user` turn**. You can have multiple `user` & `assistant` pairs in a prompt (as if simulating a multi-turn conversation). You can also put words into a terminal `assistant` message for Claude to continue from where you left off (more on that in later chapters).\n",
185 | "\n",
186 | "#### System Prompts\n",
187 | "\n",
188 | "You can also use **system prompts**. A system prompt is a way to **provide context, instructions, and guidelines to Claude** before presenting it with a question or task in the \"User\" turn. \n",
189 | "\n",
190 | "Structurally, system prompts exist separately from the list of `user` & `assistant` messages, and thus belong in a separate `system` parameter (take a look at the structure of the `get_completion` helper function in the [Setup](#setup) section of the notebook). \n",
191 | "\n",
192 | "Within this tutorial, wherever we might utilize a system prompt, we have provided you a `system` field in your completions function. Should you not want to use a system prompt, simply set the `SYSTEM_PROMPT` variable to an empty string."
193 | ]
194 | },
195 | {
196 | "cell_type": "markdown",
197 | "metadata": {},
198 | "source": [
199 | "#### System Prompt Example"
200 | ]
201 | },
202 | {
203 | "cell_type": "code",
204 | "execution_count": null,
205 | "metadata": {},
206 | "outputs": [],
207 | "source": [
208 | "# System prompt\n",
209 | "SYSTEM_PROMPT = \"Your answer should always be a series of critical thinking questions that further the conversation (do not provide answers to your questions). Do not actually answer the user question.\"\n",
210 | "\n",
211 | "# Prompt\n",
212 | "PROMPT = \"Why is the sky blue?\"\n",
213 | "\n",
214 | "# Print Claude's response\n",
215 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
216 | ]
217 | },
218 | {
219 | "cell_type": "markdown",
220 | "metadata": {},
221 | "source": [
222 | "Why use a system prompt? A **well-written system prompt can improve Claude's performance** in a variety of ways, such as increasing Claude's ability to follow rules and instructions. For more information, visit our documentation on [how to use system prompts](https://docs.anthropic.com/claude/docs/how-to-use-system-prompts) with Claude.\n",
223 | "\n",
224 | "Now we'll dive into some exercises. If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
225 | ]
226 | },
227 | {
228 | "cell_type": "markdown",
229 | "metadata": {},
230 | "source": [
231 | "---\n",
232 | "\n",
233 | "## Exercises\n",
234 | "- [Exercise 1.1 - Counting to Three](#exercise-11---counting-to-three)\n",
235 | "- [Exercise 1.2 - System Prompt](#exercise-12---system-prompt)"
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "metadata": {},
241 | "source": [
242 | "### Exercise 1.1 - Counting to Three\n",
243 | "Using proper `user` / `assistant` formatting, edit the `PROMPT` below to get Claude to **count to three.** The output will also indicate whether your solution is correct."
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": null,
249 | "metadata": {},
250 | "outputs": [],
251 | "source": [
252 | "# Prompt - this is the only field you should change\n",
253 | "PROMPT = \"[Replace this text]\"\n",
254 | "\n",
255 | "# Get Claude's response\n",
256 | "response = get_completion(PROMPT)\n",
257 | "\n",
258 | "# Function to grade exercise correctness\n",
259 | "def grade_exercise(text):\n",
260 | " pattern = re.compile(r'^(?=.*1)(?=.*2)(?=.*3).*$', re.DOTALL)\n",
261 | " return bool(pattern.match(text))\n",
262 | "\n",
263 | "# Print Claude's response and the corresponding grade\n",
264 | "print(response)\n",
265 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
266 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
267 | ]
268 | },
269 | {
270 | "cell_type": "markdown",
271 | "metadata": {},
272 | "source": [
273 | "❓ If you want a hint, run the cell below!"
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": null,
279 | "metadata": {},
280 | "outputs": [],
281 | "source": [
282 | "from hints import exercise_1_1_hint; print(exercise_1_1_hint)"
283 | ]
284 | },
285 | {
286 | "cell_type": "markdown",
287 | "metadata": {},
288 | "source": [
289 | "### Exercise 1.2 - System Prompt\n",
290 | "\n",
291 | "Modify the `SYSTEM_PROMPT` to make Claude respond like it's a 3 year old child."
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": null,
297 | "metadata": {},
298 | "outputs": [],
299 | "source": [
300 | "# System prompt - this is the only field you should change\n",
301 | "SYSTEM_PROMPT = \"[Replace this text]\"\n",
302 | "\n",
303 | "# Prompt\n",
304 | "PROMPT = \"How big is the sky?\"\n",
305 | "\n",
306 | "# Get Claude's response\n",
307 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
308 | "\n",
309 | "# Function to grade exercise correctness\n",
310 | "def grade_exercise(text):\n",
311 | " return bool(re.search(r\"giggles\", text) or re.search(r\"soo\", text))\n",
312 | "\n",
313 | "# Print Claude's response and the corresponding grade\n",
314 | "print(response)\n",
315 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
316 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
317 | ]
318 | },
319 | {
320 | "cell_type": "markdown",
321 | "metadata": {},
322 | "source": [
323 | "❓ If you want a hint, run the cell below!"
324 | ]
325 | },
326 | {
327 | "cell_type": "code",
328 | "execution_count": null,
329 | "metadata": {},
330 | "outputs": [],
331 | "source": [
332 | "from hints import exercise_1_2_hint; print(exercise_1_2_hint)"
333 | ]
334 | },
335 | {
336 | "cell_type": "markdown",
337 | "metadata": {},
338 | "source": [
339 | "### Congrats!\n",
340 | "\n",
341 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
342 | ]
343 | },
344 | {
345 | "cell_type": "markdown",
346 | "metadata": {},
347 | "source": [
348 | "---\n",
349 | "\n",
350 | "## Example Playground\n",
351 | "\n",
352 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how it may affect Claude's responses."
353 | ]
354 | },
355 | {
356 | "cell_type": "code",
357 | "execution_count": null,
358 | "metadata": {},
359 | "outputs": [],
360 | "source": [
361 | "# Prompt\n",
362 | "PROMPT = \"Hi Claude, how are you?\"\n",
363 | "\n",
364 | "# Print Claude's response\n",
365 | "print(get_completion(PROMPT))"
366 | ]
367 | },
368 | {
369 | "cell_type": "code",
370 | "execution_count": null,
371 | "metadata": {},
372 | "outputs": [],
373 | "source": [
374 | "# Prompt\n",
375 | "PROMPT = \"Can you tell me the color of the ocean?\"\n",
376 | "\n",
377 | "# Print Claude's response\n",
378 | "print(get_completion(PROMPT))"
379 | ]
380 | },
381 | {
382 | "cell_type": "code",
383 | "execution_count": null,
384 | "metadata": {},
385 | "outputs": [],
386 | "source": [
387 | "# Prompt\n",
388 | "PROMPT = \"What year was Celine Dion born in?\"\n",
389 | "\n",
390 | "# Print Claude's response\n",
391 | "print(get_completion(PROMPT))"
392 | ]
393 | },
394 | {
395 | "cell_type": "code",
396 | "execution_count": null,
397 | "metadata": {},
398 | "outputs": [],
399 | "source": [
400 | "# Get Claude's response\n",
401 | "response = client.messages.create(\n",
402 | " model=MODEL_NAME,\n",
403 | " max_tokens=2000,\n",
404 | " temperature=0.0,\n",
405 | " messages=[\n",
406 | " {\"Hi Claude, how are you?\"}\n",
407 | " ]\n",
408 | " )\n",
409 | "\n",
410 | "# Print Claude's response\n",
411 | "print(response[0].text)"
412 | ]
413 | },
414 | {
415 | "cell_type": "code",
416 | "execution_count": null,
417 | "metadata": {},
418 | "outputs": [],
419 | "source": [
420 | "# Get Claude's response\n",
421 | "response = client.messages.create(\n",
422 | " model=MODEL_NAME,\n",
423 | " max_tokens=2000,\n",
424 | " temperature=0.0,\n",
425 | " messages=[\n",
426 | " {\"role\": \"user\", \"content\": \"What year was Celine Dion born in?\"},\n",
427 | " {\"role\": \"user\", \"content\": \"Also, can you tell me some other facts about her?\"}\n",
428 | " ]\n",
429 | " )\n",
430 | "\n",
431 | "# Print Claude's response\n",
432 | "print(response[0].text)"
433 | ]
434 | },
435 | {
436 | "cell_type": "code",
437 | "execution_count": null,
438 | "metadata": {},
439 | "outputs": [],
440 | "source": [
441 | "# System prompt\n",
442 | "SYSTEM_PROMPT = \"Your answer should always be a series of critical thinking questions that further the conversation (do not provide answers to your questions). Do not actually answer the user question.\"\n",
443 | "\n",
444 | "# Prompt\n",
445 | "PROMPT = \"Why is the sky blue?\"\n",
446 | "\n",
447 | "# Print Claude's response\n",
448 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
449 | ]
450 | }
451 | ],
452 | "metadata": {
453 | "language_info": {
454 | "name": "python"
455 | }
456 | },
457 | "nbformat": 4,
458 | "nbformat_minor": 2
459 | }
460 |
--------------------------------------------------------------------------------
/AmazonBedrock/anthropic/01_Basic_Prompt_Structure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 1: Basic Prompt Structure\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "%pip install anthropic --quiet\n",
25 | "\n",
26 | "# Import the hints module from the utils package\n",
27 | "import os\n",
28 | "import sys\n",
29 | "module_path = \"..\"\n",
30 | "sys.path.append(os.path.abspath(module_path))\n",
31 | "from utils import hints\n",
32 | "\n",
33 | "# Import python's built-in regular expression library\n",
34 | "import re\n",
35 | "from anthropic import AnthropicBedrock\n",
36 | "\n",
37 | "%store -r MODEL_NAME\n",
38 | "%store -r AWS_REGION\n",
39 | "\n",
40 | "client = AnthropicBedrock(aws_region=AWS_REGION)\n",
41 | "\n",
42 | "def get_completion(prompt, system=''):\n",
43 | " message = client.messages.create(\n",
44 | " model=MODEL_NAME,\n",
45 | " max_tokens=2000,\n",
46 | " temperature=0.0,\n",
47 | " messages=[\n",
48 | " {\"role\": \"user\", \"content\": prompt}\n",
49 | " ],\n",
50 | " system=system\n",
51 | " )\n",
52 | " return message.content[0].text"
53 | ]
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "metadata": {},
58 | "source": [
59 | "---\n",
60 | "\n",
61 | "## Lesson\n",
62 | "\n",
63 | "Anthropic offers two APIs, the legacy [Text Completions API](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-text-completion.html) and the current [Messages API](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html). For this tutorial, we will be exclusively using the Messages API.\n",
64 | "\n",
65 | "At minimum, a call to Claude using the Messages API requires the following parameters:\n",
66 | "- `model`: the [API model name](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns) of the model that you intend to call\n",
67 | "\n",
68 | "- `max_tokens`: the maximum number of tokens to generate before stopping. Note that Claude may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. Furthermore, this is a *hard* stop, meaning that it may cause Claude to stop generating mid-word or mid-sentence.\n",
69 | "\n",
70 | "- `messages`: an array of input messages. Our models are trained to operate on alternating `user` and `assistant` conversational turns. When creating a new `Message`, you specify the prior conversational turns with the messages parameter, and the model then generates the next `Message` in the conversation.\n",
71 | " - Each input message must be an object with a `role` and `content`. You can specify a single `user`-role message, or you can include multiple `user` and `assistant` messages (they must alternate, if so). The first message must always use the user `role`.\n",
72 | "\n",
73 | "There are also optional parameters, such as:\n",
74 | "- `system`: the system prompt - more on this below.\n",
75 | " \n",
76 | "- `temperature`: the degree of variability in Claude's response. For these lessons and exercises, we have set `temperature` to 0.\n",
77 | "\n",
78 | "For a complete list of all API parameters, visit our [API documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-claude.html)."
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "metadata": {},
84 | "source": [
85 | "### Examples\n",
86 | "\n",
87 | "Let's take a look at how Claude responds to some correctly-formatted prompts. For each of the following cells, run the cell (`shift+enter`), and Claude's response will appear below the block."
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "# Prompt\n",
97 | "PROMPT = \"Hi Claude, how are you?\"\n",
98 | "\n",
99 | "# Print Claude's response\n",
100 | "print(get_completion(PROMPT))"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "# Prompt\n",
110 | "PROMPT = \"Can you tell me the color of the ocean?\"\n",
111 | "\n",
112 | "# Print Claude's response\n",
113 | "print(get_completion(PROMPT))"
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "metadata": {},
120 | "outputs": [],
121 | "source": [
122 | "# Prompt\n",
123 | "PROMPT = \"What year was Celine Dion born in?\"\n",
124 | "\n",
125 | "# Print Claude's response\n",
126 | "print(get_completion(PROMPT))"
127 | ]
128 | },
129 | {
130 | "cell_type": "markdown",
131 | "metadata": {},
132 | "source": [
133 | "Now let's take a look at some prompts that do not include the correct Messages API formatting. For these malformatted prompts, the Messages API returns an error.\n",
134 | "\n",
135 | "First, we have an example of a Messages API call that lacks `role` and `content` fields in the `messages` array."
136 | ]
137 | },
138 | {
139 | "cell_type": "markdown",
140 | "metadata": {},
141 | "source": [
142 | "> ⚠️ **Warning:** Due to the incorrect formatting of the messages parameter in the prompt, the following cell will return an error. This is expected behavior."
143 | ]
144 | },
145 | {
146 | "cell_type": "code",
147 | "execution_count": null,
148 | "metadata": {},
149 | "outputs": [],
150 | "source": [
151 | "# Get Claude's response\n",
152 | "response = client.messages.create(\n",
153 | " model=MODEL_NAME,\n",
154 | " max_tokens=2000,\n",
155 | " temperature=0.0,\n",
156 | " messages=[\n",
157 | " {\"Hi Claude, how are you?\"}\n",
158 | " ]\n",
159 | " )\n",
160 | "\n",
161 | "# Print Claude's response\n",
162 | "print(response[0].text)"
163 | ]
164 | },
165 | {
166 | "cell_type": "markdown",
167 | "metadata": {},
168 | "source": [
169 | "Here's a prompt that fails to alternate between the `user` and `assistant` roles."
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "metadata": {},
175 | "source": [
176 | "> ⚠️ **Warning:** Due to the lack of alternation between `user` and `assistant` roles, Claude will return an error message. This is expected behavior."
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": null,
182 | "metadata": {},
183 | "outputs": [],
184 | "source": [
185 | "# Get Claude's response\n",
186 | "response = client.messages.create(\n",
187 | " model=MODEL_NAME,\n",
188 | " max_tokens=2000,\n",
189 | " temperature=0.0,\n",
190 | " messages=[\n",
191 | " {\"role\": \"user\", \"content\": \"What year was Celine Dion born in?\"},\n",
192 | " {\"role\": \"user\", \"content\": \"Also, can you tell me some other facts about her?\"}\n",
193 | " ]\n",
194 | " )\n",
195 | "\n",
196 | "# Print Claude's response\n",
197 | "print(response[0].text)"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "metadata": {},
203 | "source": [
204 | "`user` and `assistant` messages **MUST alternate**, and messages **MUST start with a `user` turn**. You can have multiple `user` & `assistant` pairs in a prompt (as if simulating a multi-turn conversation). You can also put words into a terminal `assistant` message for Claude to continue from where you left off (more on that in later chapters).\n",
205 | "\n",
206 | "#### System Prompts\n",
207 | "\n",
208 | "You can also use **system prompts**. A system prompt is a way to **provide context, instructions, and guidelines to Claude** before presenting it with a question or task in the \"User\" turn. \n",
209 | "\n",
210 | "Structurally, system prompts exist separately from the list of `user` & `assistant` messages, and thus belong in a separate `system` parameter (take a look at the structure of the `get_completion` helper function in the [Setup](#setup) section of the notebook). \n",
211 | "\n",
212 | "Within this tutorial, wherever we might utilize a system prompt, we have provided you a `system` field in your completions function. Should you not want to use a system prompt, simply set the `SYSTEM_PROMPT` variable to an empty string."
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "#### System Prompt Example"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {},
226 | "outputs": [],
227 | "source": [
228 | "# System prompt\n",
229 | "SYSTEM_PROMPT = \"Your answer should always be a series of critical thinking questions that further the conversation (do not provide answers to your questions). Do not actually answer the user question.\"\n",
230 | "\n",
231 | "# Prompt\n",
232 | "PROMPT = \"Why is the sky blue?\"\n",
233 | "\n",
234 | "# Print Claude's response\n",
235 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "metadata": {},
241 | "source": [
242 | "Why use a system prompt? A **well-written system prompt can improve Claude's performance** in a variety of ways, such as increasing Claude's ability to follow rules and instructions. For more information, visit our documentation on [how to use system prompts](https://docs.anthropic.com/claude/docs/how-to-use-system-prompts) with Claude.\n",
243 | "\n",
244 | "Now we'll dive into some exercises. If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
245 | ]
246 | },
247 | {
248 | "cell_type": "markdown",
249 | "metadata": {},
250 | "source": [
251 | "---\n",
252 | "\n",
253 | "## Exercises\n",
254 | "- [Exercise 1.1 - Counting to Three](#exercise-11---counting-to-three)\n",
255 | "- [Exercise 1.2 - System Prompt](#exercise-12---system-prompt)"
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "metadata": {},
261 | "source": [
262 | "### Exercise 1.1 - Counting to Three\n",
263 | "Using proper `user` / `assistant` formatting, edit the `PROMPT` below to get Claude to **count to three.** The output will also indicate whether your solution is correct."
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": null,
269 | "metadata": {},
270 | "outputs": [],
271 | "source": [
272 | "# Prompt - this is the only field you should change\n",
273 | "PROMPT = \"[Replace this text]\"\n",
274 | "\n",
275 | "# Get Claude's response\n",
276 | "response = get_completion(PROMPT)\n",
277 | "\n",
278 | "# Function to grade exercise correctness\n",
279 | "def grade_exercise(text):\n",
280 | " pattern = re.compile(r'^(?=.*1)(?=.*2)(?=.*3).*$', re.DOTALL)\n",
281 | " return bool(pattern.match(text))\n",
282 | "\n",
283 | "# Print Claude's response and the corresponding grade\n",
284 | "print(response)\n",
285 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
286 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
287 | ]
288 | },
289 | {
290 | "cell_type": "markdown",
291 | "metadata": {},
292 | "source": [
293 | "❓ If you want a hint, run the cell below!"
294 | ]
295 | },
296 | {
297 | "cell_type": "code",
298 | "execution_count": null,
299 | "metadata": {},
300 | "outputs": [],
301 | "source": [
302 | "print(hints.exercise_1_1_hint)"
303 | ]
304 | },
305 | {
306 | "cell_type": "markdown",
307 | "metadata": {},
308 | "source": [
309 | "### Exercise 1.2 - System Prompt\n",
310 | "\n",
311 | "Modify the `SYSTEM_PROMPT` to make Claude respond like it's a 3 year old child."
312 | ]
313 | },
314 | {
315 | "cell_type": "code",
316 | "execution_count": null,
317 | "metadata": {},
318 | "outputs": [],
319 | "source": [
320 | "# System prompt - this is the only field you should change\n",
321 | "SYSTEM_PROMPT = \"[Replace this text]\"\n",
322 | "\n",
323 | "# Prompt\n",
324 | "PROMPT = \"How big is the sky?\"\n",
325 | "\n",
326 | "# Get Claude's response\n",
327 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
328 | "\n",
329 | "# Function to grade exercise correctness\n",
330 | "def grade_exercise(text):\n",
331 | " return bool(re.search(r\"giggles\", text) or re.search(r\"soo\", text))\n",
332 | "\n",
333 | "# Print Claude's response and the corresponding grade\n",
334 | "print(response)\n",
335 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
336 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
337 | ]
338 | },
339 | {
340 | "cell_type": "markdown",
341 | "metadata": {},
342 | "source": [
343 | "❓ If you want a hint, run the cell below!"
344 | ]
345 | },
346 | {
347 | "cell_type": "code",
348 | "execution_count": null,
349 | "metadata": {},
350 | "outputs": [],
351 | "source": [
352 | "print(hints.exercise_1_2_hint)"
353 | ]
354 | },
355 | {
356 | "cell_type": "markdown",
357 | "metadata": {},
358 | "source": [
359 | "### Congrats!\n",
360 | "\n",
361 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
362 | ]
363 | },
364 | {
365 | "cell_type": "markdown",
366 | "metadata": {},
367 | "source": [
368 | "---\n",
369 | "\n",
370 | "## Example Playground\n",
371 | "\n",
372 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how it may affect Claude's responses."
373 | ]
374 | },
375 | {
376 | "cell_type": "code",
377 | "execution_count": null,
378 | "metadata": {},
379 | "outputs": [],
380 | "source": [
381 | "# Prompt\n",
382 | "PROMPT = \"Hi Claude, how are you?\"\n",
383 | "\n",
384 | "# Print Claude's response\n",
385 | "print(get_completion(PROMPT))"
386 | ]
387 | },
388 | {
389 | "cell_type": "code",
390 | "execution_count": null,
391 | "metadata": {},
392 | "outputs": [],
393 | "source": [
394 | "# Prompt\n",
395 | "PROMPT = \"Can you tell me the color of the ocean?\"\n",
396 | "\n",
397 | "# Print Claude's response\n",
398 | "print(get_completion(PROMPT))"
399 | ]
400 | },
401 | {
402 | "cell_type": "code",
403 | "execution_count": null,
404 | "metadata": {},
405 | "outputs": [],
406 | "source": [
407 | "# Prompt\n",
408 | "PROMPT = \"What year was Celine Dion born in?\"\n",
409 | "\n",
410 | "# Print Claude's response\n",
411 | "print(get_completion(PROMPT))"
412 | ]
413 | },
414 | {
415 | "cell_type": "code",
416 | "execution_count": null,
417 | "metadata": {},
418 | "outputs": [],
419 | "source": [
420 | "# Get Claude's response\n",
421 | "response = client.messages.create(\n",
422 | " model=MODEL_NAME,\n",
423 | " max_tokens=2000,\n",
424 | " temperature=0.0,\n",
425 | " messages=[\n",
426 | " {\"Hi Claude, how are you?\"}\n",
427 | " ]\n",
428 | " )\n",
429 | "\n",
430 | "# Print Claude's response\n",
431 | "print(response[0].text)"
432 | ]
433 | },
434 | {
435 | "cell_type": "code",
436 | "execution_count": null,
437 | "metadata": {},
438 | "outputs": [],
439 | "source": [
440 | "# Get Claude's response\n",
441 | "response = client.messages.create(\n",
442 | " model=MODEL_NAME,\n",
443 | " max_tokens=2000,\n",
444 | " temperature=0.0,\n",
445 | " messages=[\n",
446 | " {\"role\": \"user\", \"content\": \"What year was Celine Dion born in?\"},\n",
447 | " {\"role\": \"user\", \"content\": \"Also, can you tell me some other facts about her?\"}\n",
448 | " ]\n",
449 | " )\n",
450 | "\n",
451 | "# Print Claude's response\n",
452 | "print(response[0].text)"
453 | ]
454 | },
455 | {
456 | "cell_type": "code",
457 | "execution_count": null,
458 | "metadata": {},
459 | "outputs": [],
460 | "source": [
461 | "# System prompt\n",
462 | "SYSTEM_PROMPT = \"Your answer should always be a series of critical thinking questions that further the conversation (do not provide answers to your questions). Do not actually answer the user question.\"\n",
463 | "\n",
464 | "# Prompt\n",
465 | "PROMPT = \"Why is the sky blue?\"\n",
466 | "\n",
467 | "# Print Claude's response\n",
468 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
469 | ]
470 | }
471 | ],
472 | "metadata": {
473 | "kernelspec": {
474 | "display_name": "Python 3",
475 | "language": "python",
476 | "name": "python3"
477 | },
478 | "language_info": {
479 | "codemirror_mode": {
480 | "name": "ipython",
481 | "version": 3
482 | },
483 | "file_extension": ".py",
484 | "mimetype": "text/x-python",
485 | "name": "python",
486 | "nbconvert_exporter": "python",
487 | "pygments_lexer": "ipython3",
488 | "version": "3.11.5"
489 | }
490 | },
491 | "nbformat": 4,
492 | "nbformat_minor": 2
493 | }
494 |
--------------------------------------------------------------------------------
/AmazonBedrock/boto3/01_Basic_Prompt_Structure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Chapter 1: Basic Prompt Structure\n",
8 | "\n",
9 | "- [Lesson](#lesson)\n",
10 | "- [Exercises](#exercises)\n",
11 | "- [Example Playground](#example-playground)\n",
12 | "\n",
13 | "## Setup\n",
14 | "\n",
15 | "Run the following setup cell to load your API key and establish the `get_completion` helper function."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "# Import python's built-in regular expression library\n",
25 | "import re\n",
26 | "import boto3\n",
27 | "import json\n",
28 | "\n",
29 | "# Import the hints module from the utils package\n",
30 | "import os\n",
31 | "import sys\n",
32 | "module_path = \"..\"\n",
33 | "sys.path.append(os.path.abspath(module_path))\n",
34 | "from utils import hints\n",
35 | "\n",
36 | "# Retrieve the MODEL_NAME variable from the IPython store\n",
37 | "%store -r MODEL_NAME\n",
38 | "%store -r AWS_REGION\n",
39 | "\n",
40 | "client = boto3.client('bedrock-runtime',region_name=AWS_REGION)\n",
41 | "\n",
42 | "def get_completion(prompt,system=''):\n",
43 | " body = json.dumps(\n",
44 | " {\n",
45 | " \"anthropic_version\": '',\n",
46 | " \"max_tokens\": 2000,\n",
47 | " \"messages\": [{\"role\": \"user\", \"content\": prompt}],\n",
48 | " \"temperature\": 0.0,\n",
49 | " \"top_p\": 1,\n",
50 | " \"system\": system\n",
51 | " }\n",
52 | " )\n",
53 | " response = client.invoke_model(body=body, modelId=MODEL_NAME)\n",
54 | " response_body = json.loads(response.get('body').read())\n",
55 | "\n",
56 | " return response_body.get('content')[0].get('text')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "---\n",
64 | "\n",
65 | "## Lesson\n",
66 | "\n",
67 | "Anthropic offers two APIs, the legacy [Text Completions API](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-text-completion.html) and the current [Messages API](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html). For this tutorial, we will be exclusively using the Messages API.\n",
68 | "\n",
69 | "At minimum, a call to Claude using the Messages API requires the following parameters:\n",
70 | "- `model`: the [API model name](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns) of the model that you intend to call\n",
71 | "\n",
72 | "- `max_tokens`: the maximum number of tokens to generate before stopping. Note that Claude may stop before reaching this maximum. This parameter only specifies the absolute maximum number of tokens to generate. Furthermore, this is a *hard* stop, meaning that it may cause Claude to stop generating mid-word or mid-sentence.\n",
73 | "\n",
74 | "- `messages`: an array of input messages. Our models are trained to operate on alternating `user` and `assistant` conversational turns. When creating a new `Message`, you specify the prior conversational turns with the messages parameter, and the model then generates the next `Message` in the conversation.\n",
75 | " - Each input message must be an object with a `role` and `content`. You can specify a single `user`-role message, or you can include multiple `user` and `assistant` messages (they must alternate, if so). The first message must always use the user `role`.\n",
76 | "\n",
77 | "There are also optional parameters, such as:\n",
78 | "- `system`: the system prompt - more on this below.\n",
79 | " \n",
80 | "- `temperature`: the degree of variability in Claude's response. For these lessons and exercises, we have set `temperature` to 0.\n",
81 | "\n",
82 | "For a complete list of all API parameters, visit our [API documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-claude.html)."
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {},
88 | "source": [
89 | "### Examples\n",
90 | "\n",
91 | "Let's take a look at how Claude responds to some correctly-formatted prompts. For each of the following cells, run the cell (`shift+enter`), and Claude's response will appear below the block."
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "metadata": {},
98 | "outputs": [],
99 | "source": [
100 | "# Prompt\n",
101 | "PROMPT = \"Hi Claude, how are you?\"\n",
102 | "\n",
103 | "# Print Claude's response\n",
104 | "print(get_completion(PROMPT))"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "metadata": {},
111 | "outputs": [],
112 | "source": [
113 | "# Prompt\n",
114 | "PROMPT = \"Can you tell me the color of the ocean?\"\n",
115 | "\n",
116 | "# Print Claude's response\n",
117 | "print(get_completion(PROMPT))"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": null,
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "# Prompt\n",
127 | "PROMPT = \"What year was Celine Dion born in?\"\n",
128 | "\n",
129 | "# Print Claude's response\n",
130 | "print(get_completion(PROMPT))"
131 | ]
132 | },
133 | {
134 | "cell_type": "markdown",
135 | "metadata": {},
136 | "source": [
137 | "Now let's take a look at some prompts that do not include the correct Messages API formatting. For these malformatted prompts, the Messages API returns an error.\n",
138 | "\n",
139 | "First, we have an example of a Messages API call that lacks `role` and `content` fields in the `messages` array."
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {},
145 | "source": [
146 | "> ⚠️ **Warning:** Due to the incorrect formatting of the messages parameter in the prompt, the following cell will return an error. This is expected behavior."
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "# Get Claude's response\n",
156 | "body = json.dumps(\n",
157 | " {\n",
158 | " \"anthropic_version\": '',\n",
159 | " \"max_tokens\": 2000,\n",
160 | " \"messages\": [{\"Hi Claude, how are you?\"}],\n",
161 | " \"temperature\": 0.0,\n",
162 | " \"top_p\": 1,\n",
163 | " \"system\": ''\n",
164 | " }\n",
165 | ")\n",
166 | "\n",
167 | "response = client.invoke_model(body=body, modelId=MODEL_NAME)\n",
168 | "\n",
169 | "# Print Claude's response\n",
170 | "print(response[0].text)"
171 | ]
172 | },
173 | {
174 | "cell_type": "markdown",
175 | "metadata": {},
176 | "source": [
177 | "Here's a prompt that fails to alternate between the `user` and `assistant` roles."
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "> ⚠️ **Warning:** Due to the lack of alternation between `user` and `assistant` roles, Claude will return an error message. This is expected behavior."
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "# Get Claude's response\n",
194 | "body = json.dumps(\n",
195 | " {\n",
196 | " \"anthropic_version\": '',\n",
197 | " \"max_tokens\": 2000,\n",
198 | " \"messages\": [\n",
199 | " {\"role\": \"user\", \"content\": \"What year was Celine Dion born in?\"},\n",
200 | " {\"role\": \"user\", \"content\": \"Also, can you tell me some other facts about her?\"}\n",
201 | " ],\n",
202 | " \"temperature\": 0.0,\n",
203 | " \"top_p\": 1,\n",
204 | " \"system\": ''\n",
205 | " }\n",
206 | ")\n",
207 | "\n",
208 | "response = client.invoke_model(body=body, modelId=MODEL_NAME)\n",
209 | "\n",
210 | "# Print Claude's response\n",
211 | "print(response[0].text)"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "metadata": {},
217 | "source": [
218 | "`user` and `assistant` messages **MUST alternate**, and messages **MUST start with a `user` turn**. You can have multiple `user` & `assistant` pairs in a prompt (as if simulating a multi-turn conversation). You can also put words into a terminal `assistant` message for Claude to continue from where you left off (more on that in later chapters).\n",
219 | "\n",
220 | "#### System Prompts\n",
221 | "\n",
222 | "You can also use **system prompts**. A system prompt is a way to **provide context, instructions, and guidelines to Claude** before presenting it with a question or task in the \"User\" turn. \n",
223 | "\n",
224 | "Structurally, system prompts exist separately from the list of `user` & `assistant` messages, and thus belong in a separate `system` parameter (take a look at the structure of the `get_completion` helper function in the [Setup](#setup) section of the notebook). \n",
225 | "\n",
226 | "Within this tutorial, wherever we might utilize a system prompt, we have provided you a `system` field in your completions function. Should you not want to use a system prompt, simply set the `SYSTEM_PROMPT` variable to an empty string."
227 | ]
228 | },
229 | {
230 | "cell_type": "markdown",
231 | "metadata": {},
232 | "source": [
233 | "#### System Prompt Example"
234 | ]
235 | },
236 | {
237 | "cell_type": "code",
238 | "execution_count": null,
239 | "metadata": {},
240 | "outputs": [],
241 | "source": [
242 | "# System prompt\n",
243 | "SYSTEM_PROMPT = \"Your answer should always be a series of critical thinking questions that further the conversation (do not provide answers to your questions). Do not actually answer the user question.\"\n",
244 | "\n",
245 | "# Prompt\n",
246 | "PROMPT = \"Why is the sky blue?\"\n",
247 | "\n",
248 | "# Print Claude's response\n",
249 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
250 | ]
251 | },
252 | {
253 | "cell_type": "markdown",
254 | "metadata": {},
255 | "source": [
256 | "Why use a system prompt? A **well-written system prompt can improve Claude's performance** in a variety of ways, such as increasing Claude's ability to follow rules and instructions. For more information, visit our documentation on [how to use system prompts](https://docs.anthropic.com/claude/docs/how-to-use-system-prompts) with Claude.\n",
257 | "\n",
258 | "Now we'll dive into some exercises. If you would like to experiment with the lesson prompts without changing any content above, scroll all the way to the bottom of the lesson notebook to visit the [**Example Playground**](#example-playground)."
259 | ]
260 | },
261 | {
262 | "cell_type": "markdown",
263 | "metadata": {},
264 | "source": [
265 | "---\n",
266 | "\n",
267 | "## Exercises\n",
268 | "- [Exercise 1.1 - Counting to Three](#exercise-11---counting-to-three)\n",
269 | "- [Exercise 1.2 - System Prompt](#exercise-12---system-prompt)"
270 | ]
271 | },
272 | {
273 | "cell_type": "markdown",
274 | "metadata": {},
275 | "source": [
276 | "### Exercise 1.1 - Counting to Three\n",
277 | "Using proper `user` / `assistant` formatting, edit the `PROMPT` below to get Claude to **count to three.** The output will also indicate whether your solution is correct."
278 | ]
279 | },
280 | {
281 | "cell_type": "code",
282 | "execution_count": null,
283 | "metadata": {},
284 | "outputs": [],
285 | "source": [
286 | "# Prompt - this is the only field you should change\n",
287 | "PROMPT = \"[Replace this text]\"\n",
288 | "\n",
289 | "# Get Claude's response\n",
290 | "response = get_completion(PROMPT)\n",
291 | "\n",
292 | "# Function to grade exercise correctness\n",
293 | "def grade_exercise(text):\n",
294 | " pattern = re.compile(r'^(?=.*1)(?=.*2)(?=.*3).*$', re.DOTALL)\n",
295 | " return bool(pattern.match(text))\n",
296 | "\n",
297 | "# Print Claude's response and the corresponding grade\n",
298 | "print(response)\n",
299 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
300 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
301 | ]
302 | },
303 | {
304 | "cell_type": "markdown",
305 | "metadata": {},
306 | "source": [
307 | "❓ If you want a hint, run the cell below!"
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "execution_count": null,
313 | "metadata": {},
314 | "outputs": [],
315 | "source": [
316 | "print(hints.exercise_1_1_hint)"
317 | ]
318 | },
319 | {
320 | "cell_type": "markdown",
321 | "metadata": {},
322 | "source": [
323 | "### Exercise 1.2 - System Prompt\n",
324 | "\n",
325 | "Modify the `SYSTEM_PROMPT` to make Claude respond like it's a 3 year old child."
326 | ]
327 | },
328 | {
329 | "cell_type": "code",
330 | "execution_count": null,
331 | "metadata": {},
332 | "outputs": [],
333 | "source": [
334 | "# System prompt - this is the only field you should change\n",
335 | "SYSTEM_PROMPT = \"[Replace this text]\"\n",
336 | "\n",
337 | "# Prompt\n",
338 | "PROMPT = \"How big is the sky?\"\n",
339 | "\n",
340 | "# Get Claude's response\n",
341 | "response = get_completion(PROMPT, SYSTEM_PROMPT)\n",
342 | "\n",
343 | "# Function to grade exercise correctness\n",
344 | "def grade_exercise(text):\n",
345 | " return bool(re.search(r\"giggles\", text) or re.search(r\"soo\", text))\n",
346 | "\n",
347 | "# Print Claude's response and the corresponding grade\n",
348 | "print(response)\n",
349 | "print(\"\\n--------------------------- GRADING ---------------------------\")\n",
350 | "print(\"This exercise has been correctly solved:\", grade_exercise(response))"
351 | ]
352 | },
353 | {
354 | "cell_type": "markdown",
355 | "metadata": {},
356 | "source": [
357 | "❓ If you want a hint, run the cell below!"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": null,
363 | "metadata": {},
364 | "outputs": [],
365 | "source": [
366 | "print(hints.exercise_1_2_hint)"
367 | ]
368 | },
369 | {
370 | "cell_type": "markdown",
371 | "metadata": {},
372 | "source": [
373 | "### Congrats!\n",
374 | "\n",
375 | "If you've solved all exercises up until this point, you're ready to move to the next chapter. Happy prompting!"
376 | ]
377 | },
378 | {
379 | "cell_type": "markdown",
380 | "metadata": {},
381 | "source": [
382 | "---\n",
383 | "\n",
384 | "## Example Playground\n",
385 | "\n",
386 | "This is an area for you to experiment freely with the prompt examples shown in this lesson and tweak prompts to see how it may affect Claude's responses."
387 | ]
388 | },
389 | {
390 | "cell_type": "code",
391 | "execution_count": null,
392 | "metadata": {},
393 | "outputs": [],
394 | "source": [
395 | "# Prompt\n",
396 | "PROMPT = \"Hi Claude, how are you?\"\n",
397 | "\n",
398 | "# Print Claude's response\n",
399 | "print(get_completion(PROMPT))"
400 | ]
401 | },
402 | {
403 | "cell_type": "code",
404 | "execution_count": null,
405 | "metadata": {},
406 | "outputs": [],
407 | "source": [
408 | "# Prompt\n",
409 | "PROMPT = \"Can you tell me the color of the ocean?\"\n",
410 | "\n",
411 | "# Print Claude's response\n",
412 | "print(get_completion(PROMPT))"
413 | ]
414 | },
415 | {
416 | "cell_type": "code",
417 | "execution_count": null,
418 | "metadata": {},
419 | "outputs": [],
420 | "source": [
421 | "# Prompt\n",
422 | "PROMPT = \"What year was Celine Dion born in?\"\n",
423 | "\n",
424 | "# Print Claude's response\n",
425 | "print(get_completion(PROMPT))"
426 | ]
427 | },
428 | {
429 | "cell_type": "code",
430 | "execution_count": null,
431 | "metadata": {},
432 | "outputs": [],
433 | "source": [
434 | "# Get Claude's response\n",
435 | "body = json.dumps(\n",
436 | " {\n",
437 | " \"anthropic_version\": '',\n",
438 | " \"max_tokens\": 2000,\n",
439 | " \"messages\": [{\"Hi Claude, how are you?\"}],\n",
440 | " \"temperature\": 0.0,\n",
441 | " \"top_p\": 1,\n",
442 | " \"system\": ''\n",
443 | " }\n",
444 | ")\n",
445 | "\n",
446 | "response = client.invoke_model(body=body, modelId=MODEL_NAME)\n",
447 | "\n",
448 | "# Print Claude's response\n",
449 | "print(response[0].text)"
450 | ]
451 | },
452 | {
453 | "cell_type": "code",
454 | "execution_count": null,
455 | "metadata": {},
456 | "outputs": [],
457 | "source": [
458 | "# Get Claude's response\n",
459 | "body = json.dumps(\n",
460 | " {\n",
461 | " \"anthropic_version\": '',\n",
462 | " \"max_tokens\": 2000,\n",
463 | " \"messages\": [\n",
464 | " {\"role\": \"user\", \"content\": \"What year was Celine Dion born in?\"},\n",
465 | " {\"role\": \"user\", \"content\": \"Also, can you tell me some other facts about her?\"}\n",
466 | " ],\n",
467 | " \"temperature\": 0.0,\n",
468 | " \"top_p\": 1,\n",
469 | " \"system\": ''\n",
470 | " }\n",
471 | ")\n",
472 | "\n",
473 | "response = client.invoke_model(body=body, modelId=MODEL_NAME)\n",
474 | "\n",
475 | "# Print Claude's response\n",
476 | "print(response[0].text)"
477 | ]
478 | },
479 | {
480 | "cell_type": "code",
481 | "execution_count": null,
482 | "metadata": {},
483 | "outputs": [],
484 | "source": [
485 | "# System prompt\n",
486 | "SYSTEM_PROMPT = \"Your answer should always be a series of critical thinking questions that further the conversation (do not provide answers to your questions). Do not actually answer the user question.\"\n",
487 | "\n",
488 | "# Prompt\n",
489 | "PROMPT = \"Why is the sky blue?\"\n",
490 | "\n",
491 | "# Print Claude's response\n",
492 | "print(get_completion(PROMPT, SYSTEM_PROMPT))"
493 | ]
494 | }
495 | ],
496 | "metadata": {
497 | "language_info": {
498 | "name": "python"
499 | }
500 | },
501 | "nbformat": 4,
502 | "nbformat_minor": 2
503 | }
504 |
--------------------------------------------------------------------------------