├── LearnFromTheExperts.jpg
├── medium-PromptLikeAPro.png
├── README.md
└── LLMdoTheRightThing_Vicuna_7b.ipynb
/LearnFromTheExperts.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/LLMdoTheRightThing/main/LearnFromTheExperts.jpg
--------------------------------------------------------------------------------
/medium-PromptLikeAPro.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/LLMdoTheRightThing/main/medium-PromptLikeAPro.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # LLMdoTheRightThing
4 | Repo of the code from the Medium article "LLM do the right thing!"
5 |
6 | ---
7 |
8 | [Open the Notebook in Google Colab](https://colab.research.google.com/github/fabiomatricardi/LLMdoTheRightThing/blob/main/LLMdoTheRightThing_Vicuna_7b.ipynb)
9 |
--------------------------------------------------------------------------------
/LLMdoTheRightThing_Vicuna_7b.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "provenance": [],
7 | "authorship_tag": "ABX9TyPYDD7N9gAltyFgZBpvgpva",
8 | "include_colab_link": true
9 | },
10 | "kernelspec": {
11 | "name": "python3",
12 | "display_name": "Python 3"
13 | },
14 | "language_info": {
15 | "name": "python"
16 | },
17 | "widgets": {
18 | "application/vnd.jupyter.widget-state+json": {
19 | "c63e8c696eac4028aeb52d9a9f623e0b": {
20 | "model_module": "@jupyter-widgets/controls",
21 | "model_name": "ButtonModel",
22 | "model_module_version": "1.5.0",
23 | "state": {
24 | "_dom_classes": [],
25 | "_model_module": "@jupyter-widgets/controls",
26 | "_model_module_version": "1.5.0",
27 | "_model_name": "ButtonModel",
28 | "_view_count": null,
29 | "_view_module": "@jupyter-widgets/controls",
30 | "_view_module_version": "1.5.0",
31 | "_view_name": "ButtonView",
32 | "button_style": "warning",
33 | "description": "Restart Runtime",
34 | "disabled": false,
35 | "icon": "check",
36 | "layout": "IPY_MODEL_3772a44cf82c4e0bbc7089cda1a07703",
37 | "style": "IPY_MODEL_4c2149f8096247708947ab80ece3f939",
38 | "tooltip": "Click me"
39 | }
40 | },
41 | "3772a44cf82c4e0bbc7089cda1a07703": {
42 | "model_module": "@jupyter-widgets/base",
43 | "model_name": "LayoutModel",
44 | "model_module_version": "1.2.0",
45 | "state": {
46 | "_model_module": "@jupyter-widgets/base",
47 | "_model_module_version": "1.2.0",
48 | "_model_name": "LayoutModel",
49 | "_view_count": null,
50 | "_view_module": "@jupyter-widgets/base",
51 | "_view_module_version": "1.2.0",
52 | "_view_name": "LayoutView",
53 | "align_content": null,
54 | "align_items": null,
55 | "align_self": null,
56 | "border": null,
57 | "bottom": null,
58 | "display": null,
59 | "flex": null,
60 | "flex_flow": null,
61 | "grid_area": null,
62 | "grid_auto_columns": null,
63 | "grid_auto_flow": null,
64 | "grid_auto_rows": null,
65 | "grid_column": null,
66 | "grid_gap": null,
67 | "grid_row": null,
68 | "grid_template_areas": null,
69 | "grid_template_columns": null,
70 | "grid_template_rows": null,
71 | "height": null,
72 | "justify_content": null,
73 | "justify_items": null,
74 | "left": null,
75 | "margin": null,
76 | "max_height": null,
77 | "max_width": null,
78 | "min_height": null,
79 | "min_width": null,
80 | "object_fit": null,
81 | "object_position": null,
82 | "order": null,
83 | "overflow": null,
84 | "overflow_x": null,
85 | "overflow_y": null,
86 | "padding": null,
87 | "right": null,
88 | "top": null,
89 | "visibility": null,
90 | "width": null
91 | }
92 | },
93 | "4c2149f8096247708947ab80ece3f939": {
94 | "model_module": "@jupyter-widgets/controls",
95 | "model_name": "ButtonStyleModel",
96 | "model_module_version": "1.5.0",
97 | "state": {
98 | "_model_module": "@jupyter-widgets/controls",
99 | "_model_module_version": "1.5.0",
100 | "_model_name": "ButtonStyleModel",
101 | "_view_count": null,
102 | "_view_module": "@jupyter-widgets/base",
103 | "_view_module_version": "1.2.0",
104 | "_view_name": "StyleView",
105 | "button_color": null,
106 | "font_weight": ""
107 | }
108 | }
109 | }
110 | }
111 | },
112 | "cells": [
113 | {
114 | "cell_type": "markdown",
115 | "metadata": {
116 | "id": "view-in-github",
117 | "colab_type": "text"
118 | },
119 | "source": [
120 | ""
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {
127 | "id": "R4IhZMjls96U"
128 | },
129 | "outputs": [],
130 | "source": [
131 | "%%capture\n",
132 | "!pip install 'ctransformers>=0.2.24'\n",
133 | "!pip install langchain\n",
134 | "!pip install rich\n",
135 | "!pip install transformers"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "source": [
141 | "!wget https://huggingface.co/TheBloke/vicuna-7B-v1.5-GGUF/resolve/main/vicuna-7b-v1.5.Q5_K_M.gguf"
142 | ],
143 | "metadata": {
144 | "id": "kZ8nmOwCtHZc",
145 | "colab": {
146 | "base_uri": "https://localhost:8080/"
147 | },
148 | "outputId": "5c7c0ceb-57ea-49ab-a0aa-9ce7147b2004"
149 | },
150 | "execution_count": null,
151 | "outputs": [
152 | {
153 | "output_type": "stream",
154 | "name": "stdout",
155 | "text": [
156 | "--2023-10-10 23:59:18-- https://huggingface.co/TheBloke/vicuna-7B-v1.5-GGUF/resolve/main/vicuna-7b-v1.5.Q5_K_M.gguf\n",
157 | "Resolving huggingface.co (huggingface.co)... 18.154.227.7, 18.154.227.69, 18.154.227.87, ...\n",
158 | "Connecting to huggingface.co (huggingface.co)|18.154.227.7|:443... connected.\n",
159 | "HTTP request sent, awaiting response... 302 Found\n",
160 | "Location: https://cdn-lfs.huggingface.co/repos/8b/8a/8b8a084e506faaabaff764a4316992f50a72b536795224d1dd3edce626e8c695/98377262416396f9192a695e385d7e7389f8ee5979073c2d914e7e54b11ad24a?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27vicuna-7b-v1.5.Q5_K_M.gguf%3B+filename%3D%22vicuna-7b-v1.5.Q5_K_M.gguf%22%3B&Expires=1697241558&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTY5NzI0MTU1OH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy84Yi84YS84YjhhMDg0ZTUwNmZhYWFiYWZmNzY0YTQzMTY5OTJmNTBhNzJiNTM2Nzk1MjI0ZDFkZDNlZGNlNjI2ZThjNjk1Lzk4Mzc3MjYyNDE2Mzk2ZjkxOTJhNjk1ZTM4NWQ3ZTczODlmOGVlNTk3OTA3M2MyZDkxNGU3ZTU0YjExYWQyNGE%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=G1XhV52US-ZUYRnfMsorm-5Xx%7ETBz9wHBGEwH-1or-KngwyKZIIivnynxuO7bvveuVYGv8mD19pXyO2tM-UKiZ78oDctxpKdE462iHM94TYhzEvgVphZOx2qc7waUXGRliz0LLCO-I72rMlHei0F0C5XWnmKVCANLK8pcWBn4TH%7ESF-k%7EkbJr7L7KDAPGy39ctjgf6FfsO9s7JfkJgs2Awngys1dLlosnhq3FV7feFrq9A0vXXNmzA84FKUWwhirr-x%7EdlIH%7EHj8VRrIOGsIyn9e6rOPRqiWTCk9plWTwDAlWCHJmUjWH9GhbNXntlvxJyyyj-i6hpRIsztlPfoh7A__&Key-Pair-Id=KVTP0A1DKRTAX [following]\n",
161 | "--2023-10-10 23:59:18-- https://cdn-lfs.huggingface.co/repos/8b/8a/8b8a084e506faaabaff764a4316992f50a72b536795224d1dd3edce626e8c695/98377262416396f9192a695e385d7e7389f8ee5979073c2d914e7e54b11ad24a?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27vicuna-7b-v1.5.Q5_K_M.gguf%3B+filename%3D%22vicuna-7b-v1.5.Q5_K_M.gguf%22%3B&Expires=1697241558&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTY5NzI0MTU1OH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy84Yi84YS84YjhhMDg0ZTUwNmZhYWFiYWZmNzY0YTQzMTY5OTJmNTBhNzJiNTM2Nzk1MjI0ZDFkZDNlZGNlNjI2ZThjNjk1Lzk4Mzc3MjYyNDE2Mzk2ZjkxOTJhNjk1ZTM4NWQ3ZTczODlmOGVlNTk3OTA3M2MyZDkxNGU3ZTU0YjExYWQyNGE%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=G1XhV52US-ZUYRnfMsorm-5Xx%7ETBz9wHBGEwH-1or-KngwyKZIIivnynxuO7bvveuVYGv8mD19pXyO2tM-UKiZ78oDctxpKdE462iHM94TYhzEvgVphZOx2qc7waUXGRliz0LLCO-I72rMlHei0F0C5XWnmKVCANLK8pcWBn4TH%7ESF-k%7EkbJr7L7KDAPGy39ctjgf6FfsO9s7JfkJgs2Awngys1dLlosnhq3FV7feFrq9A0vXXNmzA84FKUWwhirr-x%7EdlIH%7EHj8VRrIOGsIyn9e6rOPRqiWTCk9plWTwDAlWCHJmUjWH9GhbNXntlvxJyyyj-i6hpRIsztlPfoh7A__&Key-Pair-Id=KVTP0A1DKRTAX\n",
162 | "Resolving cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)... 108.138.64.36, 108.138.64.49, 108.138.64.111, ...\n",
163 | "Connecting to cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)|108.138.64.36|:443... connected.\n",
164 | "HTTP request sent, awaiting response... 200 OK\n",
165 | "Length: 4783156928 (4.5G) [binary/octet-stream]\n",
166 | "Saving to: ‘vicuna-7b-v1.5.Q5_K_M.gguf’\n",
167 | "\n",
168 | "vicuna-7b-v1.5.Q5_K 100%[===================>] 4.45G 46.3MB/s in 1m 51s \n",
169 | "\n",
170 | "2023-10-11 00:01:09 (41.2 MB/s) - ‘vicuna-7b-v1.5.Q5_K_M.gguf’ saved [4783156928/4783156928]\n",
171 | "\n"
172 | ]
173 | }
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "source": [
179 | "#@title Restart Runtime {display-mode: \"form\"}\n",
180 | "import ipywidgets as widgets\n",
181 | "def restart(b):\n",
182 | " exit()\n",
183 | "\n",
184 | "button2 = widgets.Button(\n",
185 | " description='Restart Runtime',\n",
186 | " disabled=False,\n",
187 | " button_style='warning', # 'success', 'info', 'warning', 'danger' or ''\n",
188 | " tooltip='Click me',\n",
189 | " icon='check' # (FontAwesome names without the `fa-` prefix)\n",
190 | ")\n",
191 | "button2.on_click(restart)\n",
192 | "button2\n"
193 | ],
194 | "metadata": {
195 | "colab": {
196 | "base_uri": "https://localhost:8080/",
197 | "height": 49,
198 | "referenced_widgets": [
199 | "c63e8c696eac4028aeb52d9a9f623e0b",
200 | "3772a44cf82c4e0bbc7089cda1a07703",
201 | "4c2149f8096247708947ab80ece3f939"
202 | ]
203 | },
204 | "outputId": "5cbe5040-603b-4789-a0c3-b1f63fba2438",
205 | "id": "-hsc5IfVET8S"
206 | },
207 | "execution_count": null,
208 | "outputs": [
209 | {
210 | "output_type": "display_data",
211 | "data": {
212 | "text/plain": [
213 | "Button(button_style='warning', description='Restart Runtime', icon='check', style=ButtonStyle(), tooltip='Clic…"
214 | ],
215 | "application/vnd.jupyter.widget-view+json": {
216 | "version_major": 2,
217 | "version_minor": 0,
218 | "model_id": "c63e8c696eac4028aeb52d9a9f623e0b"
219 | }
220 | },
221 | "metadata": {}
222 | }
223 | ]
224 | },
225 | {
226 | "cell_type": "markdown",
227 | "source": [
228 | "## Main routines"
229 | ],
230 | "metadata": {
231 | "id": "2F_gN7zvP0ju"
232 | }
233 | },
234 | {
235 | "cell_type": "code",
236 | "source": [
237 | "from rich import print\n",
238 | "from tqdm.rich import trange, tqdm\n",
239 | "from rich import console\n",
240 | "from rich.panel import Panel\n",
241 | "from rich.text import Text\n",
242 | "import datetime\n",
243 | "import warnings\n",
244 | "import textwrap\n",
245 | "from ctransformers import AutoModelForCausalLM, AutoConfig, Config\n",
246 | "import datetime\n",
247 | "warnings.filterwarnings(action='ignore')\n",
248 | "from rich.console import Console\n",
249 | "from rich.markdown import Markdown\n",
250 | "console = Console(width=80)"
251 | ],
252 | "metadata": {
253 | "id": "ryXI1qdEzk64"
254 | },
255 | "execution_count": null,
256 | "outputs": []
257 | },
258 | {
259 | "cell_type": "code",
260 | "source": [
261 | "conf = AutoConfig(Config(temperature=0.8, repetition_penalty=1.1, batch_size=52,\n",
262 | " max_new_tokens=1024, context_length=1024))\n",
263 | "llm = AutoModelForCausalLM.from_pretrained(\"/content/vicuna-7b-v1.5.Q5_K_M.gguf\",\n",
264 | " model_type=\"llama\", config = conf)"
265 | ],
266 | "metadata": {
267 | "id": "eRJCb4FxzwVK"
268 | },
269 | "execution_count": null,
270 | "outputs": []
271 | },
272 | {
273 | "cell_type": "code",
274 | "source": [
275 | "# FUNCTION TO LOG ALL CHAT MESSAGES INTO chathistory.txt\n",
276 | "def writehistory(text):\n",
277 | " with open('20231011_PromptPower_Vicuna7b-history.txt', 'a') as f:\n",
278 | " f.write(text)\n",
279 | " f.write('\\n')\n",
280 | " f.close()\n",
281 | "\n",
282 | "def vicunaQ4KM_CT(prompt):\n",
283 | " from rich.markdown import Markdown\n",
284 | " import datetime\n",
285 | " from rich.console import Console\n",
286 | " console = Console(width=80)\n",
287 | " t_vicuna = f\"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:\"\n",
288 | " start = datetime.datetime.now()\n",
289 | " console.print(f\"[italic bold bright_red]Prompt: {prompt}\")\n",
290 | " answer = llm(t_vicuna, temperature = 0.7, repetition_penalty = 1.15,\n",
291 | " max_new_tokens = 2048)\n",
292 | " stop = datetime.datetime.now()\n",
293 | " tok2 = len(llm.tokenize(t_vicuna))\n",
294 | " tok1 = len(llm.tokenize(answer))\n",
295 | "    #console.print(f\"[italic]Number of characters in original prompt: {len(prompt)}\") #it works with CTransformers without Langchain\n",
296 | " console.print(f\"[italic bold]Number of tokens in the prompt: {tok2}\") #it works with CTransformers without Langchain\n",
297 | " console.print(f\"[italic bold]Number of tokens in the answer: {tok1}\")\n",
298 | " console.print(Markdown(answer))\n",
299 | " console.print(f\"[bold italic green] Generated by Vicuna-7b in {stop-start}\")\n",
300 | " text = f\"user: {prompt}\\nVicuna-7b: {answer}\\nGenerated in {stop-start}\"\n",
301 | " writehistory(text)\n",
302 | " console.print(f\"[blue1] ---\")\n",
303 | " return answer"
304 | ],
305 | "metadata": {
306 | "id": "OnXvXYtftsgL"
307 | },
308 | "execution_count": null,
309 | "outputs": []
310 | },
311 | {
312 | "cell_type": "code",
313 | "source": [],
314 | "metadata": {
315 | "id": "i3FFBX75fmDN"
316 | },
317 | "execution_count": null,
318 | "outputs": []
319 | },
320 | {
321 | "cell_type": "markdown",
322 | "source": [
323 | "## TEST several Prompt templates\n",
324 | "\n",
325 | "---\n"
326 | ],
327 | "metadata": {
328 | "id": "ry3If6TgCfNR"
329 | }
330 | },
331 | {
332 | "cell_type": "markdown",
333 | "source": [
334 | "name: 'Rewrite to Formal',\n",
335 | "description: `This prompt rewrites your text into formal writing. It's useful for writing emails, essays, reports, and other formal documents.`,\n",
336 | "Template:\n",
337 | "```\n",
338 | "> \"Translate the following text into formal writing:\n",
339 | ">\n",
340 | "> ```\n",
341 | "> {text}\n",
342 | "> ```\n",
343 | "> \"\n",
344 | "```"
345 | ],
346 | "metadata": {
347 | "id": "b4Eadhj8CuIB"
348 | }
349 | },
350 | {
351 | "cell_type": "code",
352 | "source": [
353 | "text = \"\"\"This study from March 2023 takes a simple yet novel approach to prompt engineering by automatically generating prompts based on the desired input and output.\n",
354 | "In a recent article I considered the future of prompt engineering, and the possibility of soft prompts (prompt tuning). I argued that user context, ambiguity and user intent all play an important role in any conversational UI.\n",
355 | "User intent, context, ambiguity and disambiguation are all part and parcel of any conversation.\n",
356 | "The question is, can this approach accelerate the process where manually wording prompts fade into the background and interaction with the LLM is based on contextual example input and output datasets?\n",
357 | "What I like about this approach, is that context, and user intent can be mapped, while also taking into consideration possible ambiguity.\n",
358 | "Yet manually crafting prompts is tedious in the sense of trying to word a prompt in such a way to engender a desired response from the LLM. Focussing on prompt engineering also does not take into consideration an array of possible user inputs.\n",
359 | "Data Management will always be part of LLM applications.\n",
360 | "APE offers an alternative approach to prompt engineering, where via input and matching output examples, prompts can be generated on the fly.\n",
361 | "We define “prompt engineering” as optimising the language in a prompt in order to elicit the best possible performance. Notably, this does not include prompts that chain multiple LLM queries together or give the LLM access to external tools. ~ Source\n",
362 | "The basic notebook below shows how Automatic Prompt Engineering (APE) can be used to generate prompts based on a small input data set, a list of expected outputs and a prompt template.\n",
363 | "APE performs this in two steps:\n",
364 | "A LLM is used to generate a set of candidate prompts.\n",
365 | "A prompt evaluation function considers the quality of each candidate prompt; returning the prompt with the highest evaluation score. A practical implementation is, via a human-in-the-loop approach, prompts can be marked up and marked down for use on terms of accuracy and correctness.\"\"\""
366 | ],
367 | "metadata": {
368 | "id": "fN898AOsDVJi"
369 | },
370 | "execution_count": null,
371 | "outputs": []
372 | },
373 | {
374 | "cell_type": "code",
375 | "source": [
376 | "text = text.replace('\\n',' ')"
377 | ],
378 | "metadata": {
379 | "id": "llgccfIQDYtZ"
380 | },
381 | "execution_count": null,
382 | "outputs": []
383 | },
384 | {
385 | "cell_type": "code",
386 | "source": [
387 | "print(text)"
388 | ],
389 | "metadata": {
390 | "colab": {
391 | "base_uri": "https://localhost:8080/",
392 | "height": 340
393 | },
394 | "id": "yRNDrpbRDeHG",
395 | "outputId": "b85313af-f4fd-483a-809c-68362d8389a1"
396 | },
397 | "execution_count": null,
398 | "outputs": [
399 | {
400 | "output_type": "display_data",
401 | "data": {
402 | "text/plain": [
403 | "This study from March \u001b[1;36m2023\u001b[0m takes a simple yet novel approach to prompt engineering by automatically generating \n",
404 | "prompts based on the desired input and output. In a recent article I considered the future of prompt engineering, \n",
405 | "and the possibility of soft prompts \u001b[1m(\u001b[0mprompt tuning\u001b[1m)\u001b[0m. I argued that user context, ambiguity and user intent all play\n",
406 | "an important role in any conversational UI. User intent, context, ambiguity and disambiguation are all part and \n",
407 | "parcel of any conversation. The question is, can this approach accelerate the process where manually wording \n",
408 | "prompts fade into the background and interaction with the LLM is based on contextual example input and output \n",
409 | "datasets? What I like about this approach, is that context, and user intent can be mapped, while also taking into \n",
410 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in the sense of trying to word a prompt \n",
411 | "in such a way to engender a desired response from the LLM. Focussing on prompt engineering also does not take into \n",
412 | "consideration an array of possible user inputs. Data Management will always be part of LLM applications. APE offers\n",
413 | "an alternative approach to prompt engineering, where via input and matching output examples, prompts can be \n",
414 | "generated on the fly. We define “prompt engineering” as optimising the language in a prompt in order to elicit the \n",
415 | "best possible performance. Notably, this does not include prompts that chain multiple LLM queries together or give \n",
416 | "the LLM access to external tools. ~ Source The basic notebook below shows how Automatic Prompt Engineering \u001b[1m(\u001b[0mAPE\u001b[1m)\u001b[0m \n",
417 | "can be used to generate prompts based on a small input data set, a list of expected outputs and a prompt template. \n",
418 | "APE performs this in two steps: A LLM is used to generate a set of candidate prompts. A prompt evaluation function \n",
419 | "considers the quality of each candidate prompt; returning the prompt with the highest evaluation score. A practical\n",
420 | "implementation is, via a human-in-the-loop approach, prompts can be marked up and marked down for use on terms of \n",
421 | "accuracy and correctness.\n"
422 | ],
423 | "text/html": [
424 | "
This study from March 2023 takes a simple yet novel approach to prompt engineering by automatically generating \n", 425 | "prompts based on the desired input and output. In a recent article I considered the future of prompt engineering, \n", 426 | "and the possibility of soft prompts (prompt tuning). I argued that user context, ambiguity and user intent all play\n", 427 | "an important role in any conversational UI. User intent, context, ambiguity and disambiguation are all part and \n", 428 | "parcel of any conversation. The question is, can this approach accelerate the process where manually wording \n", 429 | "prompts fade into the background and interaction with the LLM is based on contextual example input and output \n", 430 | "datasets? What I like about this approach, is that context, and user intent can be mapped, while also taking into \n", 431 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in the sense of trying to word a prompt \n", 432 | "in such a way to engender a desired response from the LLM. Focussing on prompt engineering also does not take into \n", 433 | "consideration an array of possible user inputs. Data Management will always be part of LLM applications. APE offers\n", 434 | "an alternative approach to prompt engineering, where via input and matching output examples, prompts can be \n", 435 | "generated on the fly. We define “prompt engineering” as optimising the language in a prompt in order to elicit the \n", 436 | "best possible performance. Notably, this does not include prompts that chain multiple LLM queries together or give \n", 437 | "the LLM access to external tools. ~ Source The basic notebook below shows how Automatic Prompt Engineering (APE) \n", 438 | "can be used to generate prompts based on a small input data set, a list of expected outputs and a prompt template. \n", 439 | "APE performs this in two steps: A LLM is used to generate a set of candidate prompts. 
A prompt evaluation function \n", 440 | "considers the quality of each candidate prompt; returning the prompt with the highest evaluation score. A practical\n", 441 | "implementation is, via a human-in-the-loop approach, prompts can be marked up and marked down for use on terms of \n", 442 | "accuracy and correctness.\n", 443 | "\n" 444 | ] 445 | }, 446 | "metadata": {} 447 | } 448 | ] 449 | }, 450 | { 451 | "cell_type": "code", 452 | "source": [ 453 | "RewToFormal = \"\"\"\n", 454 | "Translate the following text into formal writing:\n", 455 | "\n", 456 | "```\n", 457 | "This study from March 2023 takes a simple yet novel approach to prompt engineering by automatically generating\n", 458 | "prompts based on the desired input and output. In a recent article I considered the future of prompt engineering,\n", 459 | "and the possibility of soft prompts (prompt tuning). I argued that user context, ambiguity and user intent all play\n", 460 | "an important role in any conversational UI. User intent, context, ambiguity and disambiguation are all part and\n", 461 | "parcel of any conversation. The question is, can this approach accelerate the process where manually wording\n", 462 | "prompts fade into the background and interaction with the LLM is based on contextual example input and output\n", 463 | "datasets? What I like about this approach, is that context, and user intent can be mapped, while also taking into\n", 464 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in the sense of trying to word a prompt\n", 465 | "in such a way to engender a desired response from the LLM. Focussing on prompt engineering also does not take into\n", 466 | "consideration an array of possible user inputs. Data Management will always be part of LLM applications. APE offers\n", 467 | "an alternative approach to prompt engineering, where via input and matching output examples, prompts can be\n", 468 | "generated on the fly. 
We define “prompt engineering” as optimising the language in a prompt in order to elicit the\n", 469 | "best possible performance. Notably, this does not include prompts that chain multiple LLM queries together or give\n", 470 | "the LLM access to external tools. ~ Source The basic notebook below shows how Automatic Prompt Engineering (APE)\n", 471 | "can be used to generate prompts based on a small input data set, a list of expected outputs and a prompt template.\n", 472 | "APE performs this in two steps: A LLM is used to generate a set of candidate prompts. A prompt evaluation function\n", 473 | "considers the quality of each candidate prompt; returning the prompt with the highest evaluation score. A practical\n", 474 | "implementation is, via a human-in-the-loop approach, prompts can be marked up and marked down for use on terms of\n", 475 | "accuracy and correctness.\n", 476 | "```\n", 477 | "\n", 478 | "\"\"\"" 479 | ], 480 | "metadata": { 481 | "id": "X_Z9PS0ACfrq" 482 | }, 483 | "execution_count": null, 484 | "outputs": [] 485 | }, 486 | { 487 | "cell_type": "code", 488 | "source": [ 489 | "prompt1 = vicunaQ4KM_CT(RewToFormal)" 490 | ], 491 | "metadata": { 492 | "colab": { 493 | "base_uri": "https://localhost:8080/", 494 | "height": 1000 495 | }, 496 | "id": "d6oZVVMbCgJK", 497 | "outputId": "b1b1e2d5-78d6-4f82-e94e-abed9da7fa3b" 498 | }, 499 | "execution_count": null, 500 | "outputs": [ 501 | { 502 | "output_type": "display_data", 503 | "data": { 504 | "text/plain": [ 505 | "\u001b[1;3;91mPrompt: \u001b[0m\n", 506 | "\u001b[1;3;91mTranslate the following text into formal writing:\u001b[0m\n", 507 | "\n", 508 | "\u001b[1;3;91m```\u001b[0m\n", 509 | "\u001b[1;3;91mThis study from March \u001b[0m\u001b[1;3;91m2023\u001b[0m\u001b[1;3;91m takes a simple yet novel approach to prompt \u001b[0m\n", 510 | "\u001b[1;3;91mengineering by automatically generating \u001b[0m\n", 511 | "\u001b[1;3;91mprompts based on the desired input and output. 
In a recent article I considered \u001b[0m\n", 512 | "\u001b[1;3;91mthe future of prompt engineering, \u001b[0m\n", 513 | "\u001b[1;3;91mand the possibility of soft prompts \u001b[0m\u001b[1;3;91m(\u001b[0m\u001b[1;3;91mprompt tuning\u001b[0m\u001b[1;3;91m)\u001b[0m\u001b[1;3;91m. I argued that user context,\u001b[0m\n", 514 | "\u001b[1;3;91mambiguity and user intent all play\u001b[0m\n", 515 | "\u001b[1;3;91man important role in any conversational UI. User intent, context, ambiguity and \u001b[0m\n", 516 | "\u001b[1;3;91mdisambiguation are all part and \u001b[0m\n", 517 | "\u001b[1;3;91mparcel of any conversation. The question is, can this approach accelerate the \u001b[0m\n", 518 | "\u001b[1;3;91mprocess where manually wording \u001b[0m\n", 519 | "\u001b[1;3;91mprompts fade into the background and interaction with the LLM is based on \u001b[0m\n", 520 | "\u001b[1;3;91mcontextual example input and output \u001b[0m\n", 521 | "\u001b[1;3;91mdatasets? What I like about this approach, is that context, and user intent can \u001b[0m\n", 522 | "\u001b[1;3;91mbe mapped, while also taking into \u001b[0m\n", 523 | "\u001b[1;3;91mconsideration possible ambiguity. Yet manually crafting prompts is tedious in \u001b[0m\n", 524 | "\u001b[1;3;91mthe sense of trying to word a prompt \u001b[0m\n", 525 | "\u001b[1;3;91min such a way to engender a desired response from the LLM. Focussing on prompt \u001b[0m\n", 526 | "\u001b[1;3;91mengineering also does not take into \u001b[0m\n", 527 | "\u001b[1;3;91mconsideration an array of possible user inputs. Data Management will always be \u001b[0m\n", 528 | "\u001b[1;3;91mpart of LLM applications. APE offers\u001b[0m\n", 529 | "\u001b[1;3;91man alternative approach to prompt engineering, where via input and matching \u001b[0m\n", 530 | "\u001b[1;3;91moutput examples, prompts can be \u001b[0m\n", 531 | "\u001b[1;3;91mgenerated on the fly. 
We define “prompt engineering” as optimising the language \u001b[0m\n", 532 | "\u001b[1;3;91min a prompt in order to elicit the \u001b[0m\n", 533 | "\u001b[1;3;91mbest possible performance. Notably, this does not include prompts that chain \u001b[0m\n", 534 | "\u001b[1;3;91mmultiple LLM queries together or give \u001b[0m\n", 535 | "\u001b[1;3;91mthe LLM access to external tools. ~ Source The basic notebook below shows how \u001b[0m\n", 536 | "\u001b[1;3;91mAutomatic Prompt Engineering \u001b[0m\u001b[1;3;91m(\u001b[0m\u001b[1;3;91mAPE\u001b[0m\u001b[1;3;91m)\u001b[0m\u001b[1;3;91m \u001b[0m\n", 537 | "\u001b[1;3;91mcan be used to generate prompts based on a small input data set, a list of \u001b[0m\n", 538 | "\u001b[1;3;91mexpected outputs and a prompt template. \u001b[0m\n", 539 | "\u001b[1;3;91mAPE performs this in two steps: A LLM is used to generate a set of candidate \u001b[0m\n", 540 | "\u001b[1;3;91mprompts. A prompt evaluation function \u001b[0m\n", 541 | "\u001b[1;3;91mconsiders the quality of each candidate prompt; returning the prompt with the \u001b[0m\n", 542 | "\u001b[1;3;91mhighest evaluation score. A practical\u001b[0m\n", 543 | "\u001b[1;3;91mimplementation is, via a human-in-the-loop approach, prompts can be marked up \u001b[0m\n", 544 | "\u001b[1;3;91mand marked down for use on terms of \u001b[0m\n", 545 | "\u001b[1;3;91maccuracy and correctness.\u001b[0m\n", 546 | "\u001b[1;3;91m```\u001b[0m\n", 547 | "\n", 548 | "\n" 549 | ], 550 | "text/html": [ 551 | "
Prompt: \n", 552 | "Translate the following text into formal writing:\n", 553 | "\n", 554 | "```\n", 555 | "This study from March 2023 takes a simple yet novel approach to prompt \n", 556 | "engineering by automatically generating \n", 557 | "prompts based on the desired input and output. In a recent article I considered \n", 558 | "the future of prompt engineering, \n", 559 | "and the possibility of soft prompts (prompt tuning). I argued that user context,\n", 560 | "ambiguity and user intent all play\n", 561 | "an important role in any conversational UI. User intent, context, ambiguity and \n", 562 | "disambiguation are all part and \n", 563 | "parcel of any conversation. The question is, can this approach accelerate the \n", 564 | "process where manually wording \n", 565 | "prompts fade into the background and interaction with the LLM is based on \n", 566 | "contextual example input and output \n", 567 | "datasets? What I like about this approach, is that context, and user intent can \n", 568 | "be mapped, while also taking into \n", 569 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in \n", 570 | "the sense of trying to word a prompt \n", 571 | "in such a way to engender a desired response from the LLM. Focussing on prompt \n", 572 | "engineering also does not take into \n", 573 | "consideration an array of possible user inputs. Data Management will always be \n", 574 | "part of LLM applications. APE offers\n", 575 | "an alternative approach to prompt engineering, where via input and matching \n", 576 | "output examples, prompts can be \n", 577 | "generated on the fly. We define “prompt engineering” as optimising the language \n", 578 | "in a prompt in order to elicit the \n", 579 | "best possible performance. Notably, this does not include prompts that chain \n", 580 | "multiple LLM queries together or give \n", 581 | "the LLM access to external tools. 
~ Source The basic notebook below shows how \n", 582 | "Automatic Prompt Engineering (APE) \n", 583 | "can be used to generate prompts based on a small input data set, a list of \n", 584 | "expected outputs and a prompt template. \n", 585 | "APE performs this in two steps: A LLM is used to generate a set of candidate \n", 586 | "prompts. A prompt evaluation function \n", 587 | "considers the quality of each candidate prompt; returning the prompt with the \n", 588 | "highest evaluation score. A practical\n", 589 | "implementation is, via a human-in-the-loop approach, prompts can be marked up \n", 590 | "and marked down for use on terms of \n", 591 | "accuracy and correctness.\n", 592 | "```\n", 593 | "\n", 594 | "\n", 595 | "\n" 596 | ] 597 | }, 598 | "metadata": {} 599 | }, 600 | { 601 | "output_type": "display_data", 602 | "data": { 603 | "text/plain": [ 604 | "\u001b[1;3mNumber of tokens in the prompt: \u001b[0m\u001b[1;3;36m526\u001b[0m\n" 605 | ], 606 | "text/html": [ 607 | "
Number of tokens in the prompt: 526\n", 608 | "\n" 609 | ] 610 | }, 611 | "metadata": {} 612 | }, 613 | { 614 | "output_type": "display_data", 615 | "data": { 616 | "text/plain": [ 617 | "\u001b[1;3mNumber of tokens in the answer: \u001b[0m\u001b[1;3;36m460\u001b[0m\n" 618 | ], 619 | "text/html": [ 620 | "
Number of tokens in the answer: 460\n", 621 | "\n" 622 | ] 623 | }, 624 | "metadata": {} 625 | }, 626 | { 627 | "output_type": "display_data", 628 | "data": { 629 | "text/plain": [ 630 | "This study conducted in March 2023 adopts an innovative methodology for \n", 631 | "generating prompts by automatically creating them based on the desired input and\n", 632 | "output. In a recent article, I explored the future prospects of prompt \n", 633 | "engineering, specifically focusing on soft prompts (prompt tuning), as well as \n", 634 | "considering user context, ambiguity, and intent in conversational user \n", 635 | "interfaces. It is essential to understand that these factors play a crucial role\n", 636 | "in any conversation. Context, user intent, ambiguity, and disambiguation are all\n", 637 | "interconnected aspects of communication. The primary objective is whether this \n", 638 | "approach can accelerate the process where manually crafting prompts gradually \n", 639 | "becomes obsolete, and interactions with the language model (LLM) rely on \n", 640 | "contextual examples for input and output datasets. One advantage of this \n", 641 | "methodology is that it allows for mapping user intent and context while \n", 642 | "accounting for potential ambiguity. However, manually composing prompts can be \n", 643 | "time-consuming, as it requires crafting a prompt to elicit the desired response \n", 644 | "from the LLM. In addition, focusing on prompt engineering does not address a \n", 645 | "wide range of possible user inputs. Data management will always be an integral \n", 646 | "part of LLM applications. The Adaptive Prompt Engineering (APE) approach offers \n", 647 | "an alternative method for generating prompts by matching output examples based \n", 648 | "on input and creating prompts on the fly. We define \"prompt engineering\" as \n", 649 | "optimizing the language used in a prompt to elicit superior performance. 
\n", 650 | "Notably, this does not include prompts that concatenate multiple LLM queries or \n", 651 | "grant the LLM access to external tools. \n", 652 | "\n", 653 | "The basic notebook provided below illustrates how Automatic Prompt Engineering \n", 654 | "(APE) can be employed to generate prompts using a small input dataset, a list of\n", 655 | "expected outputs, and a template-based prompt. This process involves two steps: \n", 656 | "utilizing a language model to produce a set of candidate prompts, followed by a \n", 657 | "prompt evaluation function assessing the quality of each candidate prompt; \n", 658 | "selecting the prompt with the highest evaluation score. In practice, this can be\n", 659 | "achieved through a human-in-the-loop approach, where prompts are marked up or \n", 660 | "down based on their accuracy and correctness. \n" 661 | ], 662 | "text/html": [ 663 | "
This study conducted in March 2023 adopts an innovative methodology for \n", 664 | "generating prompts by automatically creating them based on the desired input and\n", 665 | "output. In a recent article, I explored the future prospects of prompt \n", 666 | "engineering, specifically focusing on soft prompts (prompt tuning), as well as \n", 667 | "considering user context, ambiguity, and intent in conversational user \n", 668 | "interfaces. It is essential to understand that these factors play a crucial role\n", 669 | "in any conversation. Context, user intent, ambiguity, and disambiguation are all\n", 670 | "interconnected aspects of communication. The primary objective is whether this \n", 671 | "approach can accelerate the process where manually crafting prompts gradually \n", 672 | "becomes obsolete, and interactions with the language model (LLM) rely on \n", 673 | "contextual examples for input and output datasets. One advantage of this \n", 674 | "methodology is that it allows for mapping user intent and context while \n", 675 | "accounting for potential ambiguity. However, manually composing prompts can be \n", 676 | "time-consuming, as it requires crafting a prompt to elicit the desired response \n", 677 | "from the LLM. In addition, focusing on prompt engineering does not address a \n", 678 | "wide range of possible user inputs. Data management will always be an integral \n", 679 | "part of LLM applications. The Adaptive Prompt Engineering (APE) approach offers \n", 680 | "an alternative method for generating prompts by matching output examples based \n", 681 | "on input and creating prompts on the fly. We define \"prompt engineering\" as \n", 682 | "optimizing the language used in a prompt to elicit superior performance. \n", 683 | "Notably, this does not include prompts that concatenate multiple LLM queries or \n", 684 | "grant the LLM access to external tools. 
\n", 685 | "\n", 686 | "The basic notebook provided below illustrates how Automatic Prompt Engineering \n", 687 | "(APE) can be employed to generate prompts using a small input dataset, a list of\n", 688 | "expected outputs, and a template-based prompt. This process involves two steps: \n", 689 | "utilizing a language model to produce a set of candidate prompts, followed by a \n", 690 | "prompt evaluation function assessing the quality of each candidate prompt; \n", 691 | "selecting the prompt with the highest evaluation score. In practice, this can be\n", 692 | "achieved through a human-in-the-loop approach, where prompts are marked up or \n", 693 | "down based on their accuracy and correctness. \n", 694 | "\n" 695 | ] 696 | }, 697 | "metadata": {} 698 | }, 699 | { 700 | "output_type": "display_data", 701 | "data": { 702 | "text/plain": [ 703 | "\u001b[1;3;32m Generated by Vicuna-7b in \u001b[0m\u001b[1;3;32m0:09:18\u001b[0m\u001b[1;3;32m.\u001b[0m\u001b[1;3;32m140666\u001b[0m\n" 704 | ], 705 | "text/html": [ 706 | "
Generated by Vicuna-7b in 0:09:18.140666\n",
707 | "\n"
708 | ]
709 | },
710 | "metadata": {}
711 | },
712 | {
713 | "output_type": "display_data",
714 | "data": {
715 | "text/plain": [
716 | "\u001b[38;5;21m ---\u001b[0m\n"
717 | ],
718 | "text/html": [
719 | " ---\n",
720 | "\n"
721 | ]
722 | },
723 | "metadata": {}
724 | }
725 | ]
726 | },
727 | {
728 | "cell_type": "markdown",
729 | "source": [
730 | "### name: 'Rewrite to Cool',\n",
731 | "description: `This prompt rewrites your text to make it more cool and elegant. It's useful for making your writing cooler and giving it some style.`,\n",
732 | "Template:\n",
733 | "\n",
734 | ">\"Rewrite the following text to make it more cool and elegant:\n",
735 | ">\n",
736 | ">```\n",
737 | ">{text}\n",
738 | ">```\n",
739 | ">\"\n",
740 | "\n",
741 | "\n",
742 | "### name: 'Proofread',\n",
743 | "description: `This prompt proofreads your text and suggests edits in it. It's useful for making your writing more professional and correct.`,\n",
744 | "Template:\n",
745 | ">\"Proofread and correct the following text and suggest improvements. If you >don't find any errors, just say \"No errors found\".\n",
746 | ">\n",
747 | ">```\n",
748 | ">{text}\n",
749 | ">```\n",
750 | ">\"\n",
751 | "\n",
752 | "\n",
753 | "### name: 'Proofread and Correct',\n",
754 | "description: `This prompt proofreads your text and corrects any mistakes in it. It's useful for making your writing more professional and correct.`,\n",
755 | "Template:\n",
756 | ">\"Proofread and correct the following text and rewrite the corrected version. >If you don't find any errors, just say \"No errors found\". Don't use any >punctuation around the text:\n",
757 | ">\n",
758 | ">```\n",
759 | ">{text}\n",
760 | ">```\n",
761 | ">\"\n",
762 | "\n"
763 | ],
764 | "metadata": {
765 | "id": "l_WccTYnFN6r"
766 | }
767 | },
768 | {
769 | "cell_type": "code",
770 | "source": [
771 | "RewToCool = \"\"\"\n",
772 | "Rewrite the following text to make it more cool and elegant:\n",
773 | "\n",
774 | "```\n",
775 | "This study from March 2023 takes a simple yet novel approach to prompt engineering by automatically generating\n",
776 | "prompts based on the desired input and output. In a recent article I considered the future of prompt engineering,\n",
777 | "and the possibility of soft prompts (prompt tuning). I argued that user context, ambiguity and user intent all play\n",
778 | "an important role in any conversational UI. User intent, context, ambiguity and disambiguation are all part and\n",
779 | "parcel of any conversation. The question is, can this approach accelerate the process where manually wording\n",
780 | "prompts fade into the background and interaction with the LLM is based on contextual example input and output\n",
781 | "datasets? What I like about this approach, is that context, and user intent can be mapped, while also taking into\n",
782 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in the sense of trying to word a prompt\n",
783 | "in such a way to engender a desired response from the LLM. Focussing on prompt engineering also does not take into\n",
784 | "consideration an array of possible user inputs. Data Management will always be part of LLM applications. APE offers\n",
785 | "an alternative approach to prompt engineering, where via input and matching output examples, prompts can be\n",
786 | "generated on the fly. We define “prompt engineering” as optimising the language in a prompt in order to elicit the\n",
787 | "best possible performance. Notably, this does not include prompts that chain multiple LLM queries together or give\n",
788 | "the LLM access to external tools. ~ Source The basic notebook below shows how Automatic Prompt Engineering (APE)\n",
789 | "can be used to generate prompts based on a small input data set, a list of expected outputs and a prompt template.\n",
790 | "APE performs this in two steps: A LLM is used to generate a set of candidate prompts. A prompt evaluation function\n",
791 | "considers the quality of each candidate prompt; returning the prompt with the highest evaluation score. A practical\n",
792 | "implementation is, via a human-in-the-loop approach, prompts can be marked up and marked down for use on terms of\n",
793 | "accuracy and correctness.\n",
794 | "```\n",
795 | "\"\"\""
796 | ],
797 | "metadata": {
798 | "id": "PNRrVee3GIhJ"
799 | },
800 | "execution_count": null,
801 | "outputs": []
802 | },
803 | {
804 | "cell_type": "code",
805 | "source": [
806 | "prompt2 = vicunaQ4KM_CT(RewToCool)"
807 | ],
808 | "metadata": {
809 | "colab": {
810 | "base_uri": "https://localhost:8080/",
811 | "height": 1000
812 | },
813 | "id": "zFh4q9-tGIaj",
814 | "outputId": "759a5d61-24dc-4512-8c36-1e9cdbbbf72e"
815 | },
816 | "execution_count": null,
817 | "outputs": [
818 | {
819 | "output_type": "display_data",
820 | "data": {
821 | "text/plain": [
822 | "\u001b[1;3;91mPrompt: \u001b[0m\n",
823 | "\u001b[1;3;91mRewrite the following text to make it more cool and elegant:\u001b[0m\n",
824 | "\n",
825 | "\u001b[1;3;91m```\u001b[0m\n",
826 | "\u001b[1;3;91mThis study from March \u001b[0m\u001b[1;3;91m2023\u001b[0m\u001b[1;3;91m takes a simple yet novel approach to prompt \u001b[0m\n",
827 | "\u001b[1;3;91mengineering by automatically generating \u001b[0m\n",
828 | "\u001b[1;3;91mprompts based on the desired input and output. In a recent article I considered \u001b[0m\n",
829 | "\u001b[1;3;91mthe future of prompt engineering, \u001b[0m\n",
830 | "\u001b[1;3;91mand the possibility of soft prompts \u001b[0m\u001b[1;3;91m(\u001b[0m\u001b[1;3;91mprompt tuning\u001b[0m\u001b[1;3;91m)\u001b[0m\u001b[1;3;91m. I argued that user context,\u001b[0m\n",
831 | "\u001b[1;3;91mambiguity and user intent all play\u001b[0m\n",
832 | "\u001b[1;3;91man important role in any conversational UI. User intent, context, ambiguity and \u001b[0m\n",
833 | "\u001b[1;3;91mdisambiguation are all part and \u001b[0m\n",
834 | "\u001b[1;3;91mparcel of any conversation. The question is, can this approach accelerate the \u001b[0m\n",
835 | "\u001b[1;3;91mprocess where manually wording \u001b[0m\n",
836 | "\u001b[1;3;91mprompts fade into the background and interaction with the LLM is based on \u001b[0m\n",
837 | "\u001b[1;3;91mcontextual example input and output \u001b[0m\n",
838 | "\u001b[1;3;91mdatasets? What I like about this approach, is that context, and user intent can \u001b[0m\n",
839 | "\u001b[1;3;91mbe mapped, while also taking into \u001b[0m\n",
840 | "\u001b[1;3;91mconsideration possible ambiguity. Yet manually crafting prompts is tedious in \u001b[0m\n",
841 | "\u001b[1;3;91mthe sense of trying to word a prompt \u001b[0m\n",
842 | "\u001b[1;3;91min such a way to engender a desired response from the LLM. Focussing on prompt \u001b[0m\n",
843 | "\u001b[1;3;91mengineering also does not take into \u001b[0m\n",
844 | "\u001b[1;3;91mconsideration an array of possible user inputs. Data Management will always be \u001b[0m\n",
845 | "\u001b[1;3;91mpart of LLM applications. APE offers\u001b[0m\n",
846 | "\u001b[1;3;91man alternative approach to prompt engineering, where via input and matching \u001b[0m\n",
847 | "\u001b[1;3;91moutput examples, prompts can be \u001b[0m\n",
848 | "\u001b[1;3;91mgenerated on the fly. We define “prompt engineering” as optimising the language \u001b[0m\n",
849 | "\u001b[1;3;91min a prompt in order to elicit the \u001b[0m\n",
850 | "\u001b[1;3;91mbest possible performance. Notably, this does not include prompts that chain \u001b[0m\n",
851 | "\u001b[1;3;91mmultiple LLM queries together or give \u001b[0m\n",
852 | "\u001b[1;3;91mthe LLM access to external tools. ~ Source The basic notebook below shows how \u001b[0m\n",
853 | "\u001b[1;3;91mAutomatic Prompt Engineering \u001b[0m\u001b[1;3;91m(\u001b[0m\u001b[1;3;91mAPE\u001b[0m\u001b[1;3;91m)\u001b[0m\u001b[1;3;91m \u001b[0m\n",
854 | "\u001b[1;3;91mcan be used to generate prompts based on a small input data set, a list of \u001b[0m\n",
855 | "\u001b[1;3;91mexpected outputs and a prompt template. \u001b[0m\n",
856 | "\u001b[1;3;91mAPE performs this in two steps: A LLM is used to generate a set of candidate \u001b[0m\n",
857 | "\u001b[1;3;91mprompts. A prompt evaluation function \u001b[0m\n",
858 | "\u001b[1;3;91mconsiders the quality of each candidate prompt; returning the prompt with the \u001b[0m\n",
859 | "\u001b[1;3;91mhighest evaluation score. A practical\u001b[0m\n",
860 | "\u001b[1;3;91mimplementation is, via a human-in-the-loop approach, prompts can be marked up \u001b[0m\n",
861 | "\u001b[1;3;91mand marked down for use on terms of \u001b[0m\n",
862 | "\u001b[1;3;91maccuracy and correctness.\u001b[0m\n",
863 | "\u001b[1;3;91m```\u001b[0m\n",
864 | "\n"
865 | ],
866 | "text/html": [
867 | "Prompt: \n", 868 | "Rewrite the following text to make it more cool and elegant:\n", 869 | "\n", 870 | "```\n", 871 | "This study from March 2023 takes a simple yet novel approach to prompt \n", 872 | "engineering by automatically generating \n", 873 | "prompts based on the desired input and output. In a recent article I considered \n", 874 | "the future of prompt engineering, \n", 875 | "and the possibility of soft prompts (prompt tuning). I argued that user context,\n", 876 | "ambiguity and user intent all play\n", 877 | "an important role in any conversational UI. User intent, context, ambiguity and \n", 878 | "disambiguation are all part and \n", 879 | "parcel of any conversation. The question is, can this approach accelerate the \n", 880 | "process where manually wording \n", 881 | "prompts fade into the background and interaction with the LLM is based on \n", 882 | "contextual example input and output \n", 883 | "datasets? What I like about this approach, is that context, and user intent can \n", 884 | "be mapped, while also taking into \n", 885 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in \n", 886 | "the sense of trying to word a prompt \n", 887 | "in such a way to engender a desired response from the LLM. Focussing on prompt \n", 888 | "engineering also does not take into \n", 889 | "consideration an array of possible user inputs. Data Management will always be \n", 890 | "part of LLM applications. APE offers\n", 891 | "an alternative approach to prompt engineering, where via input and matching \n", 892 | "output examples, prompts can be \n", 893 | "generated on the fly. We define “prompt engineering” as optimising the language \n", 894 | "in a prompt in order to elicit the \n", 895 | "best possible performance. Notably, this does not include prompts that chain \n", 896 | "multiple LLM queries together or give \n", 897 | "the LLM access to external tools. 
~ Source The basic notebook below shows how \n", 898 | "Automatic Prompt Engineering (APE) \n", 899 | "can be used to generate prompts based on a small input data set, a list of \n", 900 | "expected outputs and a prompt template. \n", 901 | "APE performs this in two steps: A LLM is used to generate a set of candidate \n", 902 | "prompts. A prompt evaluation function \n", 903 | "considers the quality of each candidate prompt; returning the prompt with the \n", 904 | "highest evaluation score. A practical\n", 905 | "implementation is, via a human-in-the-loop approach, prompts can be marked up \n", 906 | "and marked down for use on terms of \n", 907 | "accuracy and correctness.\n", 908 | "```\n", 909 | "\n", 910 | "\n" 911 | ] 912 | }, 913 | "metadata": {} 914 | }, 915 | { 916 | "output_type": "display_data", 917 | "data": { 918 | "text/plain": [ 919 | "\u001b[1;3mNumber of tokens in the prompt: \u001b[0m\u001b[1;3;36m529\u001b[0m\n" 920 | ], 921 | "text/html": [ 922 | "
Number of tokens in the prompt: 529\n", 923 | "\n" 924 | ] 925 | }, 926 | "metadata": {} 927 | }, 928 | { 929 | "output_type": "display_data", 930 | "data": { 931 | "text/plain": [ 932 | "\u001b[1;3mNumber of tokens in the answer: \u001b[0m\u001b[1;3;36m353\u001b[0m\n" 933 | ], 934 | "text/html": [ 935 | "
Number of tokens in the answer: 353\n", 936 | "\n" 937 | ] 938 | }, 939 | "metadata": {} 940 | }, 941 | { 942 | "output_type": "display_data", 943 | "data": { 944 | "text/plain": [ 945 | "In this innovative study conducted in March 2023, we employ an unconventional \n", 946 | "yet effective method of prompt engineering by automatically generating prompts \n", 947 | "based on desired input and output. Recently, I explored the future of prompt \n", 948 | "engineering, including the potential for soft prompts (prompt tuning), and \n", 949 | "emphasized the importance of user context, ambiguity, intent, and disambiguation\n", 950 | "in any conversational interface. \n", 951 | "\n", 952 | "Our approach seamlessly merges context, user intent, and possible ambiguities to\n", 953 | "streamline the process of manually crafted prompts while enhancing interaction \n", 954 | "with large language models (LLMs). This methodology enables a more efficient way\n", 955 | "of generating prompts that are tailored to specific input-output datasets. \n", 956 | "\n", 957 | "By using Automatic Prompt Engineering (APE), we can generate prompts on-the-fly \n", 958 | "through input and matching output examples. APE optimizes the language in a \n", 959 | "prompt, aiming for the best possible performance without involving multiple LLM \n", 960 | "queries or granting external access to the LLM. This novel approach offers a \n", 961 | "refreshing perspective on prompt engineering by focusing on the context of user \n", 962 | "intent while considering ambiguity as an essential factor in any conversation. \n", 963 | "\n", 964 | "The basic notebook below illustrates how APE can generate prompts based on a \n", 965 | "small input dataset, a list of expected outputs, and a prompt template: \n", 966 | "\n", 967 | "\u001b[1;33m 1 \u001b[0mAn LLM generates candidate prompts. 
\n", 968 | "\u001b[1;33m 2 \u001b[0mA prompt evaluation function assesses the quality of each candidate prompt by\n", 969 | "\u001b[1;33m \u001b[0mreturning the one with the highest evaluation score. \n", 970 | "\u001b[1;33m 3 \u001b[0mIn practice, human-in-the-loop approach allows for marking up or down prompts\n", 971 | "\u001b[1;33m \u001b[0mbased on accuracy and correctness. \n" 972 | ], 973 | "text/html": [ 974 | "
In this innovative study conducted in March 2023, we employ an unconventional \n", 975 | "yet effective method of prompt engineering by automatically generating prompts \n", 976 | "based on desired input and output. Recently, I explored the future of prompt \n", 977 | "engineering, including the potential for soft prompts (prompt tuning), and \n", 978 | "emphasized the importance of user context, ambiguity, intent, and disambiguation\n", 979 | "in any conversational interface. \n", 980 | "\n", 981 | "Our approach seamlessly merges context, user intent, and possible ambiguities to\n", 982 | "streamline the process of manually crafted prompts while enhancing interaction \n", 983 | "with large language models (LLMs). This methodology enables a more efficient way\n", 984 | "of generating prompts that are tailored to specific input-output datasets. \n", 985 | "\n", 986 | "By using Automatic Prompt Engineering (APE), we can generate prompts on-the-fly \n", 987 | "through input and matching output examples. APE optimizes the language in a \n", 988 | "prompt, aiming for the best possible performance without involving multiple LLM \n", 989 | "queries or granting external access to the LLM. This novel approach offers a \n", 990 | "refreshing perspective on prompt engineering by focusing on the context of user \n", 991 | "intent while considering ambiguity as an essential factor in any conversation. \n", 992 | "\n", 993 | "The basic notebook below illustrates how APE can generate prompts based on a \n", 994 | "small input dataset, a list of expected outputs, and a prompt template: \n", 995 | "\n", 996 | " 1 An LLM generates candidate prompts. \n", 997 | " 2 A prompt evaluation function assesses the quality of each candidate prompt by\n", 998 | " returning the one with the highest evaluation score. \n", 999 | " 3 In practice, human-in-the-loop approach allows for marking up or down prompts\n", 1000 | " based on accuracy and correctness. 
\n", 1001 | "\n" 1002 | ] 1003 | }, 1004 | "metadata": {} 1005 | }, 1006 | { 1007 | "output_type": "display_data", 1008 | "data": { 1009 | "text/plain": [ 1010 | "\u001b[1;3;32m Generated by Vicuna-7b in \u001b[0m\u001b[1;3;32m0:07:52\u001b[0m\u001b[1;3;32m.\u001b[0m\u001b[1;3;32m909556\u001b[0m\n" 1011 | ], 1012 | "text/html": [ 1013 | "
Generated by Vicuna-7b in 0:07:52.909556\n",
1014 | "\n"
1015 | ]
1016 | },
1017 | "metadata": {}
1018 | },
1019 | {
1020 | "output_type": "display_data",
1021 | "data": {
1022 | "text/plain": [
1023 | "\u001b[38;5;21m ---\u001b[0m\n"
1024 | ],
1025 | "text/html": [
1026 | " ---\n",
1027 | "\n"
1028 | ]
1029 | },
1030 | "metadata": {}
1031 | }
1032 | ]
1033 | },
1034 | {
1035 | "cell_type": "markdown",
1036 | "source": [
1037 | "### name: 'Question with Reasoning',\n",
1038 | "description: `This prompt guides critical thinking by instructing the LLM to provide its initial thoughts, critique them, and provide a final answer.`,\n",
1039 | "Template:\n",
1040 | "```\n",
1041 | "\"{question}\n",
1042 | "\n",
1043 | "Reply in the following pattern:\n",
1044 | "THOUGHT: // Your thought here\n",
1045 | "CRITICISM: // Criticism of your thought\n",
1046 | "ANSWER: // Your final answer\"\n",
1047 | "```\n"
1048 | ],
1049 | "metadata": {
1050 | "id": "2cT56wJJGrUN"
1051 | }
1052 | },
1053 | {
1054 | "cell_type": "code",
1055 | "source": [
1056 | "QuesWithReas = \"\"\"\n",
1057 | "Will Artificial Intelligence replace humans in the near future?\n",
1058 | "\n",
1059 | "Reply in the following pattern:\n",
1060 | "THOUGHT: // Your thought here\n",
1061 | "CRITICISM: // Criticism of your thought\n",
1062 | "ANSWER: // Your final answer\n",
1063 | "\"\"\""
1064 | ],
1065 | "metadata": {
1066 | "id": "rflbLqIoGIXs"
1067 | },
1068 | "execution_count": null,
1069 | "outputs": []
1070 | },
1071 | {
1072 | "cell_type": "code",
1073 | "source": [
1074 | "prompt3 = vicunaQ4KM_CT(QuesWithReas)"
1075 | ],
1076 | "metadata": {
1077 | "colab": {
1078 | "base_uri": "https://localhost:8080/",
1079 | "height": 442
1080 | },
1081 | "id": "0ufvXgsIGsW8",
1082 | "outputId": "9647cf38-85f8-491f-baf8-3d3788d24ce2"
1083 | },
1084 | "execution_count": null,
1085 | "outputs": [
1086 | {
1087 | "output_type": "display_data",
1088 | "data": {
1089 | "text/plain": [
1090 | "\u001b[1;3;91mPrompt: \u001b[0m\n",
1091 | "\u001b[1;3;91mWill Artificial Intelligence replace humans in the near future?\u001b[0m\n",
1092 | "\n",
1093 | "\u001b[1;3;91mReply in the following pattern:\u001b[0m\n",
1094 | "\u001b[1;3;91mTHOUGHT: \u001b[0m\u001b[1;3;91m/\u001b[0m\u001b[1;3;91m/\u001b[0m\u001b[1;3;91m Your thought here\u001b[0m\n",
1095 | "\u001b[1;3;91mCRITICISM: \u001b[0m\u001b[1;3;91m/\u001b[0m\u001b[1;3;91m/\u001b[0m\u001b[1;3;91m Criticism of your thought\u001b[0m\n",
1096 | "\u001b[1;3;91mANSWER: \u001b[0m\u001b[1;3;91m/\u001b[0m\u001b[1;3;91m/\u001b[0m\u001b[1;3;91m Your final answer\u001b[0m\n",
1097 | "\n"
1098 | ],
1099 | "text/html": [
1100 | "Prompt: \n", 1101 | "Will Artificial Intelligence replace humans in the near future?\n", 1102 | "\n", 1103 | "Reply in the following pattern:\n", 1104 | "THOUGHT: // Your thought here\n", 1105 | "CRITICISM: // Criticism of your thought\n", 1106 | "ANSWER: // Your final answer\n", 1107 | "\n", 1108 | "\n" 1109 | ] 1110 | }, 1111 | "metadata": {} 1112 | }, 1113 | { 1114 | "output_type": "display_data", 1115 | "data": { 1116 | "text/plain": [ 1117 | "\u001b[1;3mNumber of tokens in the prompt: \u001b[0m\u001b[1;3;36m97\u001b[0m\n" 1118 | ], 1119 | "text/html": [ 1120 | "
Number of tokens in the prompt: 97\n", 1121 | "\n" 1122 | ] 1123 | }, 1124 | "metadata": {} 1125 | }, 1126 | { 1127 | "output_type": "display_data", 1128 | "data": { 1129 | "text/plain": [ 1130 | "\u001b[1;3mNumber of tokens in the answer: \u001b[0m\u001b[1;3;36m179\u001b[0m\n" 1131 | ], 1132 | "text/html": [ 1133 | "
Number of tokens in the answer: 179\n", 1134 | "\n" 1135 | ] 1136 | }, 1137 | "metadata": {} 1138 | }, 1139 | { 1140 | "output_type": "display_data", 1141 | "data": { 1142 | "text/plain": [ 1143 | "THOUGHT: Artificial intelligence is advancing rapidly, and it's possible that it\n", 1144 | "may eventually surpass human capabilities in certain areas. However, it's \n", 1145 | "unlikely that AI will completely replace humans in the near future. \n", 1146 | "\n", 1147 | "CRITICISM: This thought overlooks the potential ethical implications of advanced\n", 1148 | "AI, such as the risk of job displacement and loss of privacy for individuals. It\n", 1149 | "also doesn't consider the possibility of AI being used to enhance human \n", 1150 | "capabilities rather than replace them. \n", 1151 | "\n", 1152 | "ANSWER: While it's true that AI has made significant progress in recent years, \n", 1153 | "it's unlikely to completely replace humans in the near future. Instead, we \n", 1154 | "should focus on developing AI that complements and enhances human abilities \n", 1155 | "while also addressing ethical concerns related to its deployment. \n" 1156 | ], 1157 | "text/html": [ 1158 | "
THOUGHT: Artificial intelligence is advancing rapidly, and it's possible that it\n", 1159 | "may eventually surpass human capabilities in certain areas. However, it's \n", 1160 | "unlikely that AI will completely replace humans in the near future. \n", 1161 | "\n", 1162 | "CRITICISM: This thought overlooks the potential ethical implications of advanced\n", 1163 | "AI, such as the risk of job displacement and loss of privacy for individuals. It\n", 1164 | "also doesn't consider the possibility of AI being used to enhance human \n", 1165 | "capabilities rather than replace them. \n", 1166 | "\n", 1167 | "ANSWER: While it's true that AI has made significant progress in recent years, \n", 1168 | "it's unlikely to completely replace humans in the near future. Instead, we \n", 1169 | "should focus on developing AI that complements and enhances human abilities \n", 1170 | "while also addressing ethical concerns related to its deployment. \n", 1171 | "\n" 1172 | ] 1173 | }, 1174 | "metadata": {} 1175 | }, 1176 | { 1177 | "output_type": "display_data", 1178 | "data": { 1179 | "text/plain": [ 1180 | "\u001b[1;3;32m Generated by Vicuna-7b in \u001b[0m\u001b[1;3;32m0:02:38\u001b[0m\u001b[1;3;32m.\u001b[0m\u001b[1;3;32m361321\u001b[0m\n" 1181 | ], 1182 | "text/html": [ 1183 | "
Generated by Vicuna-7b in 0:02:38.361321\n",
1184 | "\n"
1185 | ]
1186 | },
1187 | "metadata": {}
1188 | },
1189 | {
1190 | "output_type": "display_data",
1191 | "data": {
1192 | "text/plain": [
1193 | "\u001b[38;5;21m ---\u001b[0m\n"
1194 | ],
1195 | "text/html": [
1196 | " ---\n",
1197 | "\n"
1198 | ]
1199 | },
1200 | "metadata": {}
1201 | }
1202 | ]
1203 | },
1204 | {
1205 | "cell_type": "markdown",
1206 | "source": [
1207 | "### question with context\n",
1208 | "```\n",
1209 | "\"\"\"Answer the question based on the context below. Keep the answer short. Respond \"Unsure about answer\" if not sure about the answer.\n",
1210 | "\n",
1211 | "Context: {context}\n",
1212 | "\n",
1213 | "Question: {question}\n",
1214 | "\n",
1215 | "Answer:\"\"\"\n",
1216 | "\n",
1217 | "```"
1218 | ],
1219 | "metadata": {
1220 | "id": "H6P6N96GRiEh"
1221 | }
1222 | },
1223 | {
1224 | "cell_type": "code",
1225 | "source": [
1226 | "QuestWithCont = \"\"\"Answer the question based on the context below. Keep the answer short. Respond \"Unsure about answer\" if not sure about the answer.\n",
1227 | "\n",
1228 | "Context: This study from March 2023 takes a simple yet novel approach to prompt engineering by automatically generating\n",
1229 | "prompts based on the desired input and output. In a recent article I considered the future of prompt engineering,\n",
1230 | "and the possibility of soft prompts (prompt tuning). I argued that user context, ambiguity and user intent all play\n",
1231 | "an important role in any conversational UI. User intent, context, ambiguity and disambiguation are all part and\n",
1232 | "parcel of any conversation. The question is, can this approach accelerate the process where manually wording\n",
1233 | "prompts fade into the background and interaction with the LLM is based on contextual example input and output\n",
1234 | "datasets? What I like about this approach, is that context, and user intent can be mapped, while also taking into\n",
1235 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in the sense of trying to word a prompt\n",
1236 | "in such a way to engender a desired response from the LLM. Focussing on prompt engineering also does not take into\n",
1237 | "consideration an array of possible user inputs. Data Management will always be part of LLM applications. APE offers\n",
1238 | "an alternative approach to prompt engineering, where via input and matching output examples, prompts can be\n",
1239 | "generated on the fly. We define “prompt engineering” as optimising the language in a prompt in order to elicit the\n",
1240 | "best possible performance. Notably, this does not include prompts that chain multiple LLM queries together or give\n",
1241 | "the LLM access to external tools. ~ Source The basic notebook below shows how Automatic Prompt Engineering (APE)\n",
1242 | "can be used to generate prompts based on a small input data set, a list of expected outputs and a prompt template.\n",
1243 | "APE performs this in two steps: A LLM is used to generate a set of candidate prompts. A prompt evaluation function\n",
1244 | "considers the quality of each candidate prompt; returning the prompt with the highest evaluation score. A practical\n",
1245 | "implementation is, via a human-in-the-loop approach, prompts can be marked up and marked down for use on terms of\n",
1246 | "accuracy and correctness.\n",
1247 | "\n",
1248 | "Question: What is APE?\n",
1249 | "\n",
1250 | "Answer:\"\"\""
1251 | ],
1252 | "metadata": {
1253 | "id": "6gBN8v6CCf_q"
1254 | },
1255 | "execution_count": null,
1256 | "outputs": []
1257 | },
1258 | {
1259 | "cell_type": "code",
1260 | "source": [
1261 | "prompt4 = vicunaQ4KM_CT(QuestWithCont)"
1262 | ],
1263 | "metadata": {
1264 | "colab": {
1265 | "base_uri": "https://localhost:8080/",
1266 | "height": 901
1267 | },
1268 | "id": "KWZ4cWwlR9bC",
1269 | "outputId": "008a7483-537d-49d1-804c-5e27e55f6a45"
1270 | },
1271 | "execution_count": null,
1272 | "outputs": [
1273 | {
1274 | "output_type": "display_data",
1275 | "data": {
1276 | "text/plain": [
1277 | "\u001b[1;3;91mPrompt: Answer the question based on the context below. Keep the answer short. \u001b[0m\n",
1278 | "\u001b[1;3;91mRespond \u001b[0m\u001b[1;3;91m\"Unsure about answer\"\u001b[0m\u001b[1;3;91m if not sure about the answer.\u001b[0m\n",
1279 | "\n",
1280 | "\u001b[1;3;91mContext: This study from March \u001b[0m\u001b[1;3;91m2023\u001b[0m\u001b[1;3;91m takes a simple yet novel approach to prompt \u001b[0m\n",
1281 | "\u001b[1;3;91mengineering by automatically generating \u001b[0m\n",
1282 | "\u001b[1;3;91mprompts based on the desired input and output. In a recent article I considered \u001b[0m\n",
1283 | "\u001b[1;3;91mthe future of prompt engineering, \u001b[0m\n",
1284 | "\u001b[1;3;91mand the possibility of soft prompts \u001b[0m\u001b[1;3;91m(\u001b[0m\u001b[1;3;91mprompt tuning\u001b[0m\u001b[1;3;91m)\u001b[0m\u001b[1;3;91m. I argued that user context,\u001b[0m\n",
1285 | "\u001b[1;3;91mambiguity and user intent all play\u001b[0m\n",
1286 | "\u001b[1;3;91man important role in any conversational UI. User intent, context, ambiguity and \u001b[0m\n",
1287 | "\u001b[1;3;91mdisambiguation are all part and \u001b[0m\n",
1288 | "\u001b[1;3;91mparcel of any conversation. The question is, can this approach accelerate the \u001b[0m\n",
1289 | "\u001b[1;3;91mprocess where manually wording \u001b[0m\n",
1290 | "\u001b[1;3;91mprompts fade into the background and interaction with the LLM is based on \u001b[0m\n",
1291 | "\u001b[1;3;91mcontextual example input and output \u001b[0m\n",
1292 | "\u001b[1;3;91mdatasets? What I like about this approach, is that context, and user intent can \u001b[0m\n",
1293 | "\u001b[1;3;91mbe mapped, while also taking into \u001b[0m\n",
1294 | "\u001b[1;3;91mconsideration possible ambiguity. Yet manually crafting prompts is tedious in \u001b[0m\n",
1295 | "\u001b[1;3;91mthe sense of trying to word a prompt \u001b[0m\n",
1296 | "\u001b[1;3;91min such a way to engender a desired response from the LLM. Focussing on prompt \u001b[0m\n",
1297 | "\u001b[1;3;91mengineering also does not take into \u001b[0m\n",
1298 | "\u001b[1;3;91mconsideration an array of possible user inputs. Data Management will always be \u001b[0m\n",
1299 | "\u001b[1;3;91mpart of LLM applications. APE offers\u001b[0m\n",
1300 | "\u001b[1;3;91man alternative approach to prompt engineering, where via input and matching \u001b[0m\n",
1301 | "\u001b[1;3;91moutput examples, prompts can be \u001b[0m\n",
1302 | "\u001b[1;3;91mgenerated on the fly. We define “prompt engineering” as optimising the language \u001b[0m\n",
1303 | "\u001b[1;3;91min a prompt in order to elicit the \u001b[0m\n",
1304 | "\u001b[1;3;91mbest possible performance. Notably, this does not include prompts that chain \u001b[0m\n",
1305 | "\u001b[1;3;91mmultiple LLM queries together or give \u001b[0m\n",
1306 | "\u001b[1;3;91mthe LLM access to external tools. ~ Source The basic notebook below shows how \u001b[0m\n",
1307 | "\u001b[1;3;91mAutomatic Prompt Engineering \u001b[0m\u001b[1;3;91m(\u001b[0m\u001b[1;3;91mAPE\u001b[0m\u001b[1;3;91m)\u001b[0m\u001b[1;3;91m \u001b[0m\n",
1308 | "\u001b[1;3;91mcan be used to generate prompts based on a small input data set, a list of \u001b[0m\n",
1309 | "\u001b[1;3;91mexpected outputs and a prompt template. \u001b[0m\n",
1310 | "\u001b[1;3;91mAPE performs this in two steps: A LLM is used to generate a set of candidate \u001b[0m\n",
1311 | "\u001b[1;3;91mprompts. A prompt evaluation function \u001b[0m\n",
1312 | "\u001b[1;3;91mconsiders the quality of each candidate prompt; returning the prompt with the \u001b[0m\n",
1313 | "\u001b[1;3;91mhighest evaluation score. A practical\u001b[0m\n",
1314 | "\u001b[1;3;91mimplementation is, via a human-in-the-loop approach, prompts can be marked up \u001b[0m\n",
1315 | "\u001b[1;3;91mand marked down for use on terms of \u001b[0m\n",
1316 | "\u001b[1;3;91maccuracy and correctness.\u001b[0m\n",
1317 | "\n",
1318 | "\u001b[1;3;91mQuestion: What is APE?\u001b[0m\n",
1319 | "\n",
1320 | "\u001b[1;3;91mAnswer:\u001b[0m\n"
1321 | ],
1322 | "text/html": [
1323 | "Prompt: Answer the question based on the context below. Keep the answer short. \n", 1324 | "Respond \"Unsure about answer\" if not sure about the answer.\n", 1325 | "\n", 1326 | "Context: This study from March 2023 takes a simple yet novel approach to prompt \n", 1327 | "engineering by automatically generating \n", 1328 | "prompts based on the desired input and output. In a recent article I considered \n", 1329 | "the future of prompt engineering, \n", 1330 | "and the possibility of soft prompts (prompt tuning). I argued that user context,\n", 1331 | "ambiguity and user intent all play\n", 1332 | "an important role in any conversational UI. User intent, context, ambiguity and \n", 1333 | "disambiguation are all part and \n", 1334 | "parcel of any conversation. The question is, can this approach accelerate the \n", 1335 | "process where manually wording \n", 1336 | "prompts fade into the background and interaction with the LLM is based on \n", 1337 | "contextual example input and output \n", 1338 | "datasets? What I like about this approach, is that context, and user intent can \n", 1339 | "be mapped, while also taking into \n", 1340 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in \n", 1341 | "the sense of trying to word a prompt \n", 1342 | "in such a way to engender a desired response from the LLM. Focussing on prompt \n", 1343 | "engineering also does not take into \n", 1344 | "consideration an array of possible user inputs. Data Management will always be \n", 1345 | "part of LLM applications. APE offers\n", 1346 | "an alternative approach to prompt engineering, where via input and matching \n", 1347 | "output examples, prompts can be \n", 1348 | "generated on the fly. We define “prompt engineering” as optimising the language \n", 1349 | "in a prompt in order to elicit the \n", 1350 | "best possible performance. 
Notably, this does not include prompts that chain \n", 1351 | "multiple LLM queries together or give \n", 1352 | "the LLM access to external tools. ~ Source The basic notebook below shows how \n", 1353 | "Automatic Prompt Engineering (APE) \n", 1354 | "can be used to generate prompts based on a small input data set, a list of \n", 1355 | "expected outputs and a prompt template. \n", 1356 | "APE performs this in two steps: A LLM is used to generate a set of candidate \n", 1357 | "prompts. A prompt evaluation function \n", 1358 | "considers the quality of each candidate prompt; returning the prompt with the \n", 1359 | "highest evaluation score. A practical\n", 1360 | "implementation is, via a human-in-the-loop approach, prompts can be marked up \n", 1361 | "and marked down for use on terms of \n", 1362 | "accuracy and correctness.\n", 1363 | "\n", 1364 | "Question: What is APE?\n", 1365 | "\n", 1366 | "Answer:\n", 1367 | "\n" 1368 | ] 1369 | }, 1370 | "metadata": {} 1371 | }, 1372 | { 1373 | "output_type": "display_data", 1374 | "data": { 1375 | "text/plain": [ 1376 | "\u001b[1;3mNumber of tokens in the prompt: \u001b[0m\u001b[1;3;36m553\u001b[0m\n" 1377 | ], 1378 | "text/html": [ 1379 | "
Number of tokens in the prompt: 553\n", 1380 | "\n" 1381 | ] 1382 | }, 1383 | "metadata": {} 1384 | }, 1385 | { 1386 | "output_type": "display_data", 1387 | "data": { 1388 | "text/plain": [ 1389 | "\u001b[1;3mNumber of tokens in the answer: \u001b[0m\u001b[1;3;36m56\u001b[0m\n" 1390 | ], 1391 | "text/html": [ 1392 | "
Number of tokens in the answer: 56\n", 1393 | "\n" 1394 | ] 1395 | }, 1396 | "metadata": {} 1397 | }, 1398 | { 1399 | "output_type": "display_data", 1400 | "data": { 1401 | "text/plain": [ 1402 | "Automatic Prompt Engineering (APE) is an approach to prompt engineering that \n", 1403 | "generates prompts based on a desired input, output, and a template. It uses \n", 1404 | "natural language processing techniques to generate candidate prompts, which are \n", 1405 | "evaluated for quality by a prompt evaluation function. \n" 1406 | ], 1407 | "text/html": [ 1408 | "
Automatic Prompt Engineering (APE) is an approach to prompt engineering that \n", 1409 | "generates prompts based on a desired input, output, and a template. It uses \n", 1410 | "natural language processing techniques to generate candidate prompts, which are \n", 1411 | "evaluated for quality by a prompt evaluation function. \n", 1412 | "\n" 1413 | ] 1414 | }, 1415 | "metadata": {} 1416 | }, 1417 | { 1418 | "output_type": "display_data", 1419 | "data": { 1420 | "text/plain": [ 1421 | "\u001b[1;3;32m Generated by Vicuna-7b in \u001b[0m\u001b[1;3;32m0:04:53\u001b[0m\u001b[1;3;32m.\u001b[0m\u001b[1;3;32m860025\u001b[0m\n" 1422 | ], 1423 | "text/html": [ 1424 | "
Generated by Vicuna-7b in 0:04:53.860025\n",
1425 | "\n"
1426 | ]
1427 | },
1428 | "metadata": {}
1429 | },
1430 | {
1431 | "output_type": "display_data",
1432 | "data": {
1433 | "text/plain": [
1434 | "\u001b[38;5;21m ---\u001b[0m\n"
1435 | ],
1436 | "text/html": [
1437 | " ---\n",
1438 | "\n"
1439 | ]
1440 | },
1441 | "metadata": {}
1442 | }
1443 | ]
1444 | },
1445 | {
1446 | "cell_type": "markdown",
1447 | "source": [
1448 | "### name: 'Zero-Shot CoT',\n",
1449 | "// https://arxiv.org/abs/2205.11916\n",
1450 | "description: `Chain-of-thought (CoT) prompting enables complex reasoning capabilities through intermediate reasoning steps. Useful for more complex arithmetic, commonsense, and symbolic reasoning tasks.`,\n",
1451 | "paper: 'https://arxiv.org/abs/2205.11916',\n",
1452 | "Template:\n",
1453 | "```\n",
1454 | "\"\"\"{question}\n",
1455 | "\n",
1456 | "Let's think step by step.\"\"\"\n",
1457 | "\n",
1458 | "```\n",
1459 | "\n",
1460 | "\n",
1461 | "\n",
1462 | "### name: 'Zero-Shot CoT (APE)',\n",
1463 | "// https://arxiv.org/abs/2211.01910\n",
1464 | "description: `Chain-of-thought (CoT) prompt discovered by automatic prompt engineer (APE) which is better than the human engineered zero-shot CoT prompt.`,\n",
1465 | "paper: 'https://arxiv.org/abs/2211.01910',\n",
1466 | "Template:\n",
1467 | "```\n",
1468 | "\"\"\"{question}\n",
1469 | "\n",
1470 | "Let's work this out in a step by step way to be sure we have the right answer.\"\"\"\n",
1471 | "```\n",
1472 | "\n"
1473 | ],
1474 | "metadata": {
1475 | "id": "5hBMK-lfTBFI"
1476 | }
1477 | },
1478 | {
1479 | "cell_type": "code",
1480 | "source": [
1481 | "Zero_Shot_CoT = \"\"\"what will be the impact of Artificial Intelligence on human reasoning and learning in the near future?\n",
1482 | "\n",
1483 | "Let's think step by step.\"\"\""
1484 | ],
1485 | "metadata": {
1486 | "id": "91Xs2qKaTBZ8"
1487 | },
1488 | "execution_count": null,
1489 | "outputs": []
1490 | },
1491 | {
1492 | "cell_type": "code",
1493 | "source": [
1494 | "prompt5 = vicunaQ4KM_CT(Zero_Shot_CoT)"
1495 | ],
1496 | "metadata": {
1497 | "colab": {
1498 | "base_uri": "https://localhost:8080/",
1499 | "height": 918
1500 | },
1501 | "id": "GnjDNQVzTCQf",
1502 | "outputId": "6a75b585-ce76-4c6a-ae93-1bbb44ec97f4"
1503 | },
1504 | "execution_count": null,
1505 | "outputs": [
1506 | {
1507 | "output_type": "display_data",
1508 | "data": {
1509 | "text/plain": [
1510 | "\u001b[1;3;91mPrompt: what will be the impact of Artificial Intelligence on human reasoning \u001b[0m\n",
1511 | "\u001b[1;3;91mand learning in the near future?\u001b[0m\n",
1512 | "\n",
1513 | "\u001b[1;3;91mLet's think step by step.\u001b[0m\n"
1514 | ],
1515 | "text/html": [
1516 | "Prompt: what will be the impact of Artificial Intelligence on human reasoning \n", 1517 | "and learning in the near future?\n", 1518 | "\n", 1519 | "Let's think step by step.\n", 1520 | "\n" 1521 | ] 1522 | }, 1523 | "metadata": {} 1524 | }, 1525 | { 1526 | "output_type": "display_data", 1527 | "data": { 1528 | "text/plain": [ 1529 | "\u001b[1;3mNumber of tokens in the prompt: \u001b[0m\u001b[1;3;36m70\u001b[0m\n" 1530 | ], 1531 | "text/html": [ 1532 | "
Number of tokens in the prompt: 70\n", 1533 | "\n" 1534 | ] 1535 | }, 1536 | "metadata": {} 1537 | }, 1538 | { 1539 | "output_type": "display_data", 1540 | "data": { 1541 | "text/plain": [ 1542 | "\u001b[1;3mNumber of tokens in the answer: \u001b[0m\u001b[1;3;36m628\u001b[0m\n" 1543 | ], 1544 | "text/html": [ 1545 | "
Number of tokens in the answer: 628\n", 1546 | "\n" 1547 | ] 1548 | }, 1549 | "metadata": {} 1550 | }, 1551 | { 1552 | "output_type": "display_data", 1553 | "data": { 1554 | "text/plain": [ 1555 | "Sure, I can provide my insights on how artificial intelligence (AI) may impact \n", 1556 | "human reasoning and learning in the near future. Here are some steps we could \n", 1557 | "take to explore this question: \n", 1558 | "\n", 1559 | "\u001b[1;33m 1 \u001b[0mUnderstand what AI is and its capabilities: Firstly, it's important to \n", 1560 | "\u001b[1;33m \u001b[0munderstand that AI refers to computer systems capable of performing tasks \n", 1561 | "\u001b[1;33m \u001b[0mthat typically require human intelligence, such as perception, speech \n", 1562 | "\u001b[1;33m \u001b[0mrecognition, decision-making, and language translation. While some tasks are \n", 1563 | "\u001b[1;33m \u001b[0malready being performed by AI systems with high accuracy, there are still \n", 1564 | "\u001b[1;33m \u001b[0mmany limitations in terms of creativity, empathy, and understanding context. \n", 1565 | "\u001b[1;33m 2 \u001b[0mAnalyze the impact of AI on reasoning: One potential impact of AI on human \n", 1566 | "\u001b[1;33m \u001b[0mreasoning is that it could challenge our ability to think critically and \n", 1567 | "\u001b[1;33m \u001b[0mcreatively. For example, if we rely too heavily on AI systems for \n", 1568 | "\u001b[1;33m \u001b[0mdecision-making or problem-solving, we may become less adept at using our own\n", 1569 | "\u001b[1;33m \u001b[0mreasoning skills. However, this depends on how AI systems are designed and \n", 1570 | "\u001b[1;33m \u001b[0mused in practice. \n", 1571 | "\u001b[1;33m 3 \u001b[0mConsider the impact of AI on learning: Another potential impact of AI on \n", 1572 | "\u001b[1;33m \u001b[0mhuman learning is that it could change the way we access information and \n", 1573 | "\u001b[1;33m \u001b[0mlearn new things. 
For example, if we have access to a vast library of \n", 1574 | "\u001b[1;33m \u001b[0mknowledge stored in an AI system, we may be able to learn more efficiently by\n", 1575 | "\u001b[1;33m \u001b[0masking questions or exploring different topics with greater ease than before.\n", 1576 | "\u001b[1;33m \u001b[0mHowever, this also raises concerns about whether humans will become too \n", 1577 | "\u001b[1;33m \u001b[0mreliant on technology for learning and forget how to think independently. \n", 1578 | "\u001b[1;33m 4 \u001b[0mExplore potential benefits of AI in education: One potential benefit of using\n", 1579 | "\u001b[1;33m \u001b[0mAI systems in education is that they could provide personalized learning \n", 1580 | "\u001b[1;33m \u001b[0mexperiences tailored to individual needs. This could help students learn more\n", 1581 | "\u001b[1;33m \u001b[0meffectively, particularly those who struggle with traditional educational \n", 1582 | "\u001b[1;33m \u001b[0mmethods. Additionally, AI systems could assist teachers by automating \n", 1583 | "\u001b[1;33m \u001b[0madministrative tasks and providing insights into student progress, allowing \n", 1584 | "\u001b[1;33m \u001b[0mthem to focus on teaching rather than paperwork. \n", 1585 | "\u001b[1;33m 5 \u001b[0mAssess the potential risks of over-reliance on AI: There are also potential \n", 1586 | "\u001b[1;33m \u001b[0mrisks associated with over-reliance on AI in education or reasoning. For \n", 1587 | "\u001b[1;33m \u001b[0mexample, if we become too reliant on AI systems for decision-making or \n", 1588 | "\u001b[1;33m \u001b[0mproblem-solving, we may lose our ability to think critically and creatively, \n", 1589 | "\u001b[1;33m \u001b[0mwhich could have negative consequences in a variety of contexts. 
\n", 1590 | "\u001b[1;33m \u001b[0mAdditionally, there are concerns about bias and discrimination embedded in AI\n", 1591 | "\u001b[1;33m \u001b[0malgorithms that could perpetuate existing social inequalities if not properly\n", 1592 | "\u001b[1;33m \u001b[0maddressed. \n", 1593 | "\u001b[1;33m 6 \u001b[0mConsider the role of human values in AI development: Finally, it's important \n", 1594 | "\u001b[1;33m \u001b[0mto consider the role of human values in developing AI systems that support \n", 1595 | "\u001b[1;33m \u001b[0mrather than hinder human reasoning and learning. This includes ensuring that \n", 1596 | "\u001b[1;33m \u001b[0mAI algorithms are transparent and explainable so that humans can understand \n", 1597 | "\u001b[1;33m \u001b[0mhow decisions are being made. Additionally, there needs to be a concerted \n", 1598 | "\u001b[1;33m \u001b[0meffort to address ethical concerns around data privacy, bias, and \n", 1599 | "\u001b[1;33m \u001b[0maccountability in the development of AI systems. \n" 1600 | ], 1601 | "text/html": [ 1602 | "
Sure, I can provide my insights on how artificial intelligence (AI) may impact \n", 1603 | "human reasoning and learning in the near future. Here are some steps we could \n", 1604 | "take to explore this question: \n", 1605 | "\n", 1606 | " 1 Understand what AI is and its capabilities: Firstly, it's important to \n", 1607 | " understand that AI refers to computer systems capable of performing tasks \n", 1608 | " that typically require human intelligence, such as perception, speech \n", 1609 | " recognition, decision-making, and language translation. While some tasks are \n", 1610 | " already being performed by AI systems with high accuracy, there are still \n", 1611 | " many limitations in terms of creativity, empathy, and understanding context. \n", 1612 | " 2 Analyze the impact of AI on reasoning: One potential impact of AI on human \n", 1613 | " reasoning is that it could challenge our ability to think critically and \n", 1614 | " creatively. For example, if we rely too heavily on AI systems for \n", 1615 | " decision-making or problem-solving, we may become less adept at using our own\n", 1616 | " reasoning skills. However, this depends on how AI systems are designed and \n", 1617 | " used in practice. \n", 1618 | " 3 Consider the impact of AI on learning: Another potential impact of AI on \n", 1619 | " human learning is that it could change the way we access information and \n", 1620 | " learn new things. For example, if we have access to a vast library of \n", 1621 | " knowledge stored in an AI system, we may be able to learn more efficiently by\n", 1622 | " asking questions or exploring different topics with greater ease than before.\n", 1623 | " However, this also raises concerns about whether humans will become too \n", 1624 | " reliant on technology for learning and forget how to think independently. 
\n", 1625 | " 4 Explore potential benefits of AI in education: One potential benefit of using\n", 1626 | " AI systems in education is that they could provide personalized learning \n", 1627 | " experiences tailored to individual needs. This could help students learn more\n", 1628 | " effectively, particularly those who struggle with traditional educational \n", 1629 | " methods. Additionally, AI systems could assist teachers by automating \n", 1630 | " administrative tasks and providing insights into student progress, allowing \n", 1631 | " them to focus on teaching rather than paperwork. \n", 1632 | " 5 Assess the potential risks of over-reliance on AI: There are also potential \n", 1633 | " risks associated with over-reliance on AI in education or reasoning. For \n", 1634 | " example, if we become too reliant on AI systems for decision-making or \n", 1635 | " problem-solving, we may lose our ability to think critically and creatively, \n", 1636 | " which could have negative consequences in a variety of contexts. \n", 1637 | " Additionally, there are concerns about bias and discrimination embedded in AI\n", 1638 | " algorithms that could perpetuate existing social inequalities if not properly\n", 1639 | " addressed. \n", 1640 | " 6 Consider the role of human values in AI development: Finally, it's important \n", 1641 | " to consider the role of human values in developing AI systems that support \n", 1642 | " rather than hinder human reasoning and learning. This includes ensuring that \n", 1643 | " AI algorithms are transparent and explainable so that humans can understand \n", 1644 | " how decisions are being made. Additionally, there needs to be a concerted \n", 1645 | " effort to address ethical concerns around data privacy, bias, and \n", 1646 | " accountability in the development of AI systems. 
\n", 1647 | "\n" 1648 | ] 1649 | }, 1650 | "metadata": {} 1651 | }, 1652 | { 1653 | "output_type": "display_data", 1654 | "data": { 1655 | "text/plain": [ 1656 | "\u001b[1;3;32m Generated by Vicuna-7b in \u001b[0m\u001b[1;3;32m0:06:58\u001b[0m\u001b[1;3;32m.\u001b[0m\u001b[1;3;32m430866\u001b[0m\n" 1657 | ], 1658 | "text/html": [ 1659 | "
Generated by Vicuna-7b in 0:06:58.430866\n",
1660 | "\n"
1661 | ]
1662 | },
1663 | "metadata": {}
1664 | },
1665 | {
1666 | "output_type": "display_data",
1667 | "data": {
1668 | "text/plain": [
1669 | "\u001b[38;5;21m ---\u001b[0m\n"
1670 | ],
1671 | "text/html": [
1672 | " ---\n",
1673 | "\n"
1674 | ]
1675 | },
1676 | "metadata": {}
1677 | }
1678 | ]
1679 | },
1680 | {
1681 | "cell_type": "code",
1682 | "source": [
1683 | "Zero_Shot_CoT_APE = \"\"\"what will be the impact of Artificial Intelligence on human reasoning and learning in the near future?\n",
1684 | "\n",
1685 | "Let's work this out in a step by step way to be sure we have the right answer.\"\"\""
1686 | ],
1687 | "metadata": {
1688 | "id": "y2yijVUcTCE3"
1689 | },
1690 | "execution_count": null,
1691 | "outputs": []
1692 | },
1693 | {
1694 | "cell_type": "code",
1695 | "source": [
1696 | "prompt6 = vicunaQ4KM_CT(Zero_Shot_CoT_APE)"
1697 | ],
1698 | "metadata": {
1699 | "colab": {
1700 | "base_uri": "https://localhost:8080/",
1701 | "height": 935
1702 | },
1703 | "id": "dPbNAbbOVaRX",
1704 | "outputId": "eedaff4b-c11c-41c6-b26c-c1304427f88c"
1705 | },
1706 | "execution_count": null,
1707 | "outputs": [
1708 | {
1709 | "output_type": "display_data",
1710 | "data": {
1711 | "text/plain": [
1712 | "\u001b[1;3;91mPrompt: what will be the impact of Artificial Intelligence on human reasoning \u001b[0m\n",
1713 | "\u001b[1;3;91mand learning in the near future?\u001b[0m\n",
1714 | "\n",
1715 | "\u001b[1;3;91mLet's work this out in a step by step way to be sure we have the right answer.\u001b[0m\n"
1716 | ],
1717 | "text/html": [
1718 | "Prompt: what will be the impact of Artificial Intelligence on human reasoning \n", 1719 | "and learning in the near future?\n", 1720 | "\n", 1721 | "Let's work this out in a step by step way to be sure we have the right answer.\n", 1722 | "\n" 1723 | ] 1724 | }, 1725 | "metadata": {} 1726 | }, 1727 | { 1728 | "output_type": "display_data", 1729 | "data": { 1730 | "text/plain": [ 1731 | "\u001b[1;3mNumber of tokens in the prompt: \u001b[0m\u001b[1;3;36m83\u001b[0m\n" 1732 | ], 1733 | "text/html": [ 1734 | "
Number of tokens in the prompt: 83\n", 1735 | "\n" 1736 | ] 1737 | }, 1738 | "metadata": {} 1739 | }, 1740 | { 1741 | "output_type": "display_data", 1742 | "data": { 1743 | "text/plain": [ 1744 | "\u001b[1;3mNumber of tokens in the answer: \u001b[0m\u001b[1;3;36m665\u001b[0m\n" 1745 | ], 1746 | "text/html": [ 1747 | "
Number of tokens in the answer: 665\n", 1748 | "\n" 1749 | ] 1750 | }, 1751 | "metadata": {} 1752 | }, 1753 | { 1754 | "output_type": "display_data", 1755 | "data": { 1756 | "text/plain": [ 1757 | "Sure, I can help you explore that question! Here are some steps we could follow:\n", 1758 | "\n", 1759 | "\u001b[1;33m 1 \u001b[0mUnderstand what artificial intelligence (AI) is and how it works: We need to \n", 1760 | "\u001b[1;33m \u001b[0mdefine what we mean by \"artificial intelligence\" before we can consider its \n", 1761 | "\u001b[1;33m \u001b[0mimpact on human reasoning and learning. AI refers to a range of \n", 1762 | "\u001b[1;33m \u001b[0mcomputer-based techniques that allow machines or computers to mimic human \n", 1763 | "\u001b[1;33m \u001b[0mbehavior, learn from data, reason using algorithms, and make decisions \n", 1764 | "\u001b[1;33m \u001b[0mwithout being explicitly programmed for each task. \n", 1765 | "\u001b[1;33m 2 \u001b[0mAnalyze the current state of AI: We need to understand where AI is currently \n", 1766 | "\u001b[1;33m \u001b[0mat in terms of its capabilities and limitations. While AI has made \n", 1767 | "\u001b[1;33m \u001b[0msignificant progress over the past few years, it still lacks some critical \n", 1768 | "\u001b[1;33m \u001b[0maspects that are essential for human reasoning and learning, such as \n", 1769 | "\u001b[1;33m \u001b[0memotional intelligence, creativity, and common sense reasoning. \n", 1770 | "\u001b[1;33m 3 \u001b[0mConsider how AI can impact human reasoning: If we assume that AI will \n", 1771 | "\u001b[1;33m \u001b[0mcontinue to advance in the near future, what could be its potential impact on\n", 1772 | "\u001b[1;33m \u001b[0mhuman reasoning? One possibility is that AI may help humans reason more \n", 1773 | "\u001b[1;33m \u001b[0mefficiently by providing faster and more accurate data analysis, pattern \n", 1774 | "\u001b[1;33m \u001b[0mrecognition, and prediction. 
However, it's also possible that excessive \n", 1775 | "\u001b[1;33m \u001b[0mreliance on AI might lead to a decrease in critical thinking skills as humans\n", 1776 | "\u001b[1;33m \u001b[0mbecome overly dependent on technology for decision-making. \n", 1777 | "\u001b[1;33m 4 \u001b[0mExplore the impact of AI on human learning: How could AI affect human \n", 1778 | "\u001b[1;33m \u001b[0mlearning? One potential benefit is that AI could personalize and adapt \n", 1779 | "\u001b[1;33m \u001b[0meducational content based on each individual's unique needs, preferences, and\n", 1780 | "\u001b[1;33m \u001b[0mlearning styles. This might lead to more effective and efficient learning for\n", 1781 | "\u001b[1;33m \u001b[0mmany individuals. However, there are also concerns about the role of AI in \n", 1782 | "\u001b[1;33m \u001b[0mreplacing teachers or exacerbating existing social inequalities in access to \n", 1783 | "\u001b[1;33m \u001b[0meducation and job opportunities. \n", 1784 | "\u001b[1;33m 5 \u001b[0mConsider potential ethical implications: As we explore the impacts of AI on \n", 1785 | "\u001b[1;33m \u001b[0mhuman reasoning and learning, it's crucial to consider potential ethical \n", 1786 | "\u001b[1;33m \u001b[0mimplications. For example, who will be responsible for ensuring that AI \n", 1787 | "\u001b[1;33m \u001b[0msystems are fair, unbiased, and transparent? How can we ensure that AI is \n", 1788 | "\u001b[1;33m \u001b[0mused in ways that promote social good rather than harm? \n", 1789 | "\u001b[1;33m 6 \u001b[0mMake predictions about the future: Based on our analysis of current trends \n", 1790 | "\u001b[1;33m \u001b[0mand potential impacts, what might the future look like for human reasoning \n", 1791 | "\u001b[1;33m \u001b[0mand learning with AI integration? 
It's difficult to predict exactly how \n", 1792 | "\u001b[1;33m \u001b[0mthings will play out, but one possible scenario is that humans may need to \n", 1793 | "\u001b[1;33m \u001b[0madapt their skills and knowledge base to work alongside AI systems. This \n", 1794 | "\u001b[1;33m \u001b[0mcould involve reorienting education towards teaching critical thinking, \n", 1795 | "\u001b[1;33m \u001b[0mcreativity, and emotional intelligence in order for individuals to \n", 1796 | "\u001b[1;33m \u001b[0meffectively collaborate with AI tools. \n", 1797 | "\u001b[1;33m 7 \u001b[0mRevisit the question: Finally, we can revisit our initial question about the \n", 1798 | "\u001b[1;33m \u001b[0mimpact of AI on human reasoning and learning. Based on what we've explored \n", 1799 | "\u001b[1;33m \u001b[0mabove, it seems that while there may be some benefits and challenges \n", 1800 | "\u001b[1;33m \u001b[0massociated with integrating AI into education and decision-making processes, \n", 1801 | "\u001b[1;33m \u001b[0multimately the extent to which AI will influence human behavior depends on \n", 1802 | "\u001b[1;33m \u001b[0mhow it is designed, implemented, and regulated by society as a whole. \n" 1803 | ], 1804 | "text/html": [ 1805 | "
Sure, I can help you explore that question! Here are some steps we could follow:\n", 1806 | "\n", 1807 | " 1 Understand what artificial intelligence (AI) is and how it works: We need to \n", 1808 | " define what we mean by \"artificial intelligence\" before we can consider its \n", 1809 | " impact on human reasoning and learning. AI refers to a range of \n", 1810 | " computer-based techniques that allow machines or computers to mimic human \n", 1811 | " behavior, learn from data, reason using algorithms, and make decisions \n", 1812 | " without being explicitly programmed for each task. \n", 1813 | " 2 Analyze the current state of AI: We need to understand where AI is currently \n", 1814 | " at in terms of its capabilities and limitations. While AI has made \n", 1815 | " significant progress over the past few years, it still lacks some critical \n", 1816 | " aspects that are essential for human reasoning and learning, such as \n", 1817 | " emotional intelligence, creativity, and common sense reasoning. \n", 1818 | " 3 Consider how AI can impact human reasoning: If we assume that AI will \n", 1819 | " continue to advance in the near future, what could be its potential impact on\n", 1820 | " human reasoning? One possibility is that AI may help humans reason more \n", 1821 | " efficiently by providing faster and more accurate data analysis, pattern \n", 1822 | " recognition, and prediction. However, it's also possible that excessive \n", 1823 | " reliance on AI might lead to a decrease in critical thinking skills as humans\n", 1824 | " become overly dependent on technology for decision-making. \n", 1825 | " 4 Explore the impact of AI on human learning: How could AI affect human \n", 1826 | " learning? One potential benefit is that AI could personalize and adapt \n", 1827 | " educational content based on each individual's unique needs, preferences, and\n", 1828 | " learning styles. 
This might lead to more effective and efficient learning for\n", 1829 | " many individuals. However, there are also concerns about the role of AI in \n", 1830 | " replacing teachers or exacerbating existing social inequalities in access to \n", 1831 | " education and job opportunities. \n", 1832 | " 5 Consider potential ethical implications: As we explore the impacts of AI on \n", 1833 | " human reasoning and learning, it's crucial to consider potential ethical \n", 1834 | " implications. For example, who will be responsible for ensuring that AI \n", 1835 | " systems are fair, unbiased, and transparent? How can we ensure that AI is \n", 1836 | " used in ways that promote social good rather than harm? \n", 1837 | " 6 Make predictions about the future: Based on our analysis of current trends \n", 1838 | " and potential impacts, what might the future look like for human reasoning \n", 1839 | " and learning with AI integration? It's difficult to predict exactly how \n", 1840 | " things will play out, but one possible scenario is that humans may need to \n", 1841 | " adapt their skills and knowledge base to work alongside AI systems. This \n", 1842 | " could involve reorienting education towards teaching critical thinking, \n", 1843 | " creativity, and emotional intelligence in order for individuals to \n", 1844 | " effectively collaborate with AI tools. \n", 1845 | " 7 Revisit the question: Finally, we can revisit our initial question about the \n", 1846 | " impact of AI on human reasoning and learning. Based on what we've explored \n", 1847 | " above, it seems that while there may be some benefits and challenges \n", 1848 | " associated with integrating AI into education and decision-making processes, \n", 1849 | " ultimately the extent to which AI will influence human behavior depends on \n", 1850 | " how it is designed, implemented, and regulated by society as a whole. 
\n", 1851 | "\n" 1852 | ] 1853 | }, 1854 | "metadata": {} 1855 | }, 1856 | { 1857 | "output_type": "display_data", 1858 | "data": { 1859 | "text/plain": [ 1860 | "\u001b[1;3;32m Generated by Vicuna-7b in \u001b[0m\u001b[1;3;32m0:07:32\u001b[0m\u001b[1;3;32m.\u001b[0m\u001b[1;3;32m049468\u001b[0m\n" 1861 | ], 1862 | "text/html": [ 1863 | "
Generated by Vicuna-7b in 0:07:32.049468\n",
1864 | "\n"
1865 | ]
1866 | },
1867 | "metadata": {}
1868 | },
1869 | {
1870 | "output_type": "display_data",
1871 | "data": {
1872 | "text/plain": [
1873 | "\u001b[38;5;21m ---\u001b[0m\n"
1874 | ],
1875 | "text/html": [
1876 | " ---\n",
1877 | "\n"
1878 | ]
1879 | },
1880 | "metadata": {}
1881 | }
1882 | ]
1883 | },
1884 | {
1885 | "cell_type": "markdown",
1886 | "source": [
1887 | "### name: 'Tree of Thought V2',\n",
1888 | "description: \"The Tree of Thoughts (ToT) framework improves language models' problem-solving abilities by allowing deliberate decision making through exploration and strategic lookahead\",\n",
1889 | "paper: 'https://arxiv.org/abs/2305.10601',\n",
1890 | "content:\n",
1891 | "```\n",
1892 | "\"\"\"Simulate three brilliant, logical experts collaboratively answering a question.\n",
1893 | "Each one verbosely explains their thought process in real-time, considering the prior explanations of others and openly acknowledging mistakes.\n",
1894 | "At each step, whenever possible, each expert refines and builds upon the thoughts of others, acknowledging their contributions.\n",
1895 | "They continue until there is a definitive answer to the question.\n",
1896 | "For clarity, your entire response should be in a markdown table. The question is...\n",
1897 | "\n",
1898 | "{question}\n",
1899 | "\"\"\"\n",
1900 | "```\n"
1901 | ],
1902 | "metadata": {
1903 | "id": "my1BNhTMTCuM"
1904 | }
1905 | },
1906 | {
1907 | "cell_type": "code",
1908 | "source": [
1909 | "ToT_V2 = \"\"\"Simulate three brilliant, logical experts collaboratively answering a question.\n",
1910 | "Each one verbosely explains their thought process in real-time, considering the prior explanations of others and openly acknowledging mistakes.\n",
1911 | "At each step, whenever possible, each expert refines and builds upon the thoughts of others, acknowledging their contributions.\n",
1912 | "They continue until there is a definitive answer to the question.\n",
1913 | "For clarity, your entire response should be in a markdown table. The question is...\n",
1914 | "\n",
1915 | "How can we fix global climate change?\n",
1916 | "\"\"\""
1917 | ],
1918 | "metadata": {
1919 | "id": "JObswIL0hoW_"
1920 | },
1921 | "execution_count": null,
1922 | "outputs": []
1923 | },
1924 | {
1925 | "cell_type": "code",
1926 | "source": [
1927 | "prompt7 = vicunaQ4KM_CT(ToT_V2)"
1928 | ],
1929 | "metadata": {
1930 | "colab": {
1931 | "base_uri": "https://localhost:8080/",
1932 | "height": 1000
1933 | },
1934 | "id": "yGGSxFwXTDMf",
1935 | "outputId": "173c397c-509e-4a40-fe89-452a8a51587d"
1936 | },
1937 | "execution_count": null,
1938 | "outputs": [
1939 | {
1940 | "output_type": "display_data",
1941 | "data": {
1942 | "text/plain": [
1943 | "\u001b[1;3;91mPrompt: Simulate three brilliant, logical experts collaboratively answering a \u001b[0m\n",
1944 | "\u001b[1;3;91mquestion.\u001b[0m\n",
1945 | "\u001b[1;3;91mEach one verbosely explains their thought process in real-time, considering the \u001b[0m\n",
1946 | "\u001b[1;3;91mprior explanations of others and openly acknowledging mistakes.\u001b[0m\n",
1947 | "\u001b[1;3;91mAt each step, whenever possible, each expert refines and builds upon the \u001b[0m\n",
1948 | "\u001b[1;3;91mthoughts of others, acknowledging their contributions.\u001b[0m\n",
1949 | "\u001b[1;3;91mThey continue until there is a definitive answer to the question.\u001b[0m\n",
1950 | "\u001b[1;3;91mFor clarity, your entire response should be in a markdown table. The question \u001b[0m\n",
1951 | "\u001b[1;3;91mis\u001b[0m\u001b[1;3;91m...\u001b[0m\n",
1952 | "\n",
1953 | "\u001b[1;3;91mHow can we fix global climate change?\u001b[0m\n",
1954 | "\n"
1955 | ],
1956 | "text/html": [
1957 | "Prompt: Simulate three brilliant, logical experts collaboratively answering a \n", 1958 | "question.\n", 1959 | "Each one verbosely explains their thought process in real-time, considering the \n", 1960 | "prior explanations of others and openly acknowledging mistakes.\n", 1961 | "At each step, whenever possible, each expert refines and builds upon the \n", 1962 | "thoughts of others, acknowledging their contributions.\n", 1963 | "They continue until there is a definitive answer to the question.\n", 1964 | "For clarity, your entire response should be in a markdown table. The question \n", 1965 | "is...\n", 1966 | "\n", 1967 | "How can we fix global climate change?\n", 1968 | "\n", 1969 | "\n" 1970 | ] 1971 | }, 1972 | "metadata": {} 1973 | }, 1974 | { 1975 | "output_type": "display_data", 1976 | "data": { 1977 | "text/plain": [ 1978 | "\u001b[1;3mNumber of tokens in the prompt: \u001b[0m\u001b[1;3;36m152\u001b[0m\n" 1979 | ], 1980 | "text/html": [ 1981 | "
Number of tokens in the prompt: 152\n", 1982 | "\n" 1983 | ] 1984 | }, 1985 | "metadata": {} 1986 | }, 1987 | { 1988 | "output_type": "display_data", 1989 | "data": { 1990 | "text/plain": [ 1991 | "\u001b[1;3mNumber of tokens in the answer: \u001b[0m\u001b[1;3;36m366\u001b[0m\n" 1992 | ], 1993 | "text/html": [ 1994 | "
Number of tokens in the answer: 366\n", 1995 | "\n" 1996 | ] 1997 | }, 1998 | "metadata": {} 1999 | }, 2000 | { 2001 | "output_type": "display_data", 2002 | "data": { 2003 | "text/plain": [ 2004 | "\n", 2005 | "\n", 2006 | "\n", 2007 | "\n", 2008 | " \n", 2009 | " \u001b[1m \u001b[0m\u001b[1mExpert 1\u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m \u001b[1m \u001b[0m\u001b[1mExpert 2\u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m \u001b[1m \u001b[0m\u001b[1mExpert 3\u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m \u001b[1m \u001b[0m\u001b[1mAnswer\u001b[0m\u001b[1m \u001b[0m\u001b[1m \u001b[0m \n", 2010 | " ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ \n", 2011 | " \u001b[3mClimate change is\u001b[0m \u001b[3mAgreed.\u001b[0m\u001b[3mWe also \u001b[0m \u001b[3mExactly.\u001b[0m\u001b[3mIn \u001b[0m The key to \n", 2012 | " \u001b[3mcaused by the \u001b[0m \u001b[3mneed to consider \u001b[0m \u001b[3maddition, we must\u001b[0m fixing global \n", 2013 | " \u001b[3moveruse of fossil\u001b[0m \u001b[3mthe role of \u001b[0m \u001b[3maddress the issue\u001b[0m climate change \n", 2014 | " \u001b[3mfuels, which \u001b[0m \u001b[3mdeforestation in \u001b[0m \u001b[3mof carbon \u001b[0m is transitioning \n", 2015 | " \u001b[3mrelease carbon \u001b[0m \u001b[3mcontributing to \u001b[0m \u001b[3memissions from \u001b[0m away from fossil \n", 2016 | " \u001b[3mdioxide and other\u001b[0m \u001b[3mclimate change. \u001b[0m \u001b[3mtransportation \u001b[0m fuels and \n", 2017 | " \u001b[3mgreenhouse gases \u001b[0m \u001b[3mWe can combat \u001b[0m \u001b[3mand industry. One\u001b[0m towards clean \n", 2018 | " \u001b[3minto the \u001b[0m \u001b[3mthis by promoting\u001b[0m \u001b[3msolution is to \u001b[0m energy sources \n", 2019 | " \u001b[3matmosphere. 
We \u001b[0m \u001b[3mreforestation \u001b[0m \u001b[3mdevelop more \u001b[0m like solar, \n", 2020 | " \u001b[3mneed to \u001b[0m \u001b[3mefforts, \u001b[0m \u001b[3mefficient \u001b[0m wind, \n", 2021 | " \u001b[3mtransition to \u001b[0m \u001b[3mespecially in \u001b[0m \u001b[3mvehicles and \u001b[0m hydroelectric \n", 2022 | " \u001b[3mclean energy \u001b[0m \u001b[3mareas that have \u001b[0m \u001b[3mindustrial \u001b[0m power, and \n", 2023 | " \u001b[3msources like \u001b[0m \u001b[3mbeen affected by \u001b[0m \u001b[3mprocesses that \u001b[0m geothermal \n", 2024 | " \u001b[3msolar, wind, \u001b[0m \u001b[3mlogging or forest\u001b[0m \u001b[3mreduce emissions,\u001b[0m energy. We must \n", 2025 | " \u001b[3mhydroelectric \u001b[0m \u001b[3mfires.\u001b[0m \u001b[3mas well as \u001b[0m also address \n", 2026 | " \u001b[3mpower, and \u001b[0m \u001b[3minvesting in \u001b[0m deforestation, \n", 2027 | " \u001b[3mgeothermal \u001b[0m \u001b[3mpublic transit \u001b[0m promote \n", 2028 | " \u001b[3menergy.\u001b[0m\u001b[3mFirstly, \u001b[0m \u001b[3msystems and \u001b[0m reforestation \n", 2029 | " \u001b[3mwe must invest in\u001b[0m \u001b[3mencouraging \u001b[0m efforts, and \n", 2030 | " \u001b[3mresearch and \u001b[0m \u001b[3mtelecommuting \u001b[0m work on reducing \n", 2031 | " \u001b[3mdevelopment for \u001b[0m \u001b[3mwhen possible.\u001b[0m carbon emissions \n", 2032 | " \u001b[3mrenewable energy \u001b[0m in \n", 2033 | " \u001b[3mtechnologies, so \u001b[0m transportation \n", 2034 | " \u001b[3mthat they become \u001b[0m and industry \n", 2035 | " \u001b[3mmore efficient \u001b[0m through more \n", 2036 | " \u001b[3mand \u001b[0m efficient \n", 2037 | " \u001b[3mcost-effective. 
\u001b[0m vehicles and \n", 2038 | " \u001b[3mSecondly, we \u001b[0m industrial \n", 2039 | " \u001b[3mshould implement \u001b[0m processes as \n", 2040 | " \u001b[3mpolicies and \u001b[0m well as public \n", 2041 | " \u001b[3mregulations that \u001b[0m transit systems \n", 2042 | " \u001b[3mincentivize the \u001b[0m and \n", 2043 | " \u001b[3muse of clean \u001b[0m telecommuting \n", 2044 | " \u001b[3menergy sources by\u001b[0m when possible. \n", 2045 | " \u001b[3mreducing tariffs \u001b[0m \n", 2046 | " \u001b[3mon them and \u001b[0m \n", 2047 | " \u001b[3mincreasing taxes \u001b[0m \n", 2048 | " \u001b[3mon fossil fuels.\u001b[0m \n", 2049 | " \n" 2050 | ], 2051 | "text/html": [ 2052 | "
\n", 2053 | "\n", 2054 | "\n", 2055 | "\n", 2056 | " \n", 2057 | " Expert 1 Expert 2 Expert 3 Answer \n", 2058 | " ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ \n", 2059 | " Climate change is Agreed.We also Exactly.In The key to \n", 2060 | " caused by the need to consider addition, we must fixing global \n", 2061 | " overuse of fossil the role of address the issue climate change \n", 2062 | " fuels, which deforestation in of carbon is transitioning \n", 2063 | " release carbon contributing to emissions from away from fossil \n", 2064 | " dioxide and other climate change. transportation fuels and \n", 2065 | " greenhouse gases We can combat and industry. One towards clean \n", 2066 | " into the this by promoting solution is to energy sources \n", 2067 | " atmosphere. We reforestation develop more like solar, \n", 2068 | " need to efforts, efficient wind, \n", 2069 | " transition to especially in vehicles and hydroelectric \n", 2070 | " clean energy areas that have industrial power, and \n", 2071 | " sources like been affected by processes that geothermal \n", 2072 | " solar, wind, logging or forest reduce emissions, energy. We must \n", 2073 | " hydroelectric fires. as well as also address \n", 2074 | " power, and investing in deforestation, \n", 2075 | " geothermal public transit promote \n", 2076 | " energy.Firstly, systems and reforestation \n", 2077 | " we must invest in encouraging efforts, and \n", 2078 | " research and telecommuting work on reducing \n", 2079 | " development for when possible. carbon emissions \n", 2080 | " renewable energy in \n", 2081 | " technologies, so transportation \n", 2082 | " that they become and industry \n", 2083 | " more efficient through more \n", 2084 | " and efficient \n", 2085 | " cost-effective. 
vehicles and \n", 2086 | " Secondly, we industrial \n", 2087 | " should implement processes as \n", 2088 | " policies and well as public \n", 2089 | " regulations that transit systems \n", 2090 | " incentivize the and \n", 2091 | " use of clean telecommuting \n", 2092 | " energy sources by when possible. \n", 2093 | " reducing tariffs \n", 2094 | " on them and \n", 2095 | " increasing taxes \n", 2096 | " on fossil fuels. \n", 2097 | " \n", 2098 | "\n" 2099 | ] 2100 | }, 2101 | "metadata": {} 2102 | }, 2103 | { 2104 | "output_type": "display_data", 2105 | "data": { 2106 | "text/plain": [ 2107 | "\u001b[1;3;32m Generated by Vicuna-7b in \u001b[0m\u001b[1;3;32m0:04:54\u001b[0m\u001b[1;3;32m.\u001b[0m\u001b[1;3;32m620494\u001b[0m\n" 2108 | ], 2109 | "text/html": [ 2110 | "
Generated by Vicuna-7b in 0:04:54.620494\n",
2111 | "\n"
2112 | ]
2113 | },
2114 | "metadata": {}
2115 | },
2116 | {
2117 | "output_type": "display_data",
2118 | "data": {
2119 | "text/plain": [
2120 | "\u001b[38;5;21m ---\u001b[0m\n"
2121 | ],
2122 | "text/html": [
2123 | " ---\n",
2124 | "\n"
2125 | ]
2126 | },
2127 | "metadata": {}
2128 | }
2129 | ]
2130 | },
2131 | {
2132 | "cell_type": "markdown",
2133 | "source": [
2134 | "### name: 'Summarization Into List',\n",
2135 | "description: `This prompt asks the LLM to summarize a given text into a list of bullet points.`,\n",
2136 | "Prompt Template:\n",
2137 | "```\n",
2138 | "\"\"\"Write a concise summary of the following text delimited by triple backquotes.\n",
2139 | "Return your response in bullet points which covers the key points of the text.\n",
2140 | "\n",
2141 | "```\n",
2142 | "{text}\n",
2143 | "```\n",
2144 | "\n",
2145 | "BULLET POINT SUMMARY:\"\"\"\n",
2146 | "```\n",
2147 | "\n"
2148 | ],
2149 | "metadata": {
2150 | "id": "b09G0ZX-iq17"
2151 | }
2152 | },
2153 | {
2154 | "cell_type": "code",
2155 | "source": [
2156 | "SummarizToList = \"\"\"Write a concise summary of the following text delimited by triple backquotes.\n",
2157 | "Return your response in bullet points which covers the key points of the text.\n",
2158 | "\n",
2159 | "```\n",
2160 | "This study from March 2023 takes a simple yet novel approach to prompt engineering by automatically generating\n",
2161 | "prompts based on the desired input and output. In a recent article I considered the future of prompt engineering,\n",
2162 | "and the possibility of soft prompts (prompt tuning). I argued that user context, ambiguity and user intent all play\n",
2163 | "an important role in any conversational UI. User intent, context, ambiguity and disambiguation are all part and\n",
2164 | "parcel of any conversation. The question is, can this approach accelerate the process where manually wording\n",
2165 | "prompts fade into the background and interaction with the LLM is based on contextual example input and output\n",
2166 | "datasets? What I like about this approach, is that context, and user intent can be mapped, while also taking into\n",
2167 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in the sense of trying to word a prompt\n",
2168 | "in such a way to engender a desired response from the LLM. Focussing on prompt engineering also does not take into\n",
2169 | "consideration an array of possible user inputs. Data Management will always be part of LLM applications. APE offers\n",
2170 | "an alternative approach to prompt engineering, where via input and matching output examples, prompts can be\n",
2171 | "generated on the fly. We define “prompt engineering” as optimising the language in a prompt in order to elicit the\n",
2172 | "best possible performance. Notably, this does not include prompts that chain multiple LLM queries together or give\n",
2173 | "the LLM access to external tools. ~ Source The basic notebook below shows how Automatic Prompt Engineering (APE)\n",
2174 | "can be used to generate prompts based on a small input data set, a list of expected outputs and a prompt template.\n",
2175 | "APE performs this in two steps: A LLM is used to generate a set of candidate prompts. A prompt evaluation function\n",
2176 | "considers the quality of each candidate prompt; returning the prompt with the highest evaluation score. A practical\n",
2177 | "implementation is, via a human-in-the-loop approach, prompts can be marked up and marked down for use on terms of\n",
2178 | "accuracy and correctness.\n",
2179 | "```\n",
2180 | "\n",
2181 | "BULLET POINT SUMMARY:\"\"\""
2182 | ],
2183 | "metadata": {
2184 | "id": "OEd2WVxnTB9W"
2185 | },
2186 | "execution_count": null,
2187 | "outputs": []
2188 | },
2189 | {
2190 | "cell_type": "code",
2191 | "source": [
2192 | "prompt8 = vicunaQ4KM_CT(SummarizToList)"
2193 | ],
2194 | "metadata": {
2195 | "colab": {
2196 | "base_uri": "https://localhost:8080/",
2197 | "height": 1000
2198 | },
2199 | "id": "sOEVQ5wOTBzP",
2200 | "outputId": "2a56a09d-bb5c-4ecd-815e-e21cdd994389"
2201 | },
2202 | "execution_count": null,
2203 | "outputs": [
2204 | {
2205 | "output_type": "display_data",
2206 | "data": {
2207 | "text/plain": [
2208 | "\u001b[1;3;91mPrompt: Write a concise summary of the following text delimited by triple \u001b[0m\n",
2209 | "\u001b[1;3;91mbackquotes.\u001b[0m\n",
2210 | "\u001b[1;3;91mReturn your response in bullet points which covers the key points of the text.\u001b[0m\n",
2211 | "\n",
2212 | "\u001b[1;3;91m```\u001b[0m\n",
2213 | "\u001b[1;3;91mThis study from March \u001b[0m\u001b[1;3;91m2023\u001b[0m\u001b[1;3;91m takes a simple yet novel approach to prompt \u001b[0m\n",
2214 | "\u001b[1;3;91mengineering by automatically generating\u001b[0m\n",
2215 | "\u001b[1;3;91mprompts based on the desired input and output. In a recent article I considered \u001b[0m\n",
2216 | "\u001b[1;3;91mthe future of prompt engineering,\u001b[0m\n",
2217 | "\u001b[1;3;91mand the possibility of soft prompts \u001b[0m\u001b[1;3;91m(\u001b[0m\u001b[1;3;91mprompt tuning\u001b[0m\u001b[1;3;91m)\u001b[0m\u001b[1;3;91m. I argued that user context,\u001b[0m\n",
2218 | "\u001b[1;3;91mambiguity and user intent all play\u001b[0m\n",
2219 | "\u001b[1;3;91man important role in any conversational UI. User intent, context, ambiguity and \u001b[0m\n",
2220 | "\u001b[1;3;91mdisambiguation are all part and\u001b[0m\n",
2221 | "\u001b[1;3;91mparcel of any conversation. The question is, can this approach accelerate the \u001b[0m\n",
2222 | "\u001b[1;3;91mprocess where manually wording\u001b[0m\n",
2223 | "\u001b[1;3;91mprompts fade into the background and interaction with the LLM is based on \u001b[0m\n",
2224 | "\u001b[1;3;91mcontextual example input and output\u001b[0m\n",
2225 | "\u001b[1;3;91mdatasets? What I like about this approach, is that context, and user intent can \u001b[0m\n",
2226 | "\u001b[1;3;91mbe mapped, while also taking into\u001b[0m\n",
2227 | "\u001b[1;3;91mconsideration possible ambiguity. Yet manually crafting prompts is tedious in \u001b[0m\n",
2228 | "\u001b[1;3;91mthe sense of trying to word a prompt\u001b[0m\n",
2229 | "\u001b[1;3;91min such a way to engender a desired response from the LLM. Focussing on prompt \u001b[0m\n",
2230 | "\u001b[1;3;91mengineering also does not take into\u001b[0m\n",
2231 | "\u001b[1;3;91mconsideration an array of possible user inputs. Data Management will always be \u001b[0m\n",
2232 | "\u001b[1;3;91mpart of LLM applications. APE offers\u001b[0m\n",
2233 | "\u001b[1;3;91man alternative approach to prompt engineering, where via input and matching \u001b[0m\n",
2234 | "\u001b[1;3;91moutput examples, prompts can be\u001b[0m\n",
2235 | "\u001b[1;3;91mgenerated on the fly. We define “prompt engineering” as optimising the language \u001b[0m\n",
2236 | "\u001b[1;3;91min a prompt in order to elicit the\u001b[0m\n",
2237 | "\u001b[1;3;91mbest possible performance. Notably, this does not include prompts that chain \u001b[0m\n",
2238 | "\u001b[1;3;91mmultiple LLM queries together or give\u001b[0m\n",
2239 | "\u001b[1;3;91mthe LLM access to external tools. ~ Source The basic notebook below shows how \u001b[0m\n",
2240 | "\u001b[1;3;91mAutomatic Prompt Engineering \u001b[0m\u001b[1;3;91m(\u001b[0m\u001b[1;3;91mAPE\u001b[0m\u001b[1;3;91m)\u001b[0m\n",
2241 | "\u001b[1;3;91mcan be used to generate prompts based on a small input data set, a list of \u001b[0m\n",
2242 | "\u001b[1;3;91mexpected outputs and a prompt template.\u001b[0m\n",
2243 | "\u001b[1;3;91mAPE performs this in two steps: A LLM is used to generate a set of candidate \u001b[0m\n",
2244 | "\u001b[1;3;91mprompts. A prompt evaluation function\u001b[0m\n",
2245 | "\u001b[1;3;91mconsiders the quality of each candidate prompt; returning the prompt with the \u001b[0m\n",
2246 | "\u001b[1;3;91mhighest evaluation score. A practical\u001b[0m\n",
2247 | "\u001b[1;3;91mimplementation is, via a human-in-the-loop approach, prompts can be marked up \u001b[0m\n",
2248 | "\u001b[1;3;91mand marked down for use on terms of\u001b[0m\n",
2249 | "\u001b[1;3;91maccuracy and correctness.\u001b[0m\n",
2250 | "\u001b[1;3;91m```\u001b[0m\n",
2251 | "\n",
2252 | "\u001b[1;3;91mBULLET POINT SUMMARY:\u001b[0m\n"
2253 | ],
2254 | "text/html": [
2255 | "Prompt: Write a concise summary of the following text delimited by triple \n", 2256 | "backquotes.\n", 2257 | "Return your response in bullet points which covers the key points of the text.\n", 2258 | "\n", 2259 | "```\n", 2260 | "This study from March 2023 takes a simple yet novel approach to prompt \n", 2261 | "engineering by automatically generating\n", 2262 | "prompts based on the desired input and output. In a recent article I considered \n", 2263 | "the future of prompt engineering,\n", 2264 | "and the possibility of soft prompts (prompt tuning). I argued that user context,\n", 2265 | "ambiguity and user intent all play\n", 2266 | "an important role in any conversational UI. User intent, context, ambiguity and \n", 2267 | "disambiguation are all part and\n", 2268 | "parcel of any conversation. The question is, can this approach accelerate the \n", 2269 | "process where manually wording\n", 2270 | "prompts fade into the background and interaction with the LLM is based on \n", 2271 | "contextual example input and output\n", 2272 | "datasets? What I like about this approach, is that context, and user intent can \n", 2273 | "be mapped, while also taking into\n", 2274 | "consideration possible ambiguity. Yet manually crafting prompts is tedious in \n", 2275 | "the sense of trying to word a prompt\n", 2276 | "in such a way to engender a desired response from the LLM. Focussing on prompt \n", 2277 | "engineering also does not take into\n", 2278 | "consideration an array of possible user inputs. Data Management will always be \n", 2279 | "part of LLM applications. APE offers\n", 2280 | "an alternative approach to prompt engineering, where via input and matching \n", 2281 | "output examples, prompts can be\n", 2282 | "generated on the fly. We define “prompt engineering” as optimising the language \n", 2283 | "in a prompt in order to elicit the\n", 2284 | "best possible performance. 
Notably, this does not include prompts that chain \n", 2285 | "multiple LLM queries together or give\n", 2286 | "the LLM access to external tools. ~ Source The basic notebook below shows how \n", 2287 | "Automatic Prompt Engineering (APE)\n", 2288 | "can be used to generate prompts based on a small input data set, a list of \n", 2289 | "expected outputs and a prompt template.\n", 2290 | "APE performs this in two steps: A LLM is used to generate a set of candidate \n", 2291 | "prompts. A prompt evaluation function\n", 2292 | "considers the quality of each candidate prompt; returning the prompt with the \n", 2293 | "highest evaluation score. A practical\n", 2294 | "implementation is, via a human-in-the-loop approach, prompts can be marked up \n", 2295 | "and marked down for use on terms of\n", 2296 | "accuracy and correctness.\n", 2297 | "```\n", 2298 | "\n", 2299 | "BULLET POINT SUMMARY:\n", 2300 | "\n" 2301 | ] 2302 | }, 2303 | "metadata": {} 2304 | }, 2305 | { 2306 | "output_type": "display_data", 2307 | "data": { 2308 | "text/plain": [ 2309 | "\u001b[1;3mNumber of tokens in the prompt: \u001b[0m\u001b[1;3;36m545\u001b[0m\n" 2310 | ], 2311 | "text/html": [ 2312 | "
Number of tokens in the prompt: 545\n", 2313 | "\n" 2314 | ] 2315 | }, 2316 | "metadata": {} 2317 | }, 2318 | { 2319 | "output_type": "display_data", 2320 | "data": { 2321 | "text/plain": [ 2322 | "\u001b[1;3mNumber of tokens in the answer: \u001b[0m\u001b[1;3;36m143\u001b[0m\n" 2323 | ], 2324 | "text/html": [ 2325 | "
Number of tokens in the answer: 143\n", 2326 | "\n" 2327 | ] 2328 | }, 2329 | "metadata": {} 2330 | }, 2331 | { 2332 | "output_type": "display_data", 2333 | "data": { 2334 | "text/plain": [ 2335 | "\n", 2336 | "\u001b[1;33m • \u001b[0mThe study involves Automatic Prompt Engineering (APE) to generate prompts \n", 2337 | "\u001b[1;33m \u001b[0mbased on desired input and output, eliminating the need for manually crafting\n", 2338 | "\u001b[1;33m \u001b[0mthem. \n", 2339 | "\u001b[1;33m • \u001b[0mThis approach takes into consideration user context, ambiguity, and intent in\n", 2340 | "\u001b[1;33m \u001b[0mconversational UIs. \n", 2341 | "\u001b[1;33m • \u001b[0mThe aim is to accelerate the process of fading away from manually worded \n", 2342 | "\u001b[1;33m \u001b[0mprompts and focusing on contextual examples. \n", 2343 | "\u001b[1;33m • \u001b[0mAPE generates prompts on-the-fly through input-output matching examples, \n", 2344 | "\u001b[1;33m \u001b[0mrather than considering a range of user inputs. \n", 2345 | "\u001b[1;33m • \u001b[0mPrompt engineering involves optimizing language in a prompt for best possible\n", 2346 | "\u001b[1;33m \u001b[0mperformance without chaining multiple LLM queries or giving access to \n", 2347 | "\u001b[1;33m \u001b[0mexternal tools. \n" 2348 | ], 2349 | "text/html": [ 2350 | "
\n", 2351 | " • The study involves Automatic Prompt Engineering (APE) to generate prompts \n", 2352 | " based on desired input and output, eliminating the need for manually crafting\n", 2353 | " them. \n", 2354 | " • This approach takes into consideration user context, ambiguity, and intent in\n", 2355 | " conversational UIs. \n", 2356 | " • The aim is to accelerate the process of fading away from manually worded \n", 2357 | " prompts and focusing on contextual examples. \n", 2358 | " • APE generates prompts on-the-fly through input-output matching examples, \n", 2359 | " rather than considering a range of user inputs. \n", 2360 | " • Prompt engineering involves optimizing language in a prompt for best possible\n", 2361 | " performance without chaining multiple LLM queries or giving access to \n", 2362 | " external tools. \n", 2363 | "\n" 2364 | ] 2365 | }, 2366 | "metadata": {} 2367 | }, 2368 | { 2369 | "output_type": "display_data", 2370 | "data": { 2371 | "text/plain": [ 2372 | "\u001b[1;3;32m Generated by Vicuna-7b in \u001b[0m\u001b[1;3;32m0:05:48\u001b[0m\u001b[1;3;32m.\u001b[0m\u001b[1;3;32m725935\u001b[0m\n" 2373 | ], 2374 | "text/html": [ 2375 | "
Generated by Vicuna-7b in 0:05:48.725935\n",
2376 | "\n"
2377 | ]
2378 | },
2379 | "metadata": {}
2380 | },
2381 | {
2382 | "output_type": "display_data",
2383 | "data": {
2384 | "text/plain": [
2385 | "\u001b[38;5;21m ---\u001b[0m\n"
2386 | ],
2387 | "text/html": [
2388 | " ---\n",
2389 | "\n"
2390 | ]
2391 | },
2392 | "metadata": {}
2393 | }
2394 | ]
2395 | }
2396 | ]
2397 | }
--------------------------------------------------------------------------------