├── images ├── dpa.jpg └── speech_openai.jpg ├── notebook └── openai_azure_search_with_summary_all.ipynb ├── python └── qa_aoai_speech.py ├── readme.md └── requirements.txt /images/dpa.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alezhao/openai/eaf3d59162ec71a2ffaf7f41cc3c112e691b10ec/images/dpa.jpg -------------------------------------------------------------------------------- /images/speech_openai.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alezhao/openai/eaf3d59162ec71a2ffaf7f41cc3c112e691b10ec/images/speech_openai.jpg -------------------------------------------------------------------------------- /notebook/openai_azure_search_with_summary_all.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "9e3839a6-9146-4f60-b74b-19abbc24278d", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import openai\n", 11 | "import requests\n", 12 | "import json\n", 13 | "import subprocess\n", 14 | "import uuid" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "id": "71747683", 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "# Set your azure OpenAI deployment name of complete model \n", 25 | "# text-davinci-003 corresponds to GPT-3.51\n", 26 | "COMPLETIONS_MODEL_ENGINE = \"text-davinci-003\"" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "id": "43de526b", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "# set your api key\n", 37 | "openai.api_key = \"\"\n", 38 | "# your endpoint should look like the following https://YOUR_RESOURCE_NAME.openai.azure.com/\n", 39 | "openai.api_base = \"\" \n", 40 | "# this may change in the future\n", 41 | "openai.api_type = \"azure\"\n", 42 | "# this may change in the future\n", 43 | "openai.api_version = \"2022-12-01\" \n", 44 | "# set your azure search api key\n", 45 | "AZURE_SEARCH_API_KEY = \"\"\n", 46 | "# set your azure search endpoint should look like the following https://YOUR_RESOURCE_NAME.search.windows.net/\n", 47 | "AZURE_SEARCH_ENDPOINT = \"\"\n", 48 | "# set your azure search indexer name\n", 49 | "AZURE_SEARCH_INDEXER_NAME = \"\"" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "id": "b8c340f3", 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "# Details refer: https://platform.openai.com/docs/api-reference/completions/create\n", 60 | "COMPLETIONS_API_PARAMS = {\n", 61 | " # Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered\n", 62 | " \"top_p\": 1,\n", 63 | " # Controls randomness: lowering temperature results in less random completions. \n", 64 | " # At zero the model becomes deterministic\n", 65 | " \"temperature\": 0.0,\n", 66 | " # How much to penalize new tokens based on their existing frequency in the text so far. \n", 67 | " # Decreases the model's likelihood to repeat the same line verbatim\n", 68 | " \"frequency_penalty\" : 0,\n", 69 | " # How much to penalize new tokens based on whether they appear in the text so far. \n", 70 | " # Increases the model's likelihood to talk about new topics\n", 71 | " \"presence_penalty\" : 0,\n", 72 | " # The maximum number of tokens to generate in the output. 
One token is roughly 4 characters\n", 73 | " \"max_tokens\": 2000,\n", 74 | " \"engine\": COMPLETIONS_MODEL_ENGINE,\n", 75 | "}" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "id": "ec420b41", 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "def construct_prompt_python_api(question: str, top: int = 0) -> str:\n", 86 | "\n", 87 | " header =\"##write azure search python restful API with question below\"\n", 88 | " header += \"\\n#question: \" + question + \"\"\n", 89 | " if top > 0:\n", 90 | " header += \"\\n#select top \"+str(top)\n", 91 | " header += \"\\n#api-key: \"+ AZURE_SEARCH_API_KEY\n", 92 | " header += \"\\n#azure_search_endpint: \" + AZURE_SEARCH_ENDPOINT\n", 93 | " header += \"\\n#azure_search_indexer: \" + AZURE_SEARCH_INDEXER_NAME\n", 94 | " header += \"\\n#api-version: 2021-04-30-Preview\"\n", 95 | " header += \"\\n#API key is not permitted in the URI question string\"\n", 96 | " header += \"\\n#return response.text\"\n", 97 | " header += \"\\n\\n\"\n", 98 | " return header\n", 99 | "\n", 100 | "def construct_prompt_url(question: str, top: int = 0) -> str:\n", 101 | "\n", 102 | " header =\"#write a url about question below by azure search Lucene question language\"\n", 103 | " header += \"\\n#question: \" + question + \"\"\n", 104 | " if top > 0:\n", 105 | " header += \"\\n#select top \"+str(top)\n", 106 | " header += \"\\n#azure_search_endpint: \" + AZURE_SEARCH_ENDPOINT\n", 107 | " header += \"\\n#azure_search_indexer: \" + AZURE_SEARCH_INDEXER_NAME\n", 108 | " header += \"\\n#api-version: 2021-04-30-Preview\"\n", 109 | " header += \"\\n#result: \"\n", 110 | " header += \"\\n\\n\"\n", 111 | " return header\n", 112 | "\n", 113 | "def construct_prompt_from_question(question: str, prompt_from_search: str) -> str:\n", 114 | " header = \"\"\"Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say \"I don't know.\"\\n\\nContext:\\n\"\"\"\n", 115 | " return header + \"\".join(prompt_from_search) + \"\\n\\n Q: \" + question + \"\\n A:\"\n", 116 | "\n", 117 | "def construct_prompt_from_question_summary(prompt_from_search: str) -> str:\n", 118 | " header = \"\"\"please summary the context below.\"\\n\\nContext:\\n\"\"\"\n", 119 | " return header + \"\".join(prompt_from_search) + \"\\n\"" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "id": "d1c5a78a", 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "def generate_python_api_from_question(\n", 130 | " question: str,\n", 131 | " top: int = 0,\n", 132 | " show_prompt: bool = False\n", 133 | ") -> str:\n", 134 | " prompt = construct_prompt_python_api(\n", 135 | " question,\n", 136 | " top\n", 137 | " )\n", 138 | " \n", 139 | " if show_prompt:\n", 140 | " print(prompt)\n", 141 | "\n", 142 | " response = openai.Completion.create(\n", 143 | " prompt=prompt,\n", 144 | " **COMPLETIONS_API_PARAMS\n", 145 | " )\n", 146 | "\n", 147 | " return response[\"choices\"][0][\"text\"].strip(\" \\n\")\n", 148 | "\n", 149 | "def question_search_content_by_python_api(filename: str, code: str) -> str:\n", 150 | "\n", 151 | " with open(filename, \"w\") as f:\n", 152 | " f.write(code)\n", 153 | " \n", 154 | " resp_py = json.loads(subprocess.getoutput(\"python3.9 \"+filename))\n", 155 | " subprocess.getoutput(\"rm -rf \"+filename)\n", 156 | " return resp_py\n", 157 | "\n", 158 | "def generate_url_from_question(\n", 159 | " question: str,\n", 160 | " top: int = 0,\n", 161 | 
" show_prompt: bool = False\n", 162 | ") -> str:\n", 163 | " prompt = construct_prompt_url(\n", 164 | " question,\n", 165 | " top\n", 166 | " )\n", 167 | " \n", 168 | " if show_prompt:\n", 169 | " print(prompt)\n", 170 | "\n", 171 | " response = openai.Completion.create(\n", 172 | " prompt=prompt,\n", 173 | " **COMPLETIONS_API_PARAMS\n", 174 | " )\n", 175 | "\n", 176 | " return response[\"choices\"][0][\"text\"].strip(\" \\n\")\n", 177 | "\n", 178 | "def question_search_content_by_url(url: str) -> str:\n", 179 | " # API version\n", 180 | " api_version = '2021-04-30-Preview'\n", 181 | "\n", 182 | " # API key\n", 183 | " api_key = AZURE_SEARCH_API_KEY\n", 184 | "\n", 185 | " # Set the authorization header\n", 186 | " headers = {'api-key': api_key}\n", 187 | " \n", 188 | " response = requests.get(url, headers=headers)\n", 189 | " response.raise_for_status()\n", 190 | " return response.text\n", 191 | "\n", 192 | "def answer_question_from_search_context(\n", 193 | " question: str,\n", 194 | " prompt_from_search: str,\n", 195 | " show_prompt: bool = False\n", 196 | ") -> str:\n", 197 | " prompt = construct_prompt_from_question(\n", 198 | " question,\n", 199 | " prompt_from_search\n", 200 | " )\n", 201 | " \n", 202 | " if show_prompt:\n", 203 | " print(prompt)\n", 204 | "\n", 205 | " response = openai.Completion.create(\n", 206 | " prompt=prompt,\n", 207 | " **COMPLETIONS_API_PARAMS\n", 208 | " )\n", 209 | "\n", 210 | " return response[\"choices\"][0][\"text\"].strip(\" \\n\")\n", 211 | "\n", 212 | "def summary_search_context(\n", 213 | " prompt_from_search: str,\n", 214 | " show_prompt: bool = False\n", 215 | ") -> str:\n", 216 | " prompt = construct_prompt_from_question_summary(\n", 217 | " prompt_from_search\n", 218 | " )\n", 219 | " \n", 220 | " if show_prompt:\n", 221 | " print(prompt)\n", 222 | "\n", 223 | " response = openai.Completion.create(\n", 224 | " prompt=prompt,\n", 225 | " **COMPLETIONS_API_PARAMS\n", 226 | " )\n", 227 | "\n", 228 | " return response[\"choices\"][0][\"text\"].strip(\" \\n\")" 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "id": "d1ef4069", 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "def query_search_content_by_generate_python_api(question: str, top: int = 0) -> str:\n", 239 | " generate_response_python_api = generate_python_api_from_question(question, top, show_prompt=False)\n", 240 | " print(generate_response_python_api)\n", 241 | "\n", 242 | " # gererate \n", 243 | " uid = uuid.uuid4()\n", 244 | " generate_file_name = \"search_restful_\"+str(uid)+\".py\"\n", 245 | "\n", 246 | " question_response_python_api = question_search_content_by_python_api(generate_file_name, generate_response_python_api)\n", 247 | " final_response_python_api = question_response_python_api\n", 248 | " return final_response_python_api" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": null, 254 | "id": "d80f662e", 255 | "metadata": {}, 256 | "outputs": [], 257 | "source": [ 258 | "def query_search_content_by_generate_url(question: str, top: int = 0) -> str:\n", 259 | " generate_response_url = generate_url_from_question(question, top, show_prompt=False)\n", 260 | " print(generate_response_url)\n", 261 | "\n", 262 | " # question search content by url\n", 263 | " question_response_url = question_search_content_by_url(url=generate_response_url)\n", 264 | " final_response_url = json.loads(question_response_url)\n", 265 | " return final_response_url" 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 
| "execution_count": null, 271 | "id": "931644f6", 272 | "metadata": {}, 273 | "outputs": [], 274 | "source": [ 275 | "question = \"how people describe New York and location is USA\"" 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": null, 281 | "id": "fc46056f", 282 | "metadata": {}, 283 | "outputs": [], 284 | "source": [ 285 | "final_response_python_api = query_search_content_by_generate_python_api(question, top=2)" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": null, 291 | "id": "41720f93", 292 | "metadata": {}, 293 | "outputs": [], 294 | "source": [ 295 | "prompt_from_search_python_api = \"\"\n", 296 | "for i in final_response_python_api[\"value\"]:\n", 297 | " prompt_from_search_python_api += i[\"merged_text\"] + \"\\n\"" 298 | ] 299 | }, 300 | { 301 | "cell_type": "code", 302 | "execution_count": null, 303 | "id": "6ade3f47", 304 | "metadata": {}, 305 | "outputs": [], 306 | "source": [ 307 | "answer_question_from_search_context(question, prompt_from_search_python_api, show_prompt=True)" 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": null, 313 | "id": "16bc0423", 314 | "metadata": {}, 315 | "outputs": [], 316 | "source": [ 317 | "final_response_url = query_search_content_by_generate_url(question, top=2)" 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "execution_count": null, 323 | "id": "854dc24c", 324 | "metadata": {}, 325 | "outputs": [], 326 | "source": [ 327 | "prompt_from_search_url = \"\"\n", 328 | "for i in final_response_url[\"value\"]:\n", 329 | " prompt_from_search_url += i[\"merged_text\"] + \"\\n\"" 330 | ] 331 | }, 332 | { 333 | "cell_type": "code", 334 | "execution_count": null, 335 | "id": "9fafe8e2", 336 | "metadata": {}, 337 | "outputs": [], 338 | "source": [ 339 | "answer_question_from_search_context(question, prompt_from_search_url, show_prompt=True)" 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "execution_count": null, 345 | "id": "495adcc0", 346 | "metadata": {}, 347 | "outputs": [], 348 | "source": [ 349 | "final_response_python_api_summary = query_search_content_by_generate_python_api(question)" 350 | ] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": null, 355 | "id": "1b4f8584", 356 | "metadata": {}, 357 | "outputs": [], 358 | "source": [ 359 | "for i in final_response_python_api_summary[\"value\"]:\n", 360 | " i[\"merged_text\"] = summary_search_context(i[\"merged_text\"])\n", 361 | " del i[\"content\"]\n", 362 | " del i[\"entities\"]\n", 363 | "\n", 364 | "final_response_python_api_summary" 365 | ] 366 | } 367 | ], 368 | "metadata": { 369 | "kernelspec": { 370 | "display_name": "Python 3", 371 | "language": "python", 372 | "name": "python3" 373 | }, 374 | "language_info": { 375 | "codemirror_mode": { 376 | "name": "ipython", 377 | "version": 3 378 | }, 379 | "file_extension": ".py", 380 | "mimetype": "text/x-python", 381 | "name": "python", 382 | "nbconvert_exporter": "python", 383 | "pygments_lexer": "ipython3", 384 | "version": "3.10.2" 385 | }, 386 | "vscode": { 387 | "interpreter": { 388 | "hash": "c9c3b609469d25d1aac2c871cf98e525cf19c42915ce6a9a380b0866c8d2e111" 389 | } 390 | } 391 | }, 392 | "nbformat": 4, 393 | "nbformat_minor": 5 394 | } 395 | -------------------------------------------------------------------------------- /python/qa_aoai_speech.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | import time 4 | import openai 5 | 6 | from 
azure.cognitiveservices.speech import SpeechConfig 7 | 8 | 9 | # Set Env keys 10 | YOUR_SPEECH_KEY = "" 11 | YOUR_SPEECH_REGION = "" 12 | YOUR_OPENAI_KEY = "" 13 | YOUR_OPENAI_BASE = "" 14 | YOUR_OPENAI_ENGINE = "text-davinci-003" 15 | 16 | try: 17 | import azure.cognitiveservices.speech as speechsdk 18 | except ImportError: 19 | print(""" 20 | Importing the Speech SDK for Python failed. 21 | Refer to 22 | https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-python for 23 | installation instructions. 24 | """) 25 | import sys 26 | sys.exit(1) 27 | 28 | # Set up the subscription info for the Speech Service: 29 | # Replace with your own subscription key and service region (e.g., "westus"). 30 | speech_key, service_region = YOUR_SPEECH_KEY, YOUR_SPEECH_REGION 31 | 32 | prompt_context = "The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\n" 33 | 34 | last_prompt = "" 35 | bye = False 36 | 37 | def callGPT3(prompt): 38 | openai.api_type = "azure" 39 | openai.api_base = YOUR_OPENAI_BASE 40 | openai.api_version = "2022-12-01" 41 | openai.api_key = YOUR_OPENAI_KEY 42 | 43 | 44 | 45 | # print(prompt) 46 | global prompt_with_context, last_prompt 47 | prompt_with_context = prompt_context + last_prompt + prompt + "\n" 48 | 49 | response = openai.Completion.create( 50 | engine=YOUR_OPENAI_ENGINE, 51 | prompt=prompt_with_context, 52 | temperature=0.9, 53 | max_tokens=400, 54 | top_p=1, 55 | frequency_penalty=0.0, 56 | presence_penalty=0.0, 57 | stop=None 58 | ) 59 | # print (response) 60 | 61 | gpt_resp = response.choices[0].text 62 | print("GPT: " + gpt_resp) 63 | last_prompt = prompt + "\n" + gpt_resp + "\n" 64 | # print(last_prompt) 65 | tts(gpt_resp) 66 | 67 | if prompt == "Goodbye.": 68 | exit() 69 | 70 | 71 | def speech_recognize_once_from_mic(): 72 | 73 | # 74 | speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region, speech_recognition_language="en-us") 75 | # Creates a speech recognizer using microphone as audio input. 76 | # The default language is "en-us". 
77 | speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config) 78 | 79 | done = False 80 | 81 | # GPT-3 prompt 82 | myprompt = "" 83 | 84 | def stop_cb(evt): 85 | """callback that signals to stop continuous recognition upon receiving an event `evt`""" 86 | # print('CLOSING on {}'.format(evt)) 87 | nonlocal done 88 | done = True 89 | 90 | def get_text(evt): 91 | #print('RECOGNIZED: {}'.format(evt)) 92 | nonlocal myprompt 93 | myprompt = myprompt + evt.result.text 94 | print(myprompt) 95 | nonlocal done 96 | done = True 97 | 98 | # Connect callbacks to the events fired by the speech recognizer 99 | #speech_recognizer.recognizing.connect(lambda evt: print('RECOGNIZING: {}'.format(evt))) 100 | #speech_recognizer.recognized.connect(lambda evt: print('RECOGNIZED: {}'.format(evt))) 101 | speech_recognizer.recognized.connect(lambda evt: get_text(evt)) 102 | 103 | speech_recognizer.session_started.connect(lambda evt: print("Human:")) 104 | # speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt))) 105 | speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt))) 106 | # stop continuous recognition on either session stopped or canceled events 107 | speech_recognizer.session_stopped.connect(stop_cb) 108 | speech_recognizer.canceled.connect(stop_cb) 109 | 110 | # Start continuous speech recognition 111 | speech_recognizer.start_continuous_recognition() 112 | while not done: 113 | time.sleep(.1) 114 | 115 | speech_recognizer.stop_continuous_recognition() 116 | # 117 | 118 | callGPT3(myprompt) 119 | 120 | 121 | 122 | 123 | def tts(text): 124 | 125 | speech_config = SpeechConfig(subscription=speech_key, region=service_region) 126 | 127 | speech_config.speech_synthesis_language = "en-us" 128 | speech_config.speech_synthesis_voice_name ="en-US-AriaNeural" 129 | 130 | # Creates a speech synthesizer for the specified language, 131 | # using the default speaker as audio output. 132 | speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config) 133 | 134 | # Receives a text from console input and synthesizes it to speaker. 135 | result = speech_synthesizer.speak_text_async(text).get() 136 | 137 | 138 | 139 | while not bye: 140 | speech_recognize_once_from_mic() 141 | 142 | 143 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Azure OpenAI Service Advanced Samples 2 | 3 | This repository provides sample code and instructions on how to use Azure OpenAI Service, Azure Search Service, and Azure Speech Service together to perform natural language searches and question answering. 4 | 5 | ## Prerequisites 6 | 7 | Before you begin, you must have the following: 8 | 9 | - A Windows machine with [WSL 2.0](https://learn.microsoft.com/en-us/windows/wsl/install), Ubuntu 20.04, and [VSCode](https://code.visualstudio.com/) installed, or a Linux machine with [VSCode](https://code.visualstudio.com/) installed. 10 | - [Python 3.9](https://www.python.org/downloads/release/python-390/) or above and relevant pip packages installed. 11 | - An Azure account with an active subscription. If you don't have an account, you can create a [free trial account](https://azure.microsoft.com/en-us/free/). 12 | - An instance of [Azure OpenAI Service](https://azure.microsoft.com/en-us/services/cognitive-services/openai/). 13 | - An instance of [Azure Search Service](https://azure.microsoft.com/en-us/services/search/). 
14 | - An instance of [Azure Speech Service](https://azure.microsoft.com/en-us/services/cognitive-services/speech-services/). 15 | 16 | ## Getting Started 17 | 18 | To use this code, follow these steps: 19 | 20 | 1. Clone this repository to your local machine: 21 | 22 | ``` 23 | git clone https://github.com/alezhao/openai.git 24 | ``` 25 | 26 | 2. Navigate to the `notebook` folder. 27 | 28 | 3. Open the desired notebook in VSCode. 29 | 30 | 4. Replace the placeholder values in the notebook with your own Azure service credentials (API keys, endpoints, and the search index name). 31 | 32 | 5. Run the notebook or Python script, following the instructions provided in the comments. 33 | 34 | ## Samples 35 | 36 | #### **Document Process Automation** 37 | 38 | ![](images/dpa.jpg) 39 | 40 | ### [openai_azure_search_with_summary_all.ipynb](https://github.com/alezhao/openai/blob/main/notebook/openai_azure_search_with_summary_all.ipynb) 41 | 42 | This notebook shows how to use Azure OpenAI Service and Azure Search Service together for natural-language search and question answering. The OpenAI completion model turns a natural-language question into an Azure Search request (either generated Python REST-API code or a query URL), Azure Search returns the matching documents, and the model then answers the question and summarizes the retrieved content. 43 | 44 |
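The essence of the notebook is retrieval-augmented prompting: fetch relevant documents from the search index, then ask the completion model to answer only from that context. The snippet below is a minimal sketch of that flow rather than the notebook's exact code — it assumes the legacy `openai` 0.x SDK with the Azure `2022-12-01` API version, a search index that exposes a `merged_text` field, and placeholder keys, endpoints, and index names that you must replace with your own.

```python
import requests
import openai

# All names below are placeholders -- substitute your own resource values.
openai.api_type = "azure"
openai.api_base = "https://YOUR_RESOURCE_NAME.openai.azure.com/"
openai.api_version = "2022-12-01"
openai.api_key = "YOUR_AZURE_OPENAI_KEY"

SEARCH_ENDPOINT = "https://YOUR_RESOURCE_NAME.search.windows.net"
SEARCH_INDEX = "YOUR_INDEX_NAME"
SEARCH_KEY = "YOUR_AZURE_SEARCH_KEY"


def search_documents(question: str, top: int = 2) -> list:
    # Ask Azure Cognitive Search for the top matching documents via its REST API.
    url = f"{SEARCH_ENDPOINT}/indexes/{SEARCH_INDEX}/docs"
    params = {"api-version": "2021-04-30-Preview", "search": question, "$top": top}
    resp = requests.get(url, params=params, headers={"api-key": SEARCH_KEY})
    resp.raise_for_status()
    return resp.json()["value"]


def answer_from_search(question: str) -> str:
    # Concatenate the retrieved text into a context block and answer from it only.
    context = "\n".join(doc["merged_text"] for doc in search_documents(question))
    prompt = (
        "Answer the question as truthfully as possible using the provided context, "
        'and if the answer is not contained within the text below, say "I don\'t know."\n\n'
        f"Context:\n{context}\n\nQ: {question}\nA:"
    )
    response = openai.Completion.create(
        engine="text-davinci-003", prompt=prompt, temperature=0.0, max_tokens=500
    )
    return response["choices"][0]["text"].strip()


print(answer_from_search("how people describe New York and location is USA"))
```

The notebook itself goes a step further than this sketch: instead of calling the search REST API directly, it prompts the completion model to generate the search call (either as Python code or as a query URL), runs whatever comes back, and reuses the same completion helper with a "please summarize" prompt to condense each retrieved document.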
45 | 46 | #### **Contact Center Analytics using Speech API & OpenAI (key code only)** 47 | 48 | ![](images/speech_openai.jpg) 49 | 50 | ### [qa_aoai_speech.py](https://github.com/alezhao/openai/blob/main/python/qa_aoai_speech.py) 51 | 52 | This Python script provides an example of how to use Azure OpenAI Service and Azure Speech Service together for spoken question answering, combining speech-to-text (STT), completion-based response generation, and text-to-speech (TTS). It demonstrates how to use Azure Speech Service to convert microphone audio to text, the OpenAI Service to generate a conversational response, and Azure Speech Service again to speak that response aloud. 53 | 54 |
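Reduced to a single conversational turn, the script's loop is speech-to-text, a completion call, and text-to-speech. The sketch below is a simplified illustration rather than the repository's exact code: the real script uses continuous recognition with event callbacks and carries the conversation history forward in the prompt, whereas this version uses a single-shot `recognize_once()` call, and all keys and regions shown are placeholders.

```python
import azure.cognitiveservices.speech as speechsdk
import openai

# Placeholders -- replace with your own Speech and Azure OpenAI resource values.
SPEECH_KEY, SPEECH_REGION = "YOUR_SPEECH_KEY", "YOUR_SPEECH_REGION"
openai.api_type = "azure"
openai.api_base = "https://YOUR_RESOURCE_NAME.openai.azure.com/"
openai.api_version = "2022-12-01"
openai.api_key = "YOUR_AZURE_OPENAI_KEY"

speech_config = speechsdk.SpeechConfig(subscription=SPEECH_KEY, region=SPEECH_REGION)
speech_config.speech_recognition_language = "en-US"
speech_config.speech_synthesis_voice_name = "en-US-AriaNeural"


def one_turn() -> None:
    # 1. Speech-to-text: capture one utterance from the default microphone.
    recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
    heard = recognizer.recognize_once().text
    print("Human: " + heard)

    # 2. Completion: generate an assistant reply for the recognized text.
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt="The following is a conversation with a helpful, friendly AI assistant.\n"
               "Human: " + heard + "\nAI:",
        temperature=0.9,
        max_tokens=400,
    )
    reply = response["choices"][0]["text"].strip()
    print("GPT: " + reply)

    # 3. Text-to-speech: speak the reply through the default speaker.
    synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)
    synthesizer.speak_text_async(reply).get()


one_turn()
```

Wrapping `one_turn()` in a loop and appending each exchange to the prompt, as the full script does, turns this into an ongoing voice conversation.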
55 | 56 | ## References 57 | 58 | - [Azure OpenAI Service documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/openai-index) 59 | - [Azure Search Service documentation](https://docs.microsoft.com/en-us/azure/search/) 60 | - [Azure Speech Service documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/index) 61 | - [Python documentation](https://docs.python.org/3/) -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | openai 2 | requests 3 | azure-cognitiveservices-speech --------------------------------------------------------------------------------