├── .gitignore ├── data ├── about-me.txt ├── blog-post.txt ├── car.png ├── livestream-intro.m4a ├── spanish-audio.m4a └── whisper-test.m4a ├── notebooks ├── devday-2023.ipynb ├── function-calling-stream.ipynb ├── function-calling.ipynb ├── image-generation.ipynb ├── livestream-image-generation.ipynb ├── openai-api-interaction.ipynb ├── prompt-engineering-stream.ipynb ├── prompt-engineering.ipynb └── whisper-api.ipynb └── src ├── chat-ui.py ├── chat.py ├── combined-dalle-ui.py ├── convo-sim.py ├── image-generator.py ├── jarjar.png ├── similar-image-generator.py └── whisper.py /.gitignore: -------------------------------------------------------------------------------- 1 | ## The .gitignore file specifies things that git should ignore. 2 | ## This default template includes entries for R, Python and visual studio 3 | 4 | ## 5 | ## Add custom entries below here. 6 | ## 7 | dst-env/ 8 | .cache/v/cache/lastfailed 9 | tests/.cache/v/cache/lastfailed 10 | .vscode/settings.json 11 | 12 | ## 13 | ## R Section - See https://github.com/github/gitignore/blob/master/R.gitignore 14 | ## 15 | 16 | # History files 17 | .Rhistory 18 | .Rapp.history 19 | 20 | # Session Data files 21 | .RData 22 | 23 | # Example code in package build process 24 | *-Ex.R 25 | 26 | # Output files from R CMD build 27 | /*.tar.gz 28 | 29 | # Output files from R CMD check 30 | /*.Rcheck/ 31 | 32 | # RStudio files 33 | .Rproj.user/ 34 | 35 | # produced vignettes 36 | vignettes/*.html 37 | vignettes/*.pdf 38 | 39 | # OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 40 | .httr-oauth 41 | 42 | # knitr and R markdown default cache directories 43 | /*_cache/ 44 | /cache/ 45 | 46 | # Temporary files created by R markdown 47 | *.utf8.md 48 | *.knit.md 49 | 50 | ## 51 | ## Python Section - See https://github.com/github/gitignore/blob/master/Python.gitignore 52 | ## 53 | 54 | # PyCharm ide files 55 | .idea 56 | 57 | # Byte-compiled / optimized / DLL files 58 | __pycache__/ 59 | *.py[cod] 60 | 
*$py.class 61 | 62 | # C extensions 63 | *.so 64 | 65 | # Distribution / packaging 66 | .Python 67 | env/ 68 | build/ 69 | develop-eggs/ 70 | dist/ 71 | downloads/ 72 | eggs/ 73 | .eggs/ 74 | lib/ 75 | lib64/ 76 | parts/ 77 | sdist/ 78 | var/ 79 | wheels/ 80 | *.egg-info/ 81 | .installed.cfg 82 | *.egg 83 | .DS_Store 84 | 85 | # PyInstaller 86 | # Usually these files are written by a python script from a template 87 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 88 | *.manifest 89 | *.spec 90 | 91 | # Installer logs 92 | pip-log.txt 93 | pip-delete-this-directory.txt 94 | 95 | # Unit test / coverage reports 96 | htmlcov/ 97 | .tox/ 98 | .coverage 99 | .coverage.* 100 | .cache 101 | nosetests.xml 102 | coverage.xml 103 | *.cover 104 | .hypothesis/ 105 | 106 | # Translations 107 | *.mo 108 | *.pot 109 | 110 | # Django stuff: 111 | *.log 112 | local_settings.py 113 | 114 | # Flask stuff: 115 | instance/ 116 | .webassets-cache 117 | 118 | # Scrapy stuff: 119 | .scrapy 120 | 121 | # Sphinx documentation 122 | docs/_build/ 123 | 124 | # PyBuilder 125 | target/ 126 | 127 | # Jupyter Notebook 128 | .ipynb_checkpoints 129 | 130 | # pyenv 131 | .python-version 132 | 133 | # celery beat schedule file 134 | celerybeat-schedule 135 | 136 | # SageMath parsed files 137 | *.sage.py 138 | 139 | # dotenv 140 | .env 141 | 142 | # virtualenv 143 | .venv 144 | venv/ 145 | ENV/ 146 | 147 | # Spyder project settings 148 | .spyderproject 149 | .spyproject 150 | 151 | # Rope project settings 152 | .ropeproject 153 | 154 | # mkdocs documentation 155 | /site 156 | 157 | # mypy 158 | .mypy_cache/ 159 | 160 | ## Ignore Visual Studio temporary files, build results, and 161 | ## files generated by popular Visual Studio add-ons. 
162 | ## 163 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore 164 | 165 | # User-specific files 166 | *.suo 167 | *.user 168 | *.userosscache 169 | *.sln.docstates 170 | 171 | # User-specific files (MonoDevelop/Xamarin Studio) 172 | *.userprefs 173 | 174 | # Build results 175 | [Dd]ebug/ 176 | [Dd]ebugPublic/ 177 | [Rr]elease/ 178 | [Rr]eleases/ 179 | x64/ 180 | x86/ 181 | bld/ 182 | [Bb]in/ 183 | [Oo]bj/ 184 | [Ll]og/ 185 | 186 | # Visual Studio 2015 cache/options directory 187 | .vs/ 188 | # Uncomment if you have tasks that create the project's static files in wwwroot 189 | #wwwroot/ 190 | 191 | # MSTest test Results 192 | [Tt]est[Rr]esult*/ 193 | [Bb]uild[Ll]og.* 194 | 195 | # NUNIT 196 | *.VisualState.xml 197 | TestResult.xml 198 | 199 | # Build Results of an ATL Project 200 | [Dd]ebugPS/ 201 | [Rr]eleasePS/ 202 | dlldata.c 203 | 204 | # Benchmark Results 205 | BenchmarkDotNet.Artifacts/ 206 | 207 | # .NET Core 208 | project.lock.json 209 | project.fragment.lock.json 210 | artifacts/ 211 | **/Properties/launchSettings.json 212 | 213 | *_i.c 214 | *_p.c 215 | *_i.h 216 | *.ilk 217 | *.meta 218 | *.obj 219 | *.pch 220 | *.pdb 221 | *.pgc 222 | *.pgd 223 | *.rsp 224 | *.sbr 225 | *.tlb 226 | *.tli 227 | *.tlh 228 | *.tmp 229 | *.tmp_proj 230 | *.log 231 | *.vspscc 232 | *.vssscc 233 | .builds 234 | *.pidb 235 | *.svclog 236 | *.scc 237 | 238 | # Chutzpah Test files 239 | _Chutzpah* 240 | 241 | # Visual C++ cache files 242 | ipch/ 243 | *.aps 244 | *.ncb 245 | *.opendb 246 | *.opensdf 247 | *.sdf 248 | *.cachefile 249 | *.VC.db 250 | *.VC.VC.opendb 251 | 252 | # Visual Studio profiler 253 | *.psess 254 | *.vsp 255 | *.vspx 256 | *.sap 257 | 258 | # Visual Studio Trace Files 259 | *.e2e 260 | 261 | # TFS 2012 Local Workspace 262 | $tf/ 263 | 264 | # Guidance Automation Toolkit 265 | *.gpState 266 | 267 | # ReSharper is a .NET coding add-in 268 | _ReSharper*/ 269 | *.[Rr]e[Ss]harper 270 | *.DotSettings.user 271 | 272 | # 
JustCode is a .NET coding add-in 273 | .JustCode 274 | 275 | # TeamCity is a build add-in 276 | _TeamCity* 277 | 278 | # DotCover is a Code Coverage Tool 279 | *.dotCover 280 | 281 | # AxoCover is a Code Coverage Tool 282 | .axoCover/* 283 | !.axoCover/settings.json 284 | 285 | # Visual Studio code coverage results 286 | *.coverage 287 | *.coveragexml 288 | 289 | # NCrunch 290 | _NCrunch_* 291 | .*crunch*.local.xml 292 | nCrunchTemp_* 293 | 294 | # MightyMoose 295 | *.mm.* 296 | AutoTest.Net/ 297 | 298 | # Web workbench (sass) 299 | .sass-cache/ 300 | 301 | # Installshield output folder 302 | [Ee]xpress/ 303 | 304 | # DocProject is a documentation generator add-in 305 | DocProject/buildhelp/ 306 | DocProject/Help/*.HxT 307 | DocProject/Help/*.HxC 308 | DocProject/Help/*.hhc 309 | DocProject/Help/*.hhk 310 | DocProject/Help/*.hhp 311 | DocProject/Help/Html2 312 | DocProject/Help/html 313 | 314 | # Click-Once directory 315 | publish/ 316 | 317 | # Publish Web Output 318 | *.[Pp]ublish.xml 319 | *.azurePubxml 320 | # Note: Comment the next line if you want to checkin your web deploy settings, 321 | # but database connection strings (with potential passwords) will be unencrypted 322 | *.pubxml 323 | *.publishproj 324 | 325 | # Microsoft Azure Web App publish settings. Comment the next line if you want to 326 | # checkin your Azure Web App publish settings, but sensitive information contained 327 | # in these scripts will be unencrypted 328 | PublishScripts/ 329 | 330 | # NuGet Packages 331 | *.nupkg 332 | # The packages folder can be ignored because of Package Restore 333 | **/[Pp]ackages/* 334 | # except build/, which is used as an MSBuild target. 
335 | !**/[Pp]ackages/build/ 336 | # Uncomment if necessary however generally it will be regenerated when needed 337 | #!**/[Pp]ackages/repositories.config 338 | # NuGet v3's project.json files produces more ignorable files 339 | *.nuget.props 340 | *.nuget.targets 341 | 342 | # Microsoft Azure Build Output 343 | csx/ 344 | *.build.csdef 345 | 346 | # Microsoft Azure Emulator 347 | ecf/ 348 | rcf/ 349 | 350 | # Windows Store app package directories and files 351 | AppPackages/ 352 | BundleArtifacts/ 353 | Package.StoreAssociation.xml 354 | _pkginfo.txt 355 | *.appx 356 | 357 | # Visual Studio cache files 358 | # files ending in .cache can be ignored 359 | *.[Cc]ache 360 | # but keep track of directories ending in .cache 361 | !*.[Cc]ache/ 362 | 363 | # Others 364 | ClientBin/ 365 | ~$* 366 | *~ 367 | *.dbmdl 368 | *.dbproj.schemaview 369 | *.jfm 370 | *.pfx 371 | *.publishsettings 372 | orleans.codegen.cs 373 | 374 | # Since there are multiple workflows, uncomment next line to ignore bower_components 375 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 376 | #bower_components/ 377 | 378 | # RIA/Silverlight projects 379 | Generated_Code/ 380 | 381 | # Backup & report files from converting an old project file 382 | # to a newer Visual Studio version. 
Backup files are not needed, 383 | # because we have git ;-) 384 | _UpgradeReport_Files/ 385 | Backup*/ 386 | UpgradeLog*.XML 387 | UpgradeLog*.htm 388 | 389 | # SQL Server files 390 | *.mdf 391 | *.ldf 392 | *.ndf 393 | 394 | # Business Intelligence projects 395 | *.rdl.data 396 | *.bim.layout 397 | *.bim_*.settings 398 | 399 | # Microsoft Fakes 400 | FakesAssemblies/ 401 | 402 | # GhostDoc plugin setting file 403 | *.GhostDoc.xml 404 | 405 | # Node.js Tools for Visual Studio 406 | .ntvs_analysis.dat 407 | node_modules/ 408 | 409 | # Typescript v1 declaration files 410 | typings/ 411 | 412 | # Visual Studio 6 build log 413 | *.plg 414 | 415 | # Visual Studio 6 workspace options file 416 | *.opt 417 | 418 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 419 | *.vbw 420 | 421 | # Visual Studio LightSwitch build output 422 | **/*.HTMLClient/GeneratedArtifacts 423 | **/*.DesktopClient/GeneratedArtifacts 424 | **/*.DesktopClient/ModelManifest.xml 425 | **/*.Server/GeneratedArtifacts 426 | **/*.Server/ModelManifest.xml 427 | _Pvt_Extensions 428 | 429 | # Paket dependency manager 430 | .paket/paket.exe 431 | paket-files/ 432 | 433 | # FAKE - F# Make 434 | .fake/ 435 | 436 | # JetBrains Rider 437 | .idea/ 438 | *.sln.iml 439 | 440 | # CodeRush 441 | .cr/ 442 | 443 | # Python Tools for Visual Studio (PTVS) 444 | __pycache__/ 445 | *.pyc 446 | 447 | # Cake - Uncomment if you are using it 448 | # tools/** 449 | # !tools/packages.config 450 | 451 | # Tabs Studio 452 | *.tss 453 | 454 | # Telerik's JustMock configuration file 455 | *.jmconfig 456 | 457 | # BizTalk build output 458 | *.btp.cs 459 | *.btm.cs 460 | *.odx.cs 461 | *.xsd.cs 462 | 463 | # OpenCover UI analysis results 464 | OpenCover/ 465 | junit/ 466 | 467 | # Catboost Directory 468 | catboost_info/ 469 | 470 | # Key Configurations Directory (added by dkhundley) 471 | keys/ 472 | sensitive 473 | 474 | # SageMaker Test Directory Ignores (added by dkhundley) 475 | 
tests/sagemaker_test_dir/input/train/data/ 476 | tests/sagemaker_test_dir/model/ 477 | tests/sagemaker_test_dir/output/ -------------------------------------------------------------------------------- /data/about-me.txt: -------------------------------------------------------------------------------- 1 | Hello! My name is David Hundley. I am a principal machine learning engineer at State Farm. I enjoy learning about AI and teaching what I learn back to others. I have two daughters. I drive a Tesla Model 3, and my favorite video game series is The Legend of Zelda. -------------------------------------------------------------------------------- /data/blog-post.txt: -------------------------------------------------------------------------------- 1 | I am a principal machine learning (ML) engineer at a Fortune 50 company, and one of the questions that I am asked most often is the same one posed in the title of this post: “What is an ML engineer?” The truth is that there doesn’t seem to be a one-size-fits-all answer. While I am definitely not actively looking for a new role, I actually enjoy looking at job postings from other companies just to get a feel of what other companies are looking for in terms of skillsets for ML engineers. The reality is that when it comes to roles like data scientists, ML engineers, and even some kinds of software engineers, there is a lot of overlap in terms of what is expected of skillsets. Company A might define the role of an ML engineer the exact same way that Company B defines a data scientist role, with each of these roles performing the exact same activities despite the differences in title. These definitions really do range quite a bit! 2 | That said, I’m not going to try to nail down a very precise definition of an ML engineer. (Because I’m sure I’d make somebody mad if I tried to do that!) Instead, I think a more useful examination would be an understanding of the skillsets generally associated to ML engineers. 
After analyzing those skillsets, we’ll specifically juxtapose the skillsets of a general ML engineer with a general data scientist, specifically since one of the questions I hear most often is, “What is the difference between a data scientist and an ML engineer?” Finally, we’ll wrap things up with an analysis on the direction I expect the industry to head so that you can best prepare yourself for the future. 3 | Primary Skillsets of an ML Engineer 4 | Though it is difficult to nail down a precise definition of an ML engineer, we do at least see enough commonality in skillsets that we can broadly group them into three collective categories: machine learning / deep learning, software engineering, and technical architecture. Below we’ll analyze more closely what each of these three categories entails. Before moving forward, I do want to address one skillset you might notice curiously missing from the list: data engineering. I intentionally omitted data engineering from this list because I actually find more often than not that this skillset often requires its own role, so while it’s definitely not impossible for an ML engineer to perform data engineering in their role, it’s more often relegated to a role like a data analyst or data engineer. 5 | Machine Learning / Deep Learning 6 | This one is the no brainer; it’s right in the title! This involves an individual building a predictive model to solve some sort of business problem using machine learning and deep learning algorithms. We’ll get into this a bit more in the next section, but it actually seems that ML engineers more so focus on more computationally complex problems that end up getting integrated into real-time systems. In other words, I hear a lot more ML engineers focusing on more deep learning problems like computer vision or natural language processing (NLP) than I hear them solving problems that involve structured, tabular data. 
Again, this isn’t to say that an ML engineer never builds predictive models for tabular data. Part of the reason that I believe that an ML engineer often spends more time focusing on building more computationally complex models is because it requires our next skillset… 7 | Software Engineering 8 | When it comes to deep learning in particular, it is an absolute “must” for an ML engineer to have a software engineering skillset. Regardless of if you choose to work with TensorFlow, PyTorch, or even other less computationally complex algorithmic libraries, pretty much all machine learning manifests itself in the form of software-engineered code. Most ML engineers are required to understand the Python coding language specifically, but it’s not uncommon to request an ML engineer to understand a secondary language like Swift if building iOS applications or Java if building Android applications. Additionally, because applied machine learning manifests itself as a software product, ML engineers are often required to understand the basic things that go along with any other general software engineer role: unit testing, security scanning, CI/CD pipelining, and more. 9 | Technical Architecture 10 | Because applied machine learning / deep learning manifests as a software product, it is very common for an ML engineer to have a technical architecture skillset. This is the whole idea of building a “blueprint” that demonstrates how a whole system works together and functions appropriately. This sort of work is often not completed in a vacuum. In my own role as an ML engineer, I partner alongside other more general technology / software engineers to understand how my machine learning APIs and batch inference solutions play a role in the whole, overarching system. 
Because many companies are moving to the cloud, it is common for a company to request that an ML engineer understand cloud services like Amazon Web Services (AWS), Google Cloud Platform (GCP), or Microsoft Azure on a pretty intricate level. It’s often required that an ML engineer knows how multiple services within one of these cloud platforms play together in order to appropriately deploy a machine learning solution. 11 | Juxtaposing the Data Scientist Role with the ML Engineer 12 | Before jumping into this next section, allow me to reiterate once again that companies define these roles quite ambiguously, so don’t be surprised if these definitions do not match your own experience. A question I hear frequently is “What is the difference between a data scientist and an ML engineer?” Or even more succinctly, “Is there a difference between a data scientist and an ML engineer?” I think the answer to the latter question is yes, but “drawing that line in the sand” is a difficult task. 13 | Let’s take a step back to understand what data science is in general. Data science is referred to as such because it mirrors the scientific method. Yes, this is the same scientific method that you probably learned in your high school biology class. Essentially, data science looks for patterns amongst data by setting an experimental group and a control group and analyzing the probabilistic difference (p-value) between them to understand if there is any statistical significance. If we can find a statistical significance, then we can build predictive models that can draw inferences on future data. 14 | In this form, data science does not have a direct, 1-to-1 correlation to machine learning. In other words, we don’t always have to use machine learning to build predictive models. The insurance industry in particular has used mathematical algorithms like generalized linear models (GLMs) for well over a century to great effect without the need for machine learning. 
In fact, many actuarial analysts and statisticians have actually been recently re-titled to the role “data scientist” for this reason, and I would agree this is a very fair action. (Fair, but it feels to me more like a marketing / recruitment tool more than anything. 😂) 15 | With this understanding of data science, you can start to see a dichotomy between the skillsets of a data scientist and an ML engineer. A data scientist is often required to have a stronger mathematical background but less of a software engineering background since they can rely on “low code” tools like the industry favorite SAS tool to complete their work. I have the pleasure of mentoring many undergraduate and graduate students at many of the top-name universities majoring in data science, and they will tell you this is definitely the focus. What is curiously NOT the focus with any of the students I’ve talked with is any emphasis on software engineering nor technical architecture. When it comes to machine learning, students often spend the bulk of their time coding a machine learning algorithm (e.g. support vector machine, naive bayes, gradient boosted trees) from scratch in the C++ programming language. 16 | The Future of the ML Engineer Role 17 | I want to be very careful so that it didn’t sound like I was downplaying the value of a traditional data scientist role. Data scientists are still extremely valuable, as they have proven their worth time and time again by building these great predictive models that provide a lot of value to a company. At the end of the day, it doesn’t matter if it’s machine learning or not: business value is business value. (And to be clear, a lot of data scientists do use machine learning to derive that business value.) 18 | That said, we are already seeing the emergence of new technologies that are going to require the additional skillsets of software engineering and technical architecture not traditionally found in the data scientist role. 
In my analysis of job postings across every major company, I see more of an emphasis placed on these additional engineering skillsets and thus more of a growing need for ML engineers in general. Sure, they might not necessarily go by the title “ML engineer”, but you can absolutely expect to see a continued interest in individuals with a combination of the three skillsets mentioned above. 19 | The good news is that if you’re a data scientist today with a strong mathematical background, I personally think that is the hardest skillset to acquire. Yes, it is not easy to pivot into a new skillset, but I would imagine it’s easier for somebody with a strong data science background to obtain a software engineering skillset than vice versa. 20 | Hope you enjoyed this post! Reiterating for the final time: your experience with how the ML engineer role is defined will vary. I personally have very much enjoyed my time in this role! It provides this great middle ground of right brained creativity when building models that solve really interesting problems with left brained engineering to implement those models. I’m very excited to see where the future of our industry goes! 
😃 -------------------------------------------------------------------------------- /data/car.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkhundley/openai-api-tutorial/7ba59ec2862aea2599333c1356c7f68e61ad6880/data/car.png -------------------------------------------------------------------------------- /data/livestream-intro.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkhundley/openai-api-tutorial/7ba59ec2862aea2599333c1356c7f68e61ad6880/data/livestream-intro.m4a -------------------------------------------------------------------------------- /data/spanish-audio.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkhundley/openai-api-tutorial/7ba59ec2862aea2599333c1356c7f68e61ad6880/data/spanish-audio.m4a -------------------------------------------------------------------------------- /data/whisper-test.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkhundley/openai-api-tutorial/7ba59ec2862aea2599333c1356c7f68e61ad6880/data/whisper-test.m4a -------------------------------------------------------------------------------- /notebooks/function-calling-stream.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import openai\n", 10 | "import json\n", 11 | "import yaml" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "# Loading the API key and organization ID from file (NOT pushed to GitHub)\n", 21 | "with open('../keys/openai-keys.yaml') as f:\n", 22 | " keys_yaml = yaml.safe_load(f)\n", 23 | "\n", 24 | "# Applying our API key 
and organization ID to OpenAI\n", 25 | "openai.organization = keys_yaml['ORG_ID']\n", 26 | "openai.api_key = keys_yaml['API_KEY']" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 3, 32 | "metadata": {}, 33 | "outputs": [ 34 | { 35 | "data": { 36 | "text/plain": [ 37 | "'Hello! My name is David Hundley. I am a principal machine learning engineer at State Farm. I enjoy learning about AI and teaching what I learn back to others. I have two daughters. I drive a Tesla Model 3, and my favorite video game series is The Legend of Zelda.'" 38 | ] 39 | }, 40 | "execution_count": 3, 41 | "metadata": {}, 42 | "output_type": "execute_result" 43 | } 44 | ], 45 | "source": [ 46 | "with open('../data/about-me.txt', 'r') as f:\n", 47 | " about_me = f.read()\n", 48 | "\n", 49 | "about_me" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 4, 55 | "metadata": {}, 56 | "outputs": [], 57 | "source": [ 58 | "about_me_prompt = f'''\n", 59 | "Please extract information as a JSON object. 
Please look for the following pieces of information:\n", 60 | "Name\n", 61 | "Job title\n", 62 | "Company\n", 63 | "Number of children as a single integer\n", 64 | "Car make\n", 65 | "Car model\n", 66 | "Favorite video game series\n", 67 | "\n", 68 | "This is the body of text to extract the information from:\n", 69 | "{about_me}\n", 70 | "'''" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 5, 76 | "metadata": {}, 77 | "outputs": [ 78 | { 79 | "data": { 80 | "text/plain": [ 81 | " JSON: {\n", 82 | " \"id\": \"chatcmpl-7a3uMnJrdc5gmErAGynuRreOiQQRO\",\n", 83 | " \"object\": \"chat.completion\",\n", 84 | " \"created\": 1688829186,\n", 85 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 86 | " \"choices\": [\n", 87 | " {\n", 88 | " \"index\": 0,\n", 89 | " \"message\": {\n", 90 | " \"role\": \"assistant\",\n", 91 | " \"content\": \"{\\n \\\"Name\\\": \\\"David Hundley\\\",\\n \\\"Job title\\\": \\\"Principal Machine Learning Engineer\\\",\\n \\\"Company\\\": \\\"State Farm\\\",\\n \\\"Number of children\\\": 2,\\n \\\"Car make\\\": \\\"Tesla\\\",\\n \\\"Car model\\\": \\\"Model 3\\\",\\n \\\"Favorite video game series\\\": \\\"The Legend of Zelda\\\"\\n}\"\n", 92 | " },\n", 93 | " \"finish_reason\": \"stop\"\n", 94 | " }\n", 95 | " ],\n", 96 | " \"usage\": {\n", 97 | " \"prompt_tokens\": 122,\n", 98 | " \"completion_tokens\": 70,\n", 99 | " \"total_tokens\": 192\n", 100 | " }\n", 101 | "}" 102 | ] 103 | }, 104 | "execution_count": 5, 105 | "metadata": {}, 106 | "output_type": "execute_result" 107 | } 108 | ], 109 | "source": [ 110 | "openai_response = openai.ChatCompletion.create(\n", 111 | " model = 'gpt-3.5-turbo',\n", 112 | " messages = [{'role': 'user', 'content': about_me_prompt}]\n", 113 | ")\n", 114 | "\n", 115 | "openai_response" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 6, 121 | "metadata": {}, 122 | "outputs": [ 123 | { 124 | "data": { 125 | "text/plain": [ 126 | "{'Name': 'David Hundley',\n", 127 | " 'Job 
title': 'Principal Machine Learning Engineer',\n", 128 | " 'Company': 'State Farm',\n", 129 | " 'Number of children': 2,\n", 130 | " 'Car make': 'Tesla',\n", 131 | " 'Car model': 'Model 3',\n", 132 | " 'Favorite video game series': 'The Legend of Zelda'}" 133 | ] 134 | }, 135 | "execution_count": 6, 136 | "metadata": {}, 137 | "output_type": "execute_result" 138 | } 139 | ], 140 | "source": [ 141 | "json.loads(openai_response['choices'][0]['message']['content'])" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": 10, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "string1 = 'David'\n", 151 | "string2 = 'david'\n", 152 | "string3 = 'David.'\n", 153 | "string4 = 'David'" 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": 11, 159 | "metadata": {}, 160 | "outputs": [ 161 | { 162 | "data": { 163 | "text/plain": [ 164 | "False" 165 | ] 166 | }, 167 | "execution_count": 11, 168 | "metadata": {}, 169 | "output_type": "execute_result" 170 | } 171 | ], 172 | "source": [ 173 | "string1 == string2" 174 | ] 175 | }, 176 | { 177 | "cell_type": "code", 178 | "execution_count": 12, 179 | "metadata": {}, 180 | "outputs": [ 181 | { 182 | "data": { 183 | "text/plain": [ 184 | "True" 185 | ] 186 | }, 187 | "execution_count": 12, 188 | "metadata": {}, 189 | "output_type": "execute_result" 190 | } 191 | ], 192 | "source": [ 193 | "string1 == string4" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 39, 199 | "metadata": {}, 200 | "outputs": [], 201 | "source": [ 202 | "functions = [\n", 203 | " {\n", 204 | " 'name': 'extract_person_info',\n", 205 | " 'description': 'Get \"About Me\" information from the body of the input text.',\n", 206 | " 'parameters': {\n", 207 | " 'type': 'object',\n", 208 | " 'properties': {\n", 209 | " 'name': {\n", 210 | " 'type': 'string',\n", 211 | " 'description': 'Name of the person'\n", 212 | " },\n", 213 | " 'job_title': {\n", 214 | " 'type': 'string',\n", 
215 | " 'description': 'Job title of person' \n", 216 | " },\n", 217 | " 'num_children': {\n", 218 | " 'type': 'integer',\n", 219 | " 'description': 'Number of children the person is a parent to'\n", 220 | " }\n", 221 | " }\n", 222 | " }\n", 223 | " },\n", 224 | " {\n", 225 | " 'name': 'extract_car_info',\n", 226 | " 'description': 'Extract the make and model of the person\\'s car',\n", 227 | " 'parameters': {\n", 228 | " 'type': 'object',\n", 229 | " 'properties': {\n", 230 | " 'vehicle_make': {\n", 231 | " 'type': 'string',\n", 232 | " 'description': 'Make of the person\\'s vehicle'\n", 233 | " },\n", 234 | " 'vehicle_model': {\n", 235 | " 'type': 'string',\n", 236 | " 'description': 'Model of the person\\'s vehicle'\n", 237 | " }\n", 238 | " }\n", 239 | " }\n", 240 | " },\n", 241 | " {\n", 242 | " 'name': 'extract_all_info',\n", 243 | " 'description': 'Extract all information about a person including their vehicle make and model',\n", 244 | " 'parameters': {\n", 245 | " 'type': 'object',\n", 246 | " 'properties': {\n", 247 | " 'name': {\n", 248 | " 'type': 'string',\n", 249 | " 'description': 'Name of the person'\n", 250 | " },\n", 251 | " 'job_title': {\n", 252 | " 'type': 'string',\n", 253 | " 'description': 'Job title of the person'\n", 254 | " },\n", 255 | " 'num_children': {\n", 256 | " 'type': 'integer',\n", 257 | " 'description': 'Number of children the person is a parent to'\n", 258 | " },\n", 259 | " 'vehicle_make': {\n", 260 | " 'type': 'string',\n", 261 | " 'description': 'Make of the person\\'s vehicle'\n", 262 | " },\n", 263 | " 'vehicle_model': {\n", 264 | " 'type': 'string',\n", 265 | " 'description': 'Model of the person\\'s vehicle'\n", 266 | " },\n", 267 | " 'company_name': {\n", 268 | " 'type': 'string',\n", 269 | " 'description': 'Name of the company the person works for'\n", 270 | " },\n", 271 | " 'favorite_vg_series': {\n", 272 | " 'type': 'string',\n", 273 | " 'description': 'Name of the person\\'s favorite video game series'\n", 274 | " 
}\n", 275 | " }\n", 276 | " }\n", 277 | " }\n", 278 | "]" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "execution_count": 21, 284 | "metadata": {}, 285 | "outputs": [ 286 | { 287 | "data": { 288 | "text/plain": [ 289 | " JSON: {\n", 290 | " \"id\": \"chatcmpl-7a4CdkHofcT03WPSOVmRRTj8jcEYp\",\n", 291 | " \"object\": \"chat.completion\",\n", 292 | " \"created\": 1688830319,\n", 293 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 294 | " \"choices\": [\n", 295 | " {\n", 296 | " \"index\": 0,\n", 297 | " \"message\": {\n", 298 | " \"role\": \"assistant\",\n", 299 | " \"content\": null,\n", 300 | " \"function_call\": {\n", 301 | " \"name\": \"extract_person_info\",\n", 302 | " \"arguments\": \"{\\n \\\"name\\\": \\\"David Hundley\\\",\\n \\\"job_title\\\": \\\"Principal Machine Learning Engineer\\\",\\n \\\"num_children\\\": 2\\n}\"\n", 303 | " }\n", 304 | " },\n", 305 | " \"finish_reason\": \"function_call\"\n", 306 | " }\n", 307 | " ],\n", 308 | " \"usage\": {\n", 309 | " \"prompt_tokens\": 146,\n", 310 | " \"completion_tokens\": 37,\n", 311 | " \"total_tokens\": 183\n", 312 | " }\n", 313 | "}" 314 | ] 315 | }, 316 | "execution_count": 21, 317 | "metadata": {}, 318 | "output_type": "execute_result" 319 | } 320 | ], 321 | "source": [ 322 | "openai_response = openai.ChatCompletion.create(\n", 323 | " model = 'gpt-3.5-turbo',\n", 324 | " messages = [{'role': 'user', 'content': about_me}],\n", 325 | " functions = functions,\n", 326 | " function_call = 'auto'\n", 327 | ")\n", 328 | "\n", 329 | "openai_response" 330 | ] 331 | }, 332 | { 333 | "cell_type": "code", 334 | "execution_count": 23, 335 | "metadata": {}, 336 | "outputs": [], 337 | "source": [ 338 | "json_response = json.loads(openai_response['choices'][0]['message']['function_call']['arguments'])" 339 | ] 340 | }, 341 | { 342 | "cell_type": "code", 343 | "execution_count": 25, 344 | "metadata": {}, 345 | "outputs": [ 346 | { 347 | "data": { 348 | "text/plain": [ 349 | "{'name': 'David Hundley',\n", 350 | " 
'job_title': 'Principal Machine Learning Engineer',\n", 351 | " 'num_children': 2}" 352 | ] 353 | }, 354 | "execution_count": 25, 355 | "metadata": {}, 356 | "output_type": "execute_result" 357 | } 358 | ], 359 | "source": [ 360 | "json_response" 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": 26, 366 | "metadata": {}, 367 | "outputs": [], 368 | "source": [ 369 | "def extract_person_info(name, job_title, num_children):\n", 370 | " return f'The person\\'s name is {name}. Their job title is {job_title}. They have {num_children} children.'" 371 | ] 372 | }, 373 | { 374 | "cell_type": "code", 375 | "execution_count": 33, 376 | "metadata": {}, 377 | "outputs": [ 378 | { 379 | "data": { 380 | "text/plain": [ 381 | " JSON: {\n", 382 | " \"id\": \"chatcmpl-7a4CdkHofcT03WPSOVmRRTj8jcEYp\",\n", 383 | " \"object\": \"chat.completion\",\n", 384 | " \"created\": 1688830319,\n", 385 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 386 | " \"choices\": [\n", 387 | " {\n", 388 | " \"index\": 0,\n", 389 | " \"message\": {\n", 390 | " \"role\": \"assistant\",\n", 391 | " \"content\": null,\n", 392 | " \"function_call\": {\n", 393 | " \"name\": \"extract_person_info\",\n", 394 | " \"arguments\": \"{\\n \\\"name\\\": \\\"David Hundley\\\",\\n \\\"job_title\\\": \\\"Principal Machine Learning Engineer\\\",\\n \\\"num_children\\\": 2\\n}\"\n", 395 | " }\n", 396 | " },\n", 397 | " \"finish_reason\": \"function_call\"\n", 398 | " }\n", 399 | " ],\n", 400 | " \"usage\": {\n", 401 | " \"prompt_tokens\": 146,\n", 402 | " \"completion_tokens\": 37,\n", 403 | " \"total_tokens\": 183\n", 404 | " }\n", 405 | "}" 406 | ] 407 | }, 408 | "execution_count": 33, 409 | "metadata": {}, 410 | "output_type": "execute_result" 411 | } 412 | ], 413 | "source": [ 414 | "openai_response" 415 | ] 416 | }, 417 | { 418 | "cell_type": "code", 419 | "execution_count": 37, 420 | "metadata": {}, 421 | "outputs": [ 422 | { 423 | "name": "stdout", 424 | "output_type": "stream", 425 | "text": [ 
426 | "The person's name is David Hundley. Their job title is Principal Machine Learning Engineer. They have 2 children.\n" 427 | ] 428 | } 429 | ], 430 | "source": [ 431 | "if openai_response[\"choices\"][0][\"message\"].get('function_call'):\n", 432 | "\n", 433 | " json_response = json.loads(openai_response['choices'][0]['message']['function_call']['arguments'])\n", 434 | "\n", 435 | " available_functions = {\n", 436 | " 'extract_person_info': extract_person_info,\n", 437 | " 'extract_car_information'\n", 438 | " }\n", 439 | "\n", 440 | " function_to_call = openai_response['choices'][0]['message']['function_call']['name']\n", 441 | "\n", 442 | " print(available_functions[function_to_call](json_response['name'], json_response['job_title'], json_response['num_children']))" 443 | ] 444 | }, 445 | { 446 | "cell_type": "code", 447 | "execution_count": 43, 448 | "metadata": {}, 449 | "outputs": [ 450 | { 451 | "data": { 452 | "text/plain": [ 453 | " JSON: {\n", 454 | " \"id\": \"chatcmpl-7a4SSkw8TErZcpeblPvzPDBRd1ps7\",\n", 455 | " \"object\": \"chat.completion\",\n", 456 | " \"created\": 1688831300,\n", 457 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 458 | " \"choices\": [\n", 459 | " {\n", 460 | " \"index\": 0,\n", 461 | " \"message\": {\n", 462 | " \"role\": \"assistant\",\n", 463 | " \"content\": null,\n", 464 | " \"function_call\": {\n", 465 | " \"name\": \"extract_person_info\",\n", 466 | " \"arguments\": \"{\\n \\\"name\\\": \\\"David Hundley\\\",\\n \\\"job_title\\\": \\\"Principal Machine Learning Engineer\\\",\\n \\\"num_children\\\": 2\\n}\"\n", 467 | " }\n", 468 | " },\n", 469 | " \"finish_reason\": \"function_call\"\n", 470 | " }\n", 471 | " ],\n", 472 | " \"usage\": {\n", 473 | " \"prompt_tokens\": 281,\n", 474 | " \"completion_tokens\": 37,\n", 475 | " \"total_tokens\": 318\n", 476 | " }\n", 477 | "}" 478 | ] 479 | }, 480 | "execution_count": 43, 481 | "metadata": {}, 482 | "output_type": "execute_result" 483 | } 484 | ], 485 | "source": [ 486 | 
"openai_response = openai.ChatCompletion.create(\n", 487 | " model = 'gpt-3.5-turbo',\n", 488 | " messages = [{'role': 'user', 'content': 'My name is David Hundley. I work as a principal machine learning engineer. I have two daughters.'}],\n", 489 | " functions = functions,\n", 490 | " function_call = 'auto'\n", 491 | ")\n", 492 | "\n", 493 | "openai_response" 494 | ] 495 | }, 496 | { 497 | "cell_type": "code", 498 | "execution_count": null, 499 | "metadata": {}, 500 | "outputs": [], 501 | "source": [] 502 | } 503 | ], 504 | "metadata": { 505 | "kernelspec": { 506 | "display_name": "Python 3", 507 | "language": "python", 508 | "name": "python3" 509 | }, 510 | "language_info": { 511 | "codemirror_mode": { 512 | "name": "ipython", 513 | "version": 3 514 | }, 515 | "file_extension": ".py", 516 | "mimetype": "text/x-python", 517 | "name": "python", 518 | "nbconvert_exporter": "python", 519 | "pygments_lexer": "ipython3", 520 | "version": "3.10.7" 521 | }, 522 | "orig_nbformat": 4 523 | }, 524 | "nbformat": 4, 525 | "nbformat_minor": 2 526 | } 527 | -------------------------------------------------------------------------------- /notebooks/function-calling.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "# OpenAI Function Calling 101\n", 9 | "One of the struggles of using LLMs like ChatGPT is that they do not produce a structured data output. This is important for programmatic systems that largely rely on structured data for system interaction. 
For example, if you want to build a program that analyzes the sentiment of a movie review, you might have to execute a prompt that looks like the following:\n", 10 | "\n", 11 | "```\n", 12 | "prompt = f'''\n", 13 | "Please perform a sentiment analysis on the following movie review:\n", 14 | "{MOVIE_REVIEW_TEXT}\n", 15 | "Please output your response as a single word: either \"Positive\" or \"Negative\". Do not add any extra characters.\n", 16 | "'''\n", 17 | "```\n", 18 | "\n", 19 | "The problem with this is that it doesn't always work. It's pretty common that the LLM will throw in an undesired period or longer explanation like: \"The sentiment of this movie is: Positive.\" While you can regex out the answer (🤢), this is obviously not ideal. What would be ideal is if the LLM would return the output as something like the following structured JSON:\n", 20 | "\n", 21 | "```\n", 22 | "{\n", 23 | " 'sentiment': 'positive'\n", 24 | "}\n", 25 | "```\n", 26 | "\n", 27 | "Enter **OpenAI's new function calling**! Function calling is precisely the answer to the problem above. This Jupyter notebook will demonstrate a simple example of how to use OpenAI's new function calling in Python. If you would like to see the full documentation, [please check out this link](https://platform.openai.com/docs/guides/gpt/function-calling)." 28 | ] 29 | }, 30 | { 31 | "attachments": {}, 32 | "cell_type": "markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "## Notebook Setup\n", 36 | "Let's start with our imports. Now, you may already have the `openai` Python client already installed, but you'll most likely need to upgrade it to get the new function calling functionality. 
Here's how to do this upgrade in your Terminal / Powershell with `pip`:\n", 37 | "\n", 38 | "```\n", 39 | "pip install openai --upgrade\n", 40 | "```" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 1, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "# Importing the necessary Python libraries\n", 50 | "import os\n", 51 | "import json\n", 52 | "import yaml\n", 53 | "import openai" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 2, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "# Loading the API key and organization ID from file (NOT pushed to GitHub)\n", 63 | "with open('../keys/openai-keys.yaml') as f:\n", 64 | " keys_yaml = yaml.safe_load(f)\n", 65 | "\n", 66 | "# Applying our API key and organization ID to OpenAI\n", 67 | "openai.organization = keys_yaml['ORG_ID']\n", 68 | "openai.api_key = keys_yaml['API_KEY']\n", 69 | "os.environ['OPENAI_API_KEY'] = keys_yaml['API_KEY']" 70 | ] 71 | }, 72 | { 73 | "attachments": {}, 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "To test out the function calling functionality, I wrote a short \"About Me\" containing particular facts that we'll be parsing out into appropriate data structures, including integers and strings. Let's load in this text" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 3, 83 | "metadata": {}, 84 | "outputs": [ 85 | { 86 | "name": "stdout", 87 | "output_type": "stream", 88 | "text": [ 89 | "Hello! My name is David Hundley. I am a principal machine learning engineer at State Farm. I enjoy learning about AI and teaching what I learn back to others. I have two daughters. 
I drive a Tesla Model 3, and my favorite video game series is The Legend of Zelda.\n" 90 | ] 91 | } 92 | ], 93 | "source": [ 94 | "# Loading the \"About Me\" text from local file\n", 95 | "with open('../data/about-me.txt', 'r') as f:\n", 96 | " about_me = f.read()\n", 97 | "\n", 98 | "print(about_me)" 99 | ] 100 | }, 101 | { 102 | "attachments": {}, 103 | "cell_type": "markdown", 104 | "metadata": {}, 105 | "source": [ 106 | "## The Pre-Function Calling Days\n", 107 | "Before we demonstrate function calling, let's demonstrate how we used to use prompt engineering and Regex to produce a structured JSON that we can programmatically work with down the road." 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 4, 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "# Engineering a prompt to extract as much information from \"About Me\" as a JSON object\n", 117 | "about_me_prompt = f'''\n", 118 | "Please extract information as a JSON object. Please look for the following pieces of information.\n", 119 | "Name\n", 120 | "Job title\n", 121 | "Company\n", 122 | "Number of children as a single number\n", 123 | "Car make\n", 124 | "Car model\n", 125 | "Favorite video game series\n", 126 | "\n", 127 | "This is the body of text to extract the information from:\n", 128 | "{about_me}\n", 129 | "'''" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": 5, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "# Getting the response back from ChatGPT (gpt-3.5-turbo)\n", 139 | "openai_response = openai.ChatCompletion.create(\n", 140 | " model = 'gpt-3.5-turbo',\n", 141 | " messages = [{'role': 'user', 'content': about_me_prompt}]\n", 142 | ")" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": 6, 148 | "metadata": {}, 149 | "outputs": [ 150 | { 151 | "data": { 152 | "text/plain": [ 153 | "{'Name': 'David Hundley',\n", 154 | " 'Job title': 'Principal Machine Learning Engineer',\n",
155 | " 'Company': 'State Farm',\n", 156 | " 'Number of children': 2,\n", 157 | " 'Car make': 'Tesla',\n", 158 | " 'Car model': 'Model 3',\n", 159 | " 'Favorite video game series': 'The Legend of Zelda'}" 160 | ] 161 | }, 162 | "execution_count": 6, 163 | "metadata": {}, 164 | "output_type": "execute_result" 165 | } 166 | ], 167 | "source": [ 168 | "# Loading the response as a JSON object\n", 169 | "json_response = json.loads(openai_response['choices'][0]['message']['content'])\n", 170 | "json_response" 171 | ] 172 | }, 173 | { 174 | "cell_type": "markdown", 175 | "metadata": {}, 176 | "source": [ 177 | "## Using the New Function Calling Capabilities\n", 178 | "Now that we've demonstrated how we used to get structured JSON in the pre-function calling days, let's move into how we can now make use of function calling to extract the same results but in a more consistent manner for our systematic usage. We'll start more simply with a single custom function and then address a few more \"advanced\" functionalities." 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": 21, 184 | "metadata": {}, 185 | "outputs": [], 186 | "source": [ 187 | "# Defining our initial extract_person_info function\n", 188 | "def extract_person_info(name, job_title, num_children):\n", 189 | " '''\n", 190 | " Prints basic \"About Me\" information\n", 191 | "\n", 192 | " Inputs:\n", 193 | " name (str): Name of the person\n", 194 | " job_title (str): Job title of the person\n", 195 | " num_chilren (int): The number of children the parent has.\n", 196 | " '''\n", 197 | " \n", 198 | " print(f'This person\\'s name is {name}. 
Their job title is {job_title}, and they have {num_children} children.')" 199 | ] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": 8, 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [ 207 | "# Defining how we want ChatGPT to call our custom functions\n", 208 | "my_custom_functions = [\n", 209 | " {\n", 210 | " 'name': 'extract_person_info',\n", 211 | " 'description': 'Get \"About Me\" information from the body of the input text',\n", 212 | " 'parameters': {\n", 213 | " 'type': 'object',\n", 214 | " 'properties': {\n", 215 | " 'name': {\n", 216 | " 'type': 'string',\n", 217 | " 'description': 'Name of the person'\n", 218 | " },\n", 219 | " 'job_title': {\n", 220 | " 'type': 'string',\n", 221 | " 'description': 'Job title of the person'\n", 222 | " },\n", 223 | " 'num_children': {\n", 224 | " 'type': 'integer',\n", 225 | " 'description': 'Number of children the person is a parent to'\n", 226 | " }\n", 227 | " }\n", 228 | " }\n", 229 | " }\n", 230 | "]" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 10, 236 | "metadata": {}, 237 | "outputs": [ 238 | { 239 | "name": "stdout", 240 | "output_type": "stream", 241 | "text": [ 242 | "{\n", 243 | " \"id\": \"chatcmpl-7aSokNbBAHjNFOMAnGqA7uuXsmLjr\",\n", 244 | " \"object\": \"chat.completion\",\n", 245 | " \"created\": 1688924938,\n", 246 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 247 | " \"choices\": [\n", 248 | " {\n", 249 | " \"index\": 0,\n", 250 | " \"message\": {\n", 251 | " \"role\": \"assistant\",\n", 252 | " \"content\": null,\n", 253 | " \"function_call\": {\n", 254 | " \"name\": \"extract_person_info\",\n", 255 | " \"arguments\": \"{\\n \\\"name\\\": \\\"David Hundley\\\",\\n \\\"job_title\\\": \\\"Principal Machine Learning Engineer\\\",\\n \\\"num_children\\\": 2\\n}\"\n", 256 | " }\n", 257 | " },\n", 258 | " \"finish_reason\": \"function_call\"\n", 259 | " }\n", 260 | " ],\n", 261 | " \"usage\": {\n", 262 | " \"prompt_tokens\": 147,\n", 263 | " 
\"completion_tokens\": 37,\n", 264 | " \"total_tokens\": 184\n", 265 | " }\n", 266 | "}\n", 267 | "{'name': 'David Hundley', 'job_title': 'Principal Machine Learning Engineer', 'num_children': 2}\n" 268 | ] 269 | } 270 | ], 271 | "source": [ 272 | "# Getting the response back from ChatGPT (gpt-3.5-turbo)\n", 273 | "openai_response = openai.ChatCompletion.create(\n", 274 | " model = 'gpt-3.5-turbo',\n", 275 | " messages = [{'role': 'user', 'content': about_me}],\n", 276 | " functions = my_custom_functions,\n", 277 | " function_call = 'auto'\n", 278 | ")\n", 279 | "\n", 280 | "print(openai_response)" 281 | ] 282 | }, 283 | { 284 | "cell_type": "markdown", 285 | "metadata": {}, 286 | "source": [ 287 | "### What if the prompt I submit doesn't contain the information I want to extract per my custom function?\n", 288 | "In our original example, our custom function sought to extract three very specific bits of information, and we demonstrated that this worked successfully by passing in my custom \"About Me\" text as a prompt. But you might be wondering, what happens if you pass in any other prompt that doesn't contain that information?\n", 289 | "\n", 290 | "Recall that we set a parameter in our API client call called function_call that we set to auto. We'll explore this even deeper in the next subsection, but what this parameter is essentially doing is telling ChatGPT to use its best judgment in figuring out when to structure the output for one of our custom functions.\n", 291 | "\n", 292 | "So what happens when we submit a prompt that doesn't match any of our custom functions? Simply put, it defaults to typical behavior as if function calling doesn't exist. 
Let's test this out with an arbitrary prompt: \"How tall is the Eiffel Tower?\"" 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": 11, 298 | "metadata": {}, 299 | "outputs": [ 300 | { 301 | "name": "stdout", 302 | "output_type": "stream", 303 | "text": [ 304 | "{\n", 305 | " \"id\": \"chatcmpl-7aSywAKIMPCUT2mCNxoJM0OkwYqLJ\",\n", 306 | " \"object\": \"chat.completion\",\n", 307 | " \"created\": 1688925570,\n", 308 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 309 | " \"choices\": [\n", 310 | " {\n", 311 | " \"index\": 0,\n", 312 | " \"message\": {\n", 313 | " \"role\": \"assistant\",\n", 314 | " \"content\": \"The Eiffel Tower is approximately 330 meters (1,083 feet) tall.\"\n", 315 | " },\n", 316 | " \"finish_reason\": \"stop\"\n", 317 | " }\n", 318 | " ],\n", 319 | " \"usage\": {\n", 320 | " \"prompt_tokens\": 97,\n", 321 | " \"completion_tokens\": 19,\n", 322 | " \"total_tokens\": 116\n", 323 | " }\n", 324 | "}\n" 325 | ] 326 | } 327 | ], 328 | "source": [ 329 | "# Getting the response back from ChatGPT (gpt-3.5-turbo)\n", 330 | "openai_response = openai.ChatCompletion.create(\n", 331 | " model = 'gpt-3.5-turbo',\n", 332 | " messages = [{'role': 'user', 'content': 'How tall is the Eiffel Tower?'}],\n", 333 | " functions = my_custom_functions,\n", 334 | " function_call = 'auto'\n", 335 | ")\n", 336 | "\n", 337 | "print(openai_response)" 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 41, 343 | "metadata": {}, 344 | "outputs": [], 345 | "source": [ 346 | "# Defining a function to extract only vehicle information\n", 347 | "def extract_vehicle_info(vehicle_make, vehicle_model):\n", 348 | " '''\n", 349 | " Prints basic vehicle information\n", 350 | "\n", 351 | " Inputs:\n", 352 | " - vehicle_make (str): Make of the vehicle\n", 353 | " - vehicle_model (str): Model of the vehicle\n", 354 | " '''\n", 355 | " \n", 356 | " print(f'Vehicle make: {vehicle_make}\\nVehicle model: {vehicle_model}')\n", 357 | "\n", 358 | 
"\n", 359 | "\n", 360 | "# Defining a function to extract all information provided in the original \"About Me\" prompt\n", 361 | "def extract_all_info(name, job_title, num_children, vehicle_make, vehicle_model, company_name, favorite_vg_series):\n", 362 | " '''\n", 363 | " Prints the full \"About Me\" information\n", 364 | "\n", 365 | " Inputs:\n", 366 | " - name (str): Name of the person\n", 367 | " - job_title (str): Job title of the person\n", 368 | " - num_chilren (int): The number of children the parent has\n", 369 | " - vehicle_make (str): Make of the vehicle\n", 370 | " - vehicle_model (str): Model of the vehicle\n", 371 | " - company_name (str): Name of the company the person works for\n", 372 | " - favorite_vg_series (str): Person's favorite video game series.\n", 373 | " '''\n", 374 | " \n", 375 | " print(f'''\n", 376 | " This person\\'s name is {name}. Their job title is {job_title}, and they have {num_children} children.\n", 377 | " They drive a {vehicle_make} {vehicle_model}.\n", 378 | " They work for {company_name}.\n", 379 | " Their favorite video game series is {favorite_vg_series}.\n", 380 | " ''')" 381 | ] 382 | }, 383 | { 384 | "cell_type": "code", 385 | "execution_count": 5, 386 | "metadata": {}, 387 | "outputs": [], 388 | "source": [ 389 | "# Defining how we want ChatGPT to call our custom functions\n", 390 | "my_custom_functions = [\n", 391 | " {\n", 392 | " 'name': 'extract_person_info',\n", 393 | " 'description': 'Get \"About Me\" information from the body of the input text',\n", 394 | " 'parameters': {\n", 395 | " 'type': 'object',\n", 396 | " 'properties': {\n", 397 | " 'name': {\n", 398 | " 'type': 'string',\n", 399 | " 'description': 'Name of the person'\n", 400 | " },\n", 401 | " 'job_title': {\n", 402 | " 'type': 'string',\n", 403 | " 'description': 'Job title of the person'\n", 404 | " },\n", 405 | " 'num_children': {\n", 406 | " 'type': 'integer',\n", 407 | " 'description': 'Number of children the person is a parent to'\n", 408 | " 
}\n", 409 | " }\n", 410 | " }\n", 411 | " },\n", 412 | " {\n", 413 | " 'name': 'extract_vehicle_info',\n", 414 | " 'description': 'Extract the make and model of the person\\'s car',\n", 415 | " 'parameters': {\n", 416 | " 'type': 'object',\n", 417 | " 'properties': {\n", 418 | " 'vehicle_make': {\n", 419 | " 'type': 'string',\n", 420 | " 'description': 'Make of the person\\'s vehicle'\n", 421 | " },\n", 422 | " 'vehicle_model': {\n", 423 | " 'type': 'string',\n", 424 | " 'description': 'Model of the person\\'s vehicle'\n", 425 | " }\n", 426 | " }\n", 427 | " }\n", 428 | " },\n", 429 | " {\n", 430 | " 'name': 'extract_all_info',\n", 431 | " 'description': 'Extract all information about a person including their vehicle make and model',\n", 432 | " 'parameters': {\n", 433 | " 'type': 'object',\n", 434 | " 'properties': {\n", 435 | " 'name': {\n", 436 | " 'type': 'string',\n", 437 | " 'description': 'Name of the person'\n", 438 | " },\n", 439 | " 'job_title': {\n", 440 | " 'type': 'string',\n", 441 | " 'description': 'Job title of the person'\n", 442 | " },\n", 443 | " 'num_children': {\n", 444 | " 'type': 'integer',\n", 445 | " 'description': 'Number of children the person is a parent to'\n", 446 | " },\n", 447 | " 'vehicle_make': {\n", 448 | " 'type': 'string',\n", 449 | " 'description': 'Make of the person\\'s vehicle'\n", 450 | " },\n", 451 | " 'vehicle_model': {\n", 452 | " 'type': 'string',\n", 453 | " 'description': 'Model of the person\\'s vehicle'\n", 454 | " },\n", 455 | " 'company_name': {\n", 456 | " 'type': 'string',\n", 457 | " 'description': 'Name of the company the person works for'\n", 458 | " },\n", 459 | " 'favorite_vg_series': {\n", 460 | " 'type': 'string',\n", 461 | " 'description': 'Name of the person\\'s favorite video game series'\n", 462 | " }\n", 463 | " }\n", 464 | " }\n", 465 | " }\n", 466 | "]" 467 | ] 468 | }, 469 | { 470 | "cell_type": "markdown", 471 | "metadata": {}, 472 | "source": [ 473 | "Now let's demonstrate what happens when we 
apply 3 different samples to all of the custom functions." 474 | ] 475 | }, 476 | { 477 | "cell_type": "code", 478 | "execution_count": 43, 479 | "metadata": {}, 480 | "outputs": [], 481 | "source": [ 482 | "# Defining a list of samples\n", 483 | "samples = [\n", 484 | " str(about_me),\n", 485 | " 'My name is David Hundley. I am a principal machine learning engineer, and I have two daughters.',\n", 486 | " 'She drives a Kia Sportage.'\n", 487 | "]" 488 | ] 489 | }, 490 | { 491 | "cell_type": "code", 492 | "execution_count": 44, 493 | "metadata": {}, 494 | "outputs": [ 495 | { 496 | "name": "stdout", 497 | "output_type": "stream", 498 | "text": [ 499 | "Sample #1's results:\n", 500 | "{\n", 501 | " \"id\": \"chatcmpl-7aTjJLoSCkICSQZM2Eab3HG3IbGca\",\n", 502 | " \"object\": \"chat.completion\",\n", 503 | " \"created\": 1688928445,\n", 504 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 505 | " \"choices\": [\n", 506 | " {\n", 507 | " \"index\": 0,\n", 508 | " \"message\": {\n", 509 | " \"role\": \"assistant\",\n", 510 | " \"content\": null,\n", 511 | " \"function_call\": {\n", 512 | " \"name\": \"extract_all_info\",\n", 513 | " \"arguments\": \"{\\n \\\"name\\\": \\\"David Hundley\\\",\\n \\\"job_title\\\": \\\"principal machine learning engineer\\\",\\n \\\"num_children\\\": 2,\\n \\\"vehicle_make\\\": \\\"Tesla\\\",\\n \\\"vehicle_model\\\": \\\"Model 3\\\",\\n \\\"company_name\\\": \\\"State Farm\\\",\\n \\\"favorite_vg_series\\\": \\\"The Legend of Zelda\\\"\\n}\"\n", 514 | " }\n", 515 | " },\n", 516 | " \"finish_reason\": \"function_call\"\n", 517 | " }\n", 518 | " ],\n", 519 | " \"usage\": {\n", 520 | " \"prompt_tokens\": 320,\n", 521 | " \"completion_tokens\": 77,\n", 522 | " \"total_tokens\": 397\n", 523 | " }\n", 524 | "}\n", 525 | "Sample #2's results:\n", 526 | "{\n", 527 | " \"id\": \"chatcmpl-7aTjLnQMyvRBpfpukfM2SYXKMuI5C\",\n", 528 | " \"object\": \"chat.completion\",\n", 529 | " \"created\": 1688928447,\n", 530 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 531 
| " \"choices\": [\n", 532 | " {\n", 533 | " \"index\": 0,\n", 534 | " \"message\": {\n", 535 | " \"role\": \"assistant\",\n", 536 | " \"content\": null,\n", 537 | " \"function_call\": {\n", 538 | " \"name\": \"extract_person_info\",\n", 539 | " \"arguments\": \"{\\n \\\"name\\\": \\\"David Hundley\\\",\\n \\\"job_title\\\": \\\"principal machine learning engineer\\\",\\n \\\"num_children\\\": 2\\n}\"\n", 540 | " }\n", 541 | " },\n", 542 | " \"finish_reason\": \"function_call\"\n", 543 | " }\n", 544 | " ],\n", 545 | " \"usage\": {\n", 546 | " \"prompt_tokens\": 282,\n", 547 | " \"completion_tokens\": 37,\n", 548 | " \"total_tokens\": 319\n", 549 | " }\n", 550 | "}\n", 551 | "Sample #3's results:\n", 552 | "{\n", 553 | " \"id\": \"chatcmpl-7aTjMPXvNHG05xxsgSPkgoDreHNkO\",\n", 554 | " \"object\": \"chat.completion\",\n", 555 | " \"created\": 1688928448,\n", 556 | " \"model\": \"gpt-3.5-turbo-0613\",\n", 557 | " \"choices\": [\n", 558 | " {\n", 559 | " \"index\": 0,\n", 560 | " \"message\": {\n", 561 | " \"role\": \"assistant\",\n", 562 | " \"content\": null,\n", 563 | " \"function_call\": {\n", 564 | " \"name\": \"extract_vehicle_info\",\n", 565 | " \"arguments\": \"{\\n \\\"vehicle_make\\\": \\\"Kia\\\",\\n \\\"vehicle_model\\\": \\\"Sportage\\\"\\n}\"\n", 566 | " }\n", 567 | " },\n", 568 | " \"finish_reason\": \"function_call\"\n", 569 | " }\n", 570 | " ],\n", 571 | " \"usage\": {\n", 572 | " \"prompt_tokens\": 268,\n", 573 | " \"completion_tokens\": 27,\n", 574 | " \"total_tokens\": 295\n", 575 | " }\n", 576 | "}\n" 577 | ] 578 | } 579 | ], 580 | "source": [ 581 | "# Iterating over the three samples\n", 582 | "for i, sample in enumerate(samples):\n", 583 | " \n", 584 | " print(f'Sample #{i + 1}\\'s results:')\n", 585 | "\n", 586 | " # Getting the response back from ChatGPT (gpt-3.5-turbo)\n", 587 | " openai_response = openai.ChatCompletion.create(\n", 588 | " model = 'gpt-3.5-turbo',\n", 589 | " messages = [{'role': 'user', 'content': sample}],\n", 590 | " 
functions = my_custom_functions,\n", 591 | " function_call = 'auto'\n", 592 | " )\n", 593 | "\n", 594 | " # Printing the sample's response\n", 595 | " print(openai_response)" 596 | ] 597 | }, 598 | { 599 | "cell_type": "markdown", 600 | "metadata": {}, 601 | "source": [ 602 | "With each of the respective prompts, ChatGPT selected the correct custom function, and we can specifically note that in the `name` value under `function_call` in the API's response object. In addition to this being a handy way to identify which function to use the arguments for, we can programmatically map our actual custom Python function to this value to run the correct code appropriately." 603 | ] 604 | }, 605 | { 606 | "cell_type": "code", 607 | "execution_count": 48, 608 | "metadata": {}, 609 | "outputs": [ 610 | { 611 | "name": "stdout", 612 | "output_type": "stream", 613 | "text": [ 614 | "Sample #1's results:\n", 615 | "\n", 616 | " This person's name is David Hundley. Their job title is principal machine learning engineer, and they have 2 children.\n", 617 | " They drive a Tesla Model 3.\n", 618 | " They work for State Farm.\n", 619 | " Their favorite video game series is The Legend of Zelda.\n", 620 | " \n", 621 | "Sample #2's results:\n", 622 | "This person's name is David Hundley. 
Their job title is Principal Machine Learning Engineer, and they have 2 children.\n", 623 | "Sample #3's results:\n", 624 | "Vehicle make: Kia\n", 625 | "Vehicle model: Sportage\n" 626 | ] 627 | } 628 | ], 629 | "source": [ 630 | "# Iterating over the three samples\n", 631 | "for i, sample in enumerate(samples):\n", 632 | " \n", 633 | " print(f'Sample #{i + 1}\\'s results:')\n", 634 | "\n", 635 | " # Getting the response back from ChatGPT (gpt-3.5-turbo)\n", 636 | " openai_response = openai.ChatCompletion.create(\n", 637 | " model = 'gpt-3.5-turbo',\n", 638 | " messages = [{'role': 'user', 'content': sample}],\n", 639 | " functions = my_custom_functions,\n", 640 | " function_call = 'auto'\n", 641 | " )['choices'][0]['message']\n", 642 | "\n", 643 | " # Checking to see that a function call was invoked\n", 644 | " if openai_response.get('function_call'):\n", 645 | "\n", 646 | " # Checking to see which specific function call was invoked\n", 647 | " function_called = openai_response['function_call']['name']\n", 648 | "\n", 649 | " # Extracting the arguments of the function call\n", 650 | " function_args = json.loads(openai_response['function_call']['arguments'])\n", 651 | "\n", 652 | " # Invoking the proper functions\n", 653 | " if function_called == 'extract_person_info':\n", 654 | " extract_person_info(*list(function_args.values()))\n", 655 | " elif function_called == 'extract_vehicle_info':\n", 656 | " extract_vehicle_info(*list(function_args.values()))\n", 657 | " elif function_called == 'extract_all_info':\n", 658 | " extract_all_info(*list(function_args.values()))" 659 | ] 660 | }, 661 | { 662 | "cell_type": "markdown", 663 | "metadata": {}, 664 | "source": [ 665 | "## OpenAI Function Calling with LangChain\n", 666 | "Given its popularity amongst the Generative AI community, I thought I'd re-visit this notebook and add some code to show how you might make use of this exact same functionality in LangChain" 667 | ] 668 | }, 669 | { 670 | "cell_type": "code", 671 | 
"execution_count": 6, 672 | "metadata": {}, 673 | "outputs": [], 674 | "source": [ 675 | "# Importing the LangChain objects\n", 676 | "from langchain.chat_models import ChatOpenAI\n", 677 | "from langchain.chains import LLMChain\n", 678 | "from langchain.prompts.chat import ChatPromptTemplate\n", 679 | "from langchain.chains.openai_functions import create_structured_output_chain" 680 | ] 681 | }, 682 | { 683 | "cell_type": "code", 684 | "execution_count": 17, 685 | "metadata": {}, 686 | "outputs": [], 687 | "source": [ 688 | "# Setting the proper instance of the OpenAI model\n", 689 | "llm = ChatOpenAI(model = 'gpt-3.5-turbo-0613')\n", 690 | "\n", 691 | "# Setting a LangChain ChatPromptTemplate\n", 692 | "chat_prompt_template = ChatPromptTemplate.from_template('{my_prompt}')\n", 693 | "\n", 694 | "# Setting the JSON schema for extracting vehicle information\n", 695 | "langchain_json_schema = {\n", 696 | " 'name': 'extract_vehicle_info',\n", 697 | " 'description': 'Extract the make and model of the person\\'s car',\n", 698 | " 'type': 'object',\n", 699 | " 'properties': {\n", 700 | " 'vehicle_make': {\n", 701 | " 'title': 'Vehicle Make',\n", 702 | " 'type': 'string',\n", 703 | " 'description': 'Make of the person\\'s vehicle'\n", 704 | " },\n", 705 | " 'vehicle_model': {\n", 706 | " 'title': 'Vehicle Model',\n", 707 | " 'type': 'string',\n", 708 | " 'description': 'Model of the person\\'s vehicle'\n", 709 | " }\n", 710 | " }\n", 711 | "}" 712 | ] 713 | }, 714 | { 715 | "cell_type": "code", 716 | "execution_count": 18, 717 | "metadata": {}, 718 | "outputs": [], 719 | "source": [ 720 | "# Defining the LangChain chain object for function calling\n", 721 | "chain = create_structured_output_chain(output_schema = langchain_json_schema,\n", 722 | " llm = llm,\n", 723 | " prompt = chat_prompt_template)" 724 | ] 725 | }, 726 | { 727 | "cell_type": "code", 728 | "execution_count": 19, 729 | "metadata": {}, 730 | "outputs": [ 731 | { 732 | "name": "stdout", 733 | 
"output_type": "stream", 734 | "text": [ 735 | "{'vehicle_make': 'Tesla', 'vehicle_model': 'Model 3'}\n" 736 | ] 737 | } 738 | ], 739 | "source": [ 740 | "# Getting results with a demo prompt\n", 741 | "print(chain.run(my_prompt = 'I drive a Tesla Model 3'))" 742 | ] 743 | }, 744 | { 745 | "cell_type": "code", 746 | "execution_count": null, 747 | "metadata": {}, 748 | "outputs": [], 749 | "source": [] 750 | } 751 | ], 752 | "metadata": { 753 | "kernelspec": { 754 | "display_name": "Python 3", 755 | "language": "python", 756 | "name": "python3" 757 | }, 758 | "language_info": { 759 | "codemirror_mode": { 760 | "name": "ipython", 761 | "version": 3 762 | }, 763 | "file_extension": ".py", 764 | "mimetype": "text/x-python", 765 | "name": "python", 766 | "nbconvert_exporter": "python", 767 | "pygments_lexer": "ipython3", 768 | "version": "3.11.4" 769 | }, 770 | "orig_nbformat": 4 771 | }, 772 | "nbformat": 4, 773 | "nbformat_minor": 2 774 | } 775 | -------------------------------------------------------------------------------- /notebooks/image-generation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "# Image Generation with DALL-E" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "## Notebook Setup" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 2, 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "# Importing the necessary Python libraries\n", 25 | "import os\n", 26 | "import json\n", 27 | "from base64 import b64decode\n", 28 | "from IPython import display\n", 29 | "import yaml\n", 30 | "import openai" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 3, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# Loading the API key and organization ID from file (NOT pushed to GitHub)\n", 40 | "with 
# --- Image generation with DALL-E (openai v0.x client) ---

# Loading the API key and organization ID from file (NOT pushed to GitHub)
with open('../keys/openai-keys.yaml') as f:
    keys_yaml = yaml.safe_load(f)

# Applying our API key and organization ID to OpenAI
openai.organization = keys_yaml['ORG_ID']
openai.api_key = keys_yaml['API_KEY']

# Generating two simple images, returned inline as base64-encoded JSON
response = openai.Image.create(
    prompt = "A cute baby sea otter",
    n = 2,
    size = "1024x1024",
    response_format = 'b64_json'
)

# Decoding the first generated image and writing it to disk
decoded_image = b64decode(response['data'][0]['b64_json'])
with open('test.png', 'wb') as png:
    png.write(decoded_image)

# Scratch cell from the original notebook (kept for fidelity)
test = 'asdlkfjaskldjfaslkdfksf'
len(test)

# Creating a variation of an existing image.
# FIX: the original passed a bare open('../data/car.png', 'rb') handle that
# was never closed; a context manager releases the file after the request.
with open('../data/car.png', 'rb') as source_image:
    response = openai.Image.create_variation(
        image = source_image,
        n = 1,
        size = '1024x1024'
    )

# Inspecting the variation response (contains a signed, short-lived image URL)
response
# --- OpenAI API Interaction: notebook setup ---
# Demonstrates interacting with the OpenAI API's text-oriented features,
# primarily sticking with GPT-3.5 ('gpt-3.5-turbo' and 'text-davinci-003').
# Requires your own OpenAI API key, which costs a little bit of money.
#
# FIX: the original notebook JSON contained unresolved git merge-conflict
# markers (<<<<<<< HEAD / ======= / >>>>>>> 060eac35...) around cell
# execution counts and outputs, which made the file invalid; resolved here
# by keeping a single consistent version of each cell.

# Importing the Python libraries we will be using in this notebook
import os
import yaml
import openai

# Loading the API key and organization ID from file (NOT pushed to GitHub).
# Please be careful NOT to expose your API key: somebody can run up your
# OpenAI bill if it is accidentally exposed!
with open('../keys/openai-keys.yaml') as f:
    keys_yaml = yaml.safe_load(f)

# Applying our API key and organization ID to OpenAI
openai.organization = keys_yaml['ORG_ID']
openai.api_key = keys_yaml['API_KEY']

# NOTE(review): the following "Models" section's captured output is a large
# Model.list()-style dump; the producing source cell lies outside this chunk,
# so it is not reproduced here — confirm against the full notebook.
\"model\",\n", 138 | " \"owned_by\": \"openai\",\n", 139 | " \"parent\": null,\n", 140 | " \"permission\": [\n", 141 | " {\n", 142 | " \"allow_create_engine\": false,\n", 143 | " \"allow_fine_tuning\": false,\n", 144 | " \"allow_logprobs\": true,\n", 145 | " \"allow_sampling\": true,\n", 146 | " \"allow_search_indices\": false,\n", 147 | " \"allow_view\": true,\n", 148 | " \"created\": 1669066355,\n", 149 | " \"group\": null,\n", 150 | " \"id\": \"modelperm-U6ZwlyAd0LyMk4rcMdz33Yc3\",\n", 151 | " \"is_blocking\": false,\n", 152 | " \"object\": \"model_permission\",\n", 153 | " \"organization\": \"*\"\n", 154 | " }\n", 155 | " ],\n", 156 | " \"root\": \"davinci\"\n", 157 | " },\n", 158 | " {\n", 159 | " \"created\": 1649809179,\n", 160 | " \"id\": \"text-davinci-edit-001\",\n", 161 | " \"object\": \"model\",\n", 162 | " \"owned_by\": \"openai\",\n", 163 | " \"parent\": null,\n", 164 | " \"permission\": [\n", 165 | " {\n", 166 | " \"allow_create_engine\": false,\n", 167 | " \"allow_fine_tuning\": false,\n", 168 | " \"allow_logprobs\": true,\n", 169 | " \"allow_sampling\": true,\n", 170 | " \"allow_search_indices\": false,\n", 171 | " \"allow_view\": true,\n", 172 | " \"created\": 1679934178,\n", 173 | " \"group\": null,\n", 174 | " \"id\": \"modelperm-otmQSS0hmabtVGHI9QB3bct3\",\n", 175 | " \"is_blocking\": false,\n", 176 | " \"object\": \"model_permission\",\n", 177 | " \"organization\": \"*\"\n", 178 | " }\n", 179 | " ],\n", 180 | " \"root\": \"text-davinci-edit-001\"\n", 181 | " },\n", 182 | " {\n", 183 | " \"created\": 1677649963,\n", 184 | " \"id\": \"gpt-3.5-turbo-0301\",\n", 185 | " \"object\": \"model\",\n", 186 | " \"owned_by\": \"openai\",\n", 187 | " \"parent\": null,\n", 188 | " \"permission\": [\n", 189 | " {\n", 190 | " \"allow_create_engine\": false,\n", 191 | " \"allow_fine_tuning\": false,\n", 192 | " \"allow_logprobs\": true,\n", 193 | " \"allow_sampling\": true,\n", 194 | " \"allow_search_indices\": false,\n", 195 | " \"allow_view\": true,\n", 196 
| <<<<<<< HEAD 197 | " \"created\": 1682525090,\n", 198 | " \"group\": null,\n", 199 | " \"id\": \"modelperm-AevwyYr2NhfqaFiH7hJZIk3D\",\n", 200 | ======= 201 | " \"created\": 1681938856,\n", 202 | " \"group\": null,\n", 203 | " \"id\": \"modelperm-yDCC5ePuUJKmUe3ld1q1pKvA\",\n", 204 | >>>>>>> 060eac35c2416599e8256f93faee17f0bfd2bec1 205 | " \"is_blocking\": false,\n", 206 | " \"object\": \"model_permission\",\n", 207 | " \"organization\": \"*\"\n", 208 | " }\n", 209 | " ],\n", 210 | " \"root\": \"gpt-3.5-turbo-0301\"\n", 211 | " },\n", 212 | " {\n", 213 | " \"created\": 1651172509,\n", 214 | " \"id\": \"babbage-code-search-code\",\n", 215 | " \"object\": \"model\",\n", 216 | " \"owned_by\": \"openai-dev\",\n", 217 | " \"parent\": null,\n", 218 | " \"permission\": [\n", 219 | " {\n", 220 | " \"allow_create_engine\": false,\n", 221 | " \"allow_fine_tuning\": false,\n", 222 | " \"allow_logprobs\": true,\n", 223 | " \"allow_sampling\": true,\n", 224 | " \"allow_search_indices\": true,\n", 225 | " \"allow_view\": true,\n", 226 | " \"created\": 1669085863,\n", 227 | " \"group\": null,\n", 228 | " \"id\": \"modelperm-4qRnA3Hj8HIJbgo0cGbcmErn\",\n", 229 | " \"is_blocking\": false,\n", 230 | " \"object\": \"model_permission\",\n", 231 | " \"organization\": \"*\"\n", 232 | " }\n", 233 | " ],\n", 234 | " \"root\": \"babbage-code-search-code\"\n", 235 | " },\n", 236 | " {\n", 237 | " \"created\": 1651172505,\n", 238 | " \"id\": \"text-similarity-babbage-001\",\n", 239 | " \"object\": \"model\",\n", 240 | " \"owned_by\": \"openai-dev\",\n", 241 | " \"parent\": null,\n", 242 | " \"permission\": [\n", 243 | " {\n", 244 | " \"allow_create_engine\": false,\n", 245 | " \"allow_fine_tuning\": false,\n", 246 | " \"allow_logprobs\": true,\n", 247 | " \"allow_sampling\": true,\n", 248 | " \"allow_search_indices\": true,\n", 249 | " \"allow_view\": true,\n", 250 | " \"created\": 1669081947,\n", 251 | " \"group\": null,\n", 252 | " \"id\": \"modelperm-48kcCHhfzvnfY84OtJf5m8Cz\",\n", 253 
| " \"is_blocking\": false,\n", 254 | " \"object\": \"model_permission\",\n", 255 | " \"organization\": \"*\"\n", 256 | " }\n", 257 | " ],\n", 258 | " \"root\": \"text-similarity-babbage-001\"\n", 259 | " },\n", 260 | " {\n", 261 | " \"created\": 1677610602,\n", 262 | " \"id\": \"gpt-3.5-turbo\",\n", 263 | " \"object\": \"model\",\n", 264 | " \"owned_by\": \"openai\",\n", 265 | " \"parent\": null,\n", 266 | " \"permission\": [\n", 267 | " {\n", 268 | " \"allow_create_engine\": false,\n", 269 | " \"allow_fine_tuning\": false,\n", 270 | " \"allow_logprobs\": true,\n", 271 | " \"allow_sampling\": true,\n", 272 | " \"allow_search_indices\": false,\n", 273 | " \"allow_view\": true,\n", 274 | " \"created\": 1681938917,\n", 275 | " \"group\": null,\n", 276 | " \"id\": \"modelperm-nH8mIzR6ChcOTZNawZgHl7yK\",\n", 277 | " \"is_blocking\": false,\n", 278 | " \"object\": \"model_permission\",\n", 279 | " \"organization\": \"*\"\n", 280 | " }\n", 281 | " ],\n", 282 | " \"root\": \"gpt-3.5-turbo\"\n", 283 | " },\n", 284 | " {\n", 285 | " \"created\": 1649880484,\n", 286 | " \"id\": \"code-davinci-edit-001\",\n", 287 | " \"object\": \"model\",\n", 288 | " \"owned_by\": \"openai\",\n", 289 | " \"parent\": null,\n", 290 | " \"permission\": [\n", 291 | " {\n", 292 | " \"allow_create_engine\": false,\n", 293 | " \"allow_fine_tuning\": false,\n", 294 | " \"allow_logprobs\": true,\n", 295 | " \"allow_sampling\": true,\n", 296 | " \"allow_search_indices\": false,\n", 297 | " \"allow_view\": true,\n", 298 | " \"created\": 1679934178,\n", 299 | " \"group\": null,\n", 300 | " \"id\": \"modelperm-Foe5Y4TvaKveYxt74oKMw8IB\",\n", 301 | " \"is_blocking\": false,\n", 302 | " \"object\": \"model_permission\",\n", 303 | " \"organization\": \"*\"\n", 304 | " }\n", 305 | " ],\n", 306 | " \"root\": \"code-davinci-edit-001\"\n", 307 | " },\n", 308 | " {\n", 309 | " \"created\": 1649364042,\n", 310 | " \"id\": \"text-davinci-001\",\n", 311 | " \"object\": \"model\",\n", 312 | " \"owned_by\": 
\"openai\",\n", 313 | " \"parent\": null,\n", 314 | " \"permission\": [\n", 315 | " {\n", 316 | " \"allow_create_engine\": false,\n", 317 | " \"allow_fine_tuning\": false,\n", 318 | " \"allow_logprobs\": true,\n", 319 | " \"allow_sampling\": true,\n", 320 | " \"allow_search_indices\": false,\n", 321 | " \"allow_view\": true,\n", 322 | " \"created\": 1669066355,\n", 323 | " \"group\": null,\n", 324 | " \"id\": \"modelperm-MVM5NfoRjXkDve3uQW3YZDDt\",\n", 325 | " \"is_blocking\": false,\n", 326 | " \"object\": \"model_permission\",\n", 327 | " \"organization\": \"*\"\n", 328 | " }\n", 329 | " ],\n", 330 | " \"root\": \"text-davinci-001\"\n", 331 | " },\n", 332 | " {\n", 333 | " \"created\": 1669599635,\n", 334 | " \"id\": \"text-davinci-003\",\n", 335 | " \"object\": \"model\",\n", 336 | " \"owned_by\": \"openai-internal\",\n", 337 | " \"parent\": null,\n", 338 | " \"permission\": [\n", 339 | " {\n", 340 | " \"allow_create_engine\": false,\n", 341 | " \"allow_fine_tuning\": false,\n", 342 | " \"allow_logprobs\": true,\n", 343 | " \"allow_sampling\": true,\n", 344 | " \"allow_search_indices\": false,\n", 345 | " \"allow_view\": true,\n", 346 | " \"created\": 1682112392,\n", 347 | " \"group\": null,\n", 348 | " \"id\": \"modelperm-oyykgcqEhcKfSCh5Ca9BETGH\",\n", 349 | " \"is_blocking\": false,\n", 350 | " \"object\": \"model_permission\",\n", 351 | " \"organization\": \"*\"\n", 352 | " }\n", 353 | " ],\n", 354 | " \"root\": \"text-davinci-003\"\n", 355 | " },\n", 356 | " {\n", 357 | " \"created\": 1649357491,\n", 358 | " \"id\": \"ada\",\n", 359 | " \"object\": \"model\",\n", 360 | " \"owned_by\": \"openai\",\n", 361 | " \"parent\": null,\n", 362 | " \"permission\": [\n", 363 | " {\n", 364 | " \"allow_create_engine\": false,\n", 365 | " \"allow_fine_tuning\": false,\n", 366 | " \"allow_logprobs\": true,\n", 367 | " \"allow_sampling\": true,\n", 368 | " \"allow_search_indices\": false,\n", 369 | " \"allow_view\": true,\n", 370 | " \"created\": 1675997661,\n", 371 | " 
\"group\": null,\n", 372 | " \"id\": \"modelperm-u0nKN4ub7EVQudgMuvCuvDjc\",\n", 373 | " \"is_blocking\": false,\n", 374 | " \"object\": \"model_permission\",\n", 375 | " \"organization\": \"*\"\n", 376 | " }\n", 377 | " ],\n", 378 | " \"root\": \"ada\"\n", 379 | " },\n", 380 | " {\n", 381 | " \"created\": 1651172509,\n", 382 | " \"id\": \"babbage-code-search-text\",\n", 383 | " \"object\": \"model\",\n", 384 | " \"owned_by\": \"openai-dev\",\n", 385 | " \"parent\": null,\n", 386 | " \"permission\": [\n", 387 | " {\n", 388 | " \"allow_create_engine\": false,\n", 389 | " \"allow_fine_tuning\": false,\n", 390 | " \"allow_logprobs\": true,\n", 391 | " \"allow_sampling\": true,\n", 392 | " \"allow_search_indices\": true,\n", 393 | " \"allow_view\": true,\n", 394 | " \"created\": 1669085863,\n", 395 | " \"group\": null,\n", 396 | " \"id\": \"modelperm-Lftf8H4ZPDxNxVs0hHPJBUoe\",\n", 397 | " \"is_blocking\": false,\n", 398 | " \"object\": \"model_permission\",\n", 399 | " \"organization\": \"*\"\n", 400 | " }\n", 401 | " ],\n", 402 | " \"root\": \"babbage-code-search-text\"\n", 403 | " },\n", 404 | " {\n", 405 | " \"created\": 1651172505,\n", 406 | " \"id\": \"babbage-similarity\",\n", 407 | " \"object\": \"model\",\n", 408 | " \"owned_by\": \"openai-dev\",\n", 409 | " \"parent\": null,\n", 410 | " \"permission\": [\n", 411 | " {\n", 412 | " \"allow_create_engine\": false,\n", 413 | " \"allow_fine_tuning\": false,\n", 414 | " \"allow_logprobs\": true,\n", 415 | " \"allow_sampling\": true,\n", 416 | " \"allow_search_indices\": true,\n", 417 | " \"allow_view\": true,\n", 418 | " \"created\": 1669081947,\n", 419 | " \"group\": null,\n", 420 | " \"id\": \"modelperm-mS20lnPqhebTaFPrcCufyg7m\",\n", 421 | " \"is_blocking\": false,\n", 422 | " \"object\": \"model_permission\",\n", 423 | " \"organization\": \"*\"\n", 424 | " }\n", 425 | " ],\n", 426 | " \"root\": \"babbage-similarity\"\n", 427 | " },\n", 428 | " {\n", 429 | " \"created\": 1651172507,\n", 430 | " \"id\": 
\"code-search-babbage-text-001\",\n", 431 | " \"object\": \"model\",\n", 432 | " \"owned_by\": \"openai-dev\",\n", 433 | " \"parent\": null,\n", 434 | " \"permission\": [\n", 435 | " {\n", 436 | " \"allow_create_engine\": false,\n", 437 | " \"allow_fine_tuning\": false,\n", 438 | " \"allow_logprobs\": true,\n", 439 | " \"allow_sampling\": true,\n", 440 | " \"allow_search_indices\": true,\n", 441 | " \"allow_view\": true,\n", 442 | " \"created\": 1669085863,\n", 443 | " \"group\": null,\n", 444 | " \"id\": \"modelperm-EC5ASz4NLChtEV1Cwkmrwm57\",\n", 445 | " \"is_blocking\": false,\n", 446 | " \"object\": \"model_permission\",\n", 447 | " \"organization\": \"*\"\n", 448 | " }\n", 449 | " ],\n", 450 | " \"root\": \"code-search-babbage-text-001\"\n", 451 | " },\n", 452 | " {\n", 453 | " \"created\": 1649364043,\n", 454 | " \"id\": \"text-curie-001\",\n", 455 | " \"object\": \"model\",\n", 456 | " \"owned_by\": \"openai\",\n", 457 | " \"parent\": null,\n", 458 | " \"permission\": [\n", 459 | " {\n", 460 | " \"allow_create_engine\": false,\n", 461 | " \"allow_fine_tuning\": false,\n", 462 | " \"allow_logprobs\": true,\n", 463 | " \"allow_sampling\": true,\n", 464 | " \"allow_search_indices\": false,\n", 465 | " \"allow_view\": true,\n", 466 | " \"created\": 1679310997,\n", 467 | " \"group\": null,\n", 468 | " \"id\": \"modelperm-8InhPV3CZfN3F5QHKoJd4zRD\",\n", 469 | " \"is_blocking\": false,\n", 470 | " \"object\": \"model_permission\",\n", 471 | " \"organization\": \"*\"\n", 472 | " }\n", 473 | " ],\n", 474 | " \"root\": \"text-curie-001\"\n", 475 | " },\n", 476 | " {\n", 477 | " \"created\": 1677532384,\n", 478 | " \"id\": \"whisper-1\",\n", 479 | " \"object\": \"model\",\n", 480 | " \"owned_by\": \"openai-internal\",\n", 481 | " \"parent\": null,\n", 482 | " \"permission\": [\n", 483 | " {\n", 484 | " \"allow_create_engine\": false,\n", 485 | " \"allow_fine_tuning\": false,\n", 486 | " \"allow_logprobs\": true,\n", 487 | " \"allow_sampling\": true,\n", 488 | " 
\"allow_search_indices\": false,\n", 489 | " \"allow_view\": true,\n", 490 | " \"created\": 1680896832,\n", 491 | " \"group\": null,\n", 492 | " \"id\": \"modelperm-JdDYm8KjLd5xnGMGVlwX1UAp\",\n", 493 | " \"is_blocking\": false,\n", 494 | " \"object\": \"model_permission\",\n", 495 | " \"organization\": \"*\"\n", 496 | " }\n", 497 | " ],\n", 498 | " \"root\": \"whisper-1\"\n", 499 | " },\n", 500 | " {\n", 501 | " \"created\": 1651172507,\n", 502 | " \"id\": \"code-search-babbage-code-001\",\n", 503 | " \"object\": \"model\",\n", 504 | " \"owned_by\": \"openai-dev\",\n", 505 | " \"parent\": null,\n", 506 | " \"permission\": [\n", 507 | " {\n", 508 | " \"allow_create_engine\": false,\n", 509 | " \"allow_fine_tuning\": false,\n", 510 | " \"allow_logprobs\": true,\n", 511 | " \"allow_sampling\": true,\n", 512 | " \"allow_search_indices\": true,\n", 513 | " \"allow_view\": true,\n", 514 | " \"created\": 1669085864,\n", 515 | " \"group\": null,\n", 516 | " \"id\": \"modelperm-64LWHdlANgak2rHzc3K5Stt0\",\n", 517 | " \"is_blocking\": false,\n", 518 | " \"object\": \"model_permission\",\n", 519 | " \"organization\": \"*\"\n", 520 | " }\n", 521 | " ],\n", 522 | " \"root\": \"code-search-babbage-code-001\"\n", 523 | " },\n", 524 | " {\n", 525 | " \"created\": 1649364042,\n", 526 | " \"id\": \"text-ada-001\",\n", 527 | " \"object\": \"model\",\n", 528 | " \"owned_by\": \"openai\",\n", 529 | " \"parent\": null,\n", 530 | " \"permission\": [\n", 531 | " {\n", 532 | " \"allow_create_engine\": false,\n", 533 | " \"allow_fine_tuning\": false,\n", 534 | " \"allow_logprobs\": true,\n", 535 | " \"allow_sampling\": true,\n", 536 | " \"allow_search_indices\": false,\n", 537 | " \"allow_view\": true,\n", 538 | " \"created\": 1669088497,\n", 539 | " \"group\": null,\n", 540 | " \"id\": \"modelperm-KN5dRBCEW4az6gwcGXkRkMwK\",\n", 541 | " \"is_blocking\": false,\n", 542 | " \"object\": \"model_permission\",\n", 543 | " \"organization\": \"*\"\n", 544 | " }\n", 545 | " ],\n", 546 | " 
\"root\": \"text-ada-001\"\n", 547 | " },\n", 548 | " {\n", 549 | " \"created\": 1671217299,\n", 550 | " \"id\": \"text-embedding-ada-002\",\n", 551 | " \"object\": \"model\",\n", 552 | " \"owned_by\": \"openai-internal\",\n", 553 | " \"parent\": null,\n", 554 | " \"permission\": [\n", 555 | " {\n", 556 | " \"allow_create_engine\": false,\n", 557 | " \"allow_fine_tuning\": false,\n", 558 | " \"allow_logprobs\": true,\n", 559 | " \"allow_sampling\": true,\n", 560 | " \"allow_search_indices\": true,\n", 561 | " \"allow_view\": true,\n", 562 | " \"created\": 1678892857,\n", 563 | " \"group\": null,\n", 564 | " \"id\": \"modelperm-Dbv2FOgMdlDjO8py8vEjD5Mi\",\n", 565 | " \"is_blocking\": false,\n", 566 | " \"object\": \"model_permission\",\n", 567 | " \"organization\": \"*\"\n", 568 | " }\n", 569 | " ],\n", 570 | " \"root\": \"text-embedding-ada-002\"\n", 571 | " },\n", 572 | " {\n", 573 | " \"created\": 1651172505,\n", 574 | " \"id\": \"text-similarity-ada-001\",\n", 575 | " \"object\": \"model\",\n", 576 | " \"owned_by\": \"openai-dev\",\n", 577 | " \"parent\": null,\n", 578 | " \"permission\": [\n", 579 | " {\n", 580 | " \"allow_create_engine\": false,\n", 581 | " \"allow_fine_tuning\": false,\n", 582 | " \"allow_logprobs\": true,\n", 583 | " \"allow_sampling\": true,\n", 584 | " \"allow_search_indices\": true,\n", 585 | " \"allow_view\": true,\n", 586 | " \"created\": 1669092759,\n", 587 | " \"group\": null,\n", 588 | " \"id\": \"modelperm-DdCqkqmORpqxqdg4TkFRAgmw\",\n", 589 | " \"is_blocking\": false,\n", 590 | " \"object\": \"model_permission\",\n", 591 | " \"organization\": \"*\"\n", 592 | " }\n", 593 | " ],\n", 594 | " \"root\": \"text-similarity-ada-001\"\n", 595 | " },\n", 596 | " {\n", 597 | " \"created\": 1649364042,\n", 598 | " \"id\": \"curie-instruct-beta\",\n", 599 | " \"object\": \"model\",\n", 600 | " \"owned_by\": \"openai\",\n", 601 | " \"parent\": null,\n", 602 | " \"permission\": [\n", 603 | " {\n", 604 | " \"allow_create_engine\": false,\n", 605 | 
" \"allow_fine_tuning\": false,\n", 606 | " \"allow_logprobs\": true,\n", 607 | " \"allow_sampling\": true,\n", 608 | " \"allow_search_indices\": false,\n", 609 | " \"allow_view\": true,\n", 610 | " \"created\": 1680267269,\n", 611 | " \"group\": null,\n", 612 | " \"id\": \"modelperm-bsg59MlOi88CMf1MjnIKrT5a\",\n", 613 | " \"is_blocking\": false,\n", 614 | " \"object\": \"model_permission\",\n", 615 | " \"organization\": \"*\"\n", 616 | " }\n", 617 | " ],\n", 618 | " \"root\": \"curie-instruct-beta\"\n", 619 | " },\n", 620 | " {\n", 621 | " \"created\": 1651172505,\n", 622 | " \"id\": \"ada-code-search-code\",\n", 623 | " \"object\": \"model\",\n", 624 | " \"owned_by\": \"openai-dev\",\n", 625 | " \"parent\": null,\n", 626 | " \"permission\": [\n", 627 | " {\n", 628 | " \"allow_create_engine\": false,\n", 629 | " \"allow_fine_tuning\": false,\n", 630 | " \"allow_logprobs\": true,\n", 631 | " \"allow_sampling\": true,\n", 632 | " \"allow_search_indices\": true,\n", 633 | " \"allow_view\": true,\n", 634 | " \"created\": 1669087421,\n", 635 | " \"group\": null,\n", 636 | " \"id\": \"modelperm-wa8tg4Pi9QQNaWdjMTM8dkkx\",\n", 637 | " \"is_blocking\": false,\n", 638 | " \"object\": \"model_permission\",\n", 639 | " \"organization\": \"*\"\n", 640 | " }\n", 641 | " ],\n", 642 | " \"root\": \"ada-code-search-code\"\n", 643 | " },\n", 644 | " {\n", 645 | " \"created\": 1651172507,\n", 646 | " \"id\": \"ada-similarity\",\n", 647 | " \"object\": \"model\",\n", 648 | " \"owned_by\": \"openai-dev\",\n", 649 | " \"parent\": null,\n", 650 | " \"permission\": [\n", 651 | " {\n", 652 | " \"allow_create_engine\": false,\n", 653 | " \"allow_fine_tuning\": false,\n", 654 | " \"allow_logprobs\": true,\n", 655 | " \"allow_sampling\": true,\n", 656 | " \"allow_search_indices\": true,\n", 657 | " \"allow_view\": true,\n", 658 | " \"created\": 1669092759,\n", 659 | " \"group\": null,\n", 660 | " \"id\": \"modelperm-LtSIwCEReeDcvGTmM13gv6Fg\",\n", 661 | " \"is_blocking\": false,\n", 662 | " 
\"object\": \"model_permission\",\n", 663 | " \"organization\": \"*\"\n", 664 | " }\n", 665 | " ],\n", 666 | " \"root\": \"ada-similarity\"\n", 667 | " },\n", 668 | " {\n", 669 | " \"created\": 1677610602,\n", 670 | " \"id\": \"gpt-3.5-turbo\",\n", 671 | " \"object\": \"model\",\n", 672 | " \"owned_by\": \"openai\",\n", 673 | " \"parent\": null,\n", 674 | " \"permission\": [\n", 675 | " {\n", 676 | " \"allow_create_engine\": false,\n", 677 | " \"allow_fine_tuning\": false,\n", 678 | " \"allow_logprobs\": true,\n", 679 | " \"allow_sampling\": true,\n", 680 | " \"allow_search_indices\": false,\n", 681 | " \"allow_view\": true,\n", 682 | " \"created\": 1682538796,\n", 683 | " \"group\": null,\n", 684 | " \"id\": \"modelperm-le6F1Qusxwnxz4Lcc2trugLT\",\n", 685 | " \"is_blocking\": false,\n", 686 | " \"object\": \"model_permission\",\n", 687 | " \"organization\": \"*\"\n", 688 | " }\n", 689 | " ],\n", 690 | " \"root\": \"gpt-3.5-turbo\"\n", 691 | " },\n", 692 | " {\n", 693 | " \"created\": 1651172507,\n", 694 | " \"id\": \"code-search-ada-text-001\",\n", 695 | " \"object\": \"model\",\n", 696 | " \"owned_by\": \"openai-dev\",\n", 697 | " \"parent\": null,\n", 698 | " \"permission\": [\n", 699 | " {\n", 700 | " \"allow_create_engine\": false,\n", 701 | " \"allow_fine_tuning\": false,\n", 702 | " \"allow_logprobs\": true,\n", 703 | " \"allow_sampling\": true,\n", 704 | " \"allow_search_indices\": true,\n", 705 | " \"allow_view\": true,\n", 706 | " \"created\": 1669087421,\n", 707 | " \"group\": null,\n", 708 | " \"id\": \"modelperm-JBssaJSmbgvJfTkX71y71k2J\",\n", 709 | " \"is_blocking\": false,\n", 710 | " \"object\": \"model_permission\",\n", 711 | " \"organization\": \"*\"\n", 712 | " }\n", 713 | " ],\n", 714 | " \"root\": \"code-search-ada-text-001\"\n", 715 | " },\n", 716 | " {\n", 717 | " \"created\": 1651172505,\n", 718 | " \"id\": \"text-search-ada-query-001\",\n", 719 | " \"object\": \"model\",\n", 720 | " \"owned_by\": \"openai-dev\",\n", 721 | " \"parent\": 
null,\n", 722 | " \"permission\": [\n", 723 | " {\n", 724 | " \"allow_create_engine\": false,\n", 725 | " \"allow_fine_tuning\": false,\n", 726 | " \"allow_logprobs\": true,\n", 727 | " \"allow_sampling\": true,\n", 728 | " \"allow_search_indices\": true,\n", 729 | " \"allow_view\": true,\n", 730 | " \"created\": 1669092640,\n", 731 | " \"group\": null,\n", 732 | " \"id\": \"modelperm-1YiiBMYC8it0mpQCBK7t8uSP\",\n", 733 | " \"is_blocking\": false,\n", 734 | " \"object\": \"model_permission\",\n", 735 | " \"organization\": \"*\"\n", 736 | " }\n", 737 | " ],\n", 738 | " \"root\": \"text-search-ada-query-001\"\n", 739 | " },\n", 740 | " {\n", 741 | " \"created\": 1651172509,\n", 742 | " \"id\": \"davinci-search-document\",\n", 743 | " \"object\": \"model\",\n", 744 | " \"owned_by\": \"openai-dev\",\n", 745 | " \"parent\": null,\n", 746 | " \"permission\": [\n", 747 | " {\n", 748 | " \"allow_create_engine\": false,\n", 749 | " \"allow_fine_tuning\": false,\n", 750 | " \"allow_logprobs\": true,\n", 751 | " \"allow_sampling\": true,\n", 752 | " \"allow_search_indices\": true,\n", 753 | " \"allow_view\": true,\n", 754 | " \"created\": 1669066355,\n", 755 | " \"group\": null,\n", 756 | " \"id\": \"modelperm-M43LVJQRGxz6ode34ctLrCaG\",\n", 757 | " \"is_blocking\": false,\n", 758 | " \"object\": \"model_permission\",\n", 759 | " \"organization\": \"*\"\n", 760 | " }\n", 761 | " ],\n", 762 | " \"root\": \"davinci-search-document\"\n", 763 | " },\n", 764 | " {\n", 765 | " \"created\": 1651172510,\n", 766 | " \"id\": \"ada-code-search-text\",\n", 767 | " \"object\": \"model\",\n", 768 | " \"owned_by\": \"openai-dev\",\n", 769 | " \"parent\": null,\n", 770 | " \"permission\": [\n", 771 | " {\n", 772 | " \"allow_create_engine\": false,\n", 773 | " \"allow_fine_tuning\": false,\n", 774 | " \"allow_logprobs\": true,\n", 775 | " \"allow_sampling\": true,\n", 776 | " \"allow_search_indices\": true,\n", 777 | " \"allow_view\": true,\n", 778 | " \"created\": 1669087421,\n", 779 | " 
\"group\": null,\n", 780 | " \"id\": \"modelperm-kFc17wOI4d1FjZEaCqnk4Frg\",\n", 781 | " \"is_blocking\": false,\n", 782 | " \"object\": \"model_permission\",\n", 783 | " \"organization\": \"*\"\n", 784 | " }\n", 785 | " ],\n", 786 | " \"root\": \"ada-code-search-text\"\n", 787 | " },\n", 788 | " {\n", 789 | " \"created\": 1651172507,\n", 790 | " \"id\": \"text-search-ada-doc-001\",\n", 791 | " \"object\": \"model\",\n", 792 | " \"owned_by\": \"openai-dev\",\n", 793 | " \"parent\": null,\n", 794 | " \"permission\": [\n", 795 | " {\n", 796 | " \"allow_create_engine\": false,\n", 797 | " \"allow_fine_tuning\": false,\n", 798 | " \"allow_logprobs\": true,\n", 799 | " \"allow_sampling\": true,\n", 800 | " \"allow_search_indices\": true,\n", 801 | " \"allow_view\": true,\n", 802 | " \"created\": 1669092640,\n", 803 | " \"group\": null,\n", 804 | " \"id\": \"modelperm-kbHvYouDlkD78ehcmMOGdKpK\",\n", 805 | " \"is_blocking\": false,\n", 806 | " \"object\": \"model_permission\",\n", 807 | " \"organization\": \"*\"\n", 808 | " }\n", 809 | " ],\n", 810 | " \"root\": \"text-search-ada-doc-001\"\n", 811 | " },\n", 812 | " {\n", 813 | " \"created\": 1649364042,\n", 814 | " \"id\": \"davinci-instruct-beta\",\n", 815 | " \"object\": \"model\",\n", 816 | " \"owned_by\": \"openai\",\n", 817 | " \"parent\": null,\n", 818 | " \"permission\": [\n", 819 | " {\n", 820 | " \"allow_create_engine\": false,\n", 821 | " \"allow_fine_tuning\": false,\n", 822 | " \"allow_logprobs\": true,\n", 823 | " \"allow_sampling\": true,\n", 824 | " \"allow_search_indices\": false,\n", 825 | " \"allow_view\": true,\n", 826 | " \"created\": 1669066356,\n", 827 | " \"group\": null,\n", 828 | " \"id\": \"modelperm-k9kuMYlfd9nvFiJV2ug0NWws\",\n", 829 | " \"is_blocking\": false,\n", 830 | " \"object\": \"model_permission\",\n", 831 | " \"organization\": \"*\"\n", 832 | " }\n", 833 | " ],\n", 834 | " \"root\": \"davinci-instruct-beta\"\n", 835 | " },\n", 836 | " {\n", 837 | " \"created\": 1651172507,\n", 838 | " 
\"id\": \"text-similarity-curie-001\",\n", 839 | " \"object\": \"model\",\n", 840 | " \"owned_by\": \"openai-dev\",\n", 841 | " \"parent\": null,\n", 842 | " \"permission\": [\n", 843 | " {\n", 844 | " \"allow_create_engine\": false,\n", 845 | " \"allow_fine_tuning\": false,\n", 846 | " \"allow_logprobs\": true,\n", 847 | " \"allow_sampling\": true,\n", 848 | " \"allow_search_indices\": true,\n", 849 | " \"allow_view\": true,\n", 850 | " \"created\": 1669079883,\n", 851 | " \"group\": null,\n", 852 | " \"id\": \"modelperm-6dgTTyXrZE7d53Licw4hYkvd\",\n", 853 | " \"is_blocking\": false,\n", 854 | " \"object\": \"model_permission\",\n", 855 | " \"organization\": \"*\"\n", 856 | " }\n", 857 | " ],\n", 858 | " \"root\": \"text-similarity-curie-001\"\n", 859 | " },\n", 860 | " {\n", 861 | " \"created\": 1651172507,\n", 862 | " \"id\": \"code-search-ada-code-001\",\n", 863 | " \"object\": \"model\",\n", 864 | " \"owned_by\": \"openai-dev\",\n", 865 | " \"parent\": null,\n", 866 | " \"permission\": [\n", 867 | " {\n", 868 | " \"allow_create_engine\": false,\n", 869 | " \"allow_fine_tuning\": false,\n", 870 | " \"allow_logprobs\": true,\n", 871 | " \"allow_sampling\": true,\n", 872 | " \"allow_search_indices\": true,\n", 873 | " \"allow_view\": true,\n", 874 | " \"created\": 1669087421,\n", 875 | " \"group\": null,\n", 876 | " \"id\": \"modelperm-8soch45iiGvux5Fg1ORjdC4s\",\n", 877 | " \"is_blocking\": false,\n", 878 | " \"object\": \"model_permission\",\n", 879 | " \"organization\": \"*\"\n", 880 | " }\n", 881 | " ],\n", 882 | " \"root\": \"code-search-ada-code-001\"\n", 883 | " },\n", 884 | " {\n", 885 | " \"created\": 1651172505,\n", 886 | " \"id\": \"ada-search-query\",\n", 887 | " \"object\": \"model\",\n", 888 | " \"owned_by\": \"openai-dev\",\n", 889 | " \"parent\": null,\n", 890 | " \"permission\": [\n", 891 | " {\n", 892 | " \"allow_create_engine\": false,\n", 893 | " \"allow_fine_tuning\": false,\n", 894 | " \"allow_logprobs\": true,\n", 895 | " 
\"allow_sampling\": true,\n", 896 | " \"allow_search_indices\": true,\n", 897 | " \"allow_view\": true,\n", 898 | " \"created\": 1669092640,\n", 899 | " \"group\": null,\n", 900 | " \"id\": \"modelperm-b753xmIzAUkluQ1L20eDZLtQ\",\n", 901 | " \"is_blocking\": false,\n", 902 | " \"object\": \"model_permission\",\n", 903 | " \"organization\": \"*\"\n", 904 | " }\n", 905 | " ],\n", 906 | " \"root\": \"ada-search-query\"\n", 907 | " },\n", 908 | " {\n", 909 | " \"created\": 1651172505,\n", 910 | " \"id\": \"text-search-davinci-query-001\",\n", 911 | " \"object\": \"model\",\n", 912 | " \"owned_by\": \"openai-dev\",\n", 913 | " \"parent\": null,\n", 914 | " \"permission\": [\n", 915 | " {\n", 916 | " \"allow_create_engine\": false,\n", 917 | " \"allow_fine_tuning\": false,\n", 918 | " \"allow_logprobs\": true,\n", 919 | " \"allow_sampling\": true,\n", 920 | " \"allow_search_indices\": true,\n", 921 | " \"allow_view\": true,\n", 922 | " \"created\": 1669066353,\n", 923 | " \"group\": null,\n", 924 | " \"id\": \"modelperm-9McKbsEYSaDshU9M3bp6ejUb\",\n", 925 | " \"is_blocking\": false,\n", 926 | " \"object\": \"model_permission\",\n", 927 | " \"organization\": \"*\"\n", 928 | " }\n", 929 | " ],\n", 930 | " \"root\": \"text-search-davinci-query-001\"\n", 931 | " },\n", 932 | " {\n", 933 | " \"created\": 1651172509,\n", 934 | " \"id\": \"curie-search-query\",\n", 935 | " \"object\": \"model\",\n", 936 | " \"owned_by\": \"openai-dev\",\n", 937 | " \"parent\": null,\n", 938 | " \"permission\": [\n", 939 | " {\n", 940 | " \"allow_create_engine\": false,\n", 941 | " \"allow_fine_tuning\": false,\n", 942 | " \"allow_logprobs\": true,\n", 943 | " \"allow_sampling\": true,\n", 944 | " \"allow_search_indices\": true,\n", 945 | " \"allow_view\": true,\n", 946 | " \"created\": 1677273417,\n", 947 | " \"group\": null,\n", 948 | " \"id\": \"modelperm-sIbfSwzVpVBtymQgOQSLBpxe\",\n", 949 | " \"is_blocking\": false,\n", 950 | " \"object\": \"model_permission\",\n", 951 | " \"organization\": 
\"*\"\n", 952 | " }\n", 953 | " ],\n", 954 | " \"root\": \"curie-search-query\"\n", 955 | " },\n", 956 | " {\n", 957 | " \"created\": 1651172505,\n", 958 | " \"id\": \"davinci-search-query\",\n", 959 | " \"object\": \"model\",\n", 960 | " \"owned_by\": \"openai-dev\",\n", 961 | " \"parent\": null,\n", 962 | " \"permission\": [\n", 963 | " {\n", 964 | " \"allow_create_engine\": false,\n", 965 | " \"allow_fine_tuning\": false,\n", 966 | " \"allow_logprobs\": true,\n", 967 | " \"allow_sampling\": true,\n", 968 | " \"allow_search_indices\": true,\n", 969 | " \"allow_view\": true,\n", 970 | " \"created\": 1669066353,\n", 971 | " \"group\": null,\n", 972 | " \"id\": \"modelperm-lYkiTZMmJMWm8jvkPx2duyHE\",\n", 973 | " \"is_blocking\": false,\n", 974 | " \"object\": \"model_permission\",\n", 975 | " \"organization\": \"*\"\n", 976 | " }\n", 977 | " ],\n", 978 | " \"root\": \"davinci-search-query\"\n", 979 | " },\n", 980 | " {\n", 981 | " \"created\": 1651172510,\n", 982 | " \"id\": \"babbage-search-document\",\n", 983 | " \"object\": \"model\",\n", 984 | " \"owned_by\": \"openai-dev\",\n", 985 | " \"parent\": null,\n", 986 | " \"permission\": [\n", 987 | " {\n", 988 | " \"allow_create_engine\": false,\n", 989 | " \"allow_fine_tuning\": false,\n", 990 | " \"allow_logprobs\": true,\n", 991 | " \"allow_sampling\": true,\n", 992 | " \"allow_search_indices\": true,\n", 993 | " \"allow_view\": true,\n", 994 | " \"created\": 1669084981,\n", 995 | " \"group\": null,\n", 996 | " \"id\": \"modelperm-5qFV9kxCRGKIXpBEP75chmp7\",\n", 997 | " \"is_blocking\": false,\n", 998 | " \"object\": \"model_permission\",\n", 999 | " \"organization\": \"*\"\n", 1000 | " }\n", 1001 | " ],\n", 1002 | " \"root\": \"babbage-search-document\"\n", 1003 | " },\n", 1004 | " {\n", 1005 | " \"created\": 1651172507,\n", 1006 | " \"id\": \"ada-search-document\",\n", 1007 | " \"object\": \"model\",\n", 1008 | " \"owned_by\": \"openai-dev\",\n", 1009 | " \"parent\": null,\n", 1010 | " \"permission\": [\n", 1011 
| " {\n", 1012 | " \"allow_create_engine\": false,\n", 1013 | " \"allow_fine_tuning\": false,\n", 1014 | " \"allow_logprobs\": true,\n", 1015 | " \"allow_sampling\": true,\n", 1016 | " \"allow_search_indices\": true,\n", 1017 | " \"allow_view\": true,\n", 1018 | " \"created\": 1669092640,\n", 1019 | " \"group\": null,\n", 1020 | " \"id\": \"modelperm-8qUMuMAbo4EwedbGamV7e9hq\",\n", 1021 | " \"is_blocking\": false,\n", 1022 | " \"object\": \"model_permission\",\n", 1023 | " \"organization\": \"*\"\n", 1024 | " }\n", 1025 | " ],\n", 1026 | " \"root\": \"ada-search-document\"\n", 1027 | " },\n", 1028 | " {\n", 1029 | " \"created\": 1651172509,\n", 1030 | " \"id\": \"text-search-curie-query-001\",\n", 1031 | " \"object\": \"model\",\n", 1032 | " \"owned_by\": \"openai-dev\",\n", 1033 | " \"parent\": null,\n", 1034 | " \"permission\": [\n", 1035 | " {\n", 1036 | " \"allow_create_engine\": false,\n", 1037 | " \"allow_fine_tuning\": false,\n", 1038 | " \"allow_logprobs\": true,\n", 1039 | " \"allow_sampling\": true,\n", 1040 | " \"allow_search_indices\": true,\n", 1041 | " \"allow_view\": true,\n", 1042 | " \"created\": 1677273417,\n", 1043 | " \"group\": null,\n", 1044 | " \"id\": \"modelperm-Iion0NCpsXPNtIkQ0owQLi7V\",\n", 1045 | " \"is_blocking\": false,\n", 1046 | " \"object\": \"model_permission\",\n", 1047 | " \"organization\": \"*\"\n", 1048 | " }\n", 1049 | " ],\n", 1050 | " \"root\": \"text-search-curie-query-001\"\n", 1051 | " },\n", 1052 | " {\n", 1053 | " \"created\": 1651172509,\n", 1054 | " \"id\": \"text-search-babbage-doc-001\",\n", 1055 | " \"object\": \"model\",\n", 1056 | " \"owned_by\": \"openai-dev\",\n", 1057 | " \"parent\": null,\n", 1058 | " \"permission\": [\n", 1059 | " {\n", 1060 | " \"allow_create_engine\": false,\n", 1061 | " \"allow_fine_tuning\": false,\n", 1062 | " \"allow_logprobs\": true,\n", 1063 | " \"allow_sampling\": true,\n", 1064 | " \"allow_search_indices\": true,\n", 1065 | " \"allow_view\": true,\n", 1066 | " \"created\": 
1669084981,\n", 1067 | " \"group\": null,\n", 1068 | " \"id\": \"modelperm-ao2r26P2Th7nhRFleHwy2gn5\",\n", 1069 | " \"is_blocking\": false,\n", 1070 | " \"object\": \"model_permission\",\n", 1071 | " \"organization\": \"*\"\n", 1072 | " }\n", 1073 | " ],\n", 1074 | " \"root\": \"text-search-babbage-doc-001\"\n", 1075 | " },\n", 1076 | " {\n", 1077 | " \"created\": 1651172508,\n", 1078 | " \"id\": \"curie-search-document\",\n", 1079 | " \"object\": \"model\",\n", 1080 | " \"owned_by\": \"openai-dev\",\n", 1081 | " \"parent\": null,\n", 1082 | " \"permission\": [\n", 1083 | " {\n", 1084 | " \"allow_create_engine\": false,\n", 1085 | " \"allow_fine_tuning\": false,\n", 1086 | " \"allow_logprobs\": true,\n", 1087 | " \"allow_sampling\": true,\n", 1088 | " \"allow_search_indices\": true,\n", 1089 | " \"allow_view\": true,\n", 1090 | " \"created\": 1677273417,\n", 1091 | " \"group\": null,\n", 1092 | " \"id\": \"modelperm-LDsN5wW8eKVuh1OsyciHntE9\",\n", 1093 | " \"is_blocking\": false,\n", 1094 | " \"object\": \"model_permission\",\n", 1095 | " \"organization\": \"*\"\n", 1096 | " }\n", 1097 | " ],\n", 1098 | " \"root\": \"curie-search-document\"\n", 1099 | " },\n", 1100 | " {\n", 1101 | " \"created\": 1651172509,\n", 1102 | " \"id\": \"text-search-curie-doc-001\",\n", 1103 | " \"object\": \"model\",\n", 1104 | " \"owned_by\": \"openai-dev\",\n", 1105 | " \"parent\": null,\n", 1106 | " \"permission\": [\n", 1107 | " {\n", 1108 | " \"allow_create_engine\": false,\n", 1109 | " \"allow_fine_tuning\": false,\n", 1110 | " \"allow_logprobs\": true,\n", 1111 | " \"allow_sampling\": true,\n", 1112 | " \"allow_search_indices\": true,\n", 1113 | " \"allow_view\": true,\n", 1114 | " \"created\": 1677273417,\n", 1115 | " \"group\": null,\n", 1116 | " \"id\": \"modelperm-taUGRSku7bQLa24SNIwYPEsi\",\n", 1117 | " \"is_blocking\": false,\n", 1118 | " \"object\": \"model_permission\",\n", 1119 | " \"organization\": \"*\"\n", 1120 | " }\n", 1121 | " ],\n", 1122 | " \"root\": 
\"text-search-curie-doc-001\"\n", 1123 | " },\n", 1124 | " {\n", 1125 | " \"created\": 1651172509,\n", 1126 | " \"id\": \"babbage-search-query\",\n", 1127 | " \"object\": \"model\",\n", 1128 | " \"owned_by\": \"openai-dev\",\n", 1129 | " \"parent\": null,\n", 1130 | " \"permission\": [\n", 1131 | " {\n", 1132 | " \"allow_create_engine\": false,\n", 1133 | " \"allow_fine_tuning\": false,\n", 1134 | " \"allow_logprobs\": true,\n", 1135 | " \"allow_sampling\": true,\n", 1136 | " \"allow_search_indices\": true,\n", 1137 | " \"allow_view\": true,\n", 1138 | " \"created\": 1669084981,\n", 1139 | " \"group\": null,\n", 1140 | " \"id\": \"modelperm-wSs1hMXDKsrcErlbN8HmzlLE\",\n", 1141 | " \"is_blocking\": false,\n", 1142 | " \"object\": \"model_permission\",\n", 1143 | " \"organization\": \"*\"\n", 1144 | " }\n", 1145 | " ],\n", 1146 | " \"root\": \"babbage-search-query\"\n", 1147 | " },\n", 1148 | " {\n", 1149 | " \"created\": 1649364043,\n", 1150 | " \"id\": \"text-babbage-001\",\n", 1151 | " \"object\": \"model\",\n", 1152 | " \"owned_by\": \"openai\",\n", 1153 | " \"parent\": null,\n", 1154 | " \"permission\": [\n", 1155 | " {\n", 1156 | " \"allow_create_engine\": false,\n", 1157 | " \"allow_fine_tuning\": false,\n", 1158 | " \"allow_logprobs\": true,\n", 1159 | " \"allow_sampling\": true,\n", 1160 | " \"allow_search_indices\": false,\n", 1161 | " \"allow_view\": true,\n", 1162 | " \"created\": 1675105935,\n", 1163 | " \"group\": null,\n", 1164 | " \"id\": \"modelperm-a3Ph5FIBbJxsoA4wvx7VYC7R\",\n", 1165 | " \"is_blocking\": false,\n", 1166 | " \"object\": \"model_permission\",\n", 1167 | " \"organization\": \"*\"\n", 1168 | " }\n", 1169 | " ],\n", 1170 | " \"root\": \"text-babbage-001\"\n", 1171 | " },\n", 1172 | " {\n", 1173 | " \"created\": 1651172505,\n", 1174 | " \"id\": \"text-search-davinci-doc-001\",\n", 1175 | " \"object\": \"model\",\n", 1176 | " \"owned_by\": \"openai-dev\",\n", 1177 | " \"parent\": null,\n", 1178 | " \"permission\": [\n", 1179 | " {\n", 
1180 | " \"allow_create_engine\": false,\n", 1181 | " \"allow_fine_tuning\": false,\n", 1182 | " \"allow_logprobs\": true,\n", 1183 | " \"allow_sampling\": true,\n", 1184 | " \"allow_search_indices\": true,\n", 1185 | " \"allow_view\": true,\n", 1186 | " \"created\": 1669066353,\n", 1187 | " \"group\": null,\n", 1188 | " \"id\": \"modelperm-qhSf1j2MJMujcu3t7cHnF1DN\",\n", 1189 | " \"is_blocking\": false,\n", 1190 | " \"object\": \"model_permission\",\n", 1191 | " \"organization\": \"*\"\n", 1192 | " }\n", 1193 | " ],\n", 1194 | " \"root\": \"text-search-davinci-doc-001\"\n", 1195 | " },\n", 1196 | " {\n", 1197 | " \"created\": 1651172509,\n", 1198 | " \"id\": \"text-search-babbage-query-001\",\n", 1199 | " \"object\": \"model\",\n", 1200 | " \"owned_by\": \"openai-dev\",\n", 1201 | " \"parent\": null,\n", 1202 | " \"permission\": [\n", 1203 | " {\n", 1204 | " \"allow_create_engine\": false,\n", 1205 | " \"allow_fine_tuning\": false,\n", 1206 | " \"allow_logprobs\": true,\n", 1207 | " \"allow_sampling\": true,\n", 1208 | " \"allow_search_indices\": true,\n", 1209 | " \"allow_view\": true,\n", 1210 | " \"created\": 1669084981,\n", 1211 | " \"group\": null,\n", 1212 | " \"id\": \"modelperm-Kg70kkFxD93QQqsVe4Zw8vjc\",\n", 1213 | " \"is_blocking\": false,\n", 1214 | " \"object\": \"model_permission\",\n", 1215 | " \"organization\": \"*\"\n", 1216 | " }\n", 1217 | " ],\n", 1218 | " \"root\": \"text-search-babbage-query-001\"\n", 1219 | " },\n", 1220 | " {\n", 1221 | " \"created\": 1651172510,\n", 1222 | " \"id\": \"curie-similarity\",\n", 1223 | " \"object\": \"model\",\n", 1224 | " \"owned_by\": \"openai-dev\",\n", 1225 | " \"parent\": null,\n", 1226 | " \"permission\": [\n", 1227 | " {\n", 1228 | " \"allow_create_engine\": false,\n", 1229 | " \"allow_fine_tuning\": false,\n", 1230 | " \"allow_logprobs\": true,\n", 1231 | " \"allow_sampling\": true,\n", 1232 | " \"allow_search_indices\": true,\n", 1233 | " \"allow_view\": true,\n", 1234 | " \"created\": 1675106290,\n", 
1235 | " \"group\": null,\n", 1236 | " \"id\": \"modelperm-zhWKExSloaQiJgzjVHFmh2wR\",\n", 1237 | " \"is_blocking\": false,\n", 1238 | " \"object\": \"model_permission\",\n", 1239 | " \"organization\": \"*\"\n", 1240 | " }\n", 1241 | " ],\n", 1242 | " \"root\": \"curie-similarity\"\n", 1243 | " },\n", 1244 | " {\n", 1245 | " \"created\": 1649359874,\n", 1246 | " \"id\": \"curie\",\n", 1247 | " \"object\": \"model\",\n", 1248 | " \"owned_by\": \"openai\",\n", 1249 | " \"parent\": null,\n", 1250 | " \"permission\": [\n", 1251 | " {\n", 1252 | " \"allow_create_engine\": false,\n", 1253 | " \"allow_fine_tuning\": false,\n", 1254 | " \"allow_logprobs\": true,\n", 1255 | " \"allow_sampling\": true,\n", 1256 | " \"allow_search_indices\": false,\n", 1257 | " \"allow_view\": true,\n", 1258 | " \"created\": 1675106503,\n", 1259 | " \"group\": null,\n", 1260 | " \"id\": \"modelperm-oPaljeveTjEIDbhDjzFiyf4V\",\n", 1261 | " \"is_blocking\": false,\n", 1262 | " \"object\": \"model_permission\",\n", 1263 | " \"organization\": \"*\"\n", 1264 | " }\n", 1265 | " ],\n", 1266 | " \"root\": \"curie\"\n", 1267 | " },\n", 1268 | " {\n", 1269 | " \"created\": 1651172505,\n", 1270 | " \"id\": \"text-similarity-davinci-001\",\n", 1271 | " \"object\": \"model\",\n", 1272 | " \"owned_by\": \"openai-dev\",\n", 1273 | " \"parent\": null,\n", 1274 | " \"permission\": [\n", 1275 | " {\n", 1276 | " \"allow_create_engine\": false,\n", 1277 | " \"allow_fine_tuning\": false,\n", 1278 | " \"allow_logprobs\": true,\n", 1279 | " \"allow_sampling\": true,\n", 1280 | " \"allow_search_indices\": true,\n", 1281 | " \"allow_view\": true,\n", 1282 | " \"created\": 1669066356,\n", 1283 | " \"group\": null,\n", 1284 | " \"id\": \"modelperm-OvmcfYoq5V9SF9xTYw1Oz6Ue\",\n", 1285 | " \"is_blocking\": false,\n", 1286 | " \"object\": \"model_permission\",\n", 1287 | " \"organization\": \"*\"\n", 1288 | " }\n", 1289 | " ],\n", 1290 | " \"root\": \"text-similarity-davinci-001\"\n", 1291 | " },\n", 1292 | " {\n", 1293 | 
" \"created\": 1649880484,\n", 1294 | " \"id\": \"text-davinci-002\",\n", 1295 | " \"object\": \"model\",\n", 1296 | " \"owned_by\": \"openai\",\n", 1297 | " \"parent\": null,\n", 1298 | " \"permission\": [\n", 1299 | " {\n", 1300 | " \"allow_create_engine\": false,\n", 1301 | " \"allow_fine_tuning\": false,\n", 1302 | " \"allow_logprobs\": true,\n", 1303 | " \"allow_sampling\": true,\n", 1304 | " \"allow_search_indices\": false,\n", 1305 | " \"allow_view\": true,\n", 1306 | " \"created\": 1679355287,\n", 1307 | " \"group\": null,\n", 1308 | " \"id\": \"modelperm-l4EU6QlN1HcS0so0jU16kyg8\",\n", 1309 | " \"is_blocking\": false,\n", 1310 | " \"object\": \"model_permission\",\n", 1311 | " \"organization\": \"*\"\n", 1312 | " }\n", 1313 | " ],\n", 1314 | " \"root\": \"text-davinci-002\"\n", 1315 | " },\n", 1316 | " {\n", 1317 | " \"created\": 1651172509,\n", 1318 | " \"id\": \"davinci-similarity\",\n", 1319 | " \"object\": \"model\",\n", 1320 | " \"owned_by\": \"openai-dev\",\n", 1321 | " \"parent\": null,\n", 1322 | " \"permission\": [\n", 1323 | " {\n", 1324 | " \"allow_create_engine\": false,\n", 1325 | " \"allow_fine_tuning\": false,\n", 1326 | " \"allow_logprobs\": true,\n", 1327 | " \"allow_sampling\": true,\n", 1328 | " \"allow_search_indices\": true,\n", 1329 | " \"allow_view\": true,\n", 1330 | " \"created\": 1669066353,\n", 1331 | " \"group\": null,\n", 1332 | " \"id\": \"modelperm-lYYgng3LM0Y97HvB5CDc8no2\",\n", 1333 | " \"is_blocking\": false,\n", 1334 | " \"object\": \"model_permission\",\n", 1335 | " \"organization\": \"*\"\n", 1336 | " }\n", 1337 | " ],\n", 1338 | " \"root\": \"davinci-similarity\"\n", 1339 | " },\n", 1340 | " {\n", 1341 | " \"created\": 1590625110,\n", 1342 | " \"id\": \"cushman:2020-05-03\",\n", 1343 | " \"object\": \"model\",\n", 1344 | " \"owned_by\": \"system\",\n", 1345 | " \"parent\": null,\n", 1346 | " \"permission\": [\n", 1347 | " {\n", 1348 | " \"allow_create_engine\": false,\n", 1349 | " \"allow_fine_tuning\": true,\n", 1350 
| " \"allow_logprobs\": true,\n", 1351 | " \"allow_sampling\": true,\n", 1352 | " \"allow_search_indices\": false,\n", 1353 | " \"allow_view\": true,\n", 1354 | " \"created\": 1590625111,\n", 1355 | " \"group\": null,\n", 1356 | " \"id\": \"snapperm-FAup8P1KqclNlTsunLDRiesT\",\n", 1357 | " \"is_blocking\": false,\n", 1358 | " \"object\": \"model_permission\",\n", 1359 | " \"organization\": \"*\"\n", 1360 | " }\n", 1361 | " ],\n", 1362 | " \"root\": \"cushman:2020-05-03\"\n", 1363 | " },\n", 1364 | " {\n", 1365 | " \"created\": 1607631625,\n", 1366 | " \"id\": \"ada:2020-05-03\",\n", 1367 | " \"object\": \"model\",\n", 1368 | " \"owned_by\": \"system\",\n", 1369 | " \"parent\": null,\n", 1370 | " \"permission\": [\n", 1371 | " {\n", 1372 | " \"allow_create_engine\": false,\n", 1373 | " \"allow_fine_tuning\": false,\n", 1374 | " \"allow_logprobs\": true,\n", 1375 | " \"allow_sampling\": true,\n", 1376 | " \"allow_search_indices\": false,\n", 1377 | " \"allow_view\": true,\n", 1378 | " \"created\": 1607631626,\n", 1379 | " \"group\": null,\n", 1380 | " \"id\": \"snapperm-9TYofAqUs54vytKYL0IX91rX\",\n", 1381 | " \"is_blocking\": false,\n", 1382 | " \"object\": \"model_permission\",\n", 1383 | " \"organization\": \"*\"\n", 1384 | " }\n", 1385 | " ],\n", 1386 | " \"root\": \"ada:2020-05-03\"\n", 1387 | " },\n", 1388 | " {\n", 1389 | " \"created\": 1607632611,\n", 1390 | " \"id\": \"babbage:2020-05-03\",\n", 1391 | " \"object\": \"model\",\n", 1392 | " \"owned_by\": \"system\",\n", 1393 | " \"parent\": null,\n", 1394 | " \"permission\": [\n", 1395 | " {\n", 1396 | " \"allow_create_engine\": false,\n", 1397 | " \"allow_fine_tuning\": false,\n", 1398 | " \"allow_logprobs\": true,\n", 1399 | " \"allow_sampling\": true,\n", 1400 | " \"allow_search_indices\": false,\n", 1401 | " \"allow_view\": true,\n", 1402 | " \"created\": 1607632613,\n", 1403 | " \"group\": null,\n", 1404 | " \"id\": \"snapperm-jaLAcmyyNuaVmalCE1BGTGwf\",\n", 1405 | " \"is_blocking\": false,\n", 1406 | " 
\"object\": \"model_permission\",\n", 1407 | " \"organization\": \"*\"\n", 1408 | " }\n", 1409 | " ],\n", 1410 | " \"root\": \"babbage:2020-05-03\"\n", 1411 | " },\n", 1412 | " {\n", 1413 | " \"created\": 1607632725,\n", 1414 | " \"id\": \"curie:2020-05-03\",\n", 1415 | " \"object\": \"model\",\n", 1416 | " \"owned_by\": \"system\",\n", 1417 | " \"parent\": null,\n", 1418 | " \"permission\": [\n", 1419 | " {\n", 1420 | " \"allow_create_engine\": false,\n", 1421 | " \"allow_fine_tuning\": false,\n", 1422 | " \"allow_logprobs\": true,\n", 1423 | " \"allow_sampling\": true,\n", 1424 | " \"allow_search_indices\": false,\n", 1425 | " \"allow_view\": true,\n", 1426 | " \"created\": 1607632727,\n", 1427 | " \"group\": null,\n", 1428 | " \"id\": \"snapperm-bt6R8PWbB2SwK5evFo0ZxSs4\",\n", 1429 | " \"is_blocking\": false,\n", 1430 | " \"object\": \"model_permission\",\n", 1431 | " \"organization\": \"*\"\n", 1432 | " }\n", 1433 | " ],\n", 1434 | " \"root\": \"curie:2020-05-03\"\n", 1435 | " },\n", 1436 | " {\n", 1437 | " \"created\": 1607640163,\n", 1438 | " \"id\": \"davinci:2020-05-03\",\n", 1439 | " \"object\": \"model\",\n", 1440 | " \"owned_by\": \"system\",\n", 1441 | " \"parent\": null,\n", 1442 | " \"permission\": [\n", 1443 | " {\n", 1444 | " \"allow_create_engine\": false,\n", 1445 | " \"allow_fine_tuning\": false,\n", 1446 | " \"allow_logprobs\": true,\n", 1447 | " \"allow_sampling\": true,\n", 1448 | " \"allow_search_indices\": false,\n", 1449 | " \"allow_view\": true,\n", 1450 | " \"created\": 1607640164,\n", 1451 | " \"group\": null,\n", 1452 | " \"id\": \"snapperm-99cbfQTYDVeLkTYndX3UMpSr\",\n", 1453 | " \"is_blocking\": false,\n", 1454 | " \"object\": \"model_permission\",\n", 1455 | " \"organization\": \"*\"\n", 1456 | " }\n", 1457 | " ],\n", 1458 | " \"root\": \"davinci:2020-05-03\"\n", 1459 | " },\n", 1460 | " {\n", 1461 | " \"created\": 1610745990,\n", 1462 | " \"id\": \"if-davinci-v2\",\n", 1463 | " \"object\": \"model\",\n", 1464 | " \"owned_by\": 
\"openai\",\n", 1465 | " \"parent\": null,\n", 1466 | " \"permission\": [\n", 1467 | " {\n", 1468 | " \"allow_create_engine\": false,\n", 1469 | " \"allow_fine_tuning\": false,\n", 1470 | " \"allow_logprobs\": true,\n", 1471 | " \"allow_sampling\": true,\n", 1472 | " \"allow_search_indices\": false,\n", 1473 | " \"allow_view\": true,\n", 1474 | " \"created\": 1610746036,\n", 1475 | " \"group\": null,\n", 1476 | " \"id\": \"snapperm-58q0TdK2K4kMgL3MoHvGWMlH\",\n", 1477 | " \"is_blocking\": false,\n", 1478 | " \"object\": \"model_permission\",\n", 1479 | " \"organization\": \"*\"\n", 1480 | " }\n", 1481 | " ],\n", 1482 | " \"root\": \"if-davinci-v2\"\n", 1483 | " },\n", 1484 | " {\n", 1485 | " \"created\": 1610745968,\n", 1486 | " \"id\": \"if-curie-v2\",\n", 1487 | " \"object\": \"model\",\n", 1488 | " \"owned_by\": \"openai\",\n", 1489 | " \"parent\": null,\n", 1490 | " \"permission\": [\n", 1491 | " {\n", 1492 | " \"allow_create_engine\": false,\n", 1493 | " \"allow_fine_tuning\": false,\n", 1494 | " \"allow_logprobs\": true,\n", 1495 | " \"allow_sampling\": true,\n", 1496 | " \"allow_search_indices\": false,\n", 1497 | " \"allow_view\": true,\n", 1498 | " \"created\": 1610746043,\n", 1499 | " \"group\": null,\n", 1500 | " \"id\": \"snapperm-fwAseHVq6NGe6Ple6tKfzRSK\",\n", 1501 | " \"is_blocking\": false,\n", 1502 | " \"object\": \"model_permission\",\n", 1503 | " \"organization\": \"*\"\n", 1504 | " }\n", 1505 | " ],\n", 1506 | " \"root\": \"if-curie-v2\"\n", 1507 | " },\n", 1508 | " {\n", 1509 | " \"created\": 1629420755,\n", 1510 | " \"id\": \"if-davinci:3.0.0\",\n", 1511 | " \"object\": \"model\",\n", 1512 | " \"owned_by\": \"openai\",\n", 1513 | " \"parent\": null,\n", 1514 | " \"permission\": [\n", 1515 | " {\n", 1516 | " \"allow_create_engine\": false,\n", 1517 | " \"allow_fine_tuning\": true,\n", 1518 | " \"allow_logprobs\": true,\n", 1519 | " \"allow_sampling\": true,\n", 1520 | " \"allow_search_indices\": false,\n", 1521 | " \"allow_view\": true,\n", 
1522 | " \"created\": 1629421809,\n", 1523 | " \"group\": null,\n", 1524 | " \"id\": \"snapperm-T53lssiyMWwiuJwhyO9ic53z\",\n", 1525 | " \"is_blocking\": false,\n", 1526 | " \"object\": \"model_permission\",\n", 1527 | " \"organization\": \"*\"\n", 1528 | " }\n", 1529 | " ],\n", 1530 | " \"root\": \"if-davinci:3.0.0\"\n", 1531 | " },\n", 1532 | " {\n", 1533 | " \"created\": 1629498070,\n", 1534 | " \"id\": \"davinci-if:3.0.0\",\n", 1535 | " \"object\": \"model\",\n", 1536 | " \"owned_by\": \"openai\",\n", 1537 | " \"parent\": null,\n", 1538 | " \"permission\": [\n", 1539 | " {\n", 1540 | " \"allow_create_engine\": false,\n", 1541 | " \"allow_fine_tuning\": true,\n", 1542 | " \"allow_logprobs\": true,\n", 1543 | " \"allow_sampling\": true,\n", 1544 | " \"allow_search_indices\": false,\n", 1545 | " \"allow_view\": true,\n", 1546 | " \"created\": 1629498084,\n", 1547 | " \"group\": null,\n", 1548 | " \"id\": \"snapperm-s6ZIAVMwlZwrLGGClTXqSK3Q\",\n", 1549 | " \"is_blocking\": false,\n", 1550 | " \"object\": \"model_permission\",\n", 1551 | " \"organization\": \"*\"\n", 1552 | " }\n", 1553 | " ],\n", 1554 | " \"root\": \"davinci-if:3.0.0\"\n", 1555 | " },\n", 1556 | " {\n", 1557 | " \"created\": 1629501914,\n", 1558 | " \"id\": \"davinci-instruct-beta:2.0.0\",\n", 1559 | " \"object\": \"model\",\n", 1560 | " \"owned_by\": \"openai\",\n", 1561 | " \"parent\": null,\n", 1562 | " \"permission\": [\n", 1563 | " {\n", 1564 | " \"allow_create_engine\": false,\n", 1565 | " \"allow_fine_tuning\": true,\n", 1566 | " \"allow_logprobs\": true,\n", 1567 | " \"allow_sampling\": true,\n", 1568 | " \"allow_search_indices\": false,\n", 1569 | " \"allow_view\": true,\n", 1570 | " \"created\": 1629501939,\n", 1571 | " \"group\": null,\n", 1572 | " \"id\": \"snapperm-c70U4TBfiOD839xptP5pJzyc\",\n", 1573 | " \"is_blocking\": false,\n", 1574 | " \"object\": \"model_permission\",\n", 1575 | " \"organization\": \"*\"\n", 1576 | " }\n", 1577 | " ],\n", 1578 | " \"root\": 
\"davinci-instruct-beta:2.0.0\"\n", 1579 | " },\n", 1580 | " {\n", 1581 | " \"created\": 1641949608,\n", 1582 | " \"id\": \"text-ada:001\",\n", 1583 | " \"object\": \"model\",\n", 1584 | " \"owned_by\": \"system\",\n", 1585 | " \"parent\": null,\n", 1586 | " \"permission\": [\n", 1587 | " {\n", 1588 | " \"allow_create_engine\": false,\n", 1589 | " \"allow_fine_tuning\": false,\n", 1590 | " \"allow_logprobs\": true,\n", 1591 | " \"allow_sampling\": true,\n", 1592 | " \"allow_search_indices\": false,\n", 1593 | " \"allow_view\": true,\n", 1594 | " \"created\": 1641949610,\n", 1595 | " \"group\": null,\n", 1596 | " \"id\": \"snapperm-d2PSnwFG1Yn9of6PvrrhkBcU\",\n", 1597 | " \"is_blocking\": false,\n", 1598 | " \"object\": \"model_permission\",\n", 1599 | " \"organization\": \"*\"\n", 1600 | " }\n", 1601 | " ],\n", 1602 | " \"root\": \"text-ada:001\"\n", 1603 | " },\n", 1604 | " {\n", 1605 | " \"created\": 1641943966,\n", 1606 | " \"id\": \"text-davinci:001\",\n", 1607 | " \"object\": \"model\",\n", 1608 | " \"owned_by\": \"system\",\n", 1609 | " \"parent\": null,\n", 1610 | " \"permission\": [\n", 1611 | " {\n", 1612 | " \"allow_create_engine\": false,\n", 1613 | " \"allow_fine_tuning\": false,\n", 1614 | " \"allow_logprobs\": true,\n", 1615 | " \"allow_sampling\": true,\n", 1616 | " \"allow_search_indices\": false,\n", 1617 | " \"allow_view\": true,\n", 1618 | " \"created\": 1641944340,\n", 1619 | " \"group\": null,\n", 1620 | " \"id\": \"snapperm-Fj1O3zkKXOQy6AkcfQXRKcWA\",\n", 1621 | " \"is_blocking\": false,\n", 1622 | " \"object\": \"model_permission\",\n", 1623 | " \"organization\": \"*\"\n", 1624 | " }\n", 1625 | " ],\n", 1626 | " \"root\": \"text-davinci:001\"\n", 1627 | " },\n", 1628 | " {\n", 1629 | " \"created\": 1641955047,\n", 1630 | " \"id\": \"text-curie:001\",\n", 1631 | " \"object\": \"model\",\n", 1632 | " \"owned_by\": \"system\",\n", 1633 | " \"parent\": null,\n", 1634 | " \"permission\": [\n", 1635 | " {\n", 1636 | " \"allow_create_engine\": 
false,\n", 1637 | " \"allow_fine_tuning\": false,\n", 1638 | " \"allow_logprobs\": true,\n", 1639 | " \"allow_sampling\": true,\n", 1640 | " \"allow_search_indices\": false,\n", 1641 | " \"allow_view\": true,\n", 1642 | " \"created\": 1641955123,\n", 1643 | " \"group\": null,\n", 1644 | " \"id\": \"snapperm-BI9TAT6SCj43JRsUb9CYadsz\",\n", 1645 | " \"is_blocking\": false,\n", 1646 | " \"object\": \"model_permission\",\n", 1647 | " \"organization\": \"*\"\n", 1648 | " }\n", 1649 | " ],\n", 1650 | " \"root\": \"text-curie:001\"\n", 1651 | " },\n", 1652 | " {\n", 1653 | " \"created\": 1642018370,\n", 1654 | " \"id\": \"text-babbage:001\",\n", 1655 | " \"object\": \"model\",\n", 1656 | " \"owned_by\": \"openai\",\n", 1657 | " \"parent\": null,\n", 1658 | " \"permission\": [\n", 1659 | " {\n", 1660 | " \"allow_create_engine\": false,\n", 1661 | " \"allow_fine_tuning\": false,\n", 1662 | " \"allow_logprobs\": true,\n", 1663 | " \"allow_sampling\": true,\n", 1664 | " \"allow_search_indices\": false,\n", 1665 | " \"allow_view\": true,\n", 1666 | " \"created\": 1642018480,\n", 1667 | " \"group\": null,\n", 1668 | " \"id\": \"snapperm-7oP3WFr9x7qf5xb3eZrVABAH\",\n", 1669 | " \"is_blocking\": false,\n", 1670 | " \"object\": \"model_permission\",\n", 1671 | " \"organization\": \"*\"\n", 1672 | " }\n", 1673 | " ],\n", 1674 | " \"root\": \"text-babbage:001\"\n", 1675 | " }\n", 1676 | " ],\n", 1677 | " \"object\": \"list\"\n", 1678 | "}" 1679 | ] 1680 | }, 1681 | <<<<<<< HEAD 1682 | "execution_count": 4, 1683 | ======= 1684 | "execution_count": 7, 1685 | >>>>>>> 060eac35c2416599e8256f93faee17f0bfd2bec1 1686 | "metadata": {}, 1687 | "output_type": "execute_result" 1688 | } 1689 | ], 1690 | "source": [ 1691 | "# Listing all the available models\n", 1692 | "openai.Model.list()" 1693 | ] 1694 | }, 1695 | { 1696 | "cell_type": "code", 1697 | "execution_count": 8, 1698 | "id": "b2767cfd", 1699 | "metadata": { 1700 | "ExecuteTime": { 1701 | "end_time": "2023-04-13T20:43:40.013285Z", 1702 
| "start_time": "2023-04-13T20:43:39.893813Z" 1703 | } 1704 | }, 1705 | "outputs": [ 1706 | { 1707 | "data": { 1708 | "text/plain": [ 1709 | " JSON: {\n", 1710 | " \"created\": 1669599635,\n", 1711 | " \"id\": \"text-davinci-003\",\n", 1712 | " \"object\": \"model\",\n", 1713 | " \"owned_by\": \"openai-internal\",\n", 1714 | " \"parent\": null,\n", 1715 | " \"permission\": [\n", 1716 | " {\n", 1717 | " \"allow_create_engine\": false,\n", 1718 | " \"allow_fine_tuning\": false,\n", 1719 | " \"allow_logprobs\": true,\n", 1720 | " \"allow_sampling\": true,\n", 1721 | " \"allow_search_indices\": false,\n", 1722 | " \"allow_view\": true,\n", 1723 | " \"created\": 1682112392,\n", 1724 | " \"group\": null,\n", 1725 | " \"id\": \"modelperm-oyykgcqEhcKfSCh5Ca9BETGH\",\n", 1726 | " \"is_blocking\": false,\n", 1727 | " \"object\": \"model_permission\",\n", 1728 | " \"organization\": \"*\"\n", 1729 | " }\n", 1730 | " ],\n", 1731 | " \"root\": \"text-davinci-003\"\n", 1732 | "}" 1733 | ] 1734 | }, 1735 | "execution_count": 8, 1736 | "metadata": {}, 1737 | "output_type": "execute_result" 1738 | } 1739 | ], 1740 | "source": [ 1741 | "# Viewing a more specific model's information\n", 1742 | "openai.Model.retrieve('text-davinci-003')" 1743 | ] 1744 | }, 1745 | { 1746 | "cell_type": "markdown", 1747 | "id": "9874dbda", 1748 | "metadata": {}, 1749 | "source": [ 1750 | "## Completions" 1751 | ] 1752 | }, 1753 | { 1754 | "cell_type": "code", 1755 | "execution_count": 9, 1756 | "id": "391f266f", 1757 | "metadata": { 1758 | "ExecuteTime": { 1759 | "end_time": "2023-04-13T20:47:45.394542Z", 1760 | "start_time": "2023-04-13T20:47:44.263737Z" 1761 | } 1762 | }, 1763 | "outputs": [ 1764 | { 1765 | "data": { 1766 | "text/plain": [ 1767 | " JSON: {\n", 1768 | " \"choices\": [\n", 1769 | " {\n", 1770 | " \"finish_reason\": \"stop\",\n", 1771 | " \"index\": 0,\n", 1772 | " \"logprobs\": null,\n", 1773 | " \"text\": \"\\n\\n\\\"The Deep-Dish Experience You Crave!\\\"\"\n", 1774 | " }\n", 1775 | " 
],\n", 1776 | " \"created\": 1682187796,\n", 1777 | " \"id\": \"cmpl-78CBAuI6NKxp4lANuftjBvOajvWTh\",\n", 1778 | " \"model\": \"text-davinci-003\",\n", 1779 | " \"object\": \"text_completion\",\n", 1780 | " \"usage\": {\n", 1781 | " \"completion_tokens\": 13,\n", 1782 | " \"prompt_tokens\": 16,\n", 1783 | " \"total_tokens\": 29\n", 1784 | " }\n", 1785 | "}" 1786 | ] 1787 | }, 1788 | "execution_count": 9, 1789 | "metadata": {}, 1790 | "output_type": "execute_result" 1791 | } 1792 | ], 1793 | "source": [ 1794 | "openai.Completion.create(\n", 1795 | " model = 'text-davinci-003',\n", 1796 | " prompt = 'Write a tagline for a pizza parlor that specializes in deep dish pizza.'\n", 1797 | ")" 1798 | ] 1799 | }, 1800 | { 1801 | "cell_type": "markdown", 1802 | "id": "020120cc", 1803 | "metadata": { 1804 | "ExecuteTime": { 1805 | "end_time": "2023-04-13T20:51:51.365127Z", 1806 | "start_time": "2023-04-13T20:51:51.357905Z" 1807 | } 1808 | }, 1809 | "source": [ 1810 | "## Chats" 1811 | ] 1812 | }, 1813 | { 1814 | "cell_type": "code", 1815 | "execution_count": null, 1816 | "id": "6eb14725", 1817 | "metadata": { 1818 | "ExecuteTime": { 1819 | "end_time": "2023-04-13T20:59:22.963730Z", 1820 | "start_time": "2023-04-13T20:59:22.956492Z" 1821 | } 1822 | }, 1823 | "outputs": [], 1824 | "source": [ 1825 | "# Starting a new template chat\n", 1826 | "chat_flow = [\n", 1827 | " {'role': 'system', 'content': 'You are a classy butler, like Alfred from Batman.'}\n", 1828 | "]" 1829 | ] 1830 | }, 1831 | { 1832 | "cell_type": "code", 1833 | "execution_count": null, 1834 | "id": "bd118567", 1835 | "metadata": { 1836 | "ExecuteTime": { 1837 | "end_time": "2023-04-13T20:59:23.225688Z", 1838 | "start_time": "2023-04-13T20:59:23.217314Z" 1839 | } 1840 | }, 1841 | "outputs": [], 1842 | "source": [ 1843 | "# Accepting a new prompt from a user\n", 1844 | "user_prompt = 'What is the capital of Illinois?'" 1845 | ] 1846 | }, 1847 | { 1848 | "cell_type": "code", 1849 | "execution_count": null, 1850 | 
"id": "9cf5370f", 1851 | "metadata": { 1852 | "ExecuteTime": { 1853 | "end_time": "2023-04-13T20:59:23.389121Z", 1854 | "start_time": "2023-04-13T20:59:23.379345Z" 1855 | } 1856 | }, 1857 | "outputs": [], 1858 | "source": [ 1859 | "# Appending the user prompt to the chat flow\n", 1860 | "chat_flow.append({'role': 'user', 'content': user_prompt})\n", 1861 | "chat_flow" 1862 | ] 1863 | }, 1864 | { 1865 | "cell_type": "code", 1866 | "execution_count": null, 1867 | "id": "5d846695", 1868 | "metadata": { 1869 | "ExecuteTime": { 1870 | "end_time": "2023-04-13T21:00:40.883209Z", 1871 | "start_time": "2023-04-13T21:00:40.005552Z" 1872 | } 1873 | }, 1874 | "outputs": [], 1875 | "source": [ 1876 | "# Obtaining the response from the API\n", 1877 | "chat_response = openai.ChatCompletion.create(\n", 1878 | " model = 'gpt-3.5-turbo',\n", 1879 | " messages = chat_flow\n", 1880 | ")" 1881 | ] 1882 | }, 1883 | { 1884 | "cell_type": "code", 1885 | "execution_count": null, 1886 | "id": "06046d20", 1887 | "metadata": { 1888 | "ExecuteTime": { 1889 | "end_time": "2023-04-13T21:02:40.653297Z", 1890 | "start_time": "2023-04-13T21:02:40.646187Z" 1891 | } 1892 | }, 1893 | "outputs": [], 1894 | "source": [ 1895 | "# Appending the result of the chat response to the chat flow\n", 1896 | "chat_flow.append({'role': 'assistant', 'content': chat_response['choices'][0]['message']['content']})" 1897 | ] 1898 | }, 1899 | { 1900 | "cell_type": "code", 1901 | "execution_count": null, 1902 | "id": "7fbfab56", 1903 | "metadata": { 1904 | "ExecuteTime": { 1905 | "end_time": "2023-04-13T21:03:18.940323Z", 1906 | "start_time": "2023-04-13T21:03:18.928214Z" 1907 | } 1908 | }, 1909 | "outputs": [], 1910 | "source": [ 1911 | "# Asking a follow up question\n", 1912 | "user_prompt = 'What is the largest city in that state?'\n", 1913 | "\n", 1914 | "chat_flow.append({'role': 'user', 'content': user_prompt})\n", 1915 | "chat_flow" 1916 | ] 1917 | }, 1918 | { 1919 | "cell_type": "code", 1920 | "execution_count": 
null, 1921 | "id": "374a0cf9", 1922 | "metadata": { 1923 | "ExecuteTime": { 1924 | "end_time": "2023-04-13T21:03:26.666960Z", 1925 | "start_time": "2023-04-13T21:03:25.481943Z" 1926 | } 1927 | }, 1928 | "outputs": [], 1929 | "source": [ 1930 | "# Obtaining the response from the API\n", 1931 | "chat_response = openai.ChatCompletion.create(\n", 1932 | " model = 'gpt-3.5-turbo',\n", 1933 | " messages = chat_flow\n", 1934 | ")" 1935 | ] 1936 | }, 1937 | { 1938 | "cell_type": "code", 1939 | "execution_count": null, 1940 | "id": "9a7dd805", 1941 | "metadata": { 1942 | "ExecuteTime": { 1943 | "end_time": "2023-04-13T21:03:30.154481Z", 1944 | "start_time": "2023-04-13T21:03:30.142405Z" 1945 | } 1946 | }, 1947 | "outputs": [], 1948 | "source": [ 1949 | "chat_response" 1950 | ] 1951 | }, 1952 | { 1953 | "cell_type": "code", 1954 | "execution_count": 2, 1955 | "id": "29861e9a", 1956 | "metadata": { 1957 | "ExecuteTime": { 1958 | "end_time": "2023-04-14T15:43:32.445579Z", 1959 | "start_time": "2023-04-14T15:43:32.437389Z" 1960 | } 1961 | }, 1962 | "outputs": [], 1963 | "source": [ 1964 | "import re\n", 1965 | "ssn_regex = r'\\b(?!000)(?!666)(?!9\\d{2})\\d{3}[-]?(?!00)\\d{2}[-]?(?!0000)\\d{4}\\b'" 1966 | ] 1967 | }, 1968 | { 1969 | "cell_type": "code", 1970 | "execution_count": null, 1971 | "id": "cf84b568", 1972 | "metadata": {}, 1973 | "outputs": [], 1974 | "source": [ 1975 | "re." 
1976 | ] 1977 | } 1978 | ], 1979 | "metadata": { 1980 | "kernelspec": { 1981 | "display_name": "Python 3 (ipykernel)", 1982 | "language": "python", 1983 | "name": "python3" 1984 | }, 1985 | "language_info": { 1986 | "codemirror_mode": { 1987 | "name": "ipython", 1988 | "version": 3 1989 | }, 1990 | "file_extension": ".py", 1991 | "mimetype": "text/x-python", 1992 | "name": "python", 1993 | "nbconvert_exporter": "python", 1994 | "pygments_lexer": "ipython3", 1995 | "version": "3.10.11" 1996 | } 1997 | }, 1998 | "nbformat": 4, 1999 | "nbformat_minor": 5 2000 | } 2001 | -------------------------------------------------------------------------------- /notebooks/prompt-engineering-stream.ipynb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkhundley/openai-api-tutorial/7ba59ec2862aea2599333c1356c7f68e61ad6880/notebooks/prompt-engineering-stream.ipynb -------------------------------------------------------------------------------- /notebooks/prompt-engineering.ipynb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkhundley/openai-api-tutorial/7ba59ec2862aea2599333c1356c7f68e61ad6880/notebooks/prompt-engineering.ipynb -------------------------------------------------------------------------------- /notebooks/whisper-api.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import yaml\n", 10 | "import openai" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "# Loading the API key and organization ID from file (NOT pushed to GitHub)\n", 20 | "with open('../keys/openai-keys.yaml') as f:\n", 21 | " keys_yaml = yaml.safe_load(f)\n", 22 | "\n", 23 | "# Applying our API key and 
organization ID to OpenAI\n", 24 | "openai.organization = keys_yaml['ORG_ID']\n", 25 | "openai.api_key = keys_yaml['API_KEY']" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 3, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# Loading the audio file\n", 35 | "audio_file = open('../data/whisper-test.m4a', 'rb')" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 4, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "transcript = openai.Audio.transcribe(model = 'whisper-1',\n", 45 | " file = audio_file)" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 6, 51 | "metadata": {}, 52 | "outputs": [ 53 | { 54 | "data": { 55 | "text/plain": [ 56 | " JSON: {\n", 57 | " \"text\": \"This is a test of Whisper by OpenAI. I am going to build a Gradio application that can record bits of information and then use LangChain to do some other interesting things with them.\"\n", 58 | "}" 59 | ] 60 | }, 61 | "execution_count": 6, 62 | "metadata": {}, 63 | "output_type": "execute_result" 64 | } 65 | ], 66 | "source": [ 67 | "transcript" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [] 76 | } 77 | ], 78 | "metadata": { 79 | "kernelspec": { 80 | "display_name": "Python 3", 81 | "language": "python", 82 | "name": "python3" 83 | }, 84 | "language_info": { 85 | "codemirror_mode": { 86 | "name": "ipython", 87 | "version": 3 88 | }, 89 | "file_extension": ".py", 90 | "mimetype": "text/x-python", 91 | "name": "python", 92 | "nbconvert_exporter": "python", 93 | "pygments_lexer": "ipython3", 94 | "version": "3.10.7" 95 | }, 96 | "orig_nbformat": 4 97 | }, 98 | "nbformat": 4, 99 | "nbformat_minor": 2 100 | } 101 | -------------------------------------------------------------------------------- /src/chat-ui.py: -------------------------------------------------------------------------------- 1 | import re 2 | import 
def initiate_chat_flow():
    '''
    Initiates a new chat flow

    Inputs:
        - N/A

    Returns:
        - chat_flow (list): A newly initiated chat flow containing only the persona-setting system message
    '''

    # Every conversation starts from the same system message that sets the persona
    chat_flow = [
        {'role': 'system', 'content': 'You are an assistant that speaks like Jar Jar Binks from Star Wars.'}
    ]

    return chat_flow



def clear_chat_interface():
    '''
    Clears the chat interface when the "Start New Conversation" button is clicked

    Inputs:
        - N/A

    Returns:
        - (list): An empty chat history, which Gradio applies to the chatbot output to clear the display
    '''

    # Referencing the chat flow as a global variable
    global chat_flow

    # Reinitiating the chat flow
    chat_flow = initiate_chat_flow()

    # Returning an empty history so the Gradio chatbot component wired as this
    # handler's output is explicitly cleared (previously nothing was returned
    # even though the click handler declares `outputs = chatbot`)
    return []



def check_sensitive_data(user_prompt):
    '''
    Checks the user's prompt to see if any sensitive information has been passed in via the prompt

    Inputs:
        - user_prompt (str): The user's inputted prompt

    Returns:
        - has_sensitive_data (bool): A boolean value indicating if the prompt contains sensitive data
    '''

    # Establishing a bit of regex to catch social security numbers
    # (the lookaheads reject known-invalid SSN parts: area 000/666/9xx, group 00, serial 0000)
    ssn_regex = r'\b(?!000)(?!666)(?!9\d{2})\d{3}[-]?(?!00)\d{2}[-]?(?!0000)\d{4}\b'

    # Checking to see if there is a match based on the regex
    has_sensitive_data = bool(re.search(ssn_regex, user_prompt))

    return has_sensitive_data
def process_prompt(user_prompt, chatbot):
    '''
    Processes the user prompt submitted to the chat interface with the appropriate response from OpenAI's API

    Inputs:
        - user_prompt (str): The prompt text submitted by the user
        - chatbot (Gradio chatbot): The chatbot interface that is displayed to the user

    Returns:
        - user_prompt (str): A cleared out prompt ready for the next user input
        - chatbot (Gradio chatbot): The chatbot interface that is displayed to the user
    '''

    # The running message history is shared at module scope
    global chat_flow

    # Guard clause: refuse to forward any prompt that contains sensitive data
    if check_sensitive_data(user_prompt):

        # Waiting a beat so the refusal reads like a typed reply
        time.sleep(1)

        # Showing the canned refusal message in the chatbot display
        refusal_message = 'Meesa sorry, but it looks like yousa prompt contains sensitive information. For security reasons, meesa cannot let it through. Please be careful not to include any sensitive information in your prompts in the future. If yousa still have a question or concern, please submit a new prompt without the sensitive information, and meesa will do our best to help you. Thank yousa for your understanding!'
        chatbot.append((user_prompt, refusal_message))

        # Handing back an empty textbox value for the next turn
        return '', chatbot

    # Recording the user's message before calling the API
    chat_flow.append({'role': 'user', 'content': user_prompt})

    # Requesting a completion over the whole conversation so far
    api_result = openai.ChatCompletion.create(
        model = 'gpt-3.5-turbo',
        messages = chat_flow
    )

    # Pulling out the specific message text to show the user
    chat_answer = api_result['choices'][0]['message']['content']

    # Displaying the exchange in the UI and remembering the assistant's reply
    chatbot.append((user_prompt, chat_answer))
    chat_flow.append({'role': 'assistant', 'content': chat_answer})

    # Handing back an empty textbox value for the next turn
    return '', chatbot
## GRADIO UI LAYOUT & FUNCTIONALITY
## ---------------------------------------------------------------------------------------------------------------------
# Defining the building blocks that represent the form and function of the Gradio UI
with gr.Blocks() as chat_ui:

    # Instantiating the chatbot interface
    # (the header image is scaled to 447 / 3 px tall — presumably a third of the source image's height; confirm)
    header_image = gr.Image('jarjar.png').style(height = (447 / 3), show_label = False)
    chatbot = gr.Chatbot(label = 'Jar Jar Binks')
    user_prompt = gr.Textbox(placeholder = 'To send mesa a message, just type what yousa would like to say and press the "Enter" key to submit. Mesa waiting to hear from yousah!',
                             show_label = False)
    start_new_convo_button = gr.Button('Start New Conversation')

    # Defining the behavior for what occurs when the user hits "Enter" after typing a prompt
    # (process_prompt returns a cleared textbox value plus the updated chatbot history)
    user_prompt.submit(fn = process_prompt,
                       inputs = [user_prompt, chatbot],
                       outputs = [user_prompt, chatbot])

    # Defining the behavior for what occurs when the "Start New Conversation" button is clicked
    # (queue = False runs the reset immediately instead of going through the request queue)
    start_new_convo_button.click(fn = clear_chat_interface,
                                 inputs = None,
                                 outputs = chatbot,
                                 queue = False)



## SCRIPT INVOCATION
## ---------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":

    # Instantiating the initial chat flow used as a global variable
    chat_flow = initiate_chat_flow()

    # Launching the Gradio Chatbot
    # NOTE: share = True publishes the app on a temporary public Gradio link
    chat_ui.launch(share = True)
def check_sensitive_data(user_prompt):
    '''
    Checks the user's prompt to see if any sensitive information has been passed in via the prompt

    Inputs:
        - user_prompt (str): The user's inputted prompt

    Returns:
        - has_sensitive_data (bool): A boolean value indicating if the prompt contains sensitive data
    '''

    # Establishing a bit of regex to catch social security numbers
    # (the lookaheads reject known-invalid SSN parts: area 000/666/9xx, group 00, serial 0000)
    ssn_regex = r'\b(?!000)(?!666)(?!9\d{2})\d{3}[-]?(?!00)\d{2}[-]?(?!0000)\d{4}\b'

    # Checking to see if there is a match based on the regex
    has_sensitive_data = bool(re.search(ssn_regex, user_prompt))

    return has_sensitive_data



def prompt_next_choice():
    '''
    Prompts the user to continue the current conversation, start a new conversation, or end the program

    Inputs:
        - N/A

    Returns:
        - next_action (str): The choice selected by the user
    '''

    # Setting the list of options that the user can select from
    user_choices = [
        inquirer.List(
            'user_choices',
            message = 'Would you like to continue the conversation?',
            choices = ['Yes', 'Start New Conversation', 'End Program']
        )
    ]

    # Retrieving the selected option by the user
    next_action = inquirer.prompt(user_choices)['user_choices']

    return next_action
## SCRIPT INSTANTIATION
## ---------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Printing a welcome statement
    print('Welcome to my ChatGPT Python script! Enter a prompt to begin the conversation.')

    # Starting an initial chat flow
    chat_flow = initiate_chat_flow()

    # Starting a reiterating loop for the prompts
    while True:

        # Retrieving the prompt input from the user
        user_prompt = input('What would you like to ask?\n')

        # Checking the prompt for any sensitive data
        has_sensitive_data = check_sensitive_data(user_prompt)

        # Prompting the user to submit a new prompt without sensitive data if sensitive data is present
        if has_sensitive_data:
            print('Your prompt appears to have sensitive data in the body of the text. Please remove this sensitive data and submit a new prompt.\n')
            continue

        # Appending the user prompt to the chat flow
        chat_flow.append({'role': 'user', 'content': user_prompt})

        # Obtaining the response from the API
        chat_response = openai.ChatCompletion.create(
            model = 'gpt-3.5-turbo',
            messages = chat_flow
        )

        # Printing ChatGPT's response back to the user
        print(f"\nChatGPT's response: {chat_response['choices'][0]['message']['content']}\n")

        # Prompting the user if they would like to continue the current chat, start a new one, or end the program
        next_action = prompt_next_choice()

        # Taking the appropriate action based on the user's next desired action
        if next_action == 'Yes':

            # Appending the result of the chat response to the chat flow for continued conversation
            chat_flow.append({'role': 'assistant', 'content': chat_response['choices'][0]['message']['content']})

        elif next_action == 'Start New Conversation':

            # Re-initiating the chat flow
            chat_flow = initiate_chat_flow()

        elif next_action == 'End Program':

            # Leaving the loop with `break` rather than the site-injected `exit(0)`,
            # which is not guaranteed to exist when Python runs without the site module
            break
def generate_image(user_prompt):
    '''
    Generates an image using the DALL-E API per the user's prompt

    Inputs:
        - user_prompt (str): A body of text describing what the user would like to see

    Returns:
        - dalle_image (PIL): The image generated by DALL-E
    '''

    # Enforcing the 1000 character prompt limit up front with a friendly Gradio error
    if len(user_prompt) > 1000:
        raise gr.Error('Input prompt cannot exceed 1000 characters.')

    # Asking DALL-E for a single 1024x1024 image, returned as a base64 encoded object
    openai_response = openai.Image.create(
        prompt = user_prompt,
        n = 1,
        size = '1024x1024',
        response_format = 'b64_json'
    )

    # Turning the base64 payload into an in-memory PIL image
    encoded_payload = openai_response['data'][0]['b64_json']
    image_buffer = BytesIO(b64decode(encoded_payload))
    dalle_image = Image.open(image_buffer)

    return dalle_image
def generate_similar_images(upload_image):
    '''
    Generates similar images based on an input image

    Inputs:
        - upload_image (str): Filepath to the image uploaded by the user that will be the basis to create similar
          images (the Gradio uploader is configured with type = 'filepath', so this is a path string, not a PIL image)

    Returns:
        - output_gallery (list): A list of images that will be returned in a display gallery
    '''

    # Opening the uploaded file with a context manager so the file handle is always
    # closed (the original passed `open(upload_image, 'rb')` inline and leaked the handle)
    with open(upload_image, 'rb') as image_file:

        # Using DALL-E to generate similar images compared to the one uploaded by the user
        openai_response = openai.Image.create_variation(
            image = image_file,
            n = 5,
            size = '1024x1024',
            response_format = 'b64_json'
        )

    # Decoding each base64 encoded result into a PIL image for the output gallery
    output_gallery = [
        Image.open(BytesIO(b64decode(image['b64_json'])))
        for image in openai_response['data']
    ]

    return output_gallery
## GRADIO UI LAYOUT & FUNCTIONALITY
## ---------------------------------------------------------------------------------------------------------------------
# Defining the building blocks that represent the form and function of the Gradio UI
with gr.Blocks(title = 'DALL-E Combined UI', theme = 'base') as combined_dalle_ui:

    # Setting the display into two columns
    with gr.Row():

        # Setting the display for the first column
        with gr.Column():

            # Adding a header for the left side of the UI
            image_generation_header = gr.Markdown('''
            # DALL-E Image Generation

            Please enter a prompt for what you would like DALL-E to generate and click the "Generate Image" button to watch DALL-E work its magic!
            '''
            )

            # Adding a textbox for the user to submit a prompt
            user_prompt = gr.Textbox(label = 'What would you like to see?', placeholder = 'Enter some text (up to 1000 characters) of what you would like DALL-E to generate for you.')

            # Adding a button for the user to click to generate the DALL-E image
            generate_image_button = gr.Button('Generate Image')

            # Adding a thing to display the DALL-E image
            dalle_image = gr.Image(label = 'DALL-E Generated Image', interactive = False)

        # Setting the display for the second column
        with gr.Column():

            # Adding a header for the right side of the UI
            similar_image_header = gr.Markdown('''
            # DALL-E Image Variation Generator

            Upload your own `.png` image (< 4MB) to have DALL-E generate a gallery of similar images.
            '''
            )

            # Adding the mechanism to upload a user image
            # (type = 'filepath' means generate_similar_images receives a path string it can re-open in binary mode)
            upload_image = gr.Image(label = 'Image Uploader', type = 'filepath')

            # Adding a button for the user to click to generate similar images to the one uploaded
            generate_similar_images_button = gr.Button('Generate Similar Images')

            # Adding an output gallery to display the similar images
            output_gallery = gr.Gallery(label = 'Similar Image Gallery')

    # Defining the behavior of what happens when the "Generate Image" button is clicked
    generate_image_button.click(fn = generate_image,
                                inputs = [user_prompt],
                                outputs = [dalle_image])

    # Defining the behavior of what happens when the "Generate Similar Images" button is clicked
    generate_similar_images_button.click(fn = generate_similar_images,
                                         inputs = [upload_image],
                                         outputs = [output_gallery])



## SCRIPT INVOCATION
## ---------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":

    # Launching the Gradio UI
    combined_dalle_ui.launch()
# Launching the Gradio UI 151 | combined_dalle_ui.launch() -------------------------------------------------------------------------------- /src/convo-sim.py: -------------------------------------------------------------------------------- 1 | # Importing the necessary Python libraries 2 | import yaml 3 | import openai 4 | import gradio as gr 5 | 6 | 7 | 8 | ## OPENAI CONNECTION 9 | ## --------------------------------------------------------------------------------------------------------------------- 10 | # Loading the API key and organization ID from file (NOT pushed to GitHub) 11 | with open('../keys/openai-keys.yaml') as f: 12 | keys_yaml = yaml.safe_load(f) 13 | 14 | # Applying our API key and organization ID to OpenAI 15 | openai.organization = keys_yaml['ORG_ID'] 16 | openai.api_key = keys_yaml['API_KEY'] 17 | 18 | # Setting the OpenAI model selection (may adjust later to be user changeable for those lucky folks out there with GPT-4 access ;) ) 19 | openai_model = 'gpt-4' 20 | 21 | # Setting the number of words to return in a response 22 | NUM_WORDS = 300 23 | 24 | 25 | 26 | ## PROMPT ENGINEERING 27 | ## --------------------------------------------------------------------------------------------------------------------- 28 | # Setting options to select from for philosophers 29 | PHILOSOPHERS = [ 30 | 'Alan Watts', 31 | 'Anne Lamott', 32 | 'Brene Brown', 33 | 'Duncan Trussell', 34 | 'Eckhart Tolle', 35 | 'Joseph Campbell', 36 | 'Pete Holmes', 37 | 'Ram Dass', 38 | 'Scott Adams', 39 | 'Socrates' 40 | ] 41 | 42 | # Setting a list of comedians, just to be able to get better results from the model 43 | COMEDIANS = [ 44 | 'Duncan Trussell', 45 | 'Pete Holmes', 46 | 'Scott Adams' 47 | ] 48 | 49 | 50 | 51 | ## GRADIO HELPER FUNCTIONS 52 | ## --------------------------------------------------------------------------------------------------------------------- 53 | def converse_amongst_philosophers(philosopher_1, philosopher_2, convo_topic, convo_chatbot, rounds = 2): 
def converse_amongst_philosophers(philosopher_1, philosopher_2, convo_topic, convo_chatbot, rounds = 2):
    '''
    Simulates a conversation between two philosophers using Generative AI

    Inputs:
        - philosopher_1 (str): The name of the first philosopher, who will begin the conversation
        - philosopher_2 (str): The name of the second philosopher
        - convo_topic (str): The topic of conversation about to take place between the philosophers
        - convo_chatbot (Gradio Chatbot): The chatbot interface that will hold the dialogue between the two philosophers
        - rounds (int): The number of rounds of conversation that will take place (default = 2)

    Returns:
        - convo_chatbot (Gradio Chatbot): The chatbot interface that holds the dialogue between the two philosophers
    '''

    # Checking if the philosopher is also a comedian, to get better results from the model
    if philosopher_1 in COMEDIANS:
        philosopher_1 = 'and comedian ' + philosopher_1
    if philosopher_2 in COMEDIANS:
        philosopher_2 = 'and comedian ' + philosopher_2

    # Instantiating independent chat flows for each respective philosopher
    philosopher_1_chat_flow = []
    philosopher_2_chat_flow = []

    # Prompt engineering and simulating the opening from philosopher 1
    philosopher_1_opener = _simulate_turn(philosopher_1_chat_flow, f'''
    You are philosopher {philosopher_1} and are about to have a conversation with another philosopher, {philosopher_2}.
    The topic of conversation is {convo_topic}.
    You are first to speak.
    Please give your opening as {philosopher_1}
    Do not continue as {philosopher_2}.
    Please keep your opening under {NUM_WORDS} words.
    ''')

    # Prompt engineering and simulating philosopher 2's response to the opening
    philosopher_2_response = _simulate_turn(philosopher_2_chat_flow, f'''
    You are philosopher {philosopher_2} and are about to have a conversation with another philosopher, {philosopher_1}.
    The topic of conversation is {convo_topic}.
    The other person has opened the conversation with the following:
    "{philosopher_1_opener}"
    Respond back accordingly.
    Do not continue as {philosopher_1}.
    Please keep your response under {NUM_WORDS} words.
    ''')

    # Appending the opening interaction to the chatbot
    convo_chatbot.append((philosopher_1_opener, philosopher_2_response))

    # Continuing a general back-and-forth based on number of rounds
    # (the "Plase" typos in the original prompt text are fixed to "Please")
    for _ in range(rounds):

        # Simulating philosopher 1's reply to philosopher 2's latest remark
        philosopher_1_response = _simulate_turn(philosopher_1_chat_flow, f'''
        {philosopher_2} has responded with the following:
        "{philosopher_2_response}"
        Respond back accordingly.
        Do not continue as {philosopher_2}.
        Please keep your response under {NUM_WORDS} words.
        ''')

        # Simulating philosopher 2's reply in turn
        philosopher_2_response = _simulate_turn(philosopher_2_chat_flow, f'''
        {philosopher_1} has responded with the following:
        "{philosopher_1_response}"
        Respond back accordingly.
        Do not continue as {philosopher_1}.
        Please keep your response under {NUM_WORDS} words.
        ''')

        # Appending this round of conversation to the chatbot
        convo_chatbot.append((philosopher_1_response, philosopher_2_response))

    # Prompt engineering a close of the conversation instigated by philosopher 1
    philosopher_1_closer = _simulate_turn(philosopher_1_chat_flow, f'''
    {philosopher_2} has responded with the following:
    "{philosopher_2_response}"
    It's time to bring this conversation to a close. Please give one final thought before closing.
    Do not continue as {philosopher_2}.
    Please keep your response under {NUM_WORDS} words.
    ''')

    # Prompt engineering a close of the conversation, finally wrapping things up with philosopher 2
    philosopher_2_closer = _simulate_turn(philosopher_2_chat_flow, f'''
    {philosopher_1} is bringing the conversation to a close with this final remark:
    "{philosopher_1_closer}"
    Please bring this conversation to a close and keep your response under {NUM_WORDS} words.
    ''')

    # Appending the closing remarks to the chatbot
    convo_chatbot.append((philosopher_1_closer, philosopher_2_closer))

    return convo_chatbot



def _simulate_turn(chat_flow, prompt):
    '''
    Sends a single prompt to the OpenAI API as part of one philosopher's ongoing chat flow

    This helper replaces the append / ChatCompletion.create / append pattern that was
    repeated six times in the original function body.

    Inputs:
        - chat_flow (list): The running message history for one philosopher (mutated in place)
        - prompt (str): The next user-role prompt to send to the model

    Returns:
        - response (str): The text of the model's reply
    '''

    # Recording the prompt, requesting a completion, and recording the reply
    chat_flow.append({'role': 'user', 'content': prompt})
    response = openai.ChatCompletion.create(
        model = openai_model,
        messages = chat_flow
    )['choices'][0]['message']['content']
    chat_flow.append({'role': 'assistant', 'content': response})

    return response
## GRADIO UI LAYOUT & FUNCTIONALITY
## ---------------------------------------------------------------------------------------------------------------------
# Defining the building blocks that represent the form and function of the Gradio UI
with gr.Blocks(title = 'Philosophy Conversation Simulator', theme = 'base') as convo_sim:

    # Setting the overall header for the page
    gr.Markdown('''
    # Philosophy Conversation Simulator

    This interface allows you to simulate a conversation between two philosophers about whatever you want them to talk about!
    ''')

    # Setting a side-by-side selector for conversators
    with gr.Row():

        with gr.Column():

            # Enabling a dropdown to select the first participant
            philosopher_1 = gr.Dropdown(choices = PHILOSOPHERS, label = 'Philosopher 1', allow_custom_value = True)

        with gr.Column():
            # Enabling a dropdown to select the second participant
            # NOTE(review): unlike philosopher_1 this dropdown omits allow_custom_value —
            # presumably an oversight; confirm whether custom names are intended here too
            philosopher_2 = gr.Dropdown(choices = PHILOSOPHERS, label = 'Philosopher 2')

    # Creating a freeform textbox allowing the user to submit any topic they would like the participants to converse about
    convo_topic = gr.Textbox(label = 'Please enter an idea for a topic of conversation.',
                             placeholder = 'e.g. Chicago Style Pizza')

    # Creating the button to simulate the conversation
    simulate_conversation_button = gr.Button('Simulate Conversation')

    # Instantiating the chatbot interface to hold the back-and-forth of the conversation
    convo_chatbot = gr.Chatbot(label = 'Simulated Conversation')

    # Defining the behavior for when the user clicks the "Simulate Conversation" button
    simulate_conversation_button.click(fn = converse_amongst_philosophers,
                                       inputs = [philosopher_1, philosopher_2, convo_topic, convo_chatbot],
                                       outputs = [convo_chatbot],
                                       queue = False)




## SCRIPT INVOCATION
## ---------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":

    # Instantiating the initial chat flow used as a global variable
    # NOTE(review): nothing in this script reads `chat_flow` — converse_amongst_philosophers
    # builds its own local chat flows — so this appears to be leftover from the chat scripts
    chat_flow = []

    # Launching the Gradio UI
    convo_sim.launch()
def generate_image(user_prompt):
    '''
    Generates an image using the DALL-E API per the user's prompt

    Inputs:
        - user_prompt (str): A body of text describing what the user would like to see

    Returns:
        - dalle_image (PIL): The image generated by DALL-E
    '''

    # DALL-E rejects prompts longer than 1000 characters, so surface a friendly UI error up front
    prompt_length = len(user_prompt)
    if prompt_length > 1000:
        raise gr.Error('Input prompt cannot exceed 1000 characters.')

    # Asking DALL-E for a single 1024x1024 image returned as a base64 encoded payload
    openai_response = openai.Image.create(
        prompt = user_prompt,
        n = 1,
        size = '1024x1024',
        response_format = 'b64_json'
    )

    # Pulling the base64 payload out of the API response and decoding it to raw bytes
    b64_payload = openai_response['data'][0]['b64_json']
    image_bytes = b64decode(b64_payload)

    # Wrapping the raw bytes in an in-memory buffer and loading them as a PIL image
    dalle_image = Image.open(BytesIO(image_bytes))

    return dalle_image
image_generator: 58 | 59 | # Instantiating the UI interface 60 | header = gr.Markdown(''' 61 | # DALL-E Image Generator 62 | 63 | Please enter a prompt for what you would like DALL-E to generate and click the "Generate Image" button to watch DALL-E work its magic! 64 | ''') 65 | user_prompt = gr.Textbox(label = 'What would you like to see?', 66 | placeholder = 'Enter some text (up to 1000 characters) of what you would like DALL-E to generate.') 67 | generate_image_button = gr.Button('Generate Image') 68 | dalle_image = gr.Image(label = 'DALL-E Generated Image', interactive = False) 69 | 70 | # Defining the behavior for when the "Generate Image" button is clicked 71 | generate_image_button.click(fn = generate_image, 72 | inputs = [user_prompt], 73 | outputs = [dalle_image]) 74 | 75 | 76 | 77 | 78 | ## SCRIPT INVOCATION 79 | ## --------------------------------------------------------------------------------------------------------------------- 80 | if __name__ == "__main__": 81 | 82 | # Launching the Gradio UI 83 | image_generator.launch() -------------------------------------------------------------------------------- /src/jarjar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkhundley/openai-api-tutorial/7ba59ec2862aea2599333c1356c7f68e61ad6880/src/jarjar.png -------------------------------------------------------------------------------- /src/similar-image-generator.py: -------------------------------------------------------------------------------- 1 | # Importing the necessary Python libraries 2 | import yaml 3 | from io import BytesIO 4 | from PIL import Image 5 | from base64 import b64decode 6 | import openai 7 | import gradio as gr 8 | 9 | 10 | 11 | ## OPENAI CONNECTION 12 | ## --------------------------------------------------------------------------------------------------------------------- 13 | # Loading the API key and organization ID from file (NOT pushed to GitHub) 14 | with 
def generate_similar_images(upload_image):
    '''
    Generates similar images based on an input image

    Inputs:
        - upload_image (str): Filepath to the image uploaded by the user (the Gradio uploader
          is configured with type = 'filepath') that will be the basis to create similar images

    Returns:
        - output_gallery (list): A list of PIL images that will be returned in a display gallery
    '''

    # Opening the uploaded file with a context manager so the handle is always closed
    # (the original left the file object from open() dangling)
    with open(upload_image, 'rb') as image_file:

        # Using DALL-E to generate 5 variation images as base64 encoded objects
        openai_response = openai.Image.create_variation(
            image = image_file,
            n = 5,
            size = '1024x1024',
            response_format = 'b64_json'
        )

    # Decoding each base64 encoded payload returned by DALL-E into a PIL image for the gallery
    output_gallery = [
        Image.open(BytesIO(b64decode(image['b64_json'])))
        for image in openai_response['data']
    ]

    return output_gallery
def transcribe(audio_intake_file):
    '''
    Transcribes the input audio using OpenAI's Whisper API

    Inputs:
        - audio_intake_file (str): Filepath of the audio intake received from the Gradio UI
          (gr.Audio with type = 'filepath' hands over a temp file with no extension)

    Returns:
        - transcript (str): The transcription provided by OpenAI's Whisper API
    '''

    # Appending the .wav file extension so the Whisper API can infer the audio format
    renamed_audio_file = audio_intake_file + '.wav'
    os.rename(audio_intake_file, renamed_audio_file)

    # BUG FIX: the original code re-opened the pre-rename path, which no longer exists
    # after os.rename, raising FileNotFoundError on every call. Opening the renamed path
    # (inside a context manager so the handle is closed) fixes this; the stray debug
    # print of the old path is also removed.
    with open(renamed_audio_file, 'rb') as rb_audio:

        # Getting the transcription from OpenAI's Whisper API
        transcript = openai.Audio.transcribe(model = 'whisper-1', file = rb_audio)

    return transcript