├── .gitignore
├── README.md
├── gpt3-spreadsheet-apps-script.js
├── requirements.txt
└── streamlit-openai-restaurant.py

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# OpenAI examples

## Restaurant Recommendation

- Install [pyenv](https://github.com/pyenv/pyenv):

```
brew update
brew install pyenv
```

- Install Python (3.10.8 or any newer version) if you haven't already:

```
pyenv install 3.10.8
pyenv global 3.10.8
```

- Get an OpenAI API key and export it:

```
export OPENAI_API_KEY="{YOUR_API_KEY}"
```

- Install the Python modules:

```
pip install -r requirements.txt
```

- Run the app:

```
streamlit run streamlit-openai-restaurant.py
```

## Google Sheets

- In your Google Sheet, open Extensions > Apps Script
- Paste the contents of [gpt3-spreadsheet-apps-script.js](gpt3-spreadsheet-apps-script.js) and add your OpenAI API key
- Reload the spreadsheet; the `GPT3` and `GPT3_RANGE` custom functions are then available in cells, as in the example below
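
A minimal usage sketch (the prompt and cell references here are illustrative, not taken from the linked sheet):

```
=GPT3("Write a one-line tagline for a ramen restaurant")
=GPT3_RANGE(A2:A4, B2:B4, A5)
```

`GPT3_RANGE` builds a few-shot prompt from the example input/output ranges and asks the model to complete the output for the new input cell.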
--------------------------------------------------------------------------------
/gpt3-spreadsheet-apps-script.js:
--------------------------------------------------------------------------------
/* source: https://docs.google.com/spreadsheets/d/17y_oAaDq2ycCh_1LomxrK2_1QgATckZAKC_bAv002xc/edit?usp=sharing */

const OPENAI_API_KEY = ""; // <- PASTE YOUR SECRET KEY HERE
const OPENAI_API_URL = "https://api.openai.com/v1/completions";

/**
 * Submits a prompt to GPT-3 and returns the completion
 *
 * @param {string} prompt Prompt to submit to GPT-3
 * @param {number} temperature Model temperature (0-1)
 * @param {string} model Model name (e.g. text-davinci-002)
 * @param {number} maxTokens Max tokens (< 4000)
 * @return Completion from GPT-3
 * @customfunction
 */
function GPT3(
  prompt,
  temperature = 0.6,
  model = "text-davinci-003",
  maxTokens = 256
) {
  var data = {
    prompt: prompt,
    temperature: temperature,
    model: model,
    max_tokens: maxTokens,
  };
  var options = {
    method: "post",
    contentType: "application/json",
    payload: JSON.stringify(data),
    headers: {
      "Authorization": `Bearer ${OPENAI_API_KEY}`,
    },
  };
  var response = UrlFetchApp.fetch(OPENAI_API_URL, options);

  return JSON.parse(response.getContentText()).choices[0].text.trim();
}
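
/*
 * For illustration only (the values below are made up, not from the original
 * sheet): given example inputs ["apple", "dog"], example outputs
 * ["APPLE", "DOG"], and the new input "cat", GPT3_RANGE below assembles
 * this prompt before calling GPT3():
 *
 *   I am an input/output bot. Given example inputs, I identify the pattern and produce the associated outputs.
 *
 *   Input: apple
 *   Output: APPLE
 *
 *   Input: dog
 *   Output: DOG
 *
 *   Input: cat
 *   Output:
 */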
/**
 * Submits examples to GPT-3 and returns the completion
 *
 * @param {Array<Array<string>>} examples_input Range of cells with input examples
 * @param {Array<Array<string>>} examples_output Range of cells with output examples
 * @param {string} input Cell to pass as input for completion
 * @param {number} temperature Model temperature (0-1)
 * @param {string} model Model name (e.g. text-davinci-002)
 * @param {number} maxTokens Max tokens (< 4000)
 * @return Completion from GPT-3
 * @customfunction
 */
function GPT3_RANGE(
  examples_input,
  examples_output,
  input,
  temperature = 0.6,
  model = "text-davinci-003",
  maxTokens = 256
) {
  // Few-shot prompt: instruction followed by the example input/output pairs.
  let prompt = `I am an input/output bot. Given example inputs, I identify the pattern and produce the associated outputs.`;

  for (let i = 0; i < examples_input.length; i++) {
    const example_input = examples_input[i];
    const example_output = examples_output[i];

    prompt += `

Input: ${example_input}
Output: ${example_output}`;
  }

  prompt += `

Input: ${input}
Output:`;

  console.log(prompt);

  return GPT3(prompt, temperature, model, maxTokens);
}
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
streamlit
openai<1.0  # the app uses the legacy openai.Completion API
watchdog
--------------------------------------------------------------------------------
/streamlit-openai-restaurant.py:
--------------------------------------------------------------------------------
import os
import openai
import streamlit as st

# Set the API key (environment variable takes precedence over Streamlit secrets)
openai.api_key = os.getenv("OPENAI_API_KEY") or st.secrets["OPENAI_API_KEY"]

st.title("Restaurant Suggestion App")

# Get input from the user
input_text = st.text_input("Enter a location to get restaurant suggestions:")

# Only call the API once the user has entered a location
if input_text:
    # Use GPT-3 to generate text based on the input
    prompt = f"Restaurant suggestions in {input_text}"
    model = "text-davinci-003"
    completions = openai.Completion.create(
        engine=model,
        prompt=prompt,
        max_tokens=2048,
        n=1,
        temperature=0.5,
    )

    # Show the generated text to the user
    output_text = completions.choices[0].text
    st.write(output_text)
--------------------------------------------------------------------------------