├── .gitignore
├── AIAssistant
│   └── app.py
├── LICENSE
├── One-Prompt-Charts
│   ├── README.md
│   ├── app.py
│   ├── app_brain.py
│   ├── key_check.py
│   └── utils.py
├── README.md
├── Stream-Argument
│   ├── app.py
│   └── requirenents.txt
├── chatbot
│   └── app.py
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/AIAssistant/app.py:
--------------------------------------------------------------------------------
1 | import openai
2 | import streamlit as st
3 |
4 | openai.api_key = st.secrets['api_secret']
5 |
6 | # This function uses the OpenAI Completion API to generate a
7 | # response based on the given prompt. The temperature parameter controls
8 | # the randomness of the generated response. A higher temperature will result
9 | # in more random responses,
10 | # while a lower temperature will result in more predictable responses.
11 |
12 | def generate_response(prompt):
13 |     completions = openai.Completion.create(
14 | engine="text-davinci-003",
15 | prompt=prompt,
16 | max_tokens=1024,
17 | n=1,
18 | stop=None,
19 | temperature=0.5,
20 | )
21 |
22 | message = completions.choices[0].text
23 | return message
24 |
25 | st.title("AI Assistant : openAI + Streamlit")
26 |
27 | prompt = st.text_input("Enter your message:", key='prompt')
28 | if st.button("Submit", key='submit'):
29 | response = generate_response(prompt)
30 | st.success(response)
31 |
32 |
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Avra
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/One-Prompt-Charts/README.md:
--------------------------------------------------------------------------------
1 | ## One Prompt Charts
2 |
3 | - #### One-Prompt Charts is available as a template on Databutton. Follow the template link to get started right away.
4 |
5 | - YouTube video - here
6 |
7 | - *Blog post coming soon*
8 |
9 | - Live demo app (an extended version of this tutorial) - Link
10 |
11 | One-Prompt Charts uses OpenAI-powered large language models, with Databutton for ease of development. Tools such as LangChainAI and PandasAI were also tried out during testing. This started as a fun project. Big thanks to Databutton for letting users prompt for free.
12 |
13 |
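14 | The app expects short, plotting-focused prompts. The in-app help text (see `app_brain.py`) suggests a format along these lines, which you can copy and adapt to your own dataset:
15 |
16 | ```
17 | - Subplot 1: Line plot of the whole spectra.
18 | - Subplot 2: Zoom into the spectra in region 1000 and 1200.
19 | - Subplot 3: Compare the area of whole spectra and zoom spectra as Bar Plot.
20 | - Subplot 4: Area curve of the zoom spectra.
21 | ```
22 |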
--------------------------------------------------------------------------------
/One-Prompt-Charts/app.py:
--------------------------------------------------------------------------------
1 | # Import necessary modules
2 | import databutton as db
3 | import streamlit as st
4 |
5 | # Import custom utility functions
6 | from utils import get_data
7 | from app_brain import handle_openai_query
8 |
9 | # This function checks for API keys. It is completely optional and is not
10 | # necessary when the key is provided via an input box or stored in secrets.
11 | from key_check import key_check
12 |
13 | # Suppress deprecation warnings related to Pyplot's global use
14 | st.set_option("deprecation.showPyplotGlobalUse", False)
15 |
16 |
17 | # Cache the header of the app to prevent re-rendering on each load
18 | @st.cache_resource
19 | def display_app_header():
20 | """Display the header of the Streamlit app."""
21 | st.title("1️⃣ One-Prompt Charts 📊 ")
22 |     st.markdown("***Prompt about your data, and see it visualized** ✨ This app runs on the power of your prompting. Here at Databutton HQ, we envision that '**Prompting is the new programming.**'*")
23 |
24 |
25 | # Display the header of the app
26 | display_app_header()
27 |
28 | with st.expander("App Overview", expanded=False):
29 | st.markdown(
30 | """
31 |
32 | You will find each function either in the library or in the main script. Feel free to modify them.
33 | - **App Header:** The function `display_app_header` defines the title and a brief description of the app, setting the context for the user. The header is rendered at the top of the app when this function is called on line 26.
34 |
35 | - **API Key Check:** The `key_check` function is invoked to ensure the necessary API keys are present before proceeding. This might be for authentication or to access certain services.
36 |
37 | - **Data Upload and Display:** The app lets the user either upload a file via the `get_data` function (line 52) or load a stored DataFrame (line 54). Once the data is available, it is optionally displayed in an expandable section for the user to review.
38 |
39 | - **OpenAI Query Handling:** If the loaded data is not empty, the `handle_openai_query` function is called (line 69) to process the user's prompt about the data and visualize it accordingly. If the data is empty, a warning is displayed to the user.
40 |
41 | """
42 | )
43 |
44 | # Check for the necessary API keys
45 | key_check()
46 |
47 | options = st.radio(
48 | "Data Usage", options=["Upload file", "Use Data in Storage"], horizontal=True
49 | )
50 | if options == "Upload file":
51 | # Get data uploaded by the user
52 | df = get_data()
53 | else:
54 | df = db.storage.dataframes.get(key="spectra-csv")
55 |
56 |
57 | # If data is uploaded successfully
58 | if df is not None:
59 | # Create an expander to optionally display the uploaded data
60 | with st.expander("Show data"):
61 | st.write(df)
62 |
63 | # Extract column names for further processing
64 | column_names = ", ".join(df.columns)
65 |
66 | # Check if the uploaded DataFrame is not empty
67 | if not df.empty:
68 | # Handle the OpenAI query and display results
69 | handle_openai_query(df, column_names)
70 | else:
71 | # Display a warning if the uploaded data is empty
72 | st.warning("The given data is empty.")
73 |
--------------------------------------------------------------------------------
/One-Prompt-Charts/app_brain.py:
--------------------------------------------------------------------------------
1 | import databutton as db
2 | import streamlit as st
3 | import pandas as pd
4 | import re
5 | import openai
6 |
7 | # Define the model to use
8 | MODEL_NAME = "gpt-3.5-turbo"
9 |
10 |
11 | def handle_openai_query(df, column_names):
12 | """
13 | Handle the OpenAI query and display the response.
14 |
15 | Parameters:
16 | - df: DataFrame containing the data
17 |     - column_names: Comma-separated string of column names in the DataFrame
18 | """
19 |
20 | # Create a text area for user input
21 | query = st.text_area(
22 | "Enter your Prompt:",
23 |         placeholder="Prompt tips: Use plotting-related keywords such as 'Plots', 'Charts', or 'Subplots'. Keep prompts concise and clear, for example: 'Bar plot for the first ten rows.'",
24 | help="""
25 |         What should an ideal prompt look like? *Feel free to copy the format and adapt it to your own dataset.*
26 |
27 | ```
28 | - Subplot 1: Line plot of the whole spectra.
29 | - Subplot 2: Zoom into the spectra in region 1000 and 1200.
30 | - Subplot 3: Compare the area of whole spectra and zoom spectra as Bar Plot.
31 | - Subplot 4: Area curve of the zoom spectra.
32 | ```
33 | """,
34 | )
35 |
36 | # If the "Get Answer" button is clicked
37 | if st.button("Get Answer"):
38 | # Ensure the query is not empty
39 | if query and query.strip() != "":
40 | # Define the prompt content
41 | prompt_content = f"""
42 | The dataset is ALREADY loaded into a DataFrame named 'df'. DO NOT load the data again.
43 |
44 | The DataFrame has the following columns: {column_names}
45 |
46 | Before plotting, ensure the data is ready:
47 | 1. Check if columns that are supposed to be numeric are recognized as such. If not, attempt to convert them.
48 | 2. Handle NaN values by filling with mean or median.
49 |
50 | Use package Pandas and Matplotlib ONLY.
51 | Provide SINGLE CODE BLOCK with a solution using Pandas and Matplotlib plots in a single figure to address the following query:
52 |
53 | {query}
54 |
55 | - USE SINGLE CODE BLOCK with a solution.
56 | - Do NOT EXPLAIN the code
57 | - DO NOT COMMENT the code.
58 | - ALWAYS WRAP UP THE CODE IN A SINGLE CODE BLOCK.
59 | - The code block must start and end with ```
60 |
61 | - Example code format ```code```
62 |
63 | - Colors to use for background and axes of the figure : #F0F0F6
64 | - Try to use the following color palette for coloring the plots : #8f63ee #ced5ce #a27bf6 #3d3b41
65 |
66 | """
67 |
68 | # Define the messages for the OpenAI model
69 | messages = [
70 | {
71 | "role": "system",
72 | "content": "You are a helpful Data Visualization assistant who gives a single block without explaining or commenting the code to plot. IF ANYTHING NOT ABOUT THE DATA, JUST politely respond that you don't know.",
73 | },
74 | {"role": "user", "content": prompt_content},
75 | ]
76 |
77 | # Call OpenAI and display the response
78 | with st.status("📟 *Prompting is the new programming*..."):
79 | with st.chat_message("assistant", avatar="📊"):
80 | botmsg = st.empty()
81 | response = []
82 | for chunk in openai.ChatCompletion.create(
83 | model=MODEL_NAME, messages=messages, stream=True
84 | ):
85 | text = chunk.choices[0].get("delta", {}).get("content")
86 | if text:
87 | response.append(text)
88 | result = "".join(response).strip()
89 | botmsg.write(result)
90 | execute_openai_code(result, df, query)
91 |
92 |
93 | def extract_code_from_markdown(md_text):
94 | """
95 | Extract Python code from markdown text.
96 |
97 | Parameters:
98 | - md_text: Markdown text containing the code
99 |
100 | Returns:
101 | - The extracted Python code
102 | """
103 | # Extract code between the delimiters
104 | code_blocks = re.findall(r"```(python)?(.*?)```", md_text, re.DOTALL)
105 |
106 | # Strip leading and trailing whitespace and join the code blocks
107 | code = "\n".join([block[1].strip() for block in code_blocks])
108 |
109 | return code
110 |
111 |
112 | def execute_openai_code(response_text: str, df: pd.DataFrame, query):
113 | """
114 | Execute the code provided by OpenAI in the app.
115 |
116 | Parameters:
117 | - response_text: The response text from OpenAI
118 | - df: DataFrame containing the data
119 | - query: The user's query
120 | """
121 |
122 | # Extract code from the response text
123 | code = extract_code_from_markdown(response_text)
124 |
125 | # If there's code in the response, try to execute it
126 | if code:
127 | try:
128 | exec(code)
129 | st.pyplot()
130 | except Exception as e:
131 | error_message = str(e)
132 | st.error(
133 | f"📟 Apologies, failed to execute the code due to the error: {error_message}"
134 | )
135 | st.warning(
136 | """
137 | 📟 Check the error message and the code executed above to investigate further.
138 |
139 | Pro tips:
140 | - Tweak your prompts to overcome the error
141 | - Use the words 'Plot'/ 'Subplot'
142 | - Use simpler, concise words
143 | - Remember, I'm specialized in displaying charts not in conveying information about the dataset
144 | """
145 | )
146 | else:
147 | st.write(response_text)
--------------------------------------------------------------------------------
/One-Prompt-Charts/key_check.py:
--------------------------------------------------------------------------------
1 | import databutton as db
2 | import streamlit as st
3 | import openai
4 | from openai import OpenAI
5 |
6 |
7 | def is_valid_openai_key(api_key: str) -> bool:
8 | """
9 | Validates whether the provided OpenAI API key is valid.
10 |
11 | Parameters:
12 | - api_key (str): The OpenAI API key to validate.
13 |
14 | Returns:
15 | - bool: True if the API key is valid, False otherwise.
16 | """
17 | try:
18 |         client = OpenAI(api_key=api_key)
19 |         # Attempting to list models; will throw an exception if the key is invalid.
20 |         if client.models.list():
21 |             return True
22 |     except Exception:
23 |         return False
24 |
25 |
26 | def key_check():
27 | """
28 | Checks the OpenAI API key, either from the Databutton secrets store or from user input.
29 | If the key is valid, it continues the app flow; otherwise, it stops the app and provides feedback.
30 | """
31 | try:
32 | # Attempting to get the OpenAI API key from the Databutton secrets store.
33 | openai.api_key = db.secrets.get(name="OPENAI_API_KEY")
34 |
35 | # Check if the connection is established and models are available.
36 | if not openai.Model.list():
37 | st.write("Not connected to OpenAI.")
38 | st.stop()
39 |
40 | except Exception as e:
41 | # Display information about needing an OpenAI API key.
42 | mtinfo = st.empty()
43 | mtinfo.info(
44 | """
45 | Hi there! Welcome to the "One-Prompt Charts" app template. 📊
46 |
47 | This app allows you to upload your data and get visual insights with just a single prompt. However, to power the magic behind the scenes, I need your OpenAI API key.
48 |
49 | If you don't have a key, you can sign up and create one [here](https://platform.openai.com/account/api-keys).
50 |
51 | Don't worry, your key will be securely stored in the Databutton secrets store, which you can find in the left-side menu under "Configure". If you prefer to add it manually, ensure to assign the name as `OPENAI_API_KEY` for your secret.
52 |
53 | Once set up, simply upload your data, prompt about it, and see it visualized! ✨
54 |
55 | """,
56 | icon="🤖",
57 | )
58 |
59 | # Accept user input for the API key.
60 | mt = st.empty()
61 | user_provided_key = mt.text_input(
62 | "Type your OpenAI API key here to continue:", type="password"
63 | )
64 |
65 | # Check the format of the provided API key.
66 | if user_provided_key.startswith("sk-"):
67 | with st.status("Connecting to OpenAI.", expanded=True) as status:
68 | # Validate the provided API key.
69 | if is_valid_openai_key(user_provided_key):
70 | status.write("Adding OpenAI API key...")
71 | db.secrets.put(name="OPENAI_API_KEY", value=user_provided_key)
72 | status.update(
73 | label="Added OpenAI API key to Databutton secrets securely. Chatbot is enabled for you.",
74 | state="complete",
75 | )
76 | status.write("Added and cleaning onboarding UI...")
77 | # Clean the screen
78 | mt.empty()
79 | mtinfo.empty()
80 | else:
81 | st.error(
82 | "Error: Invalid OpenAI API Key. You can find your API key at [this link](https://platform.openai.com/account/api-keys).",
83 | )
84 | st.stop()
85 | else:
86 |         st.warning("Please enter a valid OpenAI API key; it should start with 'sk-'.")
87 | st.stop()
88 |
--------------------------------------------------------------------------------
/One-Prompt-Charts/utils.py:
--------------------------------------------------------------------------------
1 | import databutton as db
2 | import streamlit as st
3 | import pandas as pd
4 |
5 |
6 | def get_data():
7 | """
8 | Upload data via a file.
9 |
10 | Returns:
11 | - df: DataFrame containing the uploaded data or None if no data was uploaded
12 | """
13 |
14 | # File uploader for data file
15 | file_types = ["csv", "xlsx", "xls"]
16 | data_upload = st.file_uploader("Upload a data file", type=file_types)
17 |
18 | if data_upload:
19 | # Check the type of file uploaded and read accordingly
20 | if data_upload.name.endswith('.csv'):
21 | df = pd.read_csv(data_upload)
22 | elif data_upload.name.endswith('.xlsx') or data_upload.name.endswith('.xls'):
23 | df = pd.read_excel(data_upload)
24 | else:
25 | df = None
26 | return df
27 |
28 | return None
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Web App tutorials
2 |
3 | - [OpenAI](https://platform.openai.com/docs/models) models
4 |
5 | - [Databutton](https://www.databutton.io) - App development and deployment
6 |
7 | - [Streamlit](https://streamlit.io) - The UI framework used by Databutton
8 |
9 | - [Playlist](https://youtube.com/playlist?list=PLqQrRCH56DH82KNwvlWpgh3YJXu461q69&si=Jt5UKCabu9vHEmyK)
10 |
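11 | The `AIAssistant`, `chatbot`, and `Stream-Argument` apps all read the OpenAI key via `st.secrets['api_secret']`. As a minimal sketch (assuming you run them with plain Streamlit rather than Databutton, and create the secrets file yourself), put the key in `.streamlit/secrets.toml` and the apps will pick it up:
12 |
13 | ```python
14 | # .streamlit/secrets.toml (kept out of version control) would contain one line:
15 | #     api_secret = "sk-..."
16 | #
17 | # Each app then reads the key exactly as in its existing source:
18 | import streamlit as st
19 | import openai
20 |
21 | openai.api_key = st.secrets["api_secret"]
22 | ```
23 |
24 | Install dependencies with `pip install -r requirements.txt`, then start an app with, for example, `streamlit run chatbot/app.py`.
25 |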
--------------------------------------------------------------------------------
/Stream-Argument/app.py:
--------------------------------------------------------------------------------
1 | import openai
2 | import streamlit as st
3 | from streamlit_pills import pills
4 |
5 | openai.api_key = st.secrets['api_secret']
6 |
7 | st.subheader("AI Assistant : Streamlit + OpenAI: `stream` *argument*")
8 | selected = pills("", ["NO Streaming", "Streaming"], ["🎈", "🌈"])
9 |
10 | user_input = st.text_input("You: ",placeholder = "Ask me anything ...", key="input")
11 |
12 |
13 | if st.button("Submit", type="primary"):
14 | st.markdown("----")
15 | res_box = st.empty()
16 | if selected == "Streaming":
17 | report = []
18 | for resp in openai.Completion.create(model='text-davinci-003',
19 | prompt=user_input,
20 | max_tokens=120,
21 | temperature = 0.5,
22 | stream = True):
23 |             # append each streamed chunk, then join the chunks into a
24 |             # single string and strip surrounding whitespace before
25 |             # re-rendering the partial result in the placeholder
26 | report.append(resp.choices[0].text)
27 | result = "".join(report).strip()
28 | result = result.replace("\n", "")
29 | res_box.markdown(f'*{result}*')
30 |
31 | else:
32 | completions = openai.Completion.create(model='text-davinci-003',
33 | prompt=user_input,
34 | max_tokens=120,
35 | temperature = 0.5,
36 | stream = False)
37 | result = completions.choices[0].text
38 |
39 | res_box.write(result)
40 | st.markdown("----")
41 |
--------------------------------------------------------------------------------
/Stream-Argument/requirenents.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | streamlit-pills==0.3.0
3 | openai
4 |
--------------------------------------------------------------------------------
/chatbot/app.py:
--------------------------------------------------------------------------------
1 | import openai
2 | import streamlit as st
3 | from streamlit_chat import message
4 |
5 | openai.api_key = st.secrets['api_secret']
6 |
7 | # This function uses the OpenAI Completion API to generate a
8 | # response based on the given prompt. The temperature parameter controls
9 | # the randomness of the generated response. A higher temperature will result
10 | # in more random responses,
11 | # while a lower temperature will result in more predictable responses.
12 | def generate_response(prompt):
13 |     completions = openai.Completion.create(
14 | engine="text-davinci-003",
15 | prompt=prompt,
16 | max_tokens=1024,
17 | n=1,
18 | stop=None,
19 | temperature=0.5,
20 | )
21 |
22 |     response_text = completions.choices[0].text
23 |     return response_text
24 |
25 |
26 | st.title("🤖 chatBot : openAI GPT-3 + Streamlit")
27 |
28 |
29 | if 'generated' not in st.session_state:
30 | st.session_state['generated'] = []
31 |
32 | if 'past' not in st.session_state:
33 | st.session_state['past'] = []
34 |
35 |
36 | def get_text():
37 | input_text = st.text_input("You: ","Hello, how are you?", key="input")
38 | return input_text
39 |
40 |
41 | user_input = get_text()
42 |
43 | if user_input:
44 | output = generate_response(user_input)
45 | st.session_state.past.append(user_input)
46 | st.session_state.generated.append(output)
47 |
48 | if st.session_state['generated']:
49 |
50 | for i in range(len(st.session_state['generated'])-1, -1, -1):
51 | message(st.session_state["generated"][i], key=str(i))
52 | message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
53 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | streamlit-pills==0.3.0
3 | openai
4 | streamlit-chat  # used by chatbot/app.py
5 |
--------------------------------------------------------------------------------