├── __init__.py
├── chatgpt_batch_whipper
├── __init__.py
├── pub
│ ├── __init__.py
│ ├── chatgpt_wrapper.py
│ └── whipper_ui.py
├── version.py
├── icon.png
├── background.png
├── prompt_master.csv
├── main.py
└── start_whipper.py
├── testdata.csv
├── documents
└── photos
│ ├── auth1.png
│ ├── auth2.png
│ ├── single shoot.png
│ ├── Fully Automatic mode.png
│ ├── Fully Automatic mode result.png
│ └── Fully Automatic mode result check.png
├── postinstall.sh
├── requirements.txt
├── LICENSE
├── setup.py
└── README.MD
/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/pub/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/version.py:
--------------------------------------------------------------------------------
# Single source of truth for the package version.
# NOTE(review): setup.py hard-codes "0.1.0" separately — keep the two in sync.
__version__ = "0.1.0"
2 |
--------------------------------------------------------------------------------
/testdata.csv:
--------------------------------------------------------------------------------
1 | food
2 | Burgers
3 | Apple Pie
4 | French Fries
5 | Hot Dogs
6 |
--------------------------------------------------------------------------------
/documents/photos/auth1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CodeDiggerM/chatgpt-batch-whipper/HEAD/documents/photos/auth1.png
--------------------------------------------------------------------------------
/documents/photos/auth2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CodeDiggerM/chatgpt-batch-whipper/HEAD/documents/photos/auth2.png
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CodeDiggerM/chatgpt-batch-whipper/HEAD/chatgpt_batch_whipper/icon.png
--------------------------------------------------------------------------------
/documents/photos/single shoot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CodeDiggerM/chatgpt-batch-whipper/HEAD/documents/photos/single shoot.png
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CodeDiggerM/chatgpt-batch-whipper/HEAD/chatgpt_batch_whipper/background.png
--------------------------------------------------------------------------------
/postinstall.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Post-install helper: download the Firefox build that playwright drives,
# then copy the checked-out project into the user's home directory.
echo "Installing playwright"
playwright install firefox
# NOTE(review): assumes the script runs from the checkout's parent directory;
# -Rf silently overwrites any existing ~/chatgpt-batch-whipper. TODO confirm
# the intended working directory.
cp -Rf ./chatgpt-batch-whipper ~/chatgpt-batch-whipper
--------------------------------------------------------------------------------
/documents/photos/Fully Automatic mode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CodeDiggerM/chatgpt-batch-whipper/HEAD/documents/photos/Fully Automatic mode.png
--------------------------------------------------------------------------------
/documents/photos/Fully Automatic mode result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CodeDiggerM/chatgpt-batch-whipper/HEAD/documents/photos/Fully Automatic mode result.png
--------------------------------------------------------------------------------
/documents/photos/Fully Automatic mode result check.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CodeDiggerM/chatgpt-batch-whipper/HEAD/documents/photos/Fully Automatic mode result check.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | pandas
3 | streamlit-aggrid
4 | readline; platform_system=="Linux"
5 | pyreadline3; platform_system=="Windows"
6 | pytest-playwright; platform_system=="Windows"
7 | playwright; platform_system!="Windows"
8 |
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/prompt_master.csv:
--------------------------------------------------------------------------------
1 | Date,No,prompt,conversation_id,parent_message_id
2 | 2023-02-19,prompt_2,"Please translate this word to Japanese
3 | Please Do not include any explanation in your reply.
4 | Word:",688a6f7a-1c05-4367-8d5f-9467b3bdc547,ece902fb-8068-42f3-9319-11e649d8e94b
5 | 2023-02-19,prompt_1,hello there,7b2bda0a-b460-4f7e-a1c0-a9d8ef0c9ca5,2b25bf03-4bcf-48ad-81c6-68446b4af37f
6 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 CodeDiggerM
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/main.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 | import os
4 | from chatgpt_batch_whipper.pub.chatgpt_wrapper import ChatGPT
5 | from chatgpt_batch_whipper.version import __version__
6 | import cmd
7 |
def main():
    """Command-line entry point for the batch whipper.

    With no positional argument or ``auth``, opens a visible browser so the
    user can log in to ChatGPT (the session is persisted by playwright).
    With ``ui`` (any case), launches the Streamlit UI.  Anything else prints
    a usage hint.  The process then stays alive until the user enters ``Q``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--version",
        "-v",
        action="version",
        version=f"{sys.argv[0]} version {__version__}",
        help="Print version and exit.",
    )
    parser.add_argument(
        "params",
        nargs="*",
        help="Use 'auth' for auth mode, or run 'ui' to start the streamlit UI.",
    )

    args = parser.parse_args()
    auth_mode = len(args.params) == 0 or (len(args.params) == 1 and args.params[0] == "auth")
    run_mode = len(args.params) == 1 and args.params[0].upper() == "UI"

    if auth_mode:
        # A visible (headless=False) browser lets the user complete the
        # ChatGPT login; closing it leaves the cookies in the profile.
        ChatGPT(headless=False, timeout=90)
    elif run_mode:
        os.system("streamlit run start_whipper.py")
    else:
        # BUG FIX: the original used two independent `if`s, so this usage
        # hint was also printed after a successful auth run; `elif` keeps
        # the three branches mutually exclusive.
        print("please input the right command. Use 'auth' for auth mode, or run 'UI' to start the streamlit UI.")

    # Keep the process (and therefore the playwright browser context)
    # alive until the user explicitly quits.
    while True:
        choice = input("Enter Q to quit, or press return to continue")
        if choice.upper() == "Q":
            break


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages, setup
2 | from setuptools.command.develop import develop
3 | from setuptools.command.install import install
4 | from subprocess import check_call
5 |
# Long description for PyPI comes from the README.
# BUG FIX: the file in this repository is named "README.MD"; opening
# "README.md" only worked on case-insensitive filesystems and raised
# FileNotFoundError on Linux.
with open("README.MD", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# One requirement per line; strip the trailing newlines that readlines()
# keeps and drop blank lines so install_requires gets clean specifiers.
with open('requirements.txt') as f:
    install_requirement = [line.strip() for line in f if line.strip()]
11 |
12 |
class PostDevelopCommand(develop):
    """Post-installation hook for development mode (``pip install -e .``).

    After the standard develop step, downloads the Firefox build that
    playwright needs to drive the ChatGPT web UI.
    """

    def run(self):
        develop.run(self)
        # BUG FIX: check_call() was given one whole string without
        # shell=True, which raises FileNotFoundError on POSIX; pass the
        # argv list instead (mirrors PostInstallCommand below).
        check_call('playwright install firefox'.split())
20 |
21 |
class PostInstallCommand(install):
    """Post-installation hook for regular installs.

    Runs the standard install step, then fetches the Firefox browser
    that playwright uses to talk to ChatGPT.
    """

    def run(self):
        install.run(self)
        check_call(["playwright", "install", "firefox"])
29 |
30 |
# Package metadata and install configuration.
setup(
    # BUG FIX: the name contained a typo ("Bach") and spaces, which pip
    # would normalize anyway; use the canonical distribution name that
    # matches the repository and the import package.
    name="chatgpt-batch-whipper",
    version="0.1.0",  # NOTE(review): keep in sync with chatgpt_batch_whipper/version.py
    author="Codedigger",
    author_email="zhichao.liu@hotmail.com",
    description="The ChatGPT Batch Whipper is a tool designed to simplify batch jobs using ChatGPT",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/CodeDiggerM/chatgpt-batch-whipper",
    packages=find_packages(),
    install_requires=install_requirement,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7",
    entry_points={
        "console_scripts": [
            "run_chatgpt = chatgpt_batch_whipper.main:main"
        ]
    },
    # Run the playwright browser download after develop/install.
    cmdclass={
        'develop': PostDevelopCommand,
        'install': PostInstallCommand,
    },
    scripts=["postinstall.sh"],
)
60 |
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/start_whipper.py:
--------------------------------------------------------------------------------
1 | """
2 | MIT License
3 |
4 | Copyright (c) 2023, CodeDigger
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
24 | ---
25 | Start function.
26 | Please run to start APP.
27 | streamlit run start_whipper.py
28 | Author: CodeDigger
29 | Date: 2023/02/19
30 | Description: Entry script for the ChatGPT Batch Whipper Streamlit app. Run "streamlit run start_whipper.py" to construct the WhipperUI and display the prompt interface.
31 | Disclaimer: This software is provided "as is" and without any express or implied warranties, including, without limitation, the implied warranties of merchantability and fitness for a particular purpose. The author and contributors of this module shall not be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
32 | """
33 |
34 | from pub.whipper_ui import WhipperUI
35 |
if __name__ == "__main__":
    # Entry point for `streamlit run start_whipper.py`.
    whipper_ui = WhipperUI()
    # NOTE: set_up_page() has to be the first call on the page (Streamlit
    # requires page config before any other UI call) — presumably it runs
    # inside WhipperUI's construction; TODO confirm in whipper_ui.py.
    whipper_ui.show_prompt_ui()
40 |
41 |
--------------------------------------------------------------------------------
/README.MD:
--------------------------------------------------------------------------------
1 | # 🦮 Welcome to ChatGPT Batch Whipper 🦮
2 |
3 |
4 | The ChatGPT Batch Whipper is a tool designed to simplify batch jobs using ChatGPT. With this tool, you can:
5 |
6 | * Save and reuse prompts, making it easy to apply them to multiple inputs automatically using an input CSV file.
7 | * Ensure continuity and coherence by submitting input data for the same prompt to the same conversation.
8 | * Resume the batch job from where you left off, even if you unintentionally stop the process, thanks to the tool's data saving feature.
9 | * Never worry about exceeding hourly submit times, as the tool waits until it can run again.
10 | In short, the ChatGPT Batch Whipper tool is an efficient and user-friendly way to perform batch jobs with ChatGPT. We welcome any feedback or suggestions you may have, so give it a try and see how it can improve your workflow!
11 | Thanks to @mmabrouk — some of the code was inspired by and adapted from his [repo](https://github.com/mmabrouk/chatgpt-wrapper).
12 |
13 |
14 |
15 |
16 | ## Installation
17 | ### Use PIP command
18 | 1. Install the latest version of this software directly from github with pip:
19 | ```bash
20 | pip install git+https://github.com/CodeDiggerM/chatgpt-batch-whipper.git
21 | ```
22 | 2. Go to **auth** mode. This will open up a browser window. Log in to ChatGPT in the browser window, then close the browser.
23 | ```bash
24 | run_chatgpt auth
25 | ```
26 | 3. Start the UI
27 | ```bash
28 | run_chatgpt ui
29 | ```
30 |
31 | ### Manually set up
32 |
33 | 1. Clone the repo to your working directory
34 | ```bash
35 | git clone https://github.com/CodeDiggerM/chatgpt-batch-whipper.git
36 | ```
37 | 2. Install the dependencies.
38 | ```bash
39 | pip install -r requirements.txt
40 | ```
41 |
42 | 3. Install a browser in playwright (if you haven't already). The program will use firefox by default.
43 |
44 | ```
45 | playwright install firefox
46 | ```
47 |
48 | 4. Go to the chatgpt-batch-whipper/
49 |
50 | ```bash
51 | cd chatgpt_batch_whipper/
52 | ```
53 |
54 | 5. Run the main page by streamlit.
55 | You can go to [streamlit](https://github.com/streamlit/streamlit) to learn more about Streamlit.
56 |
57 | ```bash
58 | streamlit run start_whipper.py
59 | ```
60 | 6. Authenticate your openAI account
61 | Click the **auth** button
62 |
63 |
64 | It will open up an authentication page in the web browser you installed using playwright. Like below, authenticate with your registered account.
65 |
66 |
67 |
68 | ## Quickstart
69 |
70 | ### Use API
71 | 1. Grant auth from chatGPT.
72 | ```python
73 | from chatgpt_batch_whipper.pub.chatgpt_wrapper import ChatGPT
74 | bot = ChatGPT()
75 | response = bot.auth()
76 | print(response)
77 | ```
78 |
79 | 2. Ask the question to chatGPT
80 | ```python
81 | from chatgpt_batch_whipper.pub.chatgpt_wrapper import ChatGPT
82 | bot = ChatGPT()
83 | response = bot.ask("Greeting!")
84 | print(response)
85 | ```
86 |
87 |
88 | ### Streamlit UI
89 |
90 | Now run it to open the app!
91 | ```
92 | streamlit run start_whipper.py
93 | ```
94 |
95 | #### Single shoot mode
96 |
97 | 1. select the **Single shoot mode**.
98 | 2. Type your prompt then click submit
99 | 3. click the submit button
100 |
101 |
102 | Here are some tips.
103 |
104 | #### Fully Automatic mode
105 | You can apply your prompt to multiple records in the **Fully Automatic mode**.
106 |
107 | 1. Select Fully Automatic mode.
108 | 2. Select CSV file.
109 | 3. Select column you want to process.
110 | 4. Type the prompt.
111 | 5. click to Submit.
112 | After processing, the results will appear in the **The processed result** section.
113 |
114 |
115 | You can review the results, tick the "Is false" checkbox for any incorrect rows, then click **Submit** to reprocess the "failed" ones.
116 |
117 |
118 |
119 | * You can save the prompt by click **Add** button.
120 | * You can choose the old prompt by select **prompt list**.
121 | * You can delete the old prompt by click **Delete Prompt**.
122 | * You can delete the saved process result by click **Delete Cached result**.
123 | * You can update the saved process result by click **Update**.
124 | * You can download the result file by click **Download**.
125 |
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/pub/chatgpt_wrapper.py:
--------------------------------------------------------------------------------
1 | """
2 | MIT License
3 |
4 | Copyright (c) 2023, CodeDigger
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
24 | ---
25 |
26 | ChatGPT: A ChatGPT wrapper class.
27 | Thanks to @mmabrouk
28 | This part is modified from his repo
29 | https://github.com/mmabrouk/chatgpt-wrapper.
30 | Author: CodeDigger
31 | Date: 2023/02/19
32 | Description: This module defines the ChatGPT wrapper class, which drives a Playwright-controlled browser to authenticate against the ChatGPT web UI and to send prompts and stream back responses.
33 | Disclaimer: This software is provided "as is" and without any express or implied warranties, including, without limitation, the implied warranties of merchantability and fitness for a particular purpose. The author and contributors of this module shall not be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
34 | """
35 |
36 | import atexit
37 | import base64
38 | import json
39 | import math
40 | import operator
41 | import time
42 | import uuid
43 | import shutil
44 | from functools import reduce
45 | from time import sleep
46 | from typing import Optional
47 | import os
48 | from playwright.sync_api import sync_playwright
49 | from playwright._impl._api_structures import ProxySettings
50 |
51 |
class ChatGPT:
    """
    A ChatGPT interface that uses Playwright to run a browser,
    and interacts with that browser to communicate with ChatGPT in
    order to provide an open API to ChatGPT.

    The class is a singleton: constructing it a second time returns the
    first instance (see ``__new__``), although ``__init__`` still re-runs
    and reconnects with the new arguments.
    """

    # DOM element ids used to hand data from injected JavaScript back to Python.
    stream_div_id = "chatgpt-wrapper-conversation-stream-data"
    eof_div_id = "chatgpt-wrapper-conversation-stream-data-eof"
    session_div_id = "chatgpt-wrapper-session-data"
    _instance = None

    def __new__(cls, headless: bool = True, browser="firefox", timeout=60, proxy: Optional[ProxySettings] = None):
        """
        ChatGPT should be only be created once.
        """
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, headless: bool = True, browser="firefox", timeout=60, proxy: Optional[ProxySettings] = None):
        """
        Args:
            headless: Run the browser without a visible window.
            browser: Playwright browser type name (e.g. "firefox").
            timeout: Seconds to wait for a streamed response before giving up.
            proxy: Optional playwright proxy settings.
        """
        self._kill_nightly_processes()
        # Store the configuration *before* connecting so that _connect()
        # (also used by reset()) can read it from the instance.
        self.timeout = timeout
        self.proxy = proxy
        self.browser_type = browser
        self.headless = headless
        # REFACTOR: the original __init__ duplicated the entire body of
        # _connect(); delegating removes the copy while keeping the same
        # launch sequence.
        self._connect()

    def _connect(self):
        """Start playwright, launch (or reuse) a persistent browser context
        and open the ChatGPT page."""
        self.play = sync_playwright().start()

        try:
            playbrowser = getattr(self.play, self.browser_type)
        except Exception:
            # BUG FIX: the original printed self.browser here, which is a
            # stale context object (or unset on first connect); report the
            # requested browser type instead.
            print(f"Browser {self.browser_type} is invalid, falling back on firefox")
            playbrowser = self.play.firefox
        try:
            self.browser = playbrowser.launch_persistent_context(
                user_data_dir="/tmp/playwright",
                headless=self.headless,
                proxy=self.proxy,
            )
        except Exception:
            # The shared profile is presumably locked by another instance:
            # clone it so this instance gets its own copy of the cookies.
            self.user_data_dir = f"/tmp/{str(uuid.uuid4())}"
            shutil.copytree("/tmp/playwright", self.user_data_dir)
            self.browser = playbrowser.launch_persistent_context(
                user_data_dir=self.user_data_dir,
                headless=self.headless,
                proxy=self.proxy,
            )

        if len(self.browser.pages) > 0:
            self.page = self.browser.pages[0]
        else:
            self.page = self.browser.new_page()
        self._start_browser()
        self.parent_message_id = str(uuid.uuid4())
        self.conversation_id = None
        self.session = None
        atexit.register(self._cleanup)

    def reset(self):
        """Tear down the current browser session and reconnect from scratch."""
        self._cleanup()
        self._connect()

    @staticmethod
    def _kill_nightly_processes():
        """Kill any lingering Firefox Nightly processes left behind by playwright."""
        # Determine the name of the pkill command based on the OS
        if os.name == 'nt':  # Windows
            pkill_command = 'taskkill /F /IM'
        else:  # Unix
            pkill_command = 'pkill -f'

        # Kill any process with "Nightly" in the name
        print(f"{pkill_command} Nightly")
        os.system(f"{pkill_command} Nightly")

    def _start_browser(self):
        """Navigate the page to the ChatGPT web UI."""
        self.page.goto("https://chat.openai.com/")

    def _cleanup(self):
        """Close the browser, delete any cloned profile directory and stop playwright."""
        self.browser.close()
        # remove the user data dir in case this is a second instance
        if hasattr(self, "user_data_dir"):
            shutil.rmtree(self.user_data_dir)
        self.play.stop()

    def refresh_session(self):
        """Fetch the ChatGPT auth session (access token) via an in-page XHR.

        The JS writes the JSON response into a temporary div, which is then
        read, parsed into ``self.session`` and removed again.
        """
        self.page.evaluate(
            """
            const xhr = new XMLHttpRequest();
            xhr.open('GET', 'https://chat.openai.com/api/auth/session');
            xhr.onload = () => {
              if(xhr.status == 200) {
                var mydiv = document.createElement('DIV');
                mydiv.id = "SESSION_DIV_ID"
                mydiv.innerHTML = xhr.responseText;
                document.body.appendChild(mydiv);
              }
            };
            xhr.send();
            """.replace(
                "SESSION_DIV_ID", self.session_div_id
            )
        )

        # Poll until the JS above has appended the session div.
        while True:
            session_datas = self.page.query_selector_all(f"div#{self.session_div_id}")
            if len(session_datas) > 0:
                break
            sleep(0.2)

        session_data = json.loads(session_datas[0].inner_text())
        self.session = session_data

        self.page.evaluate(f"document.getElementById('{self.session_div_id}').remove()")

    def _cleanup_divs(self):
        """Remove the helper divs created by ask_stream()."""
        self.page.evaluate(f"document.getElementById('{self.stream_div_id}').remove()")
        self.page.evaluate(f"document.getElementById('{self.eof_div_id}').remove()")

    def ask_stream(self, prompt: str, conversation_id: str = "", parent_message_id: str = ""):
        """
        Send *prompt* to ChatGPT and yield response text chunks as they stream in.

        Args:
            prompt: The text to send.
            conversation_id: Conversation to continue; when empty, the
                instance's current conversation is used.
            parent_message_id: Parent message id; falls back likewise.

        Yields:
            str: Incremental pieces of the assistant's reply, or a help
            message when the session has no access token.
        """
        if self.session is None:
            self.refresh_session()
        # BUG FIX: the original condition compared each argument to itself
        # (`conversation_id != conversation_id`), which is always False,
        # so only the emptiness checks ever took effect; the else branch
        # was a no-op.  Fall back to the instance's ids whenever the
        # caller did not supply both.
        if not conversation_id or not parent_message_id:
            conversation_id = self.conversation_id
            parent_message_id = self.parent_message_id
        new_message_id = str(uuid.uuid4())

        if "accessToken" not in self.session:
            yield (
                "Your ChatGPT session is not usable.\n"
                "* Run this program with the `install` parameter and log in to ChatGPT.\n"
                "* If you think you are already logged in, try running the `session` command."
            )
            return

        # Payload for the backend conversation endpoint.
        request = {
            "messages": [
                {
                    "id": new_message_id,
                    "role": "user",
                    "content": {"content_type": "text", "parts": [prompt]},
                }
            ],
            "model": "text-davinci-002-render-sha",
            "conversation_id": conversation_id,
            "parent_message_id": parent_message_id,
            "action": "next",
        }

        # The injected JS streams the event-source response into a div
        # (base64 is applied on the JS side via btoa); a second div marks EOF.
        code = (
            """
            const stream_div = document.createElement('DIV');
            stream_div.id = "STREAM_DIV_ID";
            document.body.appendChild(stream_div);
            const xhr = new XMLHttpRequest();
            xhr.open('POST', 'https://chat.openai.com/backend-api/conversation');
            xhr.setRequestHeader('Accept', 'text/event-stream');
            xhr.setRequestHeader('Content-Type', 'application/json');
            xhr.setRequestHeader('Authorization', 'Bearer BEARER_TOKEN');
            xhr.responseType = 'stream';
            xhr.onreadystatechange = function() {
              var newEvent;
              if(xhr.readyState == 3 || xhr.readyState == 4) {
                const newData = xhr.response.substr(xhr.seenBytes);
                try {
                  const newEvents = newData.split(/\\n\\n/).reverse();
                  newEvents.shift();
                  if(newEvents[0] == "data: [DONE]") {
                    newEvents.shift();
                  }
                  if(newEvents.length > 0) {
                    newEvent = newEvents[0].substring(6);
                    // using XHR for eventstream sucks and occasionally ive seen incomplete
                    // json objects come through JSON.parse will throw if that happens, and
                    // that should just skip until we get a full response.
                    JSON.parse(newEvent);
                  }
                } catch (err) {
                  console.log(err);
                  newEvent = undefined;
                }
                if(newEvent !== undefined) {
                  stream_div.innerHTML = btoa(newEvent);
                  xhr.seenBytes = xhr.responseText.length;
                }
              }
              if(xhr.readyState == 4) {
                const eof_div = document.createElement('DIV');
                eof_div.id = "EOF_DIV_ID";
                document.body.appendChild(eof_div);
              }
            };
            xhr.send(JSON.stringify(REQUEST_JSON));
            """.replace(
                "BEARER_TOKEN", self.session["accessToken"]
            )
            .replace("REQUEST_JSON", json.dumps(request))
            .replace("STREAM_DIV_ID", self.stream_div_id)
            .replace("EOF_DIV_ID", self.eof_div_id)
        )
        self.page.evaluate(code)
        last_event_msg = ""
        start_time = time.time()
        while True:
            eof_datas = self.page.query_selector_all(f"div#{self.eof_div_id}")

            conversation_datas = self.page.query_selector_all(
                f"div#{self.stream_div_id}"
            )
            if len(conversation_datas) == 0:
                # BUG FIX: the original `continue`d here with no sleep and
                # no timeout check, busy-looping forever if the stream div
                # never appeared; honor the timeout and back off instead.
                if (time.time() - start_time) > self.timeout:
                    break
                sleep(0.2)
                continue

            full_event_message = None

            try:
                event_raw = base64.b64decode(conversation_datas[0].inner_html())
                if len(event_raw) > 0:
                    event = json.loads(event_raw)
                    if event is not None:
                        # Track ids so the next ask() continues this thread.
                        self.parent_message_id = event["message"]["id"]
                        self.conversation_id = event["conversation_id"]
                        full_event_message = "\n".join(
                            event["message"]["content"]["parts"]
                        )
            except Exception:
                yield (
                    "Failed to read response from ChatGPT. Tips:\n"
                    " * Try again. ChatGPT can be flaky.\n"
                    " * Use the `session` command to refresh your session, and then try again.\n"
                    " * Restart the program in the `install` mode and make sure you are logged in."
                )
                break

            if full_event_message is not None:
                # Each event carries the whole message so far; yield only
                # the part we have not emitted yet.
                chunk = full_event_message[len(last_event_msg):]
                last_event_msg = full_event_message
                yield chunk

            # if we saw the eof signal, this was the last event we
            # should process and we are done
            if len(eof_datas) > 0 or (((time.time() - start_time) > self.timeout) and full_event_message is None):
                break

            sleep(0.2)

        self._cleanup_divs()

    def ask(self, message: str, conversation_id: str = "", parent_message_id: str = "") -> str:
        """
        Send a message to chatGPT and return the response.

        Args:
            message (str): The message to send.
            conversation_id (str): Conversation id.
            parent_message_id (str): parent_message_id.

        Returns:
            str: The response received from OpenAI, or None when no
            chunks were produced.
        """
        response = list(self.ask_stream(message, conversation_id, parent_message_id))
        return (
            reduce(operator.add, response)
            if len(response) > 0
            else None
        )

    def new_conversation(self):
        """Forget the current thread so the next ask() starts a fresh conversation."""
        self.parent_message_id = str(uuid.uuid4())
        self.conversation_id = None

    def get_conversation_id(self):
        """Return the id of the conversation currently being continued (or None)."""
        return self.conversation_id

    def get_parent_message_id(self):
        """Return the id of the last message in the current conversation."""
        return self.parent_message_id
--------------------------------------------------------------------------------
/chatgpt_batch_whipper/pub/whipper_ui.py:
--------------------------------------------------------------------------------
1 | """
2 | MIT License
3 |
4 | Copyright (c) 2023, CodeDigger
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
24 | ---
25 |
26 | WhipperUI: A Streamlit UI control class
27 | Author: CodeDigger
28 | Date: 2023/02/19
29 | Description: This module defines a UIControl class for Streamlit, which provides a consistent interface for creating and interacting with different types of UI controls. The class supports boolean, integer, float, and string data types.
30 | Disclaimer: This software is provided "as is" and without any express or implied warranties, including, without limitation, the implied warranties of merchantability and fitness for a particular purpose. The author and contributors of this module shall not be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
31 | """
32 | import time
33 |
34 | import streamlit as st
35 | from st_aggrid import GridOptionsBuilder, AgGrid, JsCode
36 | from datetime import datetime
37 | import base64
38 | import os
39 | import csv
40 | from PIL import Image
41 | import pandas as pd
42 | from .chatgpt_wrapper import ChatGPT
43 |
44 |
class WhipperUI:
    """Streamlit UI controller for running ChatGPT prompts as batch jobs."""

    # Shared ChatGPT wrapper instance; connected lazily on first submit.
    BOT = None
    # Font size substituted into the Ag-Grid row-style JS below.
    TABLE_FONTSIZE = "17px"
    # printf-style template resolving bundled files relative to the CWD.
    HOME_PATH = "./%s"
    # Seconds to wait before re-submitting a failed request (fed to time.sleep).
    WAITING_TIME = 10
    # CSV holding the prompt master table (Date, No, prompt, ids).
    PROMPT_PATH = HOME_PATH % "prompt_master.csv"
    # Page icon file name, resolved through HOME_PATH.
    ICON_FILE = "icon.png"
    # NOTE(review): despite the name this is a *directory* ("./buff/") holding
    # one <prompt_no>.csv result file per prompt (see _load_result).
    RESULT_FILE = HOME_PATH % "buff/"
    # Column names used in the result/review tables.
    GPT_RESULT_COL = "result"
    GPT_INPUT_COL = "input"
    CHECK_COL = "Is false"
    COMMENT_COL = "Comment"
    # Directory where uploaded input CSVs are cached per prompt.
    INPUT_FOLD = HOME_PATH % "inputs"
    # Ag-Grid cell renderer (JS) that shows CHECK_COL as a live checkbox.
    # NOTE(review): the attribute name is misspelled (CheckboxRenderer);
    # kept as-is so external references do not break.
    CHECKBOR_RENDDER = JsCode("""
        class CheckboxRenderer{

            init(params) {
                this.params = params;

                this.eGui = document.createElement('input');
                this.eGui.type = 'checkbox';
                this.eGui.checked = params.value;

                this.checkedHandler = this.checkedHandler.bind(this);
                this.eGui.addEventListener('click', this.checkedHandler);
            }

            checkedHandler(e) {
                let checked = e.target.checked;
                let colId = this.params.column.colId;
                this.params.node.setDataValue(colId, checked);
            }

            getGui(params) {
                return this.eGui;
            }

            destroy(params) {
                this.eGui.removeEventListener('click', this.checkedHandler);
            }
        }//end class
    """)

    # Row-style JS: rows whose `status` column equals 'Finished' get a green
    # background; the {fontSize} placeholder is replaced with TABLE_FONTSIZE
    # before the JsCode object is built.
    DEFAULT_CELL_JS = JsCode("""
        function(params) {
            params.columnApi.autoSizeColumns();
            if (params.data.hasOwnProperty('status')){
                if(params.data.status == 'Finished'){
                    return {
                        'color': 'black',
                        'backgroundColor': 'green',
                        'fontSize':'{fontSize}'
                    }
                }
            }

            return {
                'color': 'black',
                'backgroundColor': 'white',
                'fontSize':'{fontSize}'
            }

        }
    """.replace("{fontSize}", TABLE_FONTSIZE))
110 | def _set_up_page(self):
111 | im = Image.open(self.HOME_PATH % self.ICON_FILE)
112 | STREAMLIT_AGGRID_URL = "https://github.com/PablocFonseca/streamlit-aggrid"
113 | st.set_page_config(
114 | layout="centered",
115 | page_icon=im,
116 | page_title="Moomin Rakuten Offical Shop Dashboard(By ECD)-Search scraping"
117 | )
118 | st.title("Chat GPT batch job")
119 | self._set_background(self.HOME_PATH % 'background.png')
120 |
121 | def __init__(self):
122 | self._set_up_page()
123 | return
124 |
125 | @staticmethod
126 | def _get_base64(bin_file):
127 | """
128 | Returns the Base64-encoded representation of a binary file.
129 |
130 | :param bin_file: the path to the binary file to encode
131 | :return: the Base64-encoded string
132 | """
133 | with open(bin_file, 'rb') as f:
134 | data = f.read()
135 | return base64.b64encode(data).decode()
136 |
137 | def _set_background(self, png_file):
138 | """
139 | Sets the background image of the Streamlit app to the specified PNG file.
140 |
141 | :param png_file: the path to the PNG file to use as the background image
142 | """
143 | # Convert the PNG file to a Base64-encoded string
144 | bin_str = self._get_base64(png_file)
145 |
146 | # Define a CSS rule to set the background image of the app
147 | # to the Base64-encoded PNG file
148 | page_bg_img = '''
149 |
156 |
157 | ''' % bin_str
158 |
159 | # Apply the CSS rule to the Streamlit app
160 | st.markdown(page_bg_img, unsafe_allow_html=True)
161 |
162 | def _load_cache(self):
163 | """
164 | Loads the cache from the result file.
165 |
166 | :return: a pandas DataFrame containing the cache data, or None if the result file could not be loaded
167 | """
168 | try:
169 | # Try to load the result file as a pandas DataFrame
170 | return pd.read_csv(self.RESULT_FILE, index_col=False, encoding='utf-8-sig')
171 | except:
172 | # If there is an error, return None
173 | return None
174 |
175 | def _load_prompts(self):
176 | """
177 | Loads the prompts data from a CSV file.
178 |
179 | :return: a pandas DataFrame containing the prompts data
180 | """
181 | if os.path.isfile(self.PROMPT_PATH):
182 | # If the file exists, load the data as a pandas DataFrame
183 | prompts_df = pd.read_csv(self.PROMPT_PATH)
184 | prompts_df[["conversation_id", "parent_message_id"]].fillna("", inplace=True)
185 | else:
186 | # If the file doesn't exist, create an empty DataFrame with the default columns
187 | default_columns = ["Date", "No", "prompt", "conversation_id", "parent_message_id"]
188 | prompts_df = pd.DataFrame(columns=default_columns)
189 |
190 | return prompts_df
191 |
    def _create_table(self, data, check_col=None, comment_col=None, pagesize=100):
        """
        Creates an Ag-Grid table from a pandas DataFrame and returns the
        (possibly user-edited) data the grid hands back.

        NOTE(review): mutates the caller's DataFrame — an "index" column, if
        present, is dropped in place and re-attached to the returned frame.

        :param data: the pandas DataFrame to use as the data source for the table
        :param check_col: name of the column rendered as an editable checkbox, if any
        :param comment_col: the name of the column to use for comments, if any
        :param pagesize: number of rows shown per grid page
        :return: the grid's "data" frame, with the saved "index" column restored
        """
        # Stash and strip the "index" column so the grid never shows it.
        indexs = None
        if "index" in data.columns.values:
            indexs = data["index"]
            data.drop("index", axis=1, inplace=True)
        gb = GridOptionsBuilder.from_dataframe(data)

        # Configure the default column settings for the table
        gb.configure_default_column(
            min_column_width=120,  # Set the minimum column width to 120 pixels
            suppressMenu=True,  # Disable the column menu
            autoSizeColumns=True,  # Automatically size the columns to fit the data
            editable=False  # Make the cells non-editable
        )

        # Enable range selection for the table
        gb.configure_grid_options(enableRangeSelection=True)

        # Configure pagination settings for the table
        gb.configure_pagination(
            paginationPageSize=pagesize,  # Rows per page
            paginationAutoPageSize=False  # Disable automatic pagination
        )
        # Review columns: checkbox renderer for the check flag, free-text
        # editing for the comment.
        if check_col is not None:
            gb.configure_column(check_col, editable=True, cellRenderer=self.CHECKBOR_RENDDER)
        if comment_col is not None:
            gb.configure_column(comment_col, editable=True)

        grid_options = gb.build()
        # Row colouring (green for status == 'Finished') via the class-level JS.
        grid_options['getRowStyle'] = self.DEFAULT_CELL_JS
        # allow_unsafe_jscode is required for the JsCode callbacks above.
        result = AgGrid(data, gridOptions=grid_options, enable_enterprise_modules=True, allow_unsafe_jscode=True)[
            "data"]
        if indexs is not None:
            result["index"] = indexs
        return result
234 |
235 | def _load_result(self, result_no):
236 | """
237 | Loads the result data from a CSV file.
238 |
239 | :param result_no: the number of the result to load
240 | :return: a pandas DataFrame containing the result data
241 | """
242 | # Create the result file directory if it doesn't exist
243 | if not os.path.exists(self.RESULT_FILE):
244 | os.makedirs(self.RESULT_FILE)
245 |
246 | # Construct the file path for the specified result number
247 | file_path = os.path.join(self.RESULT_FILE, f"{result_no}.csv")
248 |
249 | if os.path.isfile(file_path):
250 | # If the file exists, load the data as a pandas DataFrame
251 | result_df = pd.read_csv(file_path)
252 | else:
253 | # If the file doesn't exist, create an empty DataFrame with the default columns
254 | default_columns = [self.GPT_RESULT_COL, self.GPT_INPUT_COL, self.CHECK_COL, self.COMMENT_COL]
255 | result_df = pd.DataFrame(columns=default_columns)
256 |
257 | return result_df
258 |
259 | @staticmethod
260 | def _list_prompts(prompts_df):
261 | """
262 | Lists the prompts from a pandas DataFrame and returns the selected prompt.
263 |
264 | :param prompts_df: the pandas DataFrame containing the prompts data
265 | :return: the selected prompt number
266 | """
267 | # Extract the prompt numbers from the DataFrame
268 | prompts_list = [no for no in prompts_df["No"]]
269 |
270 | # Display the prompts list in a Streamlit container
271 | st.markdown("### Prompts list:")
272 | placeholder = st.empty()
273 | placeholder.empty()
274 | with placeholder.container():
275 | # Allow the user to select a prompt from the list
276 | return st.selectbox('', prompts_list)
277 |
278 | def on_add(self, prompt, prompt_name):
279 | """
280 | Adds a new prompt to the prompts data and saves it to a CSV file.
281 |
282 | :param prompt: the prompt text to add
283 | :param prompt_name: the name of the prompt to add, if any
284 | """
285 | # Load the prompts data from the CSV file
286 | prompts_df = self._load_prompts()
287 |
288 | # Get the index of the new prompt
289 | index = len(prompts_df)
290 |
291 | if len(prompt_name) > 0:
292 | # If the prompt has a name, use it in the prompt number
293 | no = f"{prompt_name}_{index}"
294 | else:
295 | # Otherwise, use the index as the prompt number
296 | no = str(index)
297 |
298 | # Create a new row for the prompts DataFrame with the current date, prompt number, and prompt text
299 | new_row = {"Date": datetime.now().strftime('%Y-%m-%d'), "No": no, "prompt": prompt}
300 |
301 | # Add the new row to the top of the DataFrame
302 | prompts_df = pd.concat([pd.DataFrame(new_row, index=[0]), prompts_df]).reset_index(drop=True)
303 |
304 | # Save the updated prompts data to the CSV file
305 | prompts_df.to_csv(self.PROMPT_PATH, encoding='utf-8-sig', index=False)
306 |
307 | @staticmethod
308 | def reformat(text):
309 | """
310 | Replaces newline characters with a placeholder string.
311 |
312 | :param text: the text to reformat
313 | :return: the reformatted text
314 | """
315 | return text.replace("\n", "{pun}")
316 |
317 | @staticmethod
318 | def reformat_back(text):
319 | """
320 | Replaces a placeholder string with newline characters.
321 |
322 | :param text: the text to reformat
323 | :return: the reformatted text
324 | """
325 | return text.format(pun="\n")
326 |
327 | def on_set(self, prompt_no, prompt_text):
328 | """
329 | Sets a prompt text for a specified prompt number and saves it to a CSV file.
330 |
331 | :param prompt_no: the number of the prompt to set the text for
332 | :param prompt_text: the new text for the prompt
333 | """
334 | # Load the prompts data from the CSV file
335 | prompts_df = self._load_prompts()
336 |
337 | if len(prompts_df["No"] == prompt_no) == 0:
338 | # If the prompt number doesn't exist in the DataFrame, add a new prompt with the specified text
339 | self.on_add(prompt_text, prompt_name=prompt_no)
340 | return
341 | print(prompts_df)
342 | # Create a new row for the prompts DataFrame with the current date, prompt number, and prompt text
343 | new_row = prompts_df.loc[prompts_df["No"] == prompt_no]
344 | new_row["Date"] = datetime.now().strftime('%Y-%m-%d')
345 | new_row["prompt"] = prompt_text
346 | prompts_df = pd.concat([new_row, prompts_df[prompts_df["No"] != prompt_no]])
347 | # Save the updated prompts data to the CSV file
348 | prompts_df.to_csv(self.PROMPT_PATH, encoding='utf-8-sig', index=False)
349 |
350 | def on_delete_cache(self, result_no):
351 | """
352 | Deletes a cache file for a specified result number.
353 |
354 | :param result_no: the number of the result to delete the cache file for
355 | """
356 | cache_name = os.path.join(self.RESULT_FILE, f"{result_no}.csv")
357 | try:
358 | os.remove(cache_name)
359 | except OSError as error:
360 | print(f"An error occurred: {error}")
361 |
    def on_auth(self):
        """
        Triggers the ChatGPT authentication flow.

        Instantiates the wrapper with headless=False, which presumably opens
        a visible browser window so the user can log in — confirm against
        chatgpt_wrapper.ChatGPT. (The previous docstring was copy-pasted from
        on_delete_cache and wrongly described cache deletion.)
        """
        ChatGPT(headless=False)
369 |
370 | def on_delete_prompt(self, prompt_no):
371 | """
372 | Deletes a prompt with a specified prompt number from the prompts data and saves the updated data to a CSV file.
373 |
374 | :param prompt_no: the number of the prompt to delete
375 | """
376 | # Load the prompts data from the CSV file
377 | prompts_df = self._load_prompts()
378 |
379 | # Remove the row with the specified prompt number from the DataFrame
380 | prompts_df = prompts_df[prompts_df["No"] != prompt_no]
381 |
382 | # Save the updated prompts data to the CSV file
383 | prompts_df.to_csv(self.PROMPT_PATH, encoding='utf-8-sig', index=False)
384 |
385 | @staticmethod
386 | def is_csv_format(s):
387 | try:
388 | _ = [_ for _ in csv.reader([s])]
389 | return True
390 | except csv.Error:
391 | return False
392 |
393 | def save_review_data(self, data, no):
394 | """
395 | Saves a DataFrame with checked results to a cache file.
396 |
397 | :param data: the DataFrame with checked results to save
398 | :param result_no: the number of the result to save the checked data for
399 | """
400 | cache_name = ("%s%s.csv") % (self.RESULT_FILE, no)
401 | data[self.CHECK_COL] = [True if s == True else False for s in data[self.CHECK_COL]]
402 | data.to_csv(cache_name, encoding='utf-8-sig', index=False)
403 |
404 | def _load_saved_input_data(self, selected_prompt_no):
405 | if selected_prompt_no is not None:
406 | try:
407 | return pd.read_csv("%s/input_%s.csv" % (self.INPUT_FOLD, selected_prompt_no))
408 | except:
409 | pass
410 | return None
411 |
    def _update_reviewdata(self, data_old, data_new):
        """
        Merges reviewed check/comment edits from data_new back into data_old.

        :param data_old: the full result DataFrame (may contain rows that were
                         filtered out of the review grid)
        :param data_new: the (possibly filtered) frame returned by the grid,
                         carrying the user's check/comment edits
        :return: data_old with CHECK_COL/COMMENT_COL updated from data_new
                 wherever the reviewed frame has a value
        """
        # Only the review columns from the edited frame matter here.
        data_new = data_new[[self.CHECK_COL, self.COMMENT_COL]]
        # Left join on the index; rows absent from the review get NaN in the
        # "_new" columns.
        result = data_old.join(data_new, how="left", rsuffix='_new')
        result = result.fillna("")
        new_checks = []
        new_comments = []
        new_check_col = self.CHECK_COL + '_new'
        new_comment_col = self.COMMENT_COL + '_new'
        for i, row in result.iterrows():
            # `x == x` is False only for NaN, so each check keeps the reviewed
            # value when present and falls back to the old one otherwise.
            # (After the fillna("") above the NaN branch should not trigger —
            # kept as a belt-and-braces guard.)
            if row[new_check_col] == row[new_check_col]:
                new_checks += [row[new_check_col]]
            else:
                new_checks += [row[self.CHECK_COL]]
            if row[new_comment_col] == row[new_comment_col]:
                new_comments += [row[new_comment_col]]
            else:
                new_comments += [row[self.COMMENT_COL]]
        result[self.CHECK_COL] = new_checks
        result[self.COMMENT_COL] = new_comments
        # Drop the temporary "_new" columns now the values are merged.
        result = result.drop([new_check_col, new_comment_col], axis=1)
        return result
433 |
434 | def _submit(self, prompt, conversation_id, parent_message_id):
435 | res = self.BOT.ask(prompt)
436 | failed = False
437 | while res is None:
438 | st.error("Process failed will resubmit it after %d minutes" % (self.WAITING_TIME // 60))
439 | st.empty()
440 | time.sleep(self.WAITING_TIME)
441 | self.BOT.reset()
442 | res = self.BOT.ask(prompt,
443 | conversation_id,
444 | parent_message_id)
445 | failed = True
446 | if failed:
447 | st.success("Process resumed!")
448 | return res
449 |
450 | def on_do(self, prompt_id, data, target_column, no_explain, do_false_only):
451 | """
452 | Uses the ChatGPT API to generate responses for prompts in a DataFrame.
453 |
454 | :param prompt: the prompt text to use for generating responses
455 | :param prompt_id: the id of the prompt
456 | :param conversation_id: the id of the conversation
457 | :param data: the DataFrame with prompts to generate responses for
458 | :param target_column: the column of the DataFrame with the prompts to use for generating responses
459 | :param no_explain: whether to prompt the user to avoid including explanations in their responses
460 | """
461 | if self.BOT is None:
462 | with st.spinner('Wait for connect to chatGPT...'):
463 | self.BOT = ChatGPT()
464 | prompts_df = self._load_prompts()
465 | setting = prompts_df[prompts_df["No"] == prompt_id]
466 |
467 | if len(setting) == 1:
468 | prompt = setting['prompt'].values[0]
469 | conversation_id = setting['conversation_id'].values[0]
470 | parent_message_id = setting['parent_message_id'].values[0]
471 | else:
472 | st.error("There is multiple prompts but currently we can only do one.")
473 | return
474 | if len(prompt) == 0:
475 | st.error("There is no prompt to do.")
476 |
477 | cache_name = ("%s%s.csv") % (self.RESULT_FILE, prompt_id)
478 | processed_data = self._load_result(prompt_id)
479 | if do_false_only:
480 | progress_bar = st.progress(0)
481 | condition = processed_data[self.CHECK_COL] == True
482 | row_indexs = processed_data[condition].index
483 | i = 0
484 | num = len(row_indexs)
485 | for row_index in row_indexs:
486 | progress_bar.progress(i * 100 // num)
487 | prompt_text = "%s\n\t\t%s" % (prompt, processed_data.at[row_index, self.GPT_INPUT_COL])
488 | processed_data.at[row_index, self.GPT_RESULT_COL] = self._submit(prompt_text, conversation_id,
489 | parent_message_id)
490 | processed_data.to_csv(cache_name, encoding='utf-8-sig', index=False)
491 | i += 1
492 | return
493 | if data is not None and target_column is not None:
494 | prompts_inputs = [text for text in data[target_column]]
495 | else:
496 | prompts_inputs = [""]
497 |
498 | if data is not None:
499 | num = len(data)
500 | i = len(processed_data)
501 | else:
502 | num = 1
503 | i = 0
504 | bar_index = i * 100 // num
505 | progress_bar = st.progress(bar_index)
506 | for prompts_input in prompts_inputs[i: ]:
507 | prompt_text = "%s\n\t\t%s" % (prompt, prompts_input)
508 | res = self._submit(prompt_text, conversation_id, parent_message_id)
509 | conversation_id = self.BOT.get_conversation_id()
510 | parent_message_id = self.BOT.get_parent_message_id()
511 | if no_explain and not self.is_csv_format(res):
512 | prompt_text = "Do not include any explanation in your reply, please redo the \n\t\t%s." % prompts_input
513 | res = self._submit(prompt_text, conversation_id, parent_message_id)
514 | new_s = i * 100 // num
515 | if new_s > bar_index:
516 | progress_bar.progress(bar_index)
517 | bar_index = new_s
518 | i += 1
519 | new_row = pd.DataFrame([[prompts_input, res]], columns=[self.GPT_INPUT_COL, self.GPT_RESULT_COL])
520 | processed_data = pd.concat([processed_data, new_row],
521 | ignore_index=True)
522 | processed_data.to_csv(cache_name, encoding='utf-8-sig', index=False)
523 | # Create a new row for the prompts DataFrame with the current date, prompt number, and prompt text
524 | new_row = {"Date": datetime.now().strftime('%Y-%m-%d'),
525 | "No": prompt_id,
526 | "prompt": prompt,
527 | "conversation_id": conversation_id,
528 | "parent_message_id": parent_message_id
529 | }
530 | # Update the row with the new prompt text in the DataFrame
531 | prompts_df.loc[prompts_df["No"] == prompt_id] = list(new_row.values())
532 | # Save the updated prompts data to the CSV file
533 | prompts_df.to_csv(self.PROMPT_PATH, encoding='utf-8-sig', index=False)
534 | progress_bar.empty()
535 |
    def show_prompt_ui(self):
        """
        Builds the whole single-page UI: prompt selection and editing, input
        upload, action buttons, input preview, and the result review grid.
        """
        prompts_df = self._load_prompts()
        selected_prompt_no = self._list_prompts(prompts_df)
        setting = prompts_df[prompts_df["No"] == selected_prompt_no]
        setting = setting[["prompt"]]
        if len(setting) > 0:
            # Pre-fill the editor with the stored text of the selected prompt.
            setting = setting.iloc[0, :].tolist()
            prompt_default = setting[0]

        else:
            prompt_default = ""
        st.markdown("##### Mode")
        mode = st.radio("", ('Single shoot', 'Fully Automatic(Batch job)'))
        uploaded_file = None
        no_explain = False
        if mode == 'Fully Automatic(Batch job)':
            # Batch mode needs an input CSV and offers the "no explanation"
            # re-ask option.
            file_select, no_explain_check = st.columns([3, 1])
            no_explain = no_explain_check.checkbox("No explanation in the reply", value=True,
                                                   key=None)
            uploaded_file = file_select.file_uploader("Select a CSV file")
        result_data = self._load_result(selected_prompt_no)
        # Fall back to the input saved on a previous run, if any.
        data = self._load_saved_input_data(selected_prompt_no)
        prompt_name_title, select_column_title = st.columns(2)
        prompt_name_input, select_column = st.columns(2)
        target_column = None
        if uploaded_file is not None:
            if ".CSV" in uploaded_file.name.upper():
                try:
                    # NOTE(review): this bare attribute access is a no-op —
                    # probably leftover debugging.
                    uploaded_file.name
                    data = pd.read_csv(uploaded_file)
                    if selected_prompt_no is not None:
                        # Remember the upload so it survives Streamlit reruns.
                        data.to_csv("%s/input_%s.csv" % (self.INPUT_FOLD, selected_prompt_no), index=False)
                except:
                    # Best-effort: an unreadable upload leaves `data` as the
                    # previously saved input (or None).
                    pass

            else:
                st.error("Only CSV files are supported currently.")
        if data is not None:
            select_column_title.markdown("##### Select column you want to process")
            target_column = select_column.selectbox(
                '',
                data.columns.values)
        # NOTE(review): the two labels below contain typos ("name you prompt",
        # "write you prompt" → "your") — runtime strings left untouched here.
        prompt_name_title.markdown("##### Please name you prompt")
        prompt_name = prompt_name_input.text_input('',
                                                   "prompt")
        st.markdown("##### Please write you prompt")
        prompt = st.text_area('',
                              prompt_default,
                              height=200)
        # Button layout: two rows of three, plus a download slot.
        auth_bth, add_btn, process_btn, = st.columns(3)
        set_btn, delete_btn_cache, delete_btn_prompt = st.columns(3)
        download_btn, _ = st.columns(2)
        add_btn.button('Add',
                       on_click=self.on_add,
                       args=(prompt, prompt_name))
        set_btn.button('Update',
                       on_click=self.on_set,
                       args=(selected_prompt_no,
                             prompt))

        delete_btn_prompt.button('Delete Prompt',
                                 on_click=self.on_delete_prompt,
                                 args=(selected_prompt_no,))

        delete_btn_cache.button('Delete Cached result',
                                on_click=self.on_delete_cache,
                                args=(selected_prompt_no,))
        auth_bth.button('Auth',
                        on_click=self.on_auth,
                        args=())
        st.markdown("[Go to chatGPT](https://chat.openai.com/chat)")
        if data is not None:
            st.markdown("### The input data ")
            self._create_table(data, pagesize=10)
        show_false_only = False
        if len(result_data) > 0:
            # Review section: show cached results and collect check/comment
            # edits from the grid.
            st_title, show_false_only_cb = st.columns(2)
            st_title.markdown("### The processed result")
            show_false_only = show_false_only_cb.checkbox("Show only false data", value=False)
            result_data = result_data[[self.GPT_INPUT_COL, self.GPT_RESULT_COL, self.CHECK_COL, self.COMMENT_COL]]
            result_data.reset_index(inplace=True)
            if show_false_only:
                data_table = result_data[result_data[self.CHECK_COL]]
            else:
                data_table = result_data
            data_review = self._create_table(data_table, self.CHECK_COL, self.COMMENT_COL)
            # Fold the grid edits back into the full table before saving.
            result_data = self._update_reviewdata(result_data, data_review)
            download_btn.download_button(
                label="Download",
                data=result_data.to_csv().encode('utf-8'),
                file_name="%s.csv" % (selected_prompt_no),
                mime='text/csv')
            self.save_review_data(result_data, selected_prompt_no)

        # Submit runs on_do; passing show_false_only doubles as the
        # "re-run only false rows" switch.
        process_btn.button('Submit',
                           on_click=self.on_do,
                           args=(selected_prompt_no,
                                 data,
                                 target_column,
                                 no_explain,
                                 show_false_only))
637 |
--------------------------------------------------------------------------------