├── .github └── workflows │ └── main.yml.backup ├── .gitignore ├── .pypirc ├── CONTRIBUTING.md ├── LICENCE ├── README.md ├── pyproject.toml ├── release.txt ├── requirements.txt └── src └── gptchatbotcli ├── __init__.py ├── database.py ├── history.py ├── index.py ├── openapi_controller.py ├── payloads.py └── services.py /.github/workflows/main.yml.backup: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v2 15 | 16 | - name: Get version 17 | run: echo "VERSION=$(git describe --tags)" >> $GITHUB_ENV 18 | # run: echo ::set-env name=VERSION::$(git describe --tags) 19 | 20 | - name: Build 21 | run: | 22 | mkdir release 23 | tar --exclude="./release" -cvzf release/release.tar.gz . 24 | mv release/release.tar.gz . 25 | 26 | - name: Create Release 27 | uses: softprops/action-gh-release@v1 28 | 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .venv 3 | build 4 | gpt_chatbot_cli.egg-info 5 | dist 6 | db 7 | ``` 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | share/python-wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | cover/ 60 | 61 | # Translations 62 | *.mo 63 | *.pot 64 | 65 | # Django stuff: 66 | *.log 67 | local_settings.py 68 | db.sqlite3 69 | db.sqlite3-journal 70 | 71 | # Flask stuff: 72 | instance/ 73 | .webassets-cache 74 | 75 | # Scrapy stuff: 76 | .scrapy 77 | 78 | # Sphinx documentation 79 | docs/_build/ 80 | 81 | # PyBuilder 82 | .pybuilder/ 83 | target/ 84 | 85 | # Jupyter Notebook 86 | .ipynb_checkpoints 87 | 88 | # IPython 89 | profile_default/ 90 | ipython_config.py 91 | 92 | # pyenv 93 | # For a library or package, you might want to ignore these files since the code is 94 | # intended to run in multiple environments; otherwise, check them in: 95 | # .python-version 96 | 97 | # pipenv 98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 101 | # install all needed dependencies. 102 | #Pipfile.lock 103 | 104 | # poetry 105 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 106 | # This is especially recommended for binary packages to ensure reproducibility, and is more 107 | # commonly ignored for libraries. 
108 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
109 | #poetry.lock
110 | 
111 | # pdm
112 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113 | #pdm.lock
114 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
115 | # in version control.
116 | # https://pdm.fming.dev/#use-with-ide
117 | .pdm.toml
118 | 
119 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
120 | __pypackages__/
121 | 
122 | # Celery stuff
123 | celerybeat-schedule
124 | celerybeat.pid
125 | 
126 | # SageMath parsed files
127 | *.sage.py
128 | 
129 | # Environments
130 | .env
131 | .venv
132 | env/
133 | venv/
134 | ENV/
135 | env.bak/
136 | venv.bak/
137 | 
138 | # Spyder project settings
139 | .spyderproject
140 | .spyproject
141 | 
142 | # Rope project settings
143 | .ropeproject
144 | 
145 | # mkdocs documentation
146 | /site
147 | 
148 | # mypy
149 | .mypy_cache/
150 | .dmypy.json
151 | dmypy.json
152 | 
153 | # Pyre type checker
154 | .pyre/
155 | 
156 | # pytype static type analyzer
157 | .pytype/
158 | 
159 | # Cython debug symbols
160 | cython_debug/
161 | 
162 | # PyCharm
163 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
164 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
165 | # and can be added to the global gitignore or merged into this file. For a more nuclear
166 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
167 | #.idea/
168 | 
--------------------------------------------------------------------------------
/.pypirc:
--------------------------------------------------------------------------------
1 | [distutils]
2 | index-servers = pypi
3 | [pypi]
4 | repository = https://upload.pypi.org/legacy/
5 | username = rukh-debug
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to gpt-chatbot-cli
2 | 
3 | Thank you for your interest in contributing to gpt-chatbot-cli! Please take a moment to review these guidelines before submitting your contribution.
4 | 
5 | ## Reporting Issues
6 | 
7 | If you find a bug or have a feature request, please open an issue on the [GitHub issue tracker](https://github.com/rukh-debug/gpt-chatbot-cli/issues). Please include as much information as possible.
8 | 
9 | ## Contributing Code
10 | 
11 | If you would like to contribute code to gpt-chatbot-cli, please follow these steps:
12 | 
13 | 1. Fork the repository on GitHub
14 | 2. Clone your fork to your local machine
15 | 3. Create a new branch for your changes
16 | 4. Make your changes and commit them
17 | 5. Push your changes to your fork
18 | 6. Open a pull request on the [GitHub repository](https://github.com/rukh-debug/gpt-chatbot-cli/) and describe your changes
19 | 
20 | ## License
21 | 
22 | By contributing to gpt-chatbot-cli, you agree that your contributions will be licensed under its MIT license.
23 | 
24 | ## Contact
25 | 
26 | If you have any questions, feel free to contact me on [Twitter](https://twitter.com/getrubenk) or at https://rubenk.com.np
27 | 
--------------------------------------------------------------------------------
/LICENCE:
--------------------------------------------------------------------------------
1 | Copyright (c) chatgpt-cli
2 | 
3 | Permission is hereby granted, free of charge, to any person obtaining a
4 | copy of this software and associated documentation files (the "Software"),
5 | to deal in the Software without restriction, including without limitation
6 | the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 | and/or sell copies of the Software, and to permit persons to whom the
8 | Software is furnished to do so, subject to the following conditions:
9 | 
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 | DEALINGS IN THE SOFTWARE.
20 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # gpt-chatbot-cli
2 | 
3 | A minimal CLI prompt for chatting with ChatGPT. It keeps the conversation context during a session and saves your chat history for future use.
4 | 
5 | ## Installation
6 | 
7 | To use gpt-chatbot-cli, you'll first need an OpenAI API key.
8 | If you don't have one yet, visit the [OpenAI website](https://platform.openai.com/account/api-keys) to sign up and generate one.
9 | 
10 | The CLI reads the key from the `OPENAI_API_KEY` environment variable.
11 | 
12 | 
13 | To set it, add the following line to your `~/.bashrc` or `~/.zshrc` file:
14 | 
15 | ```bash
16 | export OPENAI_API_KEY=<your-api-key>
17 | 
18 | ```
19 | Be sure to replace `<your-api-key>` with your actual API key.
20 | 
21 | Then source the file with `source ~/.bashrc` or `source ~/.zshrc`.
22 | 
23 | 
24 | ---
25 | 
26 | You may now install the package using pip:
27 | 
28 | ```bash
29 | $ pip3 install gpt-chatbot-cli
30 | ```
31 | 
32 | For Arch users, this package is also available on the [AUR](https://aur.archlinux.org/packages/chatgpt-cli-git) under the name chatgpt-cli-git:
33 | 
34 | ```bash
35 | $ paru -S chatgpt-cli-git
36 | ```
37 | 
38 | ## Usage
39 | 
40 | Once you have installed gpt-chatbot-cli, you can run it by typing:
41 | ```bash
42 | $ gpt-chatbot-cli
43 | ```
44 | 
45 | This will start the CLI prompt, and you can begin chatting with the AI bot.
46 | 
47 | You can also pass various options to the gpt-chatbot-cli command to customize its behavior. Here are the available options:
48 | 
49 | ```bash
50 |   -k, --api_key TEXT       Openai API key. If not provided, will prompt for it
51 |                            or use the environment variable OPENAI_API_KEY.
52 |   -m, --model TEXT         Model to use for text generation | (default:
53 |                            gpt-3.5-turbo)
54 |   -t, --temperature FLOAT  Temperature for text generation | (default: 0.9)
55 |   -p, --preset TEXT        Preset mode to use for text generation | (default:
56 |                            Chat) Available presets: Chat, Q&A, Grammar
57 |                            Correction, Eli5, Custom
58 |   -hs, --history           Show chat history picker | (default: False)
59 |   -h, --help               Show this message and exit.
60 | ```
61 | 
62 | ## Demo
63 | 
64 | [![asciicast](https://asciinema.org/a/9L0MjDExrMFb0XhBbqYaXBBWL.svg)](https://asciinema.org/a/9L0MjDExrMFb0XhBbqYaXBBWL)
65 | 
66 | 
67 | 
68 | ## Dependencies
69 | 
70 | gpt-chatbot-cli depends on the following Python packages:
71 | 
72 | - openai
73 | - prompt-toolkit
74 | - termcolor
75 | - tinydb
76 | - click
77 | 
78 | ## Contributing
79 | 
80 | Contributions are welcome! Please read the [contributing guidelines](CONTRIBUTING.md) first.
81 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=61.0", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 | 
5 | [project]
6 | name = "gpt-chatbot-cli"
7 | version = "0.3.4"
8 | authors = [
9 |   { name="Ruben Kharel", email="talkto@rubenk.com.np" },
10 | ]
11 | description = "A minimal chatgpt cli"
12 | readme = "README.md"
13 | license = {file = "LICENCE"}
14 | keywords = ["chatgpt", "gpt-chatbot", "chatgpt-cli"]
15 | 
16 | classifiers = [
17 |     'Development Status :: 4 - Beta',
18 |     "Intended Audience :: End Users/Desktop",
19 |     "License :: OSI Approved :: MIT License",
20 |     'Programming Language :: Python :: 3.9',
21 |     'Topic :: Text Processing :: Linguistic',
22 |     'Topic :: Utilities',
23 | ]
24 | dependencies = [
25 |     "openai==0.27.4",
26 |     'prompt-toolkit==3.0.38',
27 |     'termcolor==2.2.0',
28 |     'tinydb==4.7.1',
29 |     'click==8.1.3'
30 | ]
31 | 
32 | [project.urls]
33 | Homepage = "https://github.com/rukh-debug/gpt-chatbot-cli"
34 | "Bug Tracker" = "https://github.com/rukh-debug/gpt-chatbot-cli/issues"
35 | 
36 | [project.scripts]
37 | gpt-chatbot-cli = "gptchatbotcli.index:main"
--------------------------------------------------------------------------------
/release.txt:
--------------------------------------------------------------------------------
1 | # Build
2 | python -m build
3 | 
4 | # install locally to test
5 | python -m pip install dist/whatever.whl
6 | python -m pip install dist/whatever.tar.gz
7 | 
8 | # publish
9 | twine upload dist/* --verbose
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp==3.8.4
2 | aiosignal==1.3.1
3 | async-timeout==4.0.2
4 | attrs==22.2.0
5 | certifi==2022.12.7
6 | charset-normalizer==3.1.0
7 | click==8.1.3
8 | frozenlist==1.3.3
9 | idna==3.4
10 | multidict==6.0.4
11 | openai==0.27.4
12 | prompt-toolkit==3.0.38
13 | requests==2.28.2
14 | termcolor==2.2.0
15 | tinydb==4.7.1
16 | tqdm==4.65.0
17 | urllib3==1.26.15
18 | wcwidth==0.2.6
19 | yarl==1.8.2
20 | 
--------------------------------------------------------------------------------
/src/gptchatbotcli/__init__.py:
--------------------------------------------------------------------------------
1 | """ChatGPT CLI
2 | A bloat-free CLI for ChatGPT.
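
The console script `gpt-chatbot-cli` defined in pyproject.toml maps to
`gptchatbotcli.index:main`. Chat history is persisted with TinyDB in
`~/.gpt_chatbot_chat_history.json` (see `gptchatbotcli.database`).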
3 | """ 4 | 5 | __version__ = "0.3.4" 6 | __author__ = "rukh-debug" 7 | -------------------------------------------------------------------------------- /src/gptchatbotcli/database.py: -------------------------------------------------------------------------------- 1 | from tinydb import TinyDB, Query 2 | import time 3 | import os 4 | 5 | home_dir = os.path.expanduser("~") 6 | chat_history_path = os.path.join(home_dir, ".gpt_chatbot_chat_history.json") 7 | if not os.path.exists(chat_history_path): 8 | with open(chat_history_path, "x") as f: 9 | f.write("") 10 | db = TinyDB(chat_history_path) 11 | 12 | def init_chat_history(message, preset, model): 13 | # generate random string 14 | _id = str(time.time()).replace(".", "") 15 | # print("Saving chat history...") 16 | db.insert({"_id": _id, "preset": preset, "title": "Untitled"+"_"+_id ,"model": model, 'messages': message}) 17 | return _id 18 | 19 | def update_chat_history(id, message): 20 | db.update({'messages': message}, Query()._id == id) 21 | return True 22 | 23 | def get_all_chat_history(): 24 | return db.all() 25 | 26 | def defind_chat_title(id, title): 27 | db.update({'title': title}, Query()._id == id) 28 | -------------------------------------------------------------------------------- /src/gptchatbotcli/history.py: -------------------------------------------------------------------------------- 1 | from gptchatbotcli.database import get_all_chat_history 2 | 3 | def chat_history_picker(): 4 | chat_history = get_all_chat_history() 5 | if (len(chat_history) == 0): 6 | print("No chat history found") 7 | exit() 8 | else: 9 | print("Pick a chat history:") 10 | for i in range(len(chat_history)): 11 | print(f"{i+1}. {chat_history[i]['title']}") 12 | chat_history_id = int(input("Enter a number: ")) 13 | return chat_history[chat_history_id-1] 14 | 15 | -------------------------------------------------------------------------------- /src/gptchatbotcli/index.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import termcolor 4 | from prompt_toolkit import prompt 5 | from prompt_toolkit.styles import Style 6 | import click 7 | 8 | from gptchatbotcli.payloads import presets, chat_complitions_models 9 | from gptchatbotcli.database import init_chat_history, update_chat_history, defind_chat_title 10 | from gptchatbotcli.history import chat_history_picker 11 | from gptchatbotcli.services import print_char_by_char, print_whole_but_color 12 | from gptchatbotcli.openapi_controller import check_api_key_validity, chat_api_call, title_gen 13 | 14 | @click.command() 15 | @click.option('--api_key', '-k', help='Openai API key. If not provided, will prompt for it or use the environment variable OPENAI_API_KEY.') 16 | @click.option('--model', '-m', default='gpt-3.5-turbo', help='Model to use for text generation | (default: gpt-3.5-turbo)') 17 | @click.option('--temperature', '-t', default=0.9, type=click.FLOAT, help='Temperature for text generation | (default: 0.9)') 18 | @click.option('--preset', '-p', default='chat', help='Preset mode to use for text generation | (default: Chat) \nAvailable presets: Chat, Q&A, Grammar Correction, Eli5, Custom') 19 | @click.option('--history', '-hs', default=False, is_flag=True, help='Show chat history | (default: False)') 20 | @click.help_option('--help', '-h') 21 | def main(api_key, model, temperature, preset, history): 22 | """ 23 | A CLI for OpenAI's GPT-3 API. 24 | Chat with a bot, ask questions, correct grammar, summarize text, and more. 
25 | 26 | \b 27 | Examples: 28 | gpt-chatbot-cli 29 | gpt-chatbot-cli --api_key=YOUR_API_KEY 30 | gpt-chatbot-cli --api_key=YOUR_API_KEY --model=text-davinci-003 --temperature=0.7 31 | gpt-chatbot-cli -m gpt-4 -t 0.8 -p "q&a" 32 | """ 33 | 34 | def api_key_helper(): 35 | return [('class:api-key-helper', 'Set the environment variable OPENAI_API_KEY to avoid further prompts. ')] 36 | 37 | 38 | def chat_prompt_helper(on, message): 39 | return [('class:chat-prompt-helper', "Mode: "+on+"\n"+message)] 40 | 41 | style = Style.from_dict({ 42 | # make a gruvbox theme for this 43 | 'api-key-helper': '#fc802d bg:#282828 bold', 44 | # a compatable color for the prompt 45 | 'chat-prompt-helper': 'bg:#000000 #ffffff', 46 | }) 47 | 48 | # import api key 49 | openai_api_key = os.environ.get("OPENAI_API_KEY") or api_key 50 | 51 | if not openai_api_key: 52 | try: 53 | openai_api_key = prompt("Please enter your OpenAI API key: ", bottom_toolbar=api_key_helper, style=style) 54 | check_api_key_validity(openai_api_key, "not-prompt") 55 | except KeyboardInterrupt: 56 | print("Exiting...") 57 | exit(0) 58 | else: 59 | check_api_key_validity(openai_api_key, "prompt") 60 | 61 | # set model and temperature 62 | lang_model = model 63 | config_temperature = temperature 64 | 65 | # Initialize conversation_history 66 | conversation_history = '' 67 | messages = [] 68 | _id = None 69 | 70 | try: 71 | # saving chat history initialize 72 | if history: 73 | # print("Load chat history... and load preset") 74 | historyx = chat_history_picker() 75 | chosen_preset = historyx["preset"] 76 | _id = historyx["_id"] 77 | messages = historyx["messages"] 78 | conversation_history = historyx["messages"] 79 | lang_model = historyx["model"] 80 | else: 81 | chosen_preset = preset 82 | # replace chosen_preset with preset's actual name with the CASE from presets dictionary 83 | 84 | # create a list of lowercase keys from presets dictionary 85 | preset_keys = [key.lower() for key in presets.keys()] 86 | 87 | # find the index of the lowercase version of chosen_preset 88 | index = preset_keys.index(chosen_preset.lower()) 89 | 90 | # get the actual key name from presets dictionary using the index 91 | chosen_preset = list(presets.keys())[index] 92 | 93 | # Initialize chat history 94 | _id = init_chat_history(messages, chosen_preset, lang_model) 95 | 96 | # insert preset message to chat history 97 | messages.append({ 98 | "role": "system", "content": presets[chosen_preset]["message"] 99 | }) 100 | 101 | # update chat history with new initialized message 102 | update_chat_history(_id, messages) 103 | 104 | # we will take care of conversation history later 105 | conversation_history += presets[chosen_preset]["message"] 106 | 107 | # Replace #END# and #START# with preset's end and start's string if available 108 | if "inject" in presets[chosen_preset] and presets[chosen_preset]["inject"]["state"]: 109 | state = True 110 | end_string = presets[chosen_preset]["inject"]["end"] 111 | start_string = presets[chosen_preset]["inject"]["start"] 112 | else: 113 | state = False 114 | end_string = ">" 115 | start_string = ">" 116 | 117 | # start chat loop 118 | count = 0 119 | while True: 120 | # render history on term before letting user input new crap 121 | if (count == 0 and history): 122 | if lang_model in chat_complitions_models: 123 | x = 0 124 | for message in messages[1:]: 125 | if (x%2 == 0): 126 | print(end_string, message["content"]) 127 | else: 128 | print_whole_but_color(start_string, message["content"]) 129 | x += 1 130 | else: 131 | 
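                    # Completion-style (non-chat) models store the whole exchange as a
                    # single prompt string, so the saved history is printed back as-is.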
print("---SORRY FOR BAD RENDERING, WILL MAKE IT BETTER ON NEXT PATCH---") 132 | print(conversation_history) 133 | 134 | 135 | user_input = prompt(end_string + " ", bottom_toolbar=chat_prompt_helper(chosen_preset, presets[chosen_preset]["message"]), style=style) 136 | if (count == 0 and not history): 137 | title = title_gen(user_input) 138 | defind_chat_title(_id, title) 139 | 140 | if user_input.lower() in [":q","exit", ":wq", "exitgpt", "exit()"]: 141 | break 142 | 143 | 144 | if lang_model in chat_complitions_models: 145 | messages.append({ 146 | "role": "user", "content": user_input 147 | }) 148 | 149 | response = chat_api_call(lang_model, messages) 150 | messages.append({ 151 | "role": response.choices[0].message.role, "content": response.choices[0].message.content 152 | }) 153 | update_chat_history(_id, messages) 154 | print_char_by_char(start_string, response.choices[0].message.content) 155 | else: 156 | response = chat_api_call(lang_model, conversation_history + end_string + user_input + "\n" + start_string, config_temperature, 1024) 157 | if (state): 158 | conversation_history += end_string + user_input + "\n" + response.choices[0].text + "\n" 159 | else: 160 | conversation_history = presets[chosen_preset]["message"] 161 | update_chat_history(_id, conversation_history) 162 | print_char_by_char(start_string, response.choices[0].text) 163 | count += 1 164 | 165 | except Exception as e: 166 | print(termcolor.colored(f"Error: {e}", 'light_red')) 167 | exit(1) 168 | except KeyboardInterrupt: 169 | print(termcolor.colored(f"Keyboard Interrupt, Exiting...", 'light_red')) 170 | exit(0) 171 | 172 | 173 | if __name__ == '__main__': 174 | main() 175 | -------------------------------------------------------------------------------- /src/gptchatbotcli/openapi_controller.py: -------------------------------------------------------------------------------- 1 | import openai 2 | import termcolor 3 | 4 | from gptchatbotcli.payloads import chat_complitions_models 5 | 6 | 7 | def check_api_key_validity(api_key, where): 8 | if(where == "prompt"): 9 | print("Found env variable OPENAI_API_KEY") 10 | print("Checking for validity") 11 | try: 12 | openai.api_key = api_key 13 | openai.Model.list() 14 | print(termcolor.colored(f"API key is valid", 'light_green', attrs=["bold"])) 15 | except openai.OpenAIError as e: 16 | print(termcolor.colored(f"Invalid API key", 'light_red', attrs=["bold"]) + "\nGrab your API key from: "+termcolor.colored(f"https://platform.openai.com/account/api-keys", 'light_blue', attrs=["underline"])) 17 | exit() 18 | # how to make parameter optional, write in below line 19 | 20 | def chat_api_call(engine, messages, temperature=None, max_tokens=None): 21 | if engine in chat_complitions_models: 22 | response = openai.ChatCompletion.create( 23 | model=engine, 24 | messages=messages, 25 | # temperature=temperature, 26 | # max_tokens=max_tokens, 27 | ) 28 | return response 29 | else: 30 | response = openai.Completion.create( 31 | engine=engine, 32 | prompt=messages, 33 | temperature=temperature, 34 | max_tokens=max_tokens, 35 | ) 36 | return response 37 | 38 | def title_gen(init_text_input): 39 | messages = [{ 40 | "role": "system", "content": "You are a title generator, You are given a text input from a user, and according to that text input you will figure out the context of the chat and generate a very short title, maximum 4,5 word of length" 41 | }] 42 | messages.append({ 43 | "role": "user", 44 | "content": init_text_input 45 | }) 46 | response = openai.ChatCompletion.create( 47 | 
model="gpt-3.5-turbo", 48 | messages=messages, 49 | ) 50 | return response["choices"][0]["message"]["content"] -------------------------------------------------------------------------------- /src/gptchatbotcli/payloads.py: -------------------------------------------------------------------------------- 1 | presets = { 2 | "Chat": { 3 | "message": "The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.", 4 | "inject": { 5 | "state": True, 6 | "start": "AI:", 7 | "end": "Human:" 8 | }, 9 | }, 10 | "Q&A": { 11 | "message": "I am a highly intelligent question answering bot. If you ask me a question that is rooted in truth, I will give you the answer. If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with 'Unknown'.\n", 12 | "inject": { 13 | "state": True, 14 | "start": "A:", 15 | "end": "Q:" 16 | }, 17 | }, 18 | "Grammar Correction": { 19 | "message": "Correct this to standard English:\n", 20 | "inject": { 21 | "state": False, 22 | } 23 | }, 24 | "Eli5": { 25 | "message":"Summarize this for a second-grade student:\n", 26 | "inject": { 27 | "state": False, 28 | } 29 | }, 30 | "Custom": { 31 | "message": "", 32 | "inject": { 33 | "state": False 34 | } 35 | } 36 | } 37 | 38 | # generate response 39 | chat_complitions_models = [ 40 | "gpt-3.5-turbo", 41 | "gpt-3.5-turbo-0301", 42 | "gpt-4", 43 | "gpt-4-0314", 44 | "gpt-4-32k", 45 | "gpt-4-32k-0314", 46 | ] -------------------------------------------------------------------------------- /src/gptchatbotcli/services.py: -------------------------------------------------------------------------------- 1 | import termcolor 2 | import time 3 | 4 | def print_char_by_char(start_string, text): 5 | print(start_string, end=" ") 6 | for char in text: 7 | print(termcolor.colored(char, 'light_yellow'), end='', flush=True) 8 | time.sleep(0.01) 9 | print() 10 | 11 | def print_whole_but_color(start_string, text): 12 | print(start_string, end=" ") 13 | print(termcolor.colored(text, 'light_yellow'), end='', flush=True) 14 | print() --------------------------------------------------------------------------------