├── tests ├── __init__.py └── unit_tests │ ├── __init__.py │ ├── models │ ├── __init__.py │ ├── test_doc.py │ ├── test_openai_tokens_truncate.py │ ├── test_context.py │ └── test_skills.py │ ├── utils │ ├── __init__.py │ └── test_file_icon.py │ ├── controllers │ ├── __init__.py │ ├── test_tube_files.py │ └── test_decorate_display.py │ ├── gptui_kernel │ ├── __init__.py │ ├── plugins_test_data │ │ ├── NotSemanticPlugin │ │ │ └── Test │ │ │ │ └── skprompt.txt │ │ ├── FunSkill │ │ │ ├── Excuses │ │ │ │ ├── skprompt.txt │ │ │ │ └── config.json │ │ │ ├── Joke │ │ │ │ ├── skprompt.txt │ │ │ │ └── config.json │ │ │ └── Limerick │ │ │ │ ├── config.json │ │ │ │ └── skprompt.txt │ │ ├── math_plugin.py │ │ └── FileIO.py │ ├── test_manager.py │ └── test_call_plugin.py │ └── data │ ├── langchain_tests_assets │ ├── text_load_test.txt │ └── html_load_test.html │ └── test_document_loaders.py ├── src └── gptui │ ├── data │ ├── __init__.py │ ├── langchain │ │ ├── __init__.py │ │ ├── load │ │ │ ├── __init__.py │ │ │ └── serializable.py │ │ ├── docstore │ │ │ ├── __init__.py │ │ │ └── document.py │ │ ├── schema │ │ │ ├── __init__.py │ │ │ └── document.py │ │ ├── document_loaders │ │ │ ├── blob_loaders │ │ │ │ ├── __init__.py │ │ │ │ ├── file_system.py │ │ │ │ └── schema.py │ │ │ ├── __init__.py │ │ │ ├── html.py │ │ │ ├── helpers.py │ │ │ ├── html_bs.py │ │ │ ├── text.py │ │ │ └── base.py │ │ ├── README.md │ │ └── pydantic_v1 │ │ │ └── __init__.py │ └── vector_memory │ │ ├── __init__.py │ │ └── qdrant_memory.py │ ├── models │ ├── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── config_from_dot_env.py │ │ ├── openai_settings_from_dot_env.py │ │ ├── openai_api.py │ │ └── tokens_num.py │ ├── gptui_basic_services │ │ ├── __init__.py │ │ ├── plugins │ │ │ ├── __init__.py │ │ │ └── conversation_service.py │ │ └── templates │ │ │ └── upload_file_prompt.txt │ ├── openai_error.py │ ├── doc.py │ ├── blinker_wrapper.py │ ├── openai_chat_inner_service.py │ ├── skills.py │ ├── signals.py │ ├── openai_tokens_truncate.py │ ├── role.py │ └── context.py │ ├── utils │ ├── __init__.py │ ├── line_count.py │ ├── safe_iterate.py │ └── file_icon.py │ ├── views │ ├── __init__.py │ ├── common_message.py │ ├── theme.py │ ├── wink_wink.py │ └── custom_tree.py │ ├── controllers │ ├── __init__.py │ ├── ai_care_sensors.py │ ├── assistant_tube_control.py │ ├── group_talk_control.py │ ├── voice_control.py │ ├── chat_context_control.py │ ├── tube_files_control.py │ ├── chat_response_control.py │ └── dash_board_control.py │ ├── drivers │ ├── __init__.py │ ├── driver_interface.py │ ├── driver_error.py │ └── driver_manager.py │ ├── plugins │ ├── __init__.py │ ├── DEFAULT_PLUGINS │ │ ├── __init__.py │ │ ├── Bead.py │ │ └── CoreSkills.py │ ├── FileRW.py │ ├── MemoryRecall.py │ ├── OpenInterpreter.py │ └── SnoozeReminder.py │ ├── _version.py │ ├── gptui_kernel │ ├── __init__.py │ ├── null_logger.py │ ├── manager_exceptions.py │ └── kernel_exceptions.py │ ├── __init__.py │ ├── .default_config.yml │ ├── help.md │ ├── __main__.py │ └── config.yml ├── docs ├── features.md ├── blog │ ├── index.md │ ├── .authors.yml │ └── posts │ │ └── monochrome.md ├── getting_started.md ├── about │ ├── license.md │ └── contributing.md ├── api │ └── index.md ├── guide │ └── index.md ├── index.md ├── troubleshooting.md ├── configuration.zh.md └── configuration.md ├── .github ├── FUNDING.yml ├── PULL_REQUEST_TEMPLATE.md ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── codecov.yml │ ├── static.yml │ └── python-publish.yml ├── web-serve.toml ├── 
.env_gptui.example ├── MANIFEST.in ├── main.py ├── custom_plugin_examples ├── README.md └── native_plugin_example.py ├── setup.py ├── requirements.txt ├── LICENSE ├── CHANGELOG.md ├── mkdocs.yml ├── pyproject.toml └── .gitignore /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/views/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit_tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/features.md: -------------------------------------------------------------------------------- 1 | Features 2 | -------------------------------------------------------------------------------- /src/gptui/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/drivers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/blog/index.md: -------------------------------------------------------------------------------- 1 | # Blog 2 | 3 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit_tests/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit_tests/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/load/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/data/vector_memory/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /tests/unit_tests/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/docstore/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/plugins/DEFAULT_PLUGINS/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/getting_started.md: -------------------------------------------------------------------------------- 1 | Getting started here. 2 | -------------------------------------------------------------------------------- /src/gptui/models/gptui_basic_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/models/gptui_basic_services/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | -------------------------------------------------------------------------------- /src/gptui/_version.py: -------------------------------------------------------------------------------- 1 | __title__ = "gptui" 2 | __version__ = "0.5.4" 3 | -------------------------------------------------------------------------------- /web-serve.toml: -------------------------------------------------------------------------------- 1 | [app.GPTUI] 2 | command = "python main.py" 3 | #command = "python3 main.py" 4 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/schema/__init__.py: -------------------------------------------------------------------------------- 1 | from .document import BaseDocumentTransformer, Document 2 | -------------------------------------------------------------------------------- /.env_gptui.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY = "" 2 | OPENAI_ORG_ID = "" 3 | GOOGLE_KEY = "" 4 | GOOGLE_CX = "" 5 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/docstore/document.py: -------------------------------------------------------------------------------- 1 | from ..schema import Document 2 | 3 | __all__ = ["Document"] 4 | -------------------------------------------------------------------------------- /src/gptui/gptui_kernel/__init__.py: -------------------------------------------------------------------------------- 1 | from .kernel import Kernel 2 | 3 | #__all__ = [ 4 | # "Kernel", 5 | #] 6 | -------------------------------------------------------------------------------- /tests/unit_tests/data/langchain_tests_assets/text_load_test.txt: 
-------------------------------------------------------------------------------- 1 | This is a txt file for testing text loader. 2 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/NotSemanticPlugin/Test/skprompt.txt: -------------------------------------------------------------------------------- 1 | This is not a semantic plugin. 2 | -------------------------------------------------------------------------------- /docs/about/license.md: -------------------------------------------------------------------------------- 1 | GPTUI is licensed under the MIT License. [View license](https://github.com/happyapplehorse/gptui/blob/main/LICENSE). 2 | -------------------------------------------------------------------------------- /src/gptui/__init__.py: -------------------------------------------------------------------------------- 1 | from ._version import __title__, __version__ 2 | 3 | 4 | __all__ = [ 5 | "__title__", 6 | "__version__", 7 | ] 8 | -------------------------------------------------------------------------------- /docs/blog/.authors.yml: -------------------------------------------------------------------------------- 1 | authors: 2 | happyapplehorse: 3 | name: Xueao Chao 4 | description: Creator 5 | avatar: https://github.com/happyapplehorse.png 6 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include src/gptui/.default_config.yml 2 | include src/gptui/config.yml 3 | include src/gptui/help.md 4 | 5 | recursive-include src/gptui *.txt 6 | recursive-include src/gptui *.tcss 7 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/blob_loaders/__init__.py: -------------------------------------------------------------------------------- 1 | from .file_system import FileSystemBlobLoader 2 | from .schema import Blob, BlobLoader 3 | 4 | __all__ = ["BlobLoader", "Blob", "FileSystemBlobLoader"] 5 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **Please review the following checklist.** 2 | 3 | - [ ] Docstrings on all new or modified functions / classes 4 | - [ ] Updated documentation 5 | - [ ] Updated CHANGELOG.md (where appropriate) 6 | -------------------------------------------------------------------------------- /src/gptui/models/gptui_basic_services/templates/upload_file_prompt.txt: -------------------------------------------------------------------------------- 1 | {{$input}} 2 | 3 | ******************** FILE CONTENT BEGIN ******************** 4 | {{$file_content}} 5 | ******************** FILE CONTENT FINISH ******************* 6 | -------------------------------------------------------------------------------- /tests/unit_tests/data/langchain_tests_assets/html_load_test.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html>
3 | <head> 4 | <title>Page Title</title> 5 | </head> 6 | <body> 7 | 8 | <h1>My First Heading</h1>
9 | <p>My first paragraph.</p> 10 | 11 | </body> 12 | </html> -------------------------------------------------------------------------------- /docs/api/index.md: -------------------------------------------------------------------------------- 1 | # API 2 | 3 | This is an API-level reference to the GPTUI API. 4 | Click the links to your left (or in the burger menu) to open a reference for each module. 5 | 6 | If you are new to GPTUI, you may want to read the tutorial or guide first. 7 | -------------------------------------------------------------------------------- /docs/guide/index.md: -------------------------------------------------------------------------------- 1 | # Guide 2 | 3 | This guide helps you with the secondary development of GPTUI 4 | or with using the features provided by GPTUI to develop your own applications. 5 | 6 | If you are new to GPTUI, you may want to read the tutorial first. 7 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from src.gptui.__main__ import gptui_run 5 | 6 | 7 | if __name__ == "__main__": 8 | 9 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src")) 10 | gptui_run(config_path='src/gptui/config.yml') 11 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/__init__.py: -------------------------------------------------------------------------------- 1 | from .text import TextLoader 2 | from .html import UnstructuredHTMLLoader 3 | from .html_bs import BSHTMLLoader 4 | 5 | __all__ = [ 6 | "TextLoader", 7 | "UnstructuredHTMLLoader", 8 | "BSHTMLLoader", 9 | ] 10 | -------------------------------------------------------------------------------- /src/gptui/models/utils/config_from_dot_env.py: -------------------------------------------------------------------------------- 1 | from dotenv import dotenv_values 2 | 3 | 4 | def config_from_dot_env(dot_env_path: str) -> dict: 5 | """ 6 | Reads the configs from the dot_env_path. 7 | """ 8 | 9 | config = dotenv_values(dot_env_path) 10 | 11 | return config 12 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Excuses/skprompt.txt: -------------------------------------------------------------------------------- 1 | Generate a creative reason or excuse for the given event. Be creative and be funny. Let your imagination run wild. 2 | 3 | Event:I am running late. 4 | Excuse:I was being held ransom by giraffe gangsters. 5 | 6 | Event:{{$input}} -------------------------------------------------------------------------------- /src/gptui/views/common_message.py: -------------------------------------------------------------------------------- 1 | from textual.message import Message 2 | 3 | 4 | class CommonMessage(Message): 5 | def __init__(self, message_name: str, message_content) -> None: 6 | self.message_name = message_name 7 | self.message_content = message_content 8 | super().__init__() 9 | -------------------------------------------------------------------------------- /custom_plugin_examples/README.md: -------------------------------------------------------------------------------- 1 | You can specify the folder for your custom plugins in the configuration file, 2 | which defaults to "~/.gptui/plugins". 3 | 4 | GPTUI will automatically scan this folder to retrieve the plugins contained within it.
5 | 6 | You can copy the files from this folder to the custom plugin directory for testing purposes. 7 | -------------------------------------------------------------------------------- /src/gptui/gptui_kernel/null_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | class NullHandler(logging.Handler): 5 | def emit(self, record): 6 | pass 7 | 8 | 9 | def get_null_logger(name=None): 10 | logger = logging.getLogger(name or __name__) 11 | logger.addHandler(NullHandler()) 12 | logger.propagate = False 13 | return logger 14 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Joke/skprompt.txt: -------------------------------------------------------------------------------- 1 | WRITE EXACTLY ONE JOKE or HUMOROUS STORY ABOUT THE TOPIC BELOW 2 | 3 | JOKE MUST BE: 4 | - G RATED 5 | - WORKPLACE/FAMILY SAFE 6 | NO SEXISM, RACISM OR OTHER BIAS/BIGOTRY 7 | 8 | BE CREATIVE AND FUNNY. I WANT TO LAUGH. 9 | {{$style}} 10 | +++++ 11 | 12 | {{$input}} 13 | +++++ 14 | -------------------------------------------------------------------------------- /src/gptui/controllers/ai_care_sensors.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | 4 | def time_now() -> str: 5 | """ 6 | Get the current date and time in the local time zone. 7 | 8 | Example: 9 | {{time.now}} => Sunday, January 12, 2031 9:15 PM 10 | """ 11 | now = datetime.datetime.now() 12 | return now.strftime("%A, %B %d, %Y %I:%M %p") 13 | 14 | -------------------------------------------------------------------------------- /src/gptui/utils/line_count.py: -------------------------------------------------------------------------------- 1 | from rich.console import Console 2 | 3 | from .my_text import MyText 4 | 5 | 6 | def my_line_count(content: MyText, width: int, console=Console()) -> int: 7 | lines = content.split(allow_blank=True) 8 | num_count = 0 9 | for line in lines: 10 | num_count += len(line.wrap(console, width)) 11 | return num_count 12 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Limerick/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "description": "Generate a funny limerick about a person", 4 | "type": "completion", 5 | "completion": { 6 | "max_tokens": 100, 7 | "temperature": 0.7, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | 4 | setuptools.setup( 5 | packages=setuptools.find_packages(where="src"), 6 | package_dir={"": "src"}, 7 | package_data={ 8 | "gptui": [ 9 | ".default_config.yml", 10 | "config.yml", 11 | "help.md", 12 | "**/*.txt", 13 | "**/*.tcss", 14 | ], 15 | }, 16 | ) 17 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Excuses/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "description": "Turn a scenario into a creative or humorous excuse to send your boss", 4 | "type": "completion", 5 | "completion": { 6 | "max_tokens": 60, 7 |
"temperature": 0.5, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /tests/unit_tests/models/test_doc.py: -------------------------------------------------------------------------------- 1 | from gptui.models.doc import document_loader 2 | 3 | 4 | def test_document_loader(tmp_path): 5 | file_content = "This is a test." 6 | file_path = tmp_path / "test.txt" 7 | with open(file_path, "w") as fp: 8 | fp.write(file_content) 9 | document = document_loader(file_path) 10 | assert document[0].page_content == "This is a test." 11 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/html.py: -------------------------------------------------------------------------------- 1 | """Loader that uses unstructured to load HTML files.""" 2 | from typing import List 3 | 4 | from .unstructured import UnstructuredFileLoader 5 | 6 | 7 | class UnstructuredHTMLLoader(UnstructuredFileLoader): 8 | """Loader that uses unstructured to load HTML files.""" 9 | 10 | def _get_elements(self) -> List: 11 | from unstructured.partition.html import partition_html 12 | 13 | return partition_html(filename=self.file_path, **self.unstructured_kwargs) 14 | -------------------------------------------------------------------------------- /tests/unit_tests/models/test_openai_tokens_truncate.py: -------------------------------------------------------------------------------- 1 | from gptui.models.openai_tokens_truncate import find_position 2 | 3 | 4 | def test_find_position(): 5 | lst = [1, 2, 3, 4, 5] 6 | num = 8 7 | result = find_position(lst, num) 8 | assert result == 4 9 | lst = [2, 0, 5, 1, 3, 2, 1, 0, 4] 10 | num = 9 11 | result = find_position(lst, num) 12 | assert result == 5 13 | result = find_position(lst, 2) 14 | assert result == 9 15 | result = find_position(lst, 20) 16 | assert result == 0 17 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Joke/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "description": "Generate a funny joke", 4 | "type": "completion", 5 | "completion": { 6 | "max_tokens": 1000, 7 | "temperature": 0.9, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | }, 12 | "input": { 13 | "parameters": [ 14 | { 15 | "name": "input", 16 | "description": "Joke subject", 17 | "defaultValue": "" 18 | } 19 | ] 20 | } 21 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | agere>=0.1.3,<1 2 | ai-care>=0.1.3,<1 3 | aiofiles>=23.1.0,<24 4 | beautifulsoup4>=4.12.2,<5 5 | blinker>=1.6.2,<2 6 | chardet>=5.1.0,<6 7 | geocoder>=1.38.1,<2 8 | httpx>=0.24.1,<1 9 | lxml>=4.9.3,<6 10 | # open-interpreter==0.1.4 11 | openai>=1.2.3,<2 12 | playsound>=1.3.0,<2 13 | Pygments>=2.15.1,<3 14 | pyperclip>=1.8.2,<2 15 | python-dotenv>=1.0.0,<2 16 | PyYAML>=6.0.1,<7 17 | qdrant-client>=1.4.0,<2 18 | rich>=13.7.0,<14 19 | semantic-kernel>=0.4.0.dev0,<1 20 | textual>=0.37.1,<1 21 | tiktoken>=0.4.0,<1 22 | unstructured>=0.10.18,<1 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | Have you checked closed issues? https://github.com/happyapplehorse/gptui/issues?q=is%3Aissue+is%3Aclosed 11 | 12 | Please give a brief but clear explanation of the issue. 13 | 14 | Feel free to add screenshots and/or videos. These can be very helpful! 15 | 16 | If possible, please use English as much as you can. 17 | This is to ensure that others can review the content of this issue more effectively. 18 | -------------------------------------------------------------------------------- /src/gptui/utils/safe_iterate.py: -------------------------------------------------------------------------------- 1 | def safe_next(gen): 2 | """Avoid conflicts between the StopIteration of generators and the StopIteration of coroutine functions in the event loop.""" 3 | try: 4 | return ("OK", next(gen)) 5 | except StopIteration as e: 6 | return ("DONE", e.value) 7 | 8 | def safe_send(gen, value): 9 | """Avoid conflicts between the StopIteration of generators and the StopIteration of coroutine functions in the event loop.""" 10 | try: 11 | return ("OK", gen.send(value)) 12 | except StopIteration as e: 13 | return ("DONE", e.value) 14 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/README.md: -------------------------------------------------------------------------------- 1 | Because the original development platform (Termux) could not install the langchain toolkit (due to numpy), and to make GPTUI easier to use for other Termux users, the needed parts of the langchain source code have been copied into this project to provide some of langchain's functions. Thanks to the hard work of the langchain developers. If the langchain installation problem is solved later, the langchain toolkit will be installed directly. 2 | 3 | Langchain tools that have already been integrated: 4 | - TextLoader 5 | - UnstructuredHTMLLoader 6 | - BSHTMLLoader 7 | 8 | Required dependencies: 9 | - pydantic 10 | -------------------------------------------------------------------------------- /docs/about/contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing to GPTUI 2 | 3 | The GPTUI project welcomes contributions from developers and users in the open source community. 4 | Contributions can be made in a number of ways; a few examples are: 5 | 6 | - Code patches via pull requests 7 | - Documentation improvements 8 | - Bug reports and patch reviews 9 | 10 | Some of GPTUI's plugin features rely on prompts; you can continue to help me improve these prompts. 11 | And I'd like to have appropriate animation cues during certain state changes. 12 | If you have any creative ideas, I'd appreciate your help in implementing them. 13 | 14 | 15 | ## 🎉 16 | 17 | Each contributor can leave a quote in the program.
18 | -------------------------------------------------------------------------------- /src/gptui/controllers/assistant_tube_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from ..models.signals import response_auxiliary_message_signal 4 | 5 | 6 | gptui_logger = logging.getLogger("gptui_logger") 7 | 8 | 9 | class AssistantTube: 10 | def __init__(self, app): 11 | self.app = app 12 | response_auxiliary_message_signal.connect(self.tube_display) 13 | 14 | def tube_display(self, sender, **kwargs): 15 | message = kwargs["message"] 16 | content = message["content"] 17 | flag = message["flag"] 18 | if flag == "function_call": 19 | self.app.context_piece_to_assistant_tube(content) 20 | elif flag == "function_response": 21 | self.app.context_piece_to_assistant_tube(content) 22 | -------------------------------------------------------------------------------- /src/gptui/models/utils/openai_settings_from_dot_env.py: -------------------------------------------------------------------------------- 1 | from dotenv import dotenv_values 2 | 3 | 4 | def openai_settings_from_dot_env(dot_env_path: str) -> tuple[str, str | None]: 5 | """ 6 | Reads the OpenAI API key and organization ID from the dot_env_path. 7 | OpenAI API key should be saved as "OPENAI_API_KEY". 8 | Organization should be saved as "OPENAI_ORG_ID". 9 | 10 | Returns: 11 | tuple[str, str | None]: The OpenAI API key and the OpenAI organization ID (None if not set) 12 | """ 13 | 14 | config = dotenv_values(dot_env_path) 15 | api_key = config.get("OPENAI_API_KEY", None) 16 | org_id = config.get("OPENAI_ORG_ID", None) 17 | 18 | assert api_key, f"OPENAI_API_KEY not found in {dot_env_path}" 19 | 20 | # It's okay if the org ID is not found (not required) 21 | return api_key, org_id 22 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # GPTUI 2 | 3 | { align=left width="60" } 4 | GPTUI is a GPT conversational TUI (Textual User Interface) tool that runs within the terminal. 5 | It uses the Textual framework for its TUI interface and incorporates the plugin framework provided by Semantic Kernel. 6 | GPTUI offers a lightweight Kernel to power AI applications. 7 | The top-level TUI application is decoupled from the underlying Kernel, allowing you to easily replace the TUI interface or expand its functionalities. 8 | At present, only OpenAI's GPT models are supported; other LLM interfaces will be added later. 9 | 10 | 11 | ## Demo 12 | 13 | Below is a demonstration: 14 | 15 |  16 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Limerick/skprompt.txt: -------------------------------------------------------------------------------- 1 | There was a young woman named Bright, 2 | Whose speed was much faster than light. 3 | She set out one day, 4 | In a relative way, 5 | And returned on the previous night. 6 | 7 | There was an odd fellow named Gus, 8 | When traveling he made such a fuss. 9 | He was banned from the train, 10 | Not allowed on a plane, 11 | And now travels only by bus. 12 | 13 | There once was a man from Tibet, 14 | Who couldn't find a cigarette 15 | So he smoked all his socks, 16 | and got chicken-pox, 17 | and had to go to the vet. 18 | 19 | There once was a boy named Dan, 20 | who wanted to fry in a pan.
21 | He tried and he tried, 22 | and eventually died, 23 | that weird little boy named Dan. 24 | 25 | Now write a very funny limerick about {{$name}}. 26 | {{$input}} 27 | Invent new facts about their life. Must be funny. 28 | -------------------------------------------------------------------------------- /src/gptui/controllers/group_talk_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from ..models.signals import common_message_signal 4 | 5 | 6 | gptui_logger = logging.getLogger("gptui_logger") 7 | 8 | 9 | class GroupTalkControl: 10 | def __init__(self, app): 11 | self.app = app 12 | common_message_signal.connect(self.group_talk_user_message_send) 13 | 14 | def group_talk_user_message_send(self, sender, **kwargs): 15 | message = kwargs["message"] 16 | if message["flag"] == "group_talk_user_message_send": 17 | messages = message["content"] 18 | if isinstance(messages, list): 19 | for one_message in messages: 20 | self.app.context_piece_to_chat_window(one_message, change_line=True, decorator_switch=True) 21 | else: 22 | self.app.context_piece_to_chat_window(messages, change_line=True, decorator_switch=True) 23 | -------------------------------------------------------------------------------- /src/gptui/gptui_kernel/manager_exceptions.py: -------------------------------------------------------------------------------- 1 | class ManagerError(Exception): 2 | ... 3 | 4 | 5 | class HandlerNotRegisterError(ManagerError): 6 | def __init__(self, handler, manager=None): 7 | self.handler = handler 8 | self.manager = manager 9 | 10 | def __str__(self): 11 | if self.manager is None: 12 | return f"Handler: {self.handler} is not registered in manager." 13 | else: 14 | return f"Handler: {self.handler} is not registered in manager: {self.manager}." 15 | 16 | 17 | class JobNotRegisterError(ManagerError): 18 | def __init__(self, job, manager=None): 19 | self.job = job 20 | self.manager = manager 21 | 22 | def __str__(self): 23 | if self.manager is None: 24 | return f"Job: {self.job} is not registered in manager." 25 | else: 26 | return f"Job: {self.job} is not registered in manager: {self.manager}." 27 | -------------------------------------------------------------------------------- /src/gptui/models/utils/openai_api.py: -------------------------------------------------------------------------------- 1 | import openai 2 | from openai import OpenAI, AsyncOpenAI 3 | 4 | from .openai_settings_from_dot_env import openai_settings_from_dot_env 5 | 6 | 7 | OpenAIClient = OpenAI | AsyncOpenAI 8 | 9 | 10 | def openai_api(dot_env_path: str | None): 11 | assert dot_env_path, "'dot_env_path' cannot be None or empty." 12 | openai_key, org_id = openai_settings_from_dot_env(dot_env_path) 13 | openai.api_key = openai_key 14 | return openai 15 | 16 | def openai_api_client(dot_env_path: str | None, async_client: bool = False, **kwargs) -> OpenAIClient: 17 | assert dot_env_path, "'dot_env_path' cannot be None or empty."
18 | openai_key, org_id = openai_settings_from_dot_env(dot_env_path) 19 | if async_client is True: 20 | client = AsyncOpenAI(api_key=openai_key, organization=org_id, **kwargs) 21 | else: 22 | client = OpenAI(api_key=openai_key, organization=org_id, **kwargs) 23 | return client 24 | -------------------------------------------------------------------------------- /src/gptui/models/openai_error.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from .blinker_wrapper import async_wrapper_with_loop, async_wrapper_without_loop 4 | from .context import OpenaiContext 5 | from .signals import notification_signal 6 | 7 | 8 | gptui_logger = logging.getLogger("gptui_logger") 9 | 10 | 11 | class OpenaiErrorHandler: 12 | def openai_error_handle(self, error: Exception, context: OpenaiContext, event_loop: bool = True, **kwargs) -> None: 13 | if event_loop is True: 14 | gptui_logger.error(f"Openai Error: {error}") 15 | notification_signal.send(self, _async_wrapper=async_wrapper_with_loop, message={"content":{"error":error, "context":context, "ps":kwargs}, "flag":"openai_error"}) 16 | else: 17 | gptui_logger.error(f"Openai Error: {error}") 18 | notification_signal.send(self, _async_wrapper=async_wrapper_without_loop, message={"content":{"error":error, "context":context, "ps":kwargs}, "flag":"openai_error"}) 19 | -------------------------------------------------------------------------------- /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | name: Codecov 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - develop 8 | pull_request: 9 | branches: 10 | - main 11 | - develop 12 | workflow_dispatch: 13 | 14 | jobs: 15 | codecov: 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Set up Python 21 | uses: actions/setup-python@v3 22 | with: 23 | python-version: '3.x' 24 | - name: Install dependencies 25 | run: | 26 | python -m pip install --upgrade pip 27 | pip install wheel setuptools 28 | pip install pytest pytest-cov pytest-asyncio 29 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 30 | - name: Execute test 31 | run: pytest --cov=./src/gptui --cov-report=xml 32 | 33 | - name: Upload coverage reports to Codecov 34 | uses: codecov/codecov-action@v3 35 | with: 36 | files: ./coverage.xml 37 | -------------------------------------------------------------------------------- /src/gptui/drivers/driver_interface.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from .driver_error import NoDriverError, NoDriverMethodError 4 | 5 | 6 | class DriverInterface: 7 | 8 | def __init__(self, platform: str): 9 | self.platform = platform.lower() 10 | 11 | def __call__(self, *args, **kwargs) -> Any: 12 | method = getattr(self, self.platform, None) 13 | if method and callable(method): 14 | return method(*args, **kwargs) 15 | else: 16 | raise NoDriverError(self.platform) 17 | 18 | def termux(self): 19 | raise NoDriverMethodError(driver="termux", method=self.__class__.__name__) 20 | 21 | def linux(self): 22 | raise NoDriverMethodError(driver="linux", method=self.__class__.__name__) 23 | 24 | def macos(self): 25 | raise NoDriverMethodError(driver="macos", method=self.__class__.__name__) 26 | 27 | def windows(self): 28 | raise NoDriverMethodError(driver="windows", method=self.__class__.__name__) 29 | -------------------------------------------------------------------------------- 
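A minimal sketch of how a concrete driver plugs into DriverInterface above (an editor's illustration, not a file in the repository; the Beep driver, its methods, and its return values are hypothetical). A subclass overrides only the platform methods it supports; calling the instance dispatches through __call__ to the method named after the configured platform, and any platform left unimplemented falls back to the NoDriverMethodError-raising defaults:

from gptui.drivers.driver_interface import DriverInterface

class Beep(DriverInterface):
    # Hypothetical driver: ring the terminal bell, implemented for
    # Linux and macOS only.
    def linux(self) -> str:
        print("\a", end="")  # ASCII BEL rings the terminal bell
        return "beeped"

    def macos(self) -> str:
        print("\a", end="")
        return "beeped"

beep = Beep("Linux")  # the platform string is lower-cased in DriverInterface.__init__
beep()                # __call__ dispatches to Beep.linux
Beep("windows")()     # raises NoDriverMethodError: Beep does not override windows()

This mirrors how DriverManager registers CopyCode, TextToSpeak, and the voice-record drivers, passing in the platform string taken from app.config.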
/src/gptui/drivers/driver_error.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Callable, TYPE_CHECKING 3 | 4 | 5 | if TYPE_CHECKING: 6 | from .driver_interface import DriverInterface 7 | 8 | 9 | class DriverError(Exception): 10 | ... 11 | 12 | 13 | class NoDriverError(DriverError): 14 | def __init__(self, driver: str): 15 | self.driver = driver 16 | 17 | def __str__(self): 18 | return f"There is no {self.driver} driver." 19 | 20 | 21 | class NoDriverMethodError(DriverError): 22 | def __init__(self, driver: str | Callable, method: str | DriverInterface): 23 | if isinstance(driver, str): 24 | self.driver = driver 25 | else: 26 | self.driver = driver.__name__ 27 | if isinstance(method, str): 28 | self.method = method 29 | else: 30 | self.method = type(method).__name__ 31 | 32 | def __str__(self): 33 | return f"There is no {self.method} method in {self.driver} driver." 34 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/pydantic_v1/__init__.py: -------------------------------------------------------------------------------- 1 | from importlib import metadata 2 | 3 | ## Create namespaces for pydantic v1 and v2. 4 | # This code must stay at the top of the file before other modules may 5 | # attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules. 6 | # 7 | # This hack is done for the following reasons: 8 | # * Langchain will attempt to remain compatible with both pydantic v1 and v2 since 9 | # both dependencies and dependents may be stuck on either version of v1 or v2. 10 | # * Creating namespaces for pydantic v1 and v2 should allow us to write code that 11 | # unambiguously uses either v1 or v2 API. 12 | # * This change is easier to roll out and roll back. 
13 | 14 | try: 15 | from pydantic.v1 import * # noqa: F403 16 | except ImportError: 17 | from pydantic import * # noqa: F403 18 | 19 | 20 | try: 21 | _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0]) 22 | except metadata.PackageNotFoundError: 23 | _PYDANTIC_MAJOR_VERSION = 0 24 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/test_manager.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest.mock import patch 3 | 4 | from textual.app import App 5 | 6 | from gptui.gptui_kernel.manager import Manager 7 | 8 | 9 | mocked_dotenv_values = { 10 | "OPENAI_API_KEY": "fake_api_key", 11 | "OPENAI_ORG_ID": "fake_org_id", 12 | } 13 | 14 | def test_scan_plugin(): 15 | with patch('gptui.gptui_kernel.kernel.dotenv_values', return_value=mocked_dotenv_values): 16 | app = App() 17 | manager = Manager(app, dot_env_config_path=os.path.expanduser("~/.gptui/.env_gptui")) 18 | semantic_plugins, native_plugins = manager.scan_plugins("./tests/unit_tests/gptui_kernel/plugins_test_data") 19 | semantic_plugins_name_list = [plugin_meta.name for plugin_meta in semantic_plugins] 20 | native_plugins_name_list = [plugin_meta.name for plugin_meta in native_plugins] 21 | assert set(semantic_plugins_name_list) == {"FunSkill"} 22 | assert set(native_plugins_name_list) == {"WebServe", "MathPlugin", "WriteFile"} 23 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/math_plugin.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | from semantic_kernel.skill_definition import ( 4 | sk_function, 5 | sk_function_context_parameter, 6 | ) 7 | from semantic_kernel.orchestration.sk_context import SKContext 8 | 9 | 10 | class MathPlugin: 11 | @sk_function( 12 | description="Takes the square root of a number", 13 | name="square_root", 14 | input_description="The value to take the square root of", 15 | ) 16 | def square_root(self, number: str) -> str: 17 | return str(math.sqrt(float(number))) 18 | 19 | @sk_function( 20 | description="Adds two numbers together", 21 | name="add", 22 | ) 23 | @sk_function_context_parameter( 24 | name="input", 25 | description="The first number to add", 26 | ) 27 | @sk_function_context_parameter( 28 | name="number2", 29 | description="The second number to add", 30 | ) 31 | def add(self, context: SKContext) -> str: 32 | return str(float(context["input"]) + float(context["number2"])) 33 | -------------------------------------------------------------------------------- /src/gptui/gptui_kernel/kernel_exceptions.py: -------------------------------------------------------------------------------- 1 | import semantic_kernel as sk 2 | 3 | 4 | class KernelException(Exception): 5 | ... 6 | 7 | 8 | class PluginInfoError(KernelException): 9 | def __init__(self, plugin_info: tuple): 10 | self.plugin_info = plugin_info 11 | 12 | def __str__(self): 13 | return f"Plugin info in {self.plugin_info} is a wrong type." 
14 | 15 | 16 | class InvalidArgumentTypeError(KernelException): 17 | def __init__(self, argument, expected_type): 18 | self.argument = argument 19 | self.expected_type = expected_type 20 | 21 | def __str__(self): 22 | return f"Invalid argument type: Expected {self.expected_type}, got {type(self.argument)}" 23 | 24 | 25 | class PluginsMatchError(KernelException): 26 | def __init__(self, sk_kernel: sk.Kernel, plugins_list: list[tuple]): 27 | self.sk_kernel = sk_kernel 28 | self.plugins_list = plugins_list 29 | 30 | def __str__(self): 31 | return f"Semantic kernel and plugin list do not match. sk_kernel: {self.sk_kernel}, plugins_list: {self.plugins_list}" 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Xueao Chao 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/unit_tests/data/test_document_loaders.py: -------------------------------------------------------------------------------- 1 | from gptui.data.langchain.document_loaders import TextLoader, UnstructuredHTMLLoader, BSHTMLLoader 2 | 3 | 4 | def test_text_loader(): 5 | file_path = "./tests/unit_tests/data/langchain_tests_assets/text_load_test.txt" 6 | loader = TextLoader(file_path) 7 | document = loader.load()[0] 8 | assert document.page_content == "This is a txt file for testing text loader.\n" 9 | assert document.metadata["source"] == "./tests/unit_tests/data/langchain_tests_assets/text_load_test.txt" 10 | 11 | def test_bs_html_loader(): 12 | file_path = "./tests/unit_tests/data/langchain_tests_assets/html_load_test.html" 13 | loader = BSHTMLLoader(file_path) 14 | document = loader.load()[0] 15 | assert document.page_content == "\n\nPage Title\n\n\nMy First Heading\nMy first paragraph.\n\n\n" 16 | 17 | def test_unstructured_html_loader(): 18 | file_path = "./tests/unit_tests/data/langchain_tests_assets/html_load_test.html" 19 | loader = UnstructuredHTMLLoader(file_path) 20 | document = loader.load()[0] 21 | assert document.page_content == "My First Heading\n\nMy first paragraph."
22 | -------------------------------------------------------------------------------- /tests/unit_tests/controllers/test_tube_files.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from gptui.controllers.tube_files_control import TubeFiles 4 | from gptui.utils.my_text import MyText as Text 5 | 6 | class TestTubeFiles: 7 | @pytest.fixture(autouse=True) 8 | def setup(self, tmp_path): 9 | 10 | class Displayer: 11 | def update(self, content): 12 | self.display = content 13 | 14 | self.displayer = Displayer() 15 | self.file_path = tmp_path / "files_test_data" 16 | self.file_path.mkdir(exist_ok=True) 17 | 18 | @pytest.mark.asyncio 19 | async def test_write_read_file_async(self): 20 | tf = TubeFiles(self.displayer) 21 | file_content = "This is a test." 22 | file_path = self.file_path / "test.txt" 23 | await tf.write_file_async(file_path, file_content) 24 | content = await tf.read_file_async(file_path) 25 | assert content == "This is a test." 26 | content = await tf.read_file_async(self.file_path / "test1.txt") 27 | assert content is None 28 | assert self.displayer.display == Text("File or directory not found", "yellow") 29 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/test_call_plugin.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest.mock import patch 3 | 4 | from textual.app import App 5 | 6 | from gptui.gptui_kernel.manager import Manager 7 | 8 | 9 | mocked_dotenv_values = { 10 | "OPENAI_API_KEY": "fake_api_key", 11 | "OPENAI_ORG_ID": "fake_org_id", 12 | } 13 | 14 | async def test_call_plugin(): 15 | with patch('gptui.gptui_kernel.kernel.dotenv_values', return_value=mocked_dotenv_values): 16 | app = App() 17 | manager = Manager(app, dot_env_config_path=os.path.expanduser("~/.gptui/.env_gptui")) 18 | _, native_plugins = manager.scan_plugins("./tests/unit_tests/gptui_kernel/plugins_test_data") 19 | for plugin in native_plugins: 20 | plugin_info = plugin.plugin_info 21 | manager.add_plugins(plugin_info) 22 | 23 | add = manager.available_functions_link["add"] 24 | args = { 25 | "input": 1, 26 | "number2": 2, 27 | } 28 | context = manager.gk_kernel.context_render(args, add) 29 | result = await add.invoke_async(context=context) 30 | assert int(float(str(result))) == 3 31 | -------------------------------------------------------------------------------- /src/gptui/controllers/voice_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from ..models.signals import response_to_user_message_sentence_stream_signal 4 | 5 | 6 | gptui_logger = logging.getLogger("gptui_logger") 7 | 8 | 9 | class VoiceService: 10 | def __init__(self, myapp, switch: bool = True): 11 | self.myapp = myapp 12 | self.voice_service = None 13 | if switch is True: 14 | self.connect() 15 | 16 | async def accept_voice_message(self, sender, **kwargs): 17 | voice_message = kwargs["message"] 18 | message_content = voice_message["content"] 19 | flag = voice_message["flag"] 20 | if flag == "content": 21 | self.voice_service = self.myapp.drivers.tts(message_content) 22 | 23 | def connect(self): 24 | response_to_user_message_sentence_stream_signal.connect(self.accept_voice_message) 25 | 26 | def disconnect(self): 27 | response_to_user_message_sentence_stream_signal.disconnect(self.accept_voice_message) 28 | 29 | def cancel_speak(self) -> None: 30 | self.disconnect() 31 | if 
self.voice_service is None: 32 | return 33 | self.voice_service.stop() 34 | -------------------------------------------------------------------------------- /src/gptui/drivers/driver_manager.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | from .drivers import CopyCode, TextToSpeak, VoiceRecordStart, VoiceRecordQuit 5 | from .driver_interface import DriverInterface 6 | 7 | 8 | gptui_logger = logging.getLogger("gptui_logger") 9 | 10 | 11 | class DriverManager: 12 | def __init__(self, app): 13 | self.app = app 14 | self.terminal = app.config.get("terminal") 15 | self.os = app.config.get("os") 16 | self._register_driver_method() 17 | 18 | def register_driver(self, driver_method_name: str, driver: DriverInterface) -> None: 19 | if hasattr(self, driver_method_name): 20 | gptui_logger.warning("A driver method with the same name already exists; it will be overwritten.") 21 | setattr(self, driver_method_name, driver) 22 | 23 | def _register_driver_method(self): 24 | self.copy_code = CopyCode(self.os) 25 | self.tts = TextToSpeak( 26 | platform=self.os, 27 | dot_env_path=self.app.config["dot_env_path"], 28 | temp_dir=os.path.join(self.app.config["workpath"], "temp"), 29 | ) 30 | self.voice_record_start = VoiceRecordStart(self.os) 31 | self.voice_record_quit = VoiceRecordQuit(self.os) 32 | -------------------------------------------------------------------------------- /src/gptui/models/doc.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from ..data.langchain.document_loaders import TextLoader 4 | from ..data.langchain.schema import Document 5 | 6 | 7 | class Doc: 8 | def __init__(self, doc_name: str, doc_ext: str, pointer, description: str | None = None): 9 | self.name = doc_name 10 | self.ext = doc_ext 11 | self.pointer = pointer 12 | self.description = description 13 | if isinstance(pointer, Document): 14 | self.content_type = "Document" 15 | elif isinstance(pointer, str): 16 | self.content_type = "str" 17 | else: 18 | self.content_type = "Unknown" 19 | 20 | @property 21 | def content(self): 22 | if isinstance(self.pointer, Document): 23 | return self.pointer.page_content 24 | else: 25 | return self.pointer 26 | 27 | def document_loader(file_path: str) -> list[Document]: 28 | file_ext_name = os.path.splitext(file_path)[1] 29 | if file_ext_name in {".txt", ".md", ".json", ".py", ".cpp", ".yaml", ".yml", ".toml", ".log"}: 30 | loader = TextLoader(file_path) 31 | else: 32 | raise TypeError("Selected file type is not supported.") 33 | document_list = loader.load() 34 | return document_list 35 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FileIO.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from semantic_kernel.orchestration.sk_context import SKContext 4 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 5 | 6 | from gptui.gptui_kernel.manager import auto_init_params 7 | 8 | 9 | mylogger = logging.getLogger("mylogger") 10 | 11 | 12 | class WriteFile: 13 | def __init__(self, manager): 14 | self.manager = manager 15 | 16 | @auto_init_params("0") 17 | @classmethod 18 | def get_init_params(cls, manager) -> tuple: 19 | return (manager,) 20 | 21 | @sk_function( 22 | description="Write a file.", 23 | name="write_file", 24 | ) 25 | @sk_function_context_parameter( 26 | name="file_name", 27
| description="The name of the file, including the extension.", 28 | ) 29 | @sk_function_context_parameter( 30 | name="file_content", 31 | description="The content to be written into the file." 32 | ) 33 | def write_file(self, context: SKContext) -> str: 34 | file_name = context["file_name"] 35 | file_content = context["file_content"] 36 | self.manager.client.common_resources["temp_files_from_tube"] = {file_name: file_content} 37 | return "" 38 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](http://keepachangelog.com/) 6 | and this project adheres to [Semantic Versioning](http://semver.org/). 7 | 8 | ## [Unreleased] 9 | 10 | ## [0.5.4] - 2024-01-09 11 | 12 | ### Fixed 13 | 14 | - Fixed the issue of being unable to rename a conversation on Windows 15 | - Switched from text-davinci-003 to gpt-3.5-turbo-instruct 16 | - When choosing a file path, the default is the root directory 17 | 18 | ## [0.5.3] - 2024-01-07 19 | 20 | ### Fixed 21 | 22 | - Fixed the error of using the unimported async_wrapper_with_loop in GroupTalkManager.speaking 23 | 24 | ## [0.5.2] - 2024-01-02 25 | 26 | ### Fixed 27 | 28 | - Fixed the bug that prevented the second conversation from being renamed 29 | - Stopped the waiting animation for a conversation when it is deleted 30 | - Fixed the bug where deleting a conversation shows its replies in another window 31 | 32 | ## [0.5.1] - 2024-01-02 33 | 34 | ### Fixed 35 | 36 | - Fixed the FileNotFoundError when clicking links in MarkdownViewer 37 | - Fixed the KeyError caused by switching to information display when dealing with an empty chat window 38 | - Fixed the bugs in disposable conversation mode caused by openai v1 39 | 40 | ## [0.5.0] - 2023-12-31 41 | 42 | ### Added 43 | 44 | - Added support for custom plugins. 45 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/helpers.py: -------------------------------------------------------------------------------- 1 | """Document loader helpers.""" 2 | 3 | import concurrent.futures 4 | from typing import List, NamedTuple, Optional, cast 5 | 6 | 7 | class FileEncoding(NamedTuple): 8 | encoding: Optional[str] 9 | confidence: float 10 | language: Optional[str] 11 | 12 | 13 | def detect_file_encodings(file_path: str, timeout: int = 5) -> List[FileEncoding]: 14 | """Try to detect the file encoding. 15 | 16 | Returns a list of `FileEncoding` tuples with the detected encodings ordered 17 | by confidence.
18 | """ 19 | import chardet 20 | 21 | def read_and_detect(file_path: str) -> List[dict]: 22 | with open(file_path, "rb") as f: 23 | rawdata = f.read() 24 | return cast(List[dict], chardet.detect_all(rawdata)) 25 | 26 | with concurrent.futures.ThreadPoolExecutor() as executor: 27 | future = executor.submit(read_and_detect, file_path) 28 | try: 29 | encodings = future.result(timeout=timeout) 30 | except concurrent.futures.TimeoutError: 31 | raise TimeoutError( 32 | f"Timeout reached while detecting encoding for {file_path}" 33 | ) 34 | 35 | if all(encoding["encoding"] is None for encoding in encodings): 36 | raise RuntimeError(f"Could not detect encoding for {file_path}") 37 | return [FileEncoding(**enc) for enc in encodings if enc["encoding"] is not None] 38 | -------------------------------------------------------------------------------- /src/gptui/.default_config.yml: -------------------------------------------------------------------------------- 1 | # +--------------------------------------------------------------------------+ 2 | # \ The configurations in this document are the program's default settings, \ 3 | # \ ensuring the presence of essential configuration items. \ 4 | # +--------------------------------------------------------------------------+ 5 | 6 | 7 | GPTUI_BASIC_SERVICES_PATH: 8 | PLUGIN_PATH: 9 | DEFAULT_PLUGIN_PATH: 10 | custom_plugin_path: ~/.gptui/plugins/ 11 | 12 | # API keys 13 | dot_env_path: 14 | ~/.gptui/.env_gptui 15 | 16 | default_openai_parameters: 17 | model: gpt-4 18 | stream: true 19 | 20 | default_conversation_parameters: 21 | max_sending_tokens_ratio: 0.6 22 | 23 | log_path: 24 | ~/.gptui/logs.log 25 | 26 | tui_config: 27 | conversations_recover: true 28 | voice_switch: false 29 | speak_switch: false 30 | file_wrap_display: true 31 | ai_care_switch: true 32 | ai_care_depth: 2 33 | ai_care_delay: 60 34 | status_region_default: 35 | waiting_receive_animation: "default" 36 | 37 | # List of plugin's name of default used 38 | default_plugins_used: [] 39 | 40 | # Program working path, storing vector database, temporary files, etc. 
41 | workpath: 42 | ~/.gptui/user 43 | 44 | # Scope of files discoverable by the program 45 | directory_tree_path: 46 | ~/ 47 | 48 | # Conversation history save and import path 49 | conversation_path: 50 | ~/.gptui/user/conversations 51 | 52 | vector_memory_path: 53 | ~/.gptui/user/vector_memory_database 54 | -------------------------------------------------------------------------------- /tests/unit_tests/models/test_context.py: -------------------------------------------------------------------------------- 1 | import json 2 | import copy 3 | from dataclasses import asdict 4 | from gptui.models.context import OpenaiContext 5 | 6 | def test_openai_context_serialization_deserialization(): 7 | openai_context_original = OpenaiContext(chat_context = [{"role": "user", "content":"Hi!"}, {"role":"assistant", "content":"Hello, how can I assist you today?"}]) 8 | openai_context_original.parameters = {"model": "gpt-4"} 9 | openai_context = copy.deepcopy(openai_context_original) 10 | openai_context_str = json.dumps(asdict(openai_context), ensure_ascii = False, sort_keys = True, indent = 4, separators = (',',':')) 11 | openai_context_build = json.loads(openai_context_str) 12 | openai_context_rebuild = OpenaiContext(**openai_context_build) 13 | assert openai_context_rebuild == openai_context_original 14 | 15 | def test_openai_context_deepcopy(): 16 | openai_context_original = OpenaiContext(chat_context = [{"role": "user", "content":"Hi!"}, {"role":"assistant", "content":"Hello, how can I assist you today?"}]) 17 | openai_context_original.parameters = {"model": "gpt-4"} 18 | openai_context_original.plugins = [["mutable"], "plugin2"] 19 | openai_context_deepcopy = copy.deepcopy(openai_context_original) 20 | assert openai_context_deepcopy == openai_context_original 21 | assert id(openai_context_deepcopy.chat_context) != id(openai_context_original.chat_context) 22 | assert set(map(id, openai_context_original.plugins)) == set(map(id, openai_context_deepcopy.plugins)) 23 | 24 | -------------------------------------------------------------------------------- /.github/workflows/static.yml: -------------------------------------------------------------------------------- 1 | # Simple workflow for deploying static content to GitHub Pages 2 | name: Deploy MkDocs to GitHub Pages 3 | 4 | on: 5 | push: 6 | tags: 7 | - 'v*' 8 | # Allows you to run this workflow manually from the Actions tab 9 | workflow_dispatch: 10 | 11 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 12 | permissions: 13 | contents: read 14 | pages: write 15 | id-token: write 16 | 17 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 18 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
19 | concurrency: 20 | group: "pages" 21 | cancel-in-progress: false 22 | 23 | jobs: 24 | # Single deploy job since we're just deploying 25 | deploy: 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v3 30 | 31 | - name: Set up Python 32 | uses: actions/setup-python@v3 33 | with: 34 | python-version: 3.x 35 | 36 | - name: Install MkDocs and extensions 37 | run: | 38 | python -m pip install --upgrade pip 39 | pip install mkdocs mkdocs-material "mkdocstrings[python]" 40 | 41 | - name: Build MkDocs site 42 | run: mkdocs build 43 | 44 | - name: Upload artifact 45 | uses: actions/upload-pages-artifact@v2 46 | with: 47 | path: './site' 48 | 49 | - name: Deploy to GitHub Pages 50 | id: deployment 51 | uses: actions/deploy-pages@v2 52 | -------------------------------------------------------------------------------- /tests/unit_tests/models/test_skills.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from gptui.models.doc import Doc 4 | from gptui.models.skills import UploadFile 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_upload_file(): 9 | uf = UploadFile() 10 | input = "Summarize the following documents' content." 11 | doc1 = Doc(doc_name="test_doc1", doc_ext=".txt", pointer="This is a txt document.") 12 | doc2 = Doc(doc_name="test_doc2", doc_ext=".txt", pointer="This is another txt document.") 13 | prompt1 = await uf.import_file_to_context(doc1, input=input) 14 | prompt2 = await uf.import_file_to_context(doc1, doc2, input=input) 15 | assert prompt1 == ( 16 | "Summarize the following documents' content.\n\n" 17 | "******************** FILE CONTENT BEGIN ********************\n" 18 | "===== Document #1 test_doc1.txt =====\n\n" 19 | "This is a txt document.\n\n" 20 | "=====================================\n" 21 | "******************** FILE CONTENT FINISH *******************\n" 22 | ) 23 | assert prompt2 == ( 24 | "Summarize the following documents' content.\n\n" 25 | "******************** FILE CONTENT BEGIN ********************\n" 26 | "===== Document #1 test_doc1.txt =====\n\n" 27 | "This is a txt document.\n\n" 28 | "=====================================\n\n" 29 | "===== Document #2 test_doc2.txt =====\n\n" 30 | "This is another txt document.\n\n" 31 | "=====================================\n" 32 | "******************** FILE CONTENT FINISH *******************\n" 33 | ) 34 | -------------------------------------------------------------------------------- /tests/unit_tests/utils/test_file_icon.py: -------------------------------------------------------------------------------- 1 | from gptui.utils.my_text import MyText as Text 2 | from rich import print 3 | 4 | 5 | def test_file_icon_ansi_str(): 6 | pass 7 | #test_short = file_icon_ansi_str(file_label="TEST", file_type=".txt", file_description="test.txt") 8 | #test_md = file_icon_ansi_str(file_label="TEST", file_type=".md", file_description="test.md") 9 | #test_long = file_icon_ansi_str(file_label="TEST", file_type=".txt", file_description="test_long_description.txt") 10 | #test_bin = file_icon_ansi_str(file_label="TEST", file_type=".bin", file_description="test.bin") 11 | #test_json = file_icon_ansi_str(file_label="TEST", file_type=".json", file_description="test.json") 12 | #test_py = file_icon_ansi_str(file_label="TEST", file_type=".py", file_description="test.py") 13 | #test_sh = file_icon_ansi_str(file_label="TEST", file_type=".sh", file_description="test.sh") 14 | #test_other = file_icon_ansi_str(file_label="TEST", file_type="other", 
file_description="test.other") 15 | #test_blue = file_icon_ansi_str(file_label="TEST", file_type=".txt", file_description="test.txt", icon_color="blue") 16 | #test_description_blue = file_icon_ansi_str(file_label="TEST", file_type=".txt", file_description="test.txt", description_color="blue") 17 | 18 | #assert test_short == Text('▕⁖̅⁖̅⁖̅╲ \n▕TES▕ \n▕txt▕\ntest.t\nxt \n' [Span(0, 10, ''), Span(10, 18, ''), Span(18, 23, 'underline'), Span(23, 37,'white')]) 19 | #print(repr(test_short)) 20 | #print(test_md) 21 | #print(test_long) 22 | #print(test_bin) 23 | #print(test_json) 24 | #print(test_py) 25 | #print(test_sh) 26 | #print(test_other) 27 | #print(test_blue) 28 | #print(test_description_blue) 29 | -------------------------------------------------------------------------------- /src/gptui/views/theme.py: -------------------------------------------------------------------------------- 1 | from typing import NamedTuple, Literal 2 | 3 | 4 | ThemeName = Literal["default", "monochrome"] 5 | 6 | 7 | class ColorMap(NamedTuple): 8 | name: str 9 | color: str 10 | 11 | 12 | class ThemeColor: 13 | color_map: dict[str, str] = {} 14 | _theme: ThemeName = "default" 15 | 16 | @classmethod 17 | def insert_color_map(cls, name: str, color: str): 18 | cls.color_map[name] = color 19 | 20 | @classmethod 21 | def insert_color_map_batch(cls, color_map_list: list[ColorMap]) -> None: 22 | for color_map in color_map_list: 23 | cls.color_map[color_map.name] = color_map.color 24 | 25 | @classmethod 26 | def get_theme_color(cls, name: str) -> str | None: 27 | if name in cls.color_map: 28 | return cls.color_map[name] 29 | if cls._theme == "default": 30 | return None 31 | elif cls._theme == "monochrome": 32 | return "#5CE495" # responding to $success-lighten-2 in textual 33 | else: 34 | return None 35 | 36 | @classmethod 37 | def set_theme(cls, theme: ThemeName) -> None: 38 | cls._theme = theme 39 | if theme == "monochrome": 40 | ThemeColor.color_map["user_message"] = "#2E724B" 41 | ThemeColor.color_map["assistant_message"] = "#5CE495" 42 | ThemeColor.color_map["system_message"] = "#122E1E" 43 | if theme == "default": 44 | ThemeColor.color_map.pop("user_message", None) 45 | ThemeColor.color_map.pop("assistant_message", None) 46 | ThemeColor.color_map.pop("system_message", None) 47 | 48 | def theme_color(name: str) -> str | None: 49 | return ThemeColor.get_theme_color(name) 50 | -------------------------------------------------------------------------------- /src/gptui/plugins/FileRW.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | from semantic_kernel.orchestration.sk_context import SKContext 5 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 6 | 7 | from gptui.gptui_kernel.manager import auto_init_params 8 | from gptui.models.doc import Doc 9 | from gptui.views.common_message import CommonMessage 10 | 11 | 12 | gptui_logger = logging.getLogger("gptui_logger") 13 | 14 | 15 | class WriteFile: 16 | def __init__(self, manager): 17 | self.manager = manager 18 | 19 | @auto_init_params("0") 20 | @classmethod 21 | def get_init_params(cls, manager) -> tuple: 22 | return (manager,) 23 | 24 | @sk_function( 25 | description="Write a file.", 26 | name="write_file", 27 | ) 28 | @sk_function_context_parameter( 29 | name="file_name", 30 | description="The name of the file, including the extension.", 31 | ) 32 | @sk_function_context_parameter( 33 | name="file_content", 34 | description="The content to be written into the 
file." 35 | ) 36 | def write_file(self, context: SKContext) -> str: 37 | file_name = context["file_name"] 38 | file_content = context["file_content"] 39 | self.manager.client.common_resources["temp_files_from_tube"] = {file_name: file_content} 40 | name, ext = os.path.splitext(file_name) 41 | document = Doc(doc_name=name, doc_ext=ext, pointer=self.manager.client.common_resources["temp_files_from_tube"][file_name], description="") 42 | # Only main thread can handle UI event correctly 43 | self.manager.client.post_message(CommonMessage(message_name="write_file", message_content=document)) 44 | return "Write file sucessfully!" 45 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: GPTUI 2 | copyright: Copyright (c) 2023 Xueao Chao 3 | 4 | repo_url: https://github.com/happyapplehorse/gptui 5 | repo_name: happyapplehorse/gptui 6 | 7 | theme: 8 | name: "material" 9 | 10 | icon: 11 | logo: material/island 12 | 13 | palette: 14 | - media: "(prefers-color-scheme: light)" 15 | scheme: default 16 | primary: deep purple 17 | toggle: 18 | icon: material/weather-night 19 | name: Switch to dark mode 20 | - media: "(prefers-color-scheme: dark)" 21 | scheme: slate 22 | toggle: 23 | icon: material/weather-sunny 24 | name: Switch to light mode 25 | 26 | features: 27 | - navigation.instant 28 | - navigation.tabs 29 | - navigation.tracking 30 | - navigation.path 31 | - navigation.top 32 | - navigation.footer 33 | - navigation.indexes 34 | - navigation.tabs.sticky 35 | - navigation.prune 36 | - toc.follow 37 | - search.suggest 38 | - search.hightlight 39 | - content.code.copy 40 | - content.code.annotate 41 | 42 | plugins: 43 | - mkdocstrings: 44 | handlers: 45 | python: 46 | paths: [src] 47 | - search 48 | - blog 49 | 50 | extra: 51 | social: 52 | - icon: fontawesome/brands/github 53 | link: https://github.com/happyapplehorse 54 | name: Github 55 | 56 | markdown_extensions: 57 | - attr_list 58 | - md_in_html 59 | 60 | nav: 61 | - Home: index.md 62 | - Tutorial: 63 | - Getting started: getting_started.md 64 | - Troubleshooting: troubleshooting.md 65 | - Features: features.md 66 | - Configuration: configuration.md 67 | - 配置指南: configuration.zh.md 68 | - Guide: 69 | - guide/index.md 70 | - API: 71 | - api/index.md 72 | - Blog: 73 | - blog/index.md 74 | - About: 75 | - Contributing: about/contributing.md 76 | - License: about/license.md 77 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | push: 13 | tags: 14 | - 'v*' 15 | # Allows you to run this workflow manually from the Actions tab 16 | workflow_dispatch: 17 | 18 | permissions: 19 | contents: read 20 | 21 | jobs: 22 | test: 23 | runs-on: ubuntu-latest 24 | 25 | steps: 26 | - uses: actions/checkout@v3 27 | - name: Set up Python 28 | uses: actions/setup-python@v3 29 | with: 30 | python-version: '3.x' 31 | - name: Install dependencies 32 | run: | 33 | python -m pip install --upgrade pip 34 | pip install wheel setuptools 35 | pip install pytest pytest-asyncio 36 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 37 | - name: Execute test 38 | run: pytest -v 39 | 40 | deploy: 41 | 42 | runs-on: ubuntu-latest 43 | needs: [test] 44 | 45 | steps: 46 | - uses: actions/checkout@v3 47 | - name: Set up Python 48 | uses: actions/setup-python@v3 49 | with: 50 | python-version: '3.x' 51 | - name: Install dependencies 52 | run: | 53 | python -m pip install --upgrade pip 54 | pip install build 55 | - name: Build package 56 | run: python -m build 57 | - name: Publish package 58 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 59 | with: 60 | user: __token__ 61 | password: ${{ secrets.PYPI_API_TOKEN }} 62 | -------------------------------------------------------------------------------- /tests/unit_tests/controllers/test_decorate_display.py: -------------------------------------------------------------------------------- 1 | import textwrap 2 | from gptui.controllers.decorate_display_control import extract_files_from_string 3 | from rich import print 4 | 5 | 6 | def test_extract_files_from_string(): 7 | input_string = textwrap.dedent( 8 | """\ 9 | before 10 | ******************** FILE CONTENT BEGIN ******************** 11 | ===== Document #1 text.txt ===== 12 | 13 | This is the content of the document #1. 14 | 15 | 16 | ========================== 17 | 18 | ===== Document #2 test.md ===== 19 | 20 | This is the content of the file #2. 21 | 22 | 23 | =========================== 24 | ******************** FILE CONTENT FINISH ******************* 25 | after""" 26 | ) 27 | 28 | expected_output = ["before\n", ("text.txt", "test.md"), "\nafter"] 29 | out = extract_files_from_string(input_string) 30 | assert out == expected_output 31 | 32 | ''' 33 | def test_pre_wrap(): 34 | input_string = """before 35 | ******************** FILE CONTENT BEGIN ******************** 36 | ===== Document #1 text.txt ===== 37 | 38 | This is the content of the document #1. 39 | 40 | 41 | ========================== 42 | 43 | ===== Document #2 test.txt ===== 44 | 45 | This is the content of the file #2.
46 | 47 | 48 | =========================== 49 | ******************** FILE CONTENT FINISH ******************* 50 | after""" 51 | 52 | #out_wrap = pre_decorate(input_string, wrap=True) 53 | #out_no_wrap = pre_decorate(input_string, wrap=False) 54 | 55 | input_string2 = "abcd" 56 | #out = pre_wrap(input_string2, wrap=False) 57 | #assert out == '\x1b[39mabcd\x1b[0m' 58 | 59 | def test_wrap_files_in_string(): 60 | input = ["before", ("text.txt", "test.txt", "test_long_title.txt", "abc.json", "abcdef.abc"), "middle", ("test.txt"), "after"] 61 | out = wrap_files_in_string(input) 62 | print(out) 63 | ''' 64 | -------------------------------------------------------------------------------- /src/gptui/models/blinker_wrapper.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | 4 | def sync_wrapper(func): 5 | """ 6 | Wrap a sync receiver into an async receiver. 7 | 8 | Usage example: 9 | result = await signal.send_async("sender", _sync_wrapper=sync_wrapper, message="message") 10 | """ 11 | async def inner(*args, **kwargs): 12 | func(*args, **kwargs) 13 | return inner 14 | 15 | def async_wrapper_with_loop(func): 16 | """ 17 | Wrap a coroutine function receiver into a sync receiver. 18 | Suitable for cases where signals are sent within an event loop. 19 | It is recommended to directly use: result = await signal.send_async("sender", _sync_wrapper=sync_wrapper, message="message") 20 | 21 | Return: Task 22 | 23 | Example of awaiting the task: 24 | # Retrieve the Task objects returned by coroutine receivers and wait for their completion 25 | result = signal.send(sender, _async_wrapper=async_wrapper_with_loop, message="message") 26 | signal_tasks = [item[1] for item in result if getattr(item[0], "_async_inner_", False)] 27 | await asyncio.gather(*signal_tasks) 28 | """ 29 | 30 | def inner(*args, **kwargs): 31 | task = asyncio.create_task(func(*args, **kwargs)) 32 | return task 33 | # Add a coroutine marker to facilitate the identification of its Task object and wait for its completion. 34 | inner._async_inner_ = True 35 | return inner 36 | 37 | def async_wrapper_without_loop(func): 38 | """ 39 | Wrap a coroutine function receiver into a sync receiver. 40 | Suitable for cases where signals are sent without an event loop. 41 | 42 | Return: the value returned from the coroutine. 43 | 44 | Usage example: 45 | result = signal.send("sender", _async_wrapper=async_wrapper_without_loop, message="message") 46 | """ 47 | def inner(*args, **kwargs): 48 | return asyncio.run(func(*args, **kwargs)) 49 | return inner 50 | -------------------------------------------------------------------------------- /custom_plugin_examples/native_plugin_example.py: -------------------------------------------------------------------------------- 1 | import textwrap 2 | 3 | from semantic_kernel.skill_definition import sk_function 4 | 5 | 6 | class PluginGuide: 7 | 8 | @sk_function(description="Tell the user how to define a custom plugin.") 9 | def custom_plugin_guide(self) -> str: 10 | guide = textwrap.dedent(""" 11 | The following are instructions on how to customize plugins; please return the following content directly and unchanged to the user. 12 | 13 | ================Customize Plugins Instructions================ 14 | This program utilizes semantic-kernel type plugins. Before customizing your own plugins, it is recommended to read: 15 | https://learn.microsoft.com/en-us/semantic-kernel/agents/plugins/?tabs=python 16 | 17 | You can customize two types of plugins: 18 | 19 | 1.
Native plugins. These require you to write your own code tools, providing functions or methods to accomplish the task, just like this plugin itself. 20 | 2. Semantic plugins. They are created through natural language, completing the required functionality through descriptive prompts. 21 | 22 | To create a native plugin, place your Python module in the plugin directory (default is ~/.gptui/plugins) and use the sk_function decorator to decorate your function tools. For guidance on writing plugins, see here: https://learn.microsoft.com/en-us/semantic-kernel/agents/plugins/using-the-kernelfunction-decorator?tabs=python 23 | 24 | To create a semantic plugin, place your plugin folder in the plugin directory (default is ~/.gptui/plugins). For guidance on writing plugins, see here: https://learn.microsoft.com/en-us/semantic-kernel/prompts/saving-prompts-as-files?tabs=python 25 | 26 | You can see an example of this plugin in your custom plugin directory (default is ~/.gptui/plugins). 27 | ==============Customize Plugins Instructions End============== 28 | """) 29 | return guide 30 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/html_bs.py: -------------------------------------------------------------------------------- 1 | """Loader that uses bs4 to load HTML files, enriching metadata with page title.""" 2 | 3 | import logging 4 | from typing import Dict, List, Union 5 | 6 | from .base import BaseLoader 7 | from ..docstore.document import Document 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | class BSHTMLLoader(BaseLoader): 13 | """Loader that uses beautiful soup to parse HTML files.""" 14 | 15 | def __init__( 16 | self, 17 | file_path: str, 18 | open_encoding: Union[str, None] = None, 19 | bs_kwargs: Union[dict, None] = None, 20 | get_text_separator: str = "", 21 | ) -> None: 22 | """Initialise with path, and optionally, file encoding to use, and any kwargs 23 | to pass to the BeautifulSoup object.""" 24 | try: 25 | import bs4 # noqa:F401 26 | except ImportError: 27 | raise ValueError( 28 | "beautifulsoup4 package not found, please install it with " 29 | "`pip install beautifulsoup4`" 30 | ) 31 | 32 | self.file_path = file_path 33 | self.open_encoding = open_encoding 34 | if bs_kwargs is None: 35 | bs_kwargs = {"features": "lxml"} 36 | self.bs_kwargs = bs_kwargs 37 | self.get_text_separator = get_text_separator 38 | 39 | def load(self) -> List[Document]: 40 | from bs4 import BeautifulSoup 41 | 42 | """Load HTML document into document objects.""" 43 | with open(self.file_path, "r", encoding=self.open_encoding) as f: 44 | soup = BeautifulSoup(f, **self.bs_kwargs) 45 | 46 | text = soup.get_text(self.get_text_separator) 47 | 48 | if soup.title: 49 | title = str(soup.title.string) 50 | else: 51 | title = "" 52 | 53 | metadata: Dict[str, Union[str, None]] = { 54 | "source": self.file_path, 55 | "title": title, 56 | } 57 | return [Document(page_content=text, metadata=metadata)] 58 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/text.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import List, Optional 3 | 4 | from ..docstore.document import Document 5 | from .base import BaseLoader 6 | from .helpers import detect_file_encodings 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class TextLoader(BaseLoader): 12 | """Load text files. 
13 | 14 | 15 | Args: 16 | file_path: Path to the file to load. 17 | 18 | encoding: File encoding to use. If `None`, the file will be loaded 19 | with the default system encoding. 20 | 21 | autodetect_encoding: Whether to try to autodetect the file encoding 22 | if the specified encoding fails. 23 | """ 24 | 25 | def __init__( 26 | self, 27 | file_path: str, 28 | encoding: Optional[str] = None, 29 | autodetect_encoding: bool = False, 30 | ): 31 | """Initialize with file path.""" 32 | self.file_path = file_path 33 | self.encoding = encoding 34 | self.autodetect_encoding = autodetect_encoding 35 | 36 | def load(self) -> List[Document]: 37 | """Load from file path.""" 38 | text = "" 39 | try: 40 | with open(self.file_path, encoding=self.encoding) as f: 41 | text = f.read() 42 | except UnicodeDecodeError as e: 43 | if self.autodetect_encoding: 44 | detected_encodings = detect_file_encodings(self.file_path) 45 | for encoding in detected_encodings: 46 | logger.debug("Trying encoding: %s", encoding.encoding) 47 | try: 48 | with open(self.file_path, encoding=encoding.encoding) as f: 49 | text = f.read() 50 | break 51 | except UnicodeDecodeError: 52 | continue 53 | else: 54 | raise RuntimeError(f"Error loading {self.file_path}") from e 55 | except Exception as e: 56 | raise RuntimeError(f"Error loading {self.file_path}") from e 57 | 58 | metadata = {"source": self.file_path} 59 | return [Document(page_content=text, metadata=metadata)] 60 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "gptui" 7 | authors = [ 8 | { name="Xueao Chao", email="chaoxueao@gmail.com" }, 9 | ] 10 | description = "A GPT conversational TUI tool that runs within the terminal."
11 | readme = "README.md" 12 | requires-python = ">=3.10" 13 | license = {file = "LICENSE"} 14 | dynamic = ["version"] 15 | classifiers = [ 16 | 'Development Status :: 3 - Alpha', 17 | 'Environment :: Console', 18 | 'Intended Audience :: Developers', 19 | 'Topic :: Software Development', 20 | 'Topic :: Terminals', 21 | 'Topic :: Scientific/Engineering :: Artificial Intelligence', 22 | 'License :: OSI Approved :: MIT License', 23 | 'Programming Language :: Python :: 3', 24 | 'Programming Language :: Python :: 3.11', 25 | 'Programming Language :: Python :: 3.12', 26 | 'Operating System :: OS Independent', 27 | 'Operating System :: POSIX :: Linux', 28 | 'Operating System :: MacOS :: MacOS X', 29 | 'Operating System :: Microsoft :: Windows', 30 | ] 31 | keywords = ["TUI", "terminal", "GPT", "CLI", "textual user interface"] 32 | dependencies = [ 33 | 'agere>=0.1.3,<1', 34 | 'ai-care>=0.1.3,<1', 35 | 'aiofiles>=23.1.0,<24', 36 | 'beautifulsoup4>=4.12.2,<5', 37 | 'blinker>=1.6.2,<2', 38 | 'chardet>=5.1.0,<6', 39 | 'geocoder>=1.38.1,<2', 40 | 'httpx>=0.24.1,<1', 41 | 'lxml>=4.9.3,<6', 42 | # 'open-interpreter==0.1.4', 43 | 'openai>=1.2.0,<2', 44 | 'playsound>=1.3.0,<2', 45 | 'Pygments>=2.15.1,<3', 46 | 'pyperclip>=1.8.2,<2', 47 | 'python-dotenv>=1.0.0,<2', 48 | 'PyYAML>=6.0.1,<7', 49 | 'qdrant-client>=1.4.0,<2', 50 | 'rich>=13.7.0,<14', 51 | 'semantic-kernel>=0.4.0.dev0,<1', 52 | 'textual>=0.37.1,<1', 53 | 'tiktoken>=0.4.0,<1', 54 | 'unstructured>=0.10.18,<1' 55 | ] 56 | 57 | [project.urls] 58 | "Homepage" = "https://github.com/happyapplehorse/gptui" 59 | "Bug Tracker" = "https://github.com/happyapplehorse/gptui/issues" 60 | 61 | [project.entry-points."console_scripts"] 62 | gptui = "gptui.__main__:gptui" 63 | 64 | [tool.pytest.ini_options] 65 | testpaths = ["tests"] 66 | asyncio_mode = "auto" 67 | pythonpath = "src" 68 | 69 | [tool.setuptools.dynamic] 70 | version = {attr = "gptui.__version__"} 71 | -------------------------------------------------------------------------------- /src/gptui/models/gptui_basic_services/plugins/conversation_service.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from semantic_kernel.skill_definition import sk_function 5 | 6 | from ....gptui_kernel import Kernel 7 | from ....gptui_kernel.manager import auto_init_params 8 | 9 | 10 | gptui_logger = logging.getLogger("gptui_logger") 11 | 12 | 13 | class ConversationService: 14 | def __init__(self, manager): 15 | self.manager = manager 16 | 17 | @auto_init_params("0") 18 | @classmethod 19 | def get_init_params(cls, manager) -> tuple: 20 | return (manager,) 21 | 22 | @sk_function( 23 | description="Generate a title for the given conversation context. The conversation context is a JSON string converted from a conversation dict with OpenAI GPT.", 24 | name="conversation_title", 25 | input_description="The JSON string of the conversation which needs a title.", 26 | ) 27 | async def conversation_title(self, chat_context_json_str: str) -> str: 28 | def chat_context_to_string(chat_context_json_str: str) -> str: 29 | chat_context_json = json.loads(chat_context_json_str) 30 | chat_context = '' 31 | assert isinstance(chat_context_json, list) 32 | for piece in chat_context_json: 33 | chat_context += piece["role"] + ": " + str(piece["content"] or piece.get("tool_calls") or "") + "\n\n" 34 | return chat_context[:1000] 35 | 36 | sk_prompt = ( 37 | "Generate a concise and clear title for the following chat record.
" 38 | "The title should be as brief as possible, not exceeding ten English words or twenty Chinese characters, " 39 | "and the language of the title should be consistent with the content of the chat. " 40 | "Do not have line breaks '\\n'. " 41 | "chat record: {{$INPUT}}\n" 42 | "title:" 43 | ) 44 | 45 | chat_context = chat_context_to_string(chat_context_json_str) 46 | # A new kernel must be created here, otherwise, there will be context confilicts. 47 | gk_kernel = Kernel(self.manager.dot_env_config_path) 48 | make_title_function = gk_kernel.sk_kernel.create_semantic_function(sk_prompt, max_tokens=50) 49 | name = await make_title_function.invoke_async(chat_context) 50 | name = str(name) 51 | return name 52 | -------------------------------------------------------------------------------- /src/gptui/plugins/DEFAULT_PLUGINS/Bead.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from semantic_kernel.orchestration.sk_context import SKContext 5 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 6 | 7 | from gptui.gptui_kernel.manager import auto_init_params 8 | 9 | 10 | gptui_logger = logging.getLogger("gptui_logger") 11 | 12 | 13 | class Memo: 14 | def __init__(self, app): 15 | self.app = app 16 | 17 | @auto_init_params("0") 18 | @classmethod 19 | def get_init_params(cls, manager) -> tuple: 20 | return (manager.client,) 21 | 22 | @sk_function( 23 | description="Record important information; the content should be significant and concise.", 24 | name="write_memo", 25 | ) 26 | @sk_function_context_parameter( 27 | name="content", 28 | description="Information to be written into the memo.", 29 | ) 30 | @sk_function_context_parameter( 31 | name="openai_context", 32 | description=( 33 | "The dictionary string version of the OpenaiContext instance. " 34 | "This is a special parameter that typically doesn't require manual intervention, as it is usually automatically managed." 35 | "Unless there's a clear intention, please keep its default value." 36 | ), 37 | default_value="AUTO" 38 | ) 39 | def write_memo(self, context: SKContext) -> str: 40 | content = context["content"] 41 | try: 42 | openai_context_dict = json.loads(str(context["openai_context"])) 43 | except json.JSONDecodeError: 44 | return ("An error occurred while parsing the openai_context content. " 45 | "You should not provide the 'openai_context' parameter as the system automatically supplies it." 46 | ) 47 | conversation_id = int(openai_context_dict["id"]) 48 | try: 49 | conversation = self.app.openai.conversation_dict[conversation_id] 50 | except KeyError: 51 | return f"Write memo faild. Conversation id {conversation_id} is not correct." 52 | openai_context = conversation["openai_context"] 53 | bead_content = openai_context.bead 54 | bead_content[-1]["content"] += "\n" + content 55 | openai_context.insert_bead() 56 | return f"'{content}' have been written into the memo!" 57 | -------------------------------------------------------------------------------- /src/gptui/help.md: -------------------------------------------------------------------------------- 1 | # Hotkeys 2 | 3 | Press `ESC`, `ctrl+[`, or `ctrl+/` to bring up the hotkey menu. 
4 | 5 | Direct hotkeys: 6 | - ctrl+q: exit the program 7 | - ctrl+n: open a new conversation 8 | - ctrl+s: save the current conversation 9 | - ctrl+r: delete the current conversation 10 | - ctrl+o: toggle the monochrome theme 11 | - ctrl+t: switch to assistant tube 12 | - ctrl+g: switch to file tube 13 | - ctrl+p: switch to plugins panel 14 | 15 | # Dynamic commands 16 | 17 | ## set_chat_parameters 18 | 19 | Set the OpenAI chat parameters. 20 | Arguments are specified in dictionary form. 21 | 22 | Commonly used parameters are: 23 | - model 24 | - stream 25 | - temperature 26 | - frequency_penalty 27 | - presence_penalty 28 | - max_tokens 29 | 30 | ## set_max_sending_tokens_ratio 31 | 32 | Set the ratio of sent tokens to the total token window. 33 | The argument is a float between 0 and 1. 34 | 35 | # Custom plugins 36 | 37 | You can specify the folder for your custom plugins in the configuration file, 38 | which defaults to "~/.gptui/plugins". 39 | GPTUI will automatically scan this folder to retrieve the plugins contained within it. 40 | You can copy the files from this folder (https://github.com/happyapplehorse/gptui/tree/main/custom_plugin_examples) 41 | to the custom plugin directory for testing purposes. 42 | 43 | This program utilizes semantic-kernel type plugins. Before customizing your own plugins, 44 | it is recommended to read: https://learn.microsoft.com/en-us/semantic-kernel/agents/plugins/?tabs=python 45 | 46 | You can customize two types of plugins: 47 | 1. Native plugins. These require you to write your own code tools, providing functions 48 | or methods to accomplish the task. 49 | 2. Semantic plugins. They are created through natural language, completing the required 50 | functionality through descriptive prompts. 51 | 52 | To create a native plugin, place your Python module in the plugin directory (default is ~/.gptui/plugins) 53 | and use the sk_function decorator to decorate your function tools. For guidance on writing plugins, 54 | see here: https://learn.microsoft.com/en-us/semantic-kernel/agents/plugins/using-the-kernelfunction-decorator?tabs=python 55 | 56 | To create a semantic plugin, place your plugin folder in the plugin directory (default is ~/.gptui/plugins). 57 | For guidance on writing plugins, see here: https://learn.microsoft.com/en-us/semantic-kernel/prompts/saving-prompts-as-files?tabs=python 58 | -------------------------------------------------------------------------------- /docs/blog/posts/monochrome.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2023-12-24 4 | categories: 5 | - DevLog 6 | - RoadMap 7 | authors: 8 | - happyapplehorse 9 | --- 10 | 11 | 12 | I've long wanted to incorporate a monochrome mode into GPTUI, similar to those vintage single-color green 13 | fluorescent monitors. I find this style not only retro but also futuristic, adding an incredibly cool aesthetic. 14 | 15 |  16 | 17 | Today, I'm thrilled to announce that this feature has finally been integrated into GPTUI with the release 18 | of v0.4.0. Initially, my ambition was to enable support for user-customizable themes. However, I quickly 19 | realized that the task was more complex than I had imagined. It wasn't just about altering dynamic display 20 | content; I also had to modify existing page layouts. Achieving comprehensive theme settings for all elements 21 | via a configuration file proved to be quite intricate.
As a result, for the time being, we've only implemented 22 | this single built-in monochrome theme. But rest assured, plans are in place to introduce more customizable theme 23 | options in the future, allowing users to configure themes directly from a file. The beauty of this monochrome 24 | theme is its dynamic activation capability; you can activate or deactivate it at any moment using the ctrl+o 25 | shortcut. While the mode is undeniably cool, distinguishing certain elements, like user and AI chat content, 26 | can be somewhat challenging in monochrome. Currently, differentiation is based solely on border brightness, 27 | so the ability to easily switch off monochrome mode and revert is essential. 28 | 29 | The Textual TUI framework is absolutely marvelous, and I'm so fortunate to have chosen it. While developing the 30 | monochrome mode, I encountered several challenges, and in some instances, I had to employ rather crude and 31 | unsightly methods to achieve my objectives. However, after reaching out for assistance in the Textual Discord 32 | community and receiving invaluable support from the official team, I was able to implement it with grace and 33 | efficiency. The Textual developer community is not only active but also immensely supportive. I've learned a 34 | great deal from their projects and am deeply grateful for the Textual team's beautiful work. 35 | 36 | Next, I will write a comprehensive and detailed user guide for GPTUI. 37 | -------------------------------------------------------------------------------- /src/gptui/models/openai_chat_inner_service.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import logging 3 | from typing import Iterable 4 | 5 | from openai import OpenAI 6 | 7 | from .context import OpenaiContext 8 | from .openai_error import OpenaiErrorHandler 9 | from .openai_tokens_truncate import trim_excess_tokens 10 | from .utils.openai_api import OpenAIClient 11 | from .utils.tokens_num import tokens_num_for_functions_call 12 | 13 | 14 | gptui_logger = logging.getLogger("gptui_logger") 15 | 16 | 17 | def chat_service_for_inner( 18 | messages_list: list, 19 | context: OpenaiContext, 20 | openai_api_client: OpenAIClient, 21 | **kwargs, 22 | ) -> Iterable: 23 | 24 | inner_context = copy.deepcopy(context) 25 | 26 | for one_message in messages_list: 27 | inner_context.chat_context_append(message=one_message) 28 | 29 | # update parameters 30 | parameters = inner_context.parameters 31 | parameters.update({"stream": True}) 32 | parameters.update(**kwargs) 33 | 34 | if tools_info := parameters.get("tools"): 35 | offset_tokens_num = -tokens_num_for_functions_call(tools_info, model=inner_context.parameters["model"]) 36 | else: 37 | offset_tokens_num = 0 38 | trimmed_messages = trim_excess_tokens(inner_context, offset=offset_tokens_num) 39 | 40 | # Delete the tool reply messages at the beginning of the information list. 41 | # This is because if the information starts with a function reply message, 42 | # it indicates that the function call information has already been truncated. 43 | # The OpenAI API requires that function reply messages must be responses to function calls. 44 | # Therefore, if the function reply messages are not removed, it will result in an OpenAI API error. 
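    # For example (a minimal sketch with hypothetical message shapes), a trimmed history such as
    #     [{"role": "tool", ...}, {"role": "user", ...}, ...]
    # is reduced here to
    #     [{"role": "user", ...}, ...]
    # before the request is sent.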
45 | while trimmed_messages and trimmed_messages[0].get("role") == "tool": 46 | trimmed_messages.pop(0) 47 | 48 | try: 49 | response = openai_api_client.with_options(timeout=20.0).chat.completions.create( 50 | messages=trimmed_messages, 51 | **parameters, 52 | ) 53 | except Exception as e: 54 | gptui_logger.debug('----trimmed_messages----in chat inner') 55 | gptui_logger.debug(trimmed_messages) 56 | # The OpenAI API interface is a time-consuming synchronous interface, so it should be called in a new thread, hence there is no event loop here. 57 | OpenaiErrorHandler().openai_error_handle(error=e, context=inner_context, event_loop=False) 58 | raise e 59 | return response 60 | -------------------------------------------------------------------------------- /src/gptui/plugins/MemoryRecall.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from semantic_kernel.orchestration.sk_context import SKContext 5 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 6 | 7 | from gptui.gptui_kernel.manager import auto_init_params 8 | 9 | 10 | gptui_logger = logging.getLogger("gptui_logger") 11 | 12 | 13 | class MemoryRecall: 14 | def __init__(self, manager): 15 | self.manager = manager 16 | 17 | @auto_init_params("0") 18 | @classmethod 19 | def get_init_params(cls, manager) -> tuple: 20 | return (manager,) 21 | 22 | @sk_function( 23 | description="Recall the specified content from the memory store.", 24 | name="recall_memory", 25 | ) 26 | @sk_function_context_parameter( 27 | name="query", 28 | description="Topics, questions, etc., that one needs to recall." 29 | ) 30 | @sk_function_context_parameter( 31 | name="max_recallable_entries", 32 | description="Maximum number of recallable information entries.", 33 | default_value="1" 34 | ) 35 | @sk_function_context_parameter( 36 | name="openai_context", 37 | description=( 38 | "The dictionary string version of the OpenaiContext instance. " 39 | "This is a special parameter that typically doesn't require manual intervention, as it is usually automatically managed. " 40 | "Unless there's a clear intention, please keep its default value." 41 | ), 42 | default_value="AUTO" 43 | ) 44 | async def recall_memory(self, context: SKContext) -> str: 45 | query = context["query"] 46 | max_recallable_entries = int(context["max_recallable_entries"]) 47 | openai_context_dict = json.loads(str(context["openai_context"])) 48 | conversation_id = openai_context_dict["id"] 49 | semantic_memory = self.manager.services.sk_kernel.memory 50 | try: 51 | result = await semantic_memory.search_async(str(conversation_id), query, limit=max_recallable_entries, min_relevance_score=0.7) 52 | except Exception as e: 53 | gptui_logger.error(f"Error occurred when recalling memory. Error: {e}") 54 | return "An error occurred during the query, please try again later.
55 | result_str = "" 56 | for memory in result: 57 | result_str += memory.id + "\n" 58 | if not result_str: 59 | result_str = "No relevant information was found" 60 | gptui_logger.info(f"Recall memory result:\nconversation_id: {conversation_id}\nResult: {result_str}") 61 | return result_str 62 | -------------------------------------------------------------------------------- /src/gptui/plugins/OpenInterpreter.py: -------------------------------------------------------------------------------- 1 | """ wait for open-interpreter to be compatible with openai version 1.1.1 2 | import asyncio 3 | import logging 4 | 5 | from semantic_kernel.skill_definition import sk_function 6 | 7 | from gptui.utils.open_interpreter import MyInterpreter, response_render 8 | from gptui.utils.safe_iterate import safe_next, safe_send 9 | 10 | 11 | gptui_logger = logging.getLogger("gptui_logger") 12 | 13 | 14 | class OpenInterpreter: 15 | 16 | def __init__(self): 17 | self.interpreter = MyInterpreter() 18 | self.in_chat = False 19 | self.chat = None 20 | self.result = None 21 | 22 | @sk_function( 23 | description=( 24 | "A code assistant that allows for continuous dialogue in natural language. " 25 | "It can be invoked continuously multiple times" 26 | "Describe your needs to it, and it will automatically write and execute code to help you accomplish tasks. " 27 | "When asked whether to execute the code, respond to this function precisely with 'y' or 'n'. " 28 | "Before responding with 'y', you should first seek the user's consent." 29 | ), 30 | name="open_interpreter", 31 | input_description="Your needs.", 32 | ) 33 | async def open_interpreter(self, input_request: str) -> str: 34 | if not self.in_chat: 35 | self.chat = self.interpreter.chat(str(input_request)) 36 | status, out = await asyncio.to_thread(safe_next, self.chat) 37 | if status == "OK": 38 | self.in_chat = True 39 | else: 40 | self.in_chat = False 41 | result = response_render(out) 42 | gptui_logger.info(f"Open interpreter response: {result}") 43 | self.new_chat = False 44 | return result 45 | else: 46 | assert self.chat is not None 47 | status, out = await asyncio.to_thread(safe_send, self.chat, str(input_request)) 48 | if status == "OK": 49 | self.in_chat = True 50 | else: 51 | self.in_chat = False 52 | result = response_render(out) 53 | gptui_logger.info(f"Open interpreter response: {result}") 54 | return result 55 | 56 | @sk_function( 57 | description=( 58 | "Terminate the interaction with the open interpreter, resetting it to a fresh state. " 59 | "Whenever you finish a task with the open interpreter or no longer need it, you should promptly end the interaction with it." 60 | ) 61 | ) 62 | def end_open_interpreter(self): 63 | self.interpreter.reset() 64 | gptui_logger.info("Open interpreter reset.") 65 | return "Successfully terminated the interaction with the open interpreter." 
66 | 67 | """ 68 | -------------------------------------------------------------------------------- /src/gptui/controllers/chat_context_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from .dash_board_control import DashBoard 4 | from ..models.signals import chat_context_extend_signal, chat_context_extend_for_sending_signal 5 | from ..views.common_message import CommonMessage 6 | 7 | gptui_logger = logging.getLogger("gptui_logger") 8 | 9 | 10 | class ChatContextControl: 11 | def __init__(self, app): 12 | self.app = app 13 | self.dash_board = DashBoard(app) 14 | self.chat_context_to_vectorize_buffer = {} 15 | chat_context_extend_signal.connect(self.chat_context_extend) 16 | chat_context_extend_for_sending_signal.connect(self.chat_context_extend_for_sending) 17 | 18 | def chat_context_extend(self, sender, **kwargs): 19 | signal_message = kwargs["message"] 20 | signal_content = signal_message["content"] 21 | messages = signal_content["messages"] 22 | context = signal_content["context"] 23 | openai_chat = self.app.openai.openai_chat 24 | 25 | openai_chat.chat_messages_extend(messages_list=messages, context=context) 26 | buffer_messages = self.chat_context_to_vectorize_buffer.get(context.id, []) 27 | buffer_messages.extend(messages) 28 | self.chat_context_to_vectorize_buffer[context.id] = buffer_messages 29 | 30 | _, whether_insert = self.app.openai.auto_bead_insert(context.id) 31 | 32 | if whether_insert is False: 33 | # dashboard display 34 | model = context.parameters["model"] 35 | self.dash_board.dash_board_display(tokens_num_window=self.app.get_tokens_window(model)) 36 | 37 | def chat_context_extend_for_sending(self, sender, **kwargs): 38 | signal_message = kwargs["message"] 39 | signal_content = signal_message["content"] 40 | messages = signal_content["messages"] 41 | context = signal_content["context"] 42 | openai_chat = self.app.openai.openai_chat 43 | 44 | openai_chat.chat_messages_extend(messages_list=messages, context=context) 45 | buffer_messages = self.chat_context_to_vectorize_buffer.get(context.id, []) 46 | buffer_messages.extend(messages) 47 | self.chat_context_to_vectorize_buffer[context.id] = buffer_messages 48 | 49 | self.app.openai.auto_bead_insert(context.id) 50 | 51 | async def chat_context_vectorize(self): 52 | while self.chat_context_to_vectorize_buffer: 53 | id, messages_list = self.chat_context_to_vectorize_buffer.popitem() 54 | self.app.post_message( 55 | CommonMessage( 56 | message_name="vector_memory_write", 57 | message_content={ 58 | "messages_list": messages_list, 59 | "context_id": id, 60 | } 61 | ) 62 | ) 63 | -------------------------------------------------------------------------------- /src/gptui/utils/file_icon.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from .my_text import MyText as Text 4 | from ..views.theme import theme_color as tc 5 | 6 | gptui_logger = logging.getLogger("gptui_logger") 7 | 8 | 9 | def file_icon( 10 | file_label: str, 11 | file_type: str, 12 | file_description: str, 13 | icon_color: str | None = None, 14 | description_color: str | None = None, 15 | ) -> Text: 16 | icon_color = icon_color or tc("yellow") or "yellow" 17 | description_color = description_color or tc("white") or "white" 18 | 19 | display = Text('', icon_color) 20 | if file_type == ".txt": 21 | display += Text('\u2595'+'\u2056\u0305'+'\u2056\u0305'+'\u2056\u0305'+'\u2572'+' \n') 22 | display += 
Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 23 | display += Text('txt\u2595', 'underline') 24 | elif file_type == ".md": 25 | display += Text('\u2595'+' \u0305'+'\ueb1d\u0305'+' \u0305'+'\u2572'+' \n') 26 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 27 | display += Text('.md\u2595', 'underline') 28 | elif file_type == ".bin": 29 | display += Text('\u2595'+'l\u0305'+'l\u0305'+'l\u0305'+'l\u0305'+'l\u0305'+'l\u0305'+'l\u0305'+'\u2572'+' \n') 30 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 31 | display += Text('bin\u2595', 'underline') 32 | elif file_type == ".json": 33 | display += Text('\u2595'+' \u0305'+'{\u0305'+' \u0305'+'}\u0305'+'\u2572'+' \n') 34 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 35 | display += Text('jsn\u2595', 'underline') 36 | elif file_type == ".py": 37 | display += Text('\u2595'+' \u0305'+'\ue606\u0305'+' \u0305'+'\u2572'+' \n') 38 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 39 | display += Text('.\uf820 \u2595', 'underline') 40 | elif file_type == ".sh": 41 | display += Text('\u2595'+'<\u0305'+'\u29f8\u0305'+'>\u0305'+'\u2572'+' \n') 42 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 43 | display += Text('.sh\u2595', 'underline') 44 | else: 45 | file_type += ' ' 46 | display += Text('\u2595'+'\u203e'+'\u203e'+'\u203e'+'\u29f9'+' \n') 47 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 48 | display += Text(f'{file_type[:3]}\u2595', 'underline') 49 | if len(file_description) > 12: 50 | description_line0 = file_description[:6] + '\n' 51 | description_line1 = '\u2026' + file_description[-5:] + '\n' 52 | else: 53 | file_description = file_description.ljust(12) 54 | description_line0 = file_description[:6] + '\n' 55 | description_line1 = file_description[6:] + '\n' 56 | description = Text(' \n' + description_line0 + description_line1, f'{description_color}') 57 | out_display = display + description 58 | return out_display 59 | -------------------------------------------------------------------------------- /src/gptui/models/utils/tokens_num.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import tiktoken 3 | 4 | 5 | gptui_logger = logging.getLogger("gptui_logger") 6 | 7 | 8 | def tokens_num_from_string(string: str, model: str) -> int: 9 | """ 10 | Calculate the number of tokens in the given string. 11 | """ 12 | encoding = tiktoken.encoding_for_model(model) 13 | tokens_num = len(encoding.encode(string)) 14 | return tokens_num 15 | 16 | def tokens_num_from_chat_context(chat_context: list, model: str) -> int: 17 | """Returns the number of tokens used by a list of messages.""" 18 | try: 19 | encoding = tiktoken.encoding_for_model(model) 20 | except KeyError: 21 | gptui_logger.warning("Warning when calculating tokens num: model not found. Using cl100k_base encoding.") 22 | encoding = tiktoken.get_encoding("cl100k_base") 23 | if model == "gpt-3.5-turbo": 24 | gptui_logger.warning("Warning when calculating tokens num: gpt-3.5-turbo may change over time. Returning tokens num assuming gpt-3.5-turbo-1106.") 25 | return tokens_num_from_chat_context(chat_context, model="gpt-3.5-turbo-1106") 26 | elif model == "gpt-4": 27 | gptui_logger.warning("Warning when calculating tokens num: gpt-4 may change over time.
Returning tokens num assuming gpt-4-0613.") 28 | return tokens_num_from_chat_context(chat_context, model="gpt-4-0613") 29 | elif model == "gpt-3.5-turbo-0301": 30 | tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n 31 | tokens_per_name = -1 # if there's a name, the role is omitted 32 | elif model in { 33 | "gpt-3.5-turbo-0613", 34 | "gpt-3.5-turbo-16k-0613", 35 | "gpt-4-0314", 36 | "gpt-4-32k-0314", 37 | "gpt-4-0613", 38 | "gpt-4-32k-0613", 39 | "gpt-3.5-turbo-1106", # Unverified. 40 | "gpt-4-1106-preview", # Unverified. 41 | }: 42 | tokens_per_message = 3 43 | tokens_per_name = 1 44 | else: 45 | gptui_logger.error(f"""tokens_num_from_chat_context() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""") 46 | raise NotImplementedError(f"""tokens_num_from_chat_context() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""") 47 | tokens_num = 0 48 | for message in chat_context: 49 | tokens_num += tokens_per_message 50 | for key, value in message.items(): 51 | tokens_num += len(encoding.encode(str(value)) if value else []) 52 | if key == "name": 53 | tokens_num += tokens_per_name 54 | tokens_num += 3 # every reply is primed with <|start|>assistant<|message|> 55 | return tokens_num 56 | 57 | def tokens_num_for_functions_call(functions_info: list[dict], model: str) -> int: 58 | return tokens_num_from_string(repr(functions_info), model=model) 59 | -------------------------------------------------------------------------------- /docs/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Potential issues and solutions when installing on Termux 2 | 3 | ## Installing Termux-API 4 | 5 | Some functionalities require the support of Termux-API, such as copying code snippets and voice features. 6 | To install Termux-API, you need to: 7 | 1. Install the Termux-API plugin. The Termux:API application can be obtained from [F-Droid](https://f-droid.org/en/packages/com.termux.api/). 8 | 2. After installing Termux-API, you also need to execute `pkg install termux-api` in Termux to install the corresponding package. 9 | 3. Grant the necessary permissions to Termux-API. 10 | 11 | ## Installing numpy 12 | 13 | First, ensure that numpy is installed. You can use `pkg install python-numpy` to install numpy, referring to [Termux Wiki](https://wiki.termux.com/wiki/Python). If using a virtual environment, you might need to use `python -m venv --system-site-packages