├── tests ├── __init__.py └── unit_tests │ ├── __init__.py │ ├── models │ ├── __init__.py │ ├── test_doc.py │ ├── test_openai_tokens_truncate.py │ ├── test_context.py │ └── test_skills.py │ ├── utils │ ├── __init__.py │ └── test_file_icon.py │ ├── controllers │ ├── __init__.py │ ├── test_tube_files.py │ └── test_decorate_display.py │ ├── gptui_kernel │ ├── __init__.py │ ├── plugins_test_data │ │ ├── NotSemanticPlugin │ │ │ └── Test │ │ │ │ └── skprompt.txt │ │ ├── FunSkill │ │ │ ├── Excuses │ │ │ │ ├── skprompt.txt │ │ │ │ └── config.json │ │ │ ├── Joke │ │ │ │ ├── skprompt.txt │ │ │ │ └── config.json │ │ │ └── Limerick │ │ │ │ ├── config.json │ │ │ │ └── skprompt.txt │ │ ├── math_plugin.py │ │ └── FileIO.py │ ├── test_manager.py │ └── test_call_plugin.py │ └── data │ ├── langchain_tests_assets │ ├── text_load_test.txt │ └── html_load_test.html │ └── test_document_loaders.py ├── src └── gptui │ ├── data │ ├── __init__.py │ ├── langchain │ │ ├── __init__.py │ │ ├── load │ │ │ ├── __init__.py │ │ │ └── serializable.py │ │ ├── docstore │ │ │ ├── __init__.py │ │ │ └── document.py │ │ ├── schema │ │ │ ├── __init__.py │ │ │ └── document.py │ │ ├── document_loaders │ │ │ ├── blob_loaders │ │ │ │ ├── __init__.py │ │ │ │ ├── file_system.py │ │ │ │ └── schema.py │ │ │ ├── __init__.py │ │ │ ├── html.py │ │ │ ├── helpers.py │ │ │ ├── html_bs.py │ │ │ ├── text.py │ │ │ └── base.py │ │ ├── README.md │ │ └── pydantic_v1 │ │ │ └── __init__.py │ └── vector_memory │ │ ├── __init__.py │ │ └── qdrant_memory.py │ ├── models │ ├── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── config_from_dot_env.py │ │ ├── openai_settings_from_dot_env.py │ │ ├── openai_api.py │ │ └── tokens_num.py │ ├── gptui_basic_services │ │ ├── __init__.py │ │ ├── plugins │ │ │ ├── __init__.py │ │ │ └── conversation_service.py │ │ └── templates │ │ │ └── upload_file_prompt.txt │ ├── openai_error.py │ ├── doc.py │ ├── blinker_wrapper.py │ ├── openai_chat_inner_service.py │ ├── skills.py │ ├── signals.py │ ├── openai_tokens_truncate.py │ ├── role.py │ └── context.py │ ├── utils │ ├── __init__.py │ ├── line_count.py │ ├── safe_iterate.py │ └── file_icon.py │ ├── views │ ├── __init__.py │ ├── common_message.py │ ├── theme.py │ ├── wink_wink.py │ └── custom_tree.py │ ├── controllers │ ├── __init__.py │ ├── ai_care_sensors.py │ ├── assistant_tube_control.py │ ├── group_talk_control.py │ ├── voice_control.py │ ├── chat_context_control.py │ ├── tube_files_control.py │ ├── chat_response_control.py │ └── dash_board_control.py │ ├── drivers │ ├── __init__.py │ ├── driver_interface.py │ ├── driver_error.py │ └── driver_manager.py │ ├── plugins │ ├── __init__.py │ ├── DEFAULT_PLUGINS │ │ ├── __init__.py │ │ ├── Bead.py │ │ └── CoreSkills.py │ ├── FileRW.py │ ├── MemoryRecall.py │ ├── OpenInterpreter.py │ └── SnoozeReminder.py │ ├── _version.py │ ├── gptui_kernel │ ├── __init__.py │ ├── null_logger.py │ ├── manager_exceptions.py │ └── kernel_exceptions.py │ ├── __init__.py │ ├── .default_config.yml │ ├── help.md │ ├── __main__.py │ └── config.yml ├── docs ├── features.md ├── blog │ ├── index.md │ ├── .authors.yml │ └── posts │ │ └── monochrome.md ├── getting_started.md ├── about │ ├── license.md │ └── contributing.md ├── api │ └── index.md ├── guide │ └── index.md ├── index.md ├── troubleshooting.md ├── configuration.zh.md └── configuration.md ├── .github ├── FUNDING.yml ├── PULL_REQUEST_TEMPLATE.md ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── codecov.yml │ ├── static.yml │ └── python-publish.yml ├── web-serve.toml ├── 
.env_gptui.example ├── MANIFEST.in ├── main.py ├── custom_plugin_examples ├── README.md └── native_plugin_example.py ├── setup.py ├── requirements.txt ├── LICENSE ├── CHANGELOG.md ├── mkdocs.yml ├── pyproject.toml └── .gitignore /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/views/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit_tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/features.md: -------------------------------------------------------------------------------- 1 | Features 2 | -------------------------------------------------------------------------------- /src/gptui/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/drivers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/blog/index.md: -------------------------------------------------------------------------------- 1 | # Blog 2 | 3 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit_tests/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit_tests/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/load/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/data/vector_memory/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /tests/unit_tests/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/docstore/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/plugins/DEFAULT_PLUGINS/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/getting_started.md: -------------------------------------------------------------------------------- 1 | Getting started here. 2 | -------------------------------------------------------------------------------- /src/gptui/models/gptui_basic_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/gptui/models/gptui_basic_services/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | -------------------------------------------------------------------------------- /src/gptui/_version.py: -------------------------------------------------------------------------------- 1 | __title__ = "gptui" 2 | __version__ = "0.5.4" 3 | -------------------------------------------------------------------------------- /web-serve.toml: -------------------------------------------------------------------------------- 1 | [app.GPTUI] 2 | command = "python main.py" 3 | #command = "python3 main.py" 4 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/schema/__init__.py: -------------------------------------------------------------------------------- 1 | from .document import BaseDocumentTransformer, Document 2 | -------------------------------------------------------------------------------- /.env_gptui.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY = "" 2 | OPENAI_ORG_ID = "" 3 | GOOGLE_KEY = "" 4 | GOOGLE_CX = "" 5 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/docstore/document.py: -------------------------------------------------------------------------------- 1 | from ..schema import Document 2 | 3 | __all__ = ["Document"] 4 | -------------------------------------------------------------------------------- /src/gptui/gptui_kernel/__init__.py: -------------------------------------------------------------------------------- 1 | from .kernel import Kernel 2 | 3 | #__all__ = [ 4 | # "Kernel", 5 | #] 6 | -------------------------------------------------------------------------------- /tests/unit_tests/data/langchain_tests_assets/text_load_test.txt: 
-------------------------------------------------------------------------------- 1 | This is a txt file for testing text loader. 2 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/NotSemanticPlugin/Test/skprompt.txt: -------------------------------------------------------------------------------- 1 | This is not a semantic plugin. 2 | -------------------------------------------------------------------------------- /docs/about/license.md: -------------------------------------------------------------------------------- 1 | GPTUI is licensed under the MIT License. [View license](https://github.com/happyapplehorse/gptui/blob/main/LICENSE). 2 | -------------------------------------------------------------------------------- /src/gptui/__init__.py: -------------------------------------------------------------------------------- 1 | from ._version import __title__, __version__ 2 | 3 | 4 | __all__ = [ 5 | "__title__", 6 | "__version__", 7 | ] 8 | -------------------------------------------------------------------------------- /docs/blog/.authors.yml: -------------------------------------------------------------------------------- 1 | authors: 2 | happyapplehorse: 3 | name: Xueao Chao 4 | description: Creator 5 | avatar: https://github.com/happyapplehorse.png 6 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include src/gptui/.default_config.yml 2 | include src/gptui/config.yml 3 | include src/gptui/help.md 4 | 5 | recursive-include src/gptui *.txt 6 | recursive-include src/gptui *.tcss 7 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/blob_loaders/__init__.py: -------------------------------------------------------------------------------- 1 | from .file_system import FileSystemBlobLoader 2 | from .schema import Blob, BlobLoader 3 | 4 | __all__ = ["BlobLoader", "Blob", "FileSystemBlobLoader"] 5 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **Please review the following checklist.** 2 | 3 | - [ ] Docstrings on all new or modified functions / classes 4 | - [ ] Updated documentation 5 | - [ ] Updated CHANGELOG.md (where appropriate) 6 | -------------------------------------------------------------------------------- /src/gptui/models/gptui_basic_services/templates/upload_file_prompt.txt: -------------------------------------------------------------------------------- 1 | {{$input}} 2 | 3 | ******************** FILE CONTENT BEGIN ******************** 4 | {{$file_content}} 5 | ******************** FILE CONTENT FINISH ******************* 6 | -------------------------------------------------------------------------------- /tests/unit_tests/data/langchain_tests_assets/html_load_test.html: -------------------------------------------------------------------------------- 1 | <html> 2 | <head> 3 | <title>Page Title</title> 4 | </head> 5 | <body> 6 | 7 | <h1>My First Heading</h1>
8 | <p>My first paragraph.</p> 9 | 10 | </body> 11 | </html> 12 | -------------------------------------------------------------------------------- /docs/api/index.md: -------------------------------------------------------------------------------- 1 | # API 2 | 3 | This is an API-level reference for GPTUI. 4 | Click the links to your left (or in the burger menu) to open a reference for each module. 5 | 6 | If you are new to GPTUI, you may want to read the tutorial or guide first. 7 | -------------------------------------------------------------------------------- /docs/guide/index.md: -------------------------------------------------------------------------------- 1 | # Guide 2 | 3 | This guide helps you carry out secondary development of GPTUI, 4 | or use the features provided by GPTUI to develop your own applications. 5 | 6 | If you are new to GPTUI, you may want to read the tutorial first. 7 | -------------------------------------------------------------------------------- main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from src.gptui.__main__ import gptui_run 5 | 6 | 7 | if __name__ == "__main__": 8 | 9 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src")) 10 | gptui_run(config_path='src/gptui/config.yml') 11 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/__init__.py: -------------------------------------------------------------------------------- 1 | from .text import TextLoader 2 | from .html import UnstructuredHTMLLoader 3 | from .html_bs import BSHTMLLoader 4 | 5 | __all__ = [ 6 | "TextLoader", 7 | "UnstructuredHTMLLoader", 8 | "BSHTMLLoader", 9 | ] 10 | -------------------------------------------------------------------------------- /src/gptui/models/utils/config_from_dot_env.py: -------------------------------------------------------------------------------- 1 | from dotenv import dotenv_values 2 | 3 | 4 | def config_from_dot_env(dot_env_path: str) -> dict: 5 | """ 6 | Reads the configs from the dot_env_path. 7 | """ 8 | 9 | config = dotenv_values(dot_env_path) 10 | 11 | return config 12 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Excuses/skprompt.txt: -------------------------------------------------------------------------------- 1 | Generate a creative reason or excuse for the given event. Be creative and be funny. Let your imagination run wild. 2 | 3 | Event:I am running late. 4 | Excuse:I was being held ransom by giraffe gangsters. 5 | 6 | Event:{{$input}} -------------------------------------------------------------------------------- /src/gptui/views/common_message.py: -------------------------------------------------------------------------------- 1 | from textual.message import Message 2 | 3 | 4 | class CommonMessage(Message): 5 | def __init__(self, message_name: str, message_content) -> None: 6 | self.message_name = message_name 7 | self.message_content = message_content 8 | super().__init__() 9 | -------------------------------------------------------------------------------- /custom_plugin_examples/README.md: -------------------------------------------------------------------------------- 1 | You can specify the folder for your custom plugins in the configuration file, 2 | which defaults to "~/.gptui/plugins". 3 | 4 | GPTUI will automatically scan this folder to retrieve the plugins contained within it.
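For illustration, a minimal native plugin might look like the sketch below. This is only a hypothetical example: the `TextTools` class and its function are invented here, and the decorator usage mirrors the semantic-kernel conventions already visible in this repository's test plugins (see `math_plugin.py` under `tests/unit_tests/gptui_kernel/plugins_test_data`).

```python
# Hypothetical custom plugin, saved e.g. as ~/.gptui/plugins/text_tools.py.
# The sk_function decorator follows the semantic-kernel version pinned in
# requirements.txt, matching the usage shown in math_plugin.py.
from semantic_kernel.skill_definition import sk_function


class TextTools:
    @sk_function(
        description="Reverses the given text",
        name="reverse",
        input_description="The text to reverse",
    )
    def reverse(self, text: str) -> str:
        # Return the input string reversed.
        return text[::-1]
```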
5 | 6 | You can copy the files from this folder to the custom plugin directory for testing purposes. 7 | -------------------------------------------------------------------------------- /src/gptui/gptui_kernel/null_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | class NullHandler(logging.Handler): 5 | def emit(self, record): 6 | pass 7 | 8 | 9 | def get_null_logger(name=None): 10 | logger = logging.getLogger(name or __name__) 11 | logger.addHandler(NullHandler()) 12 | logger.propagate = False 13 | return logger 14 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Joke/skprompt.txt: -------------------------------------------------------------------------------- 1 | WRITE EXACTLY ONE JOKE or HUMOROUS STORY ABOUT THE TOPIC BELOW 2 | 3 | JOKE MUST BE: 4 | - G RATED 5 | - WORKPLACE/FAMILY SAFE 6 | NO SEXISM, RACISM OR OTHER BIAS/BIGOTRY 7 | 8 | BE CREATIVE AND FUNNY. I WANT TO LAUGH. 9 | {{$style}} 10 | +++++ 11 | 12 | {{$input}} 13 | +++++ 14 | -------------------------------------------------------------------------------- /src/gptui/controllers/ai_care_sensors.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | 4 | def time_now() -> str: 5 | """ 6 | Get the current date and time in the local time zone. 7 | 8 | Example: 9 | {{time.now}} => Sunday, January 12, 2031 9:15 PM 10 | """ 11 | now = datetime.datetime.now() 12 | return now.strftime("%A, %B %d, %Y %I:%M %p") 13 | 14 | -------------------------------------------------------------------------------- /src/gptui/utils/line_count.py: -------------------------------------------------------------------------------- 1 | from rich.console import Console 2 | 3 | from .my_text import MyText 4 | 5 | 6 | def my_line_count(content: MyText, width: int, console=Console()) -> int: 7 | lines = content.split(allow_blank=True) 8 | num_count = 0 9 | for line in lines: 10 | num_count += len(line.wrap(console, width)) 11 | return num_count 12 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Limerick/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "description": "Generate a funny limerick about a person", 4 | "type": "completion", 5 | "completion": { 6 | "max_tokens": 100, 7 | "temperature": 0.7, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | 4 | setuptools.setup( 5 | packages=setuptools.find_packages(where="src"), 6 | package_dir={"": "src"}, 7 | package_data={ 8 | "gptui": [ 9 | ".default_config.yml", 10 | "config.yml", 11 | "help.md", 12 | "**/*.txt", 13 | "**/*.tcss", 14 | ], 15 | }, 16 | ) 17 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Excuses/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "description": "Turn a scenario into a creative or humorous excuse to send your boss", 4 | "type": "completion", 5 | "completion": { 6 | "max_tokens": 60, 7 |
"temperature": 0.5, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /tests/unit_tests/models/test_doc.py: -------------------------------------------------------------------------------- 1 | from gptui.models.doc import document_loader 2 | 3 | 4 | def test_document_loader(tmp_path): 5 | file_content = "This is a test." 6 | file_path = tmp_path / "test.txt" 7 | with open(file_path, "w") as fp: 8 | fp.write(file_content) 9 | document = document_loader(file_path) 10 | assert document[0].page_content == "This is a test." 11 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/html.py: -------------------------------------------------------------------------------- 1 | """Loader that uses unstructured to load HTML files.""" 2 | from typing import List 3 | 4 | from .unstructured import UnstructuredFileLoader 5 | 6 | 7 | class UnstructuredHTMLLoader(UnstructuredFileLoader): 8 | """Loader that uses unstructured to load HTML files.""" 9 | 10 | def _get_elements(self) -> List: 11 | from unstructured.partition.html import partition_html 12 | 13 | return partition_html(filename=self.file_path, **self.unstructured_kwargs) 14 | -------------------------------------------------------------------------------- /tests/unit_tests/models/test_openai_tokens_truncate.py: -------------------------------------------------------------------------------- 1 | from gptui.models.openai_tokens_truncate import find_position 2 | 3 | 4 | def test_find_position(): 5 | lst = [1, 2, 3, 4, 5] 6 | num = 8 7 | result = find_position(lst, num) 8 | assert result == 4 9 | lst = [2, 0, 5, 1, 3, 2, 1, 0, 4] 10 | num = 9 11 | result = find_position(lst, num) 12 | assert result == 5 13 | result = find_position(lst, 2) 14 | assert result == 9 15 | result = find_position(lst, 20) 16 | assert result == 0 17 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Joke/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "description": "Generate a funny joke", 4 | "type": "completion", 5 | "completion": { 6 | "max_tokens": 1000, 7 | "temperature": 0.9, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | }, 12 | "input": { 13 | "parameters": [ 14 | { 15 | "name": "input", 16 | "description": "Joke subject", 17 | "defaultValue": "" 18 | } 19 | ] 20 | } 21 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | agere>=0.1.3,<1 2 | ai-care>=0.1.3,<1 3 | aiofiles>=23.1.0,<24 4 | beautifulsoup4>=4.12.2,<5 5 | blinker>=1.6.2,<2 6 | chardet>=5.1.0,<6 7 | geocoder>=1.38.1,<2 8 | httpx>=0.24.1,<1 9 | lxml>=4.9.3,<6 10 | # open-interpreter==0.1.4 11 | openai>=1.2.3,<2 12 | playsound>=1.3.0,<2 13 | Pygments>=2.15.1,<3 14 | pyperclip>=1.8.2,<2 15 | python-dotenv>=1.0.0,<2 16 | PyYAML>=6.0.1,<7 17 | qdrant-client>=1.4.0,<2 18 | rich>=13.7.0,<14 19 | semantic-kernel>=0.4.0.dev0,<1 20 | textual>=0.37.1,<1 21 | tiktoken>=0.4.0,<1 22 | unstructured>=0.10.18,<1 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | Have you checked closed issues? https://github.com/happyapplehorse/gptui/issues?q=is%3Aissue+is%3Aclosed 11 | 12 | Please give a brief but clear explanation of the issue. 13 | 14 | Feel free to add screenshots and / or videos. These can be very helpful! 15 | 16 | If possible, please use English as much as you can. 17 | This is to ensure that others can review the content of this issue more effectively. 18 | -------------------------------------------------------------------------------- /src/gptui/utils/safe_iterate.py: -------------------------------------------------------------------------------- 1 | def safe_next(gen): 2 | """Avoiding conflicts between StopIteration of generators and StopIteration of coroutine functions in eventloop.""" 3 | try: 4 | return ("OK", next(gen)) 5 | except StopIteration as e: 6 | return ("DONE", e.value) 7 | 8 | def safe_send(gen, value): 9 | """Avoiding conflicts between StopIteration of generators and StopIteration of coroutine functions in eventloop.""" 10 | try: 11 | return ("OK", gen.send(value)) 12 | except StopIteration as e: 13 | return ("DONE", e.value) 14 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/README.md: -------------------------------------------------------------------------------- 1 | Because the original development platform (Termux) could not install the langchain toolkit (due to numpy), and to keep GPTUI usable for other Termux users, the parts of the langchain source code that are needed have been copied into this project so that some of langchain's functions can be used. Thanks to the hard work of the langchain developers. If the installation problem of langchain is solved later, the langchain toolkit will be installed directly. 2 | 3 | Langchain tools that have already been integrated: 4 | - TextLoader 5 | - UnstructuredHTMLLoader 6 | - BSHTMLLoader 7 | 8 | Required dependencies: 9 | - pydantic 10 | -------------------------------------------------------------------------------- /docs/about/contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing to GPTUI 2 | 3 | The GPTUI project welcomes contributions from developers and users in the open source community. 4 | Contributions can be made in a number of ways; a few examples are: 5 | 6 | - Code patches via pull requests 7 | - Documentation improvements 8 | - Bug reports and patch reviews 9 | 10 | Some of GPTUI's plugin features rely on prompts; you can continue to help me improve these prompts. 11 | I'd also like to have appropriate animation cues during certain state changes. 12 | If you have any creative ideas, I'd appreciate your help in implementing them. 13 | 14 | 15 | ## 🎉 16 | 17 | Each contributor can leave a quote in the program.
18 | -------------------------------------------------------------------------------- /src/gptui/controllers/assistant_tube_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from ..models.signals import response_auxiliary_message_signal 4 | 5 | 6 | gptui_logger = logging.getLogger("gptui_logger") 7 | 8 | 9 | class AssistantTube: 10 | def __init__(self, app): 11 | self.app = app 12 | response_auxiliary_message_signal.connect(self.tube_display) 13 | 14 | def tube_display(self, sender, **kwargs): 15 | message = kwargs["message"] 16 | content = message["content"] 17 | flag = message["flag"] 18 | if flag == "function_call": 19 | self.app.context_piece_to_assistant_tube(content) 20 | elif flag == "function_response": 21 | self.app.context_piece_to_assistant_tube(content) 22 | -------------------------------------------------------------------------------- /src/gptui/models/utils/openai_settings_from_dot_env.py: -------------------------------------------------------------------------------- 1 | from dotenv import dotenv_values 2 | 3 | 4 | def openai_settings_from_dot_env(dot_env_path: str) -> tuple[str, str | None]: 5 | """ 6 | Reads the OpenAI API key and organization ID from the dot_env_path. 7 | OpenAI API key should be saved as "OPENAI_API_KEY". 8 | Organization should be saved as "OPENAI_ORG_ID". 9 | 10 | Returns: 11 | Tuple[str, str]: The OpenAI API key, the OpenAI organization ID 12 | """ 13 | 14 | config = dotenv_values(dot_env_path) 15 | api_key = config.get("OPENAI_API_KEY", None) 16 | org_id = config.get("OPENAI_ORG_ID", None) 17 | 18 | assert api_key, f"OPENAI_API_KEY not found in {dot_env_path}" 19 | 20 | # It's okay if the org ID is not found (not required) 21 | return api_key, org_id 22 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # GPTUI 2 | 3 | ![gptui_logo](https://raw.githubusercontent.com/happyapplehorse/happyapplehorse-assets/main/imgs/gptui_logo.png){ align=left width="60" } 4 | GPTUI is a GPT conversational TUI (Textual User Interface) tool that runs within the terminal. 5 | It uses the Textual framework for its TUI and is equipped with the plugin framework provided by Semantic Kernel. 6 | GPTUI offers a lightweight Kernel to power AI applications. 7 | The top-level TUI application is decoupled from the underlying Kernel, allowing you to easily replace the TUI interface or expand its functionalities. 8 | At present, only the GPT model of OpenAI is supported, and other LLM interfaces will be added later. 9 | 10 | 11 | ## Demo 12 | 13 | Below is a demonstration: 14 | 15 | ![gptui_demo](https://raw.githubusercontent.com/happyapplehorse/happyapplehorse-assets/main/imgs/gptui_demo.gif) 16 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FunSkill/Limerick/skprompt.txt: -------------------------------------------------------------------------------- 1 | There was a young woman named Bright, 2 | Whose speed was much faster than light. 3 | She set out one day, 4 | In a relative way, 5 | And returned on the previous night. 6 | 7 | There was an odd fellow named Gus, 8 | When traveling he made such a fuss. 9 | He was banned from the train, 10 | Not allowed on a plane, 11 | And now travels only by bus.
12 | 13 | There once was a man from Tibet, 14 | Who couldn't find a cigarette 15 | So he smoked all his socks, 16 | and got chicken-pox, 17 | and had to go to the vet. 18 | 19 | There once was a boy named Dan, 20 | who wanted to fry in a pan. 21 | He tried and he tried, 22 | and eventually died, 23 | that weird little boy named Dan. 24 | 25 | Now write a very funny limerick about {{$name}}. 26 | {{$input}} 27 | Invent new facts about their life. Must be funny. 28 | -------------------------------------------------------------------------------- /src/gptui/controllers/group_talk_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from ..models.signals import common_message_signal 4 | 5 | 6 | gptui_logger = logging.getLogger("gptui_logger") 7 | 8 | 9 | class GroupTalkControl: 10 | def __init__(self, app): 11 | self.app = app 12 | common_message_signal.connect(self.group_talk_user_message_send) 13 | 14 | def group_talk_user_message_send(self, sender, **kwargs): 15 | message = kwargs["message"] 16 | if message["flag"] == "group_talk_user_message_send": 17 | messages = message["content"] 18 | if isinstance(messages, list): 19 | for one_message in messages: 20 | self.app.context_piece_to_chat_window(one_message, change_line=True, decorator_switch=True) 21 | else: 22 | self.app.context_piece_to_chat_window(messages, change_line=True, decorator_switch=True) 23 | -------------------------------------------------------------------------------- /src/gptui/gptui_kernel/manager_exceptions.py: -------------------------------------------------------------------------------- 1 | class ManagerError(Exception): 2 | ... 3 | 4 | 5 | class HandlerNotRegisterError(ManagerError): 6 | def __init__(self, handler, manager=None): 7 | self.handler = handler 8 | self.manager = manager 9 | 10 | def __str__(self): 11 | if self.manager is None: 12 | return f"Handler: {self.handler} is not registered in manager." 13 | else: 14 | return f"Handler: {self.handler} is not registered in manager: {self.manager}." 15 | 16 | 17 | class JobNotRegisterError(ManagerError): 18 | def __init__(self, job, manager=None): 19 | self.job = job 20 | self.manager = manager 21 | 22 | def __str__(self): 23 | if self.manager is None: 24 | return f"Job: {self.job} is not registered in manager." 25 | else: 26 | return f"Job: {self.job} is not registered in manager: {self.manager}." 27 | -------------------------------------------------------------------------------- /src/gptui/models/utils/openai_api.py: -------------------------------------------------------------------------------- 1 | import openai 2 | from openai import OpenAI, AsyncOpenAI 3 | 4 | from .openai_settings_from_dot_env import openai_settings_from_dot_env 5 | 6 | 7 | OpenAIClient = OpenAI | AsyncOpenAI 8 | 9 | 10 | def openai_api(dot_env_path: str | None): 11 | assert dot_env_path, "'dot_env_path' can not be None or empty." 12 | openai_key, org_id = openai_settings_from_dot_env(dot_env_path) 13 | openai.api_key = openai_key 14 | return openai 15 | 16 | def openai_api_client(dot_env_path: str | None, async_client: bool = False, **kwargs) -> OpenAIClient: 17 | assert dot_env_path, "'dot_env_path' can not be None or empty."
18 | openai_key, org_id = openai_settings_from_dot_env(dot_env_path) 19 | if async_client is True: 20 | client = AsyncOpenAI(api_key=openai_key, organization=org_id, **kwargs) 21 | else: 22 | client = OpenAI(api_key=openai_key, organization=org_id, **kwargs) 23 | return client 24 | -------------------------------------------------------------------------------- /src/gptui/models/openai_error.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from .blinker_wrapper import async_wrapper_with_loop, async_wrapper_without_loop 4 | from .context import OpenaiContext 5 | from .signals import notification_signal 6 | 7 | 8 | gptui_logger = logging.getLogger("gptui_logger") 9 | 10 | 11 | class OpenaiErrorHandler: 12 | def openai_error_handle(self, error: Exception, context: OpenaiContext, event_loop: bool = True, **kwargs) -> None: 13 | if event_loop is True: 14 | gptui_logger.error(f"Openai Error: {error}") 15 | notification_signal.send(self, _async_wrapper=async_wrapper_with_loop, message={"content":{"error":error, "context":context, "ps":kwargs}, "flag":"openai_error"}) 16 | else: 17 | gptui_logger.error(f"Openai Error: {error}") 18 | notification_signal.send(self, _async_wrapper=async_wrapper_without_loop, message={"content":{"error":error, "context":context, "ps":kwargs}, "flag":"openai_error"}) 19 | -------------------------------------------------------------------------------- /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | name: Codecov 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - develop 8 | pull_request: 9 | branches: 10 | - main 11 | - develop 12 | workflow_dispatch: 13 | 14 | jobs: 15 | codecov: 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Set up Python 21 | uses: actions/setup-python@v3 22 | with: 23 | python-version: '3.x' 24 | - name: Install dependencies 25 | run: | 26 | python -m pip install --upgrade pip 27 | pip install wheel setuptools 28 | pip install pytest pytest-cov pytest-asyncio 29 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 30 | - name: Execute test 31 | run: pytest --cov=./src/gptui --cov-report=xml 32 | 33 | - name: Upload coverage reports to Codecov 34 | uses: codecov/codecov-action@v3 35 | with: 36 | files: ./coverage.xml 37 | -------------------------------------------------------------------------------- /src/gptui/drivers/driver_interface.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from .driver_error import NoDriverError, NoDriverMethodError 4 | 5 | 6 | class DriverInterface: 7 | 8 | def __init__(self, platform: str): 9 | self.platform = platform.lower() 10 | 11 | def __call__(self, *args, **kwargs) -> Any: 12 | method = getattr(self, self.platform, None) 13 | if method and callable(method): 14 | return method(*args, **kwargs) 15 | else: 16 | raise NoDriverError(self.platform) 17 | 18 | def termux(self): 19 | raise NoDriverMethodError(driver="termux", method=self.__class__.__name__) 20 | 21 | def linux(self): 22 | raise NoDriverMethodError(driver="linux", method=self.__class__.__name__) 23 | 24 | def macos(self): 25 | raise NoDriverMethodError(driver="macos", method=self.__class__.__name__) 26 | 27 | def windows(self): 28 | raise NoDriverMethodError(driver="windows", method=self.__class__.__name__) 29 | -------------------------------------------------------------------------------- 
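As an aside, a minimal driver sketch may help illustrate the `DriverInterface` dispatch shown above: calling the driver instance routes `__call__` to the method named after the configured platform, and platforms left unimplemented fall back to the base-class stubs, which raise the `NoDriverMethodError` defined in `driver_error.py` just below. The `Notify` class and its methods are invented for this hypothetical example.

```python
# A hypothetical driver (not part of the repository), following the
# DriverInterface contract shown above.
from gptui.drivers.driver_interface import DriverInterface


class Notify(DriverInterface):
    """Show a notification on the current platform (illustrative only)."""

    def linux(self, text: str) -> None:
        # A real implementation might shell out to notify-send; stubbed here.
        print(f"[linux notification] {text}")

    def macos(self, text: str) -> None:
        print(f"[macos notification] {text}")


notify = Notify("Linux")    # the platform name is lower-cased by __init__
notify("Hello from GPTUI")  # __call__ dispatches to Notify.linux
# On platforms without an override (e.g. "windows"), the base-class method
# raises NoDriverMethodError instead.
```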
/src/gptui/drivers/driver_error.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Callable, TYPE_CHECKING 3 | 4 | 5 | if TYPE_CHECKING: 6 | from .driver_interface import DriverInterface 7 | 8 | 9 | class DriverError(Exception): 10 | ... 11 | 12 | 13 | class NoDriverError(DriverError): 14 | def __init__(self, driver: str): 15 | self.driver = driver 16 | 17 | def __str__(self): 18 | return f"There is no {self.driver} driver." 19 | 20 | 21 | class NoDriverMethodError(DriverError): 22 | def __init__(self, driver: str | Callable, method: str | DriverInterface): 23 | if isinstance(driver, str): 24 | self.driver = driver 25 | else: 26 | self.driver = driver.__name__ 27 | if isinstance(method, str): 28 | self.method = method 29 | else: 30 | self.method = type(method).__name__ 31 | 32 | def __str__(self): 33 | return f"There is no {self.method} method in {self.driver} driver." 34 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/pydantic_v1/__init__.py: -------------------------------------------------------------------------------- 1 | from importlib import metadata 2 | 3 | ## Create namespaces for pydantic v1 and v2. 4 | # This code must stay at the top of the file before other modules may 5 | # attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules. 6 | # 7 | # This hack is done for the following reasons: 8 | # * Langchain will attempt to remain compatible with both pydantic v1 and v2 since 9 | # both dependencies and dependents may be stuck on either version of v1 or v2. 10 | # * Creating namespaces for pydantic v1 and v2 should allow us to write code that 11 | # unambiguously uses either v1 or v2 API. 12 | # * This change is easier to roll out and roll back. 
13 | 14 | try: 15 | from pydantic.v1 import * # noqa: F403 16 | except ImportError: 17 | from pydantic import * # noqa: F403 18 | 19 | 20 | try: 21 | _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0]) 22 | except metadata.PackageNotFoundError: 23 | _PYDANTIC_MAJOR_VERSION = 0 24 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/test_manager.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest.mock import patch 3 | 4 | from textual.app import App 5 | 6 | from gptui.gptui_kernel.manager import Manager 7 | 8 | 9 | mocked_dotenv_values = { 10 | "OPENAI_API_KEY": "fake_api_key", 11 | "OPENAI_ORG_ID": "fake_org_id", 12 | } 13 | 14 | def test_scan_plugin(): 15 | with patch('gptui.gptui_kernel.kernel.dotenv_values', return_value=mocked_dotenv_values): 16 | app = App() 17 | manager = Manager(app, dot_env_config_path=os.path.expanduser("~/.gptui/.env_gptui")) 18 | semantic_plugins, native_plugins = manager.scan_plugins("./tests/unit_tests/gptui_kernel/plugins_test_data") 19 | semantic_plugins_name_list = [plugin_meta.name for plugin_meta in semantic_plugins] 20 | native_plugins_name_list = [plugin_meta.name for plugin_meta in native_plugins] 21 | assert set(semantic_plugins_name_list) == {"FunSkill"} 22 | assert set(native_plugins_name_list) == {"WebServe", "MathPlugin", "WriteFile"} 23 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/math_plugin.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | from semantic_kernel.skill_definition import ( 4 | sk_function, 5 | sk_function_context_parameter, 6 | ) 7 | from semantic_kernel.orchestration.sk_context import SKContext 8 | 9 | 10 | class MathPlugin: 11 | @sk_function( 12 | description="Takes the square root of a number", 13 | name="square_root", 14 | input_description="The value to take the square root of", 15 | ) 16 | def square_root(self, number: str) -> str: 17 | return str(math.sqrt(float(number))) 18 | 19 | @sk_function( 20 | description="Adds two numbers together", 21 | name="add", 22 | ) 23 | @sk_function_context_parameter( 24 | name="input", 25 | description="The first number to add", 26 | ) 27 | @sk_function_context_parameter( 28 | name="number2", 29 | description="The second number to add", 30 | ) 31 | def add(self, context: SKContext) -> str: 32 | return str(float(context["input"]) + float(context["number2"])) 33 | -------------------------------------------------------------------------------- /src/gptui/gptui_kernel/kernel_exceptions.py: -------------------------------------------------------------------------------- 1 | import semantic_kernel as sk 2 | 3 | 4 | class KernelException(Exception): 5 | ... 6 | 7 | 8 | class PluginInfoError(KernelException): 9 | def __init__(self, plugin_info: tuple): 10 | self.plugin_info = plugin_info 11 | 12 | def __str__(self): 13 | return f"Plugin info in {self.plugin_info} is a wrong type." 
14 | 15 | 16 | class InvalidArgumentTypeError(KernelException): 17 | def __init__(self, argument, expected_type): 18 | self.argument = argument 19 | self.expected_type = expected_type 20 | 21 | def __str__(self): 22 | return f"Invalid argument type: Expected {self.expected_type}, got {type(self.argument)}" 23 | 24 | 25 | class PluginsMatchError(KernelException): 26 | def __init__(self, sk_kernel: sk.Kernel, plugins_list: list[tuple]): 27 | self.sk_kernel = sk_kernel 28 | self.plugins_list = plugins_list 29 | 30 | def __str__(self): 31 | return f"Semantic kernel and plugin list do not match. sk_kernel: {self.sk_kernel}, plugins_list: {self.plugins_list}" 32 | -------------------------------------------------------------------------------- LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Xueao Chao 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/unit_tests/data/test_document_loaders.py: -------------------------------------------------------------------------------- 1 | from gptui.data.langchain.document_loaders import TextLoader, UnstructuredHTMLLoader, BSHTMLLoader 2 | 3 | 4 | def test_text_loader(): 5 | file_path = "./tests/unit_tests/data/langchain_tests_assets/text_load_test.txt" 6 | loader = TextLoader(file_path) 7 | document = loader.load()[0] 8 | assert document.page_content == "This is a txt file for testing text loader.\n" 9 | assert document.metadata["source"] == "./tests/unit_tests/data/langchain_tests_assets/text_load_test.txt" 10 | 11 | def test_bs_html_loader(): 12 | file_path = "./tests/unit_tests/data/langchain_tests_assets/html_load_test.html" 13 | loader = BSHTMLLoader(file_path) 14 | document = loader.load()[0] 15 | assert document.page_content == "\n\nPage Title\n\n\nMy First Heading\nMy first paragraph.\n\n\n" 16 | 17 | def test_unstructured_html_loader(): 18 | file_path = "./tests/unit_tests/data/langchain_tests_assets/html_load_test.html" 19 | loader = UnstructuredHTMLLoader(file_path) 20 | document = loader.load()[0] 21 | assert document.page_content == "My First Heading\n\nMy first paragraph."
22 | -------------------------------------------------------------------------------- /tests/unit_tests/controllers/test_tube_files.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from gptui.controllers.tube_files_control import TubeFiles 4 | from gptui.utils.my_text import MyText as Text 5 | 6 | class TestTubeFiles: 7 | @pytest.fixture(autouse=True) 8 | def setup(self, tmp_path): 9 | 10 | class Displayer: 11 | def update(self, content): 12 | self.display = content 13 | 14 | self.displayer = Displayer() 15 | self.file_path = tmp_path / "files_test_data" 16 | self.file_path.mkdir(exist_ok=True) 17 | 18 | @pytest.mark.asyncio 19 | async def test_write_read_file_async(self): 20 | tf = TubeFiles(self.displayer) 21 | file_content = "This is a test." 22 | file_path = self.file_path / "test.txt" 23 | await tf.write_file_async(file_path, file_content) 24 | content = await tf.read_file_async(file_path) 25 | assert content == "This is a test." 26 | content = await tf.read_file_async(self.file_path / "test1.txt") 27 | assert content is None 28 | assert self.displayer.display == Text("File or directory not found", "yellow") 29 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/test_call_plugin.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest.mock import patch 3 | 4 | from textual.app import App 5 | 6 | from gptui.gptui_kernel.manager import Manager 7 | 8 | 9 | mocked_dotenv_values = { 10 | "OPENAI_API_KEY": "fake_api_key", 11 | "OPENAI_ORG_ID": "fake_org_id", 12 | } 13 | 14 | async def test_call_plugin(): 15 | with patch('gptui.gptui_kernel.kernel.dotenv_values', return_value=mocked_dotenv_values): 16 | app = App() 17 | manager = Manager(app, dot_env_config_path=os.path.expanduser("~/.gptui/.env_gptui")) 18 | _, native_plugins = manager.scan_plugins("./tests/unit_tests/gptui_kernel/plugins_test_data") 19 | for plugin in native_plugins: 20 | plugin_info = plugin.plugin_info 21 | manager.add_plugins(plugin_info) 22 | 23 | add = manager.available_functions_link["add"] 24 | args = { 25 | "input": 1, 26 | "number2": 2, 27 | } 28 | context = manager.gk_kernel.context_render(args, add) 29 | result = await add.invoke_async(context=context) 30 | assert int(float(str(result))) == 3 31 | -------------------------------------------------------------------------------- /src/gptui/controllers/voice_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from ..models.signals import response_to_user_message_sentence_stream_signal 4 | 5 | 6 | gptui_logger = logging.getLogger("gptui_logger") 7 | 8 | 9 | class VoiceService: 10 | def __init__(self, myapp, switch: bool = True): 11 | self.myapp = myapp 12 | self.voice_service = None 13 | if switch is True: 14 | self.connect() 15 | 16 | async def accept_voice_message(self, sender, **kwargs): 17 | voice_message = kwargs["message"] 18 | message_content = voice_message["content"] 19 | flag = voice_message["flag"] 20 | if flag == "content": 21 | self.voice_service = self.myapp.drivers.tts(message_content) 22 | 23 | def connect(self): 24 | response_to_user_message_sentence_stream_signal.connect(self.accept_voice_message) 25 | 26 | def disconnect(self): 27 | response_to_user_message_sentence_stream_signal.disconnect(self.accept_voice_message) 28 | 29 | def cancel_speak(self) -> None: 30 | self.disconnect() 31 | if 
self.voice_service is None: 32 | return 33 | self.voice_service.stop() 34 | -------------------------------------------------------------------------------- /src/gptui/drivers/driver_manager.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | from .drivers import CopyCode, TextToSpeak, VoiceRecordStart, VoiceRecordQuit 5 | from .driver_interface import DriverInterface 6 | 7 | 8 | gptui_logger = logging.getLogger("gptui_logger") 9 | 10 | 11 | class DriverManager: 12 | def __init__(self, app): 13 | self.app = app 14 | self.terminal = app.config.get("terminal") 15 | self.os = app.config.get("os") 16 | self._register_driver_method() 17 | 18 | def register_driver(self, driver_method_name: str, driver: DriverInterface) -> None: 19 | if hasattr(self, driver_method_name): 20 | gptui_logger.warning("A driver method with the same name already exists; it will be overwritten.") 21 | setattr(self, driver_method_name, driver) 22 | 23 | def _register_driver_method(self): 24 | self.copy_code = CopyCode(self.os) 25 | self.tts = TextToSpeak( 26 | platform=self.os, 27 | dot_env_path=self.app.config["dot_env_path"], 28 | temp_dir=os.path.join(self.app.config["workpath"], "temp"), 29 | ) 30 | self.voice_record_start = VoiceRecordStart(self.os) 31 | self.voice_record_quit = VoiceRecordQuit(self.os) 32 | -------------------------------------------------------------------------------- /src/gptui/models/doc.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from ..data.langchain.document_loaders import TextLoader 4 | from ..data.langchain.schema import Document 5 | 6 | 7 | class Doc: 8 | def __init__(self, doc_name: str, doc_ext: str, pointer, description: str | None = None): 9 | self.name = doc_name 10 | self.ext = doc_ext 11 | self.pointer = pointer 12 | self.description = description 13 | if isinstance(pointer, Document): 14 | self.content_type = "Document" 15 | elif isinstance(pointer, str): 16 | self.content_type = "str" 17 | else: 18 | self.content_type = "Unknown" 19 | 20 | @property 21 | def content(self): 22 | if isinstance(self.pointer, Document): 23 | return self.pointer.page_content 24 | else: 25 | return self.pointer 26 | 27 | def document_loader(file_path: str) -> list[Document]: 28 | file_ext_name = os.path.splitext(file_path)[1] 29 | if file_ext_name in {".txt", ".md", ".json", ".py", ".cpp", ".yaml", ".yml", ".toml", ".log"}: 30 | loader = TextLoader(file_path) 31 | else: 32 | raise TypeError("Selected file type is not supported.") 33 | document_list = loader.load() 34 | return document_list 35 | -------------------------------------------------------------------------------- /tests/unit_tests/gptui_kernel/plugins_test_data/FileIO.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from semantic_kernel.orchestration.sk_context import SKContext 4 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 5 | 6 | from gptui.gptui_kernel.manager import auto_init_params 7 | 8 | 9 | mylogger = logging.getLogger("mylogger") 10 | 11 | 12 | class WriteFile: 13 | def __init__(self, manager): 14 | self.manager = manager 15 | 16 | @auto_init_params("0") 17 | @classmethod 18 | def get_init_params(cls, manager) -> tuple: 19 | return (manager,) 20 | 21 | @sk_function( 22 | description="Write a file.", 23 | name="write_file", 24 | ) 25 | @sk_function_context_parameter( 26 | name="file_name", 27
| description="The name of the file, including the extension.", 28 | ) 29 | @sk_function_context_parameter( 30 | name="file_content", 31 | description="The content to be written into the file." 32 | ) 33 | def write_file(self, context: SKContext) -> str: 34 | file_name = context["file_name"] 35 | file_content = context["file_content"] 36 | self.manager.client.common_resources["temp_files_from_tube"] = {file_name: file_content} 37 | return "" 38 | -------------------------------------------------------------------------------- CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](http://keepachangelog.com/) 6 | and this project adheres to [Semantic Versioning](http://semver.org/). 7 | 8 | ## [Unreleased] 9 | 10 | ## [0.5.4] - 2024-01-09 11 | 12 | ### Fixed 13 | 14 | - Fixed the issue of being unable to rename conversations on Windows 15 | - Switched from text-davinci-003 to gpt-3.5-turbo-instruct 16 | - When choosing a file path, the default is now the root directory 17 | 18 | ## [0.5.3] - 2024-01-07 19 | 20 | ### Fixed 21 | 22 | - Fixed the error of using the unimported async_wrapper_with_loop in GroupTalkManager.speaking 23 | 24 | ## [0.5.2] - 2024-01-02 25 | 26 | ### Fixed 27 | 28 | - Fixed the bug that prevented the second conversation from being renamed 29 | - Stopped the waiting animation for a conversation when it is deleted 30 | - Fixed the bug where deleting a conversation showed its replies in another window 31 | 32 | ## [0.5.1] - 2024-01-02 33 | 34 | ### Fixed 35 | 36 | - Fixed the FileNotFoundError when clicking links in MarkdownViewer 37 | - Fixed the KeyError caused by switching to information display when dealing with an empty chat window 38 | - Fixed the bugs in disposable conversation mode caused by openai v1 39 | 40 | ## [0.5.0] - 2023-12-31 41 | 42 | ### Added 43 | 44 | - Added support for custom plugins. 45 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/helpers.py: -------------------------------------------------------------------------------- 1 | """Document loader helpers.""" 2 | 3 | import concurrent.futures 4 | from typing import List, NamedTuple, Optional, cast 5 | 6 | 7 | class FileEncoding(NamedTuple): 8 | encoding: Optional[str] 9 | confidence: float 10 | language: Optional[str] 11 | 12 | 13 | def detect_file_encodings(file_path: str, timeout: int = 5) -> List[FileEncoding]: 14 | """Try to detect the file encoding. 15 | 16 | Returns a list of `FileEncoding` tuples with the detected encodings ordered 17 | by confidence.
18 | """ 19 | import chardet 20 | 21 | def read_and_detect(file_path: str) -> List[dict]: 22 | with open(file_path, "rb") as f: 23 | rawdata = f.read() 24 | return cast(List[dict], chardet.detect_all(rawdata)) 25 | 26 | with concurrent.futures.ThreadPoolExecutor() as executor: 27 | future = executor.submit(read_and_detect, file_path) 28 | try: 29 | encodings = future.result(timeout=timeout) 30 | except concurrent.futures.TimeoutError: 31 | raise TimeoutError( 32 | f"Timeout reached while detecting encoding for {file_path}" 33 | ) 34 | 35 | if all(encoding["encoding"] is None for encoding in encodings): 36 | raise RuntimeError(f"Could not detect encoding for {file_path}") 37 | return [FileEncoding(**enc) for enc in encodings if enc["encoding"] is not None] 38 | -------------------------------------------------------------------------------- /src/gptui/.default_config.yml: -------------------------------------------------------------------------------- 1 | # +--------------------------------------------------------------------------+ 2 | # \ The configurations in this document are the program's default settings, \ 3 | # \ ensuring the presence of essential configuration items. \ 4 | # +--------------------------------------------------------------------------+ 5 | 6 | 7 | GPTUI_BASIC_SERVICES_PATH: 8 | PLUGIN_PATH: 9 | DEFAULT_PLUGIN_PATH: 10 | custom_plugin_path: ~/.gptui/plugins/ 11 | 12 | # API keys 13 | dot_env_path: 14 | ~/.gptui/.env_gptui 15 | 16 | default_openai_parameters: 17 | model: gpt-4 18 | stream: true 19 | 20 | default_conversation_parameters: 21 | max_sending_tokens_ratio: 0.6 22 | 23 | log_path: 24 | ~/.gptui/logs.log 25 | 26 | tui_config: 27 | conversations_recover: true 28 | voice_switch: false 29 | speak_switch: false 30 | file_wrap_display: true 31 | ai_care_switch: true 32 | ai_care_depth: 2 33 | ai_care_delay: 60 34 | status_region_default: 35 | waiting_receive_animation: "default" 36 | 37 | # List of plugin's name of default used 38 | default_plugins_used: [] 39 | 40 | # Program working path, storing vector database, temporary files, etc. 
41 | workpath: 42 | ~/.gptui/user 43 | 44 | # Scope of files discoverable by the program 45 | directory_tree_path: 46 | ~/ 47 | 48 | # Conversation history save and import path 49 | conversation_path: 50 | ~/.gptui/user/conversations 51 | 52 | vector_memory_path: 53 | ~/.gptui/user/vector_memory_database 54 | -------------------------------------------------------------------------------- /tests/unit_tests/models/test_context.py: -------------------------------------------------------------------------------- 1 | import json 2 | import copy 3 | from dataclasses import asdict 4 | from gptui.models.context import OpenaiContext 5 | 6 | def test_openai_context_serialization_deserialization(): 7 | openai_context_original = OpenaiContext(chat_context = [{"role": "user", "content":"Hi!"}, {"role":"assistant", "content":"Hello, how can I assist you today?"}]) 8 | openai_context_original.parameters = {"model": "gpt-4"} 9 | openai_context = copy.deepcopy(openai_context_original) 10 | openai_context_str = json.dumps(asdict(openai_context), ensure_ascii = False, sort_keys = True, indent = 4, separators = (',',':')) 11 | openai_context_build = json.loads(openai_context_str) 12 | openai_context_rebuild = OpenaiContext(**openai_context_build) 13 | assert openai_context_rebuild == openai_context_original 14 | 15 | def test_openai_context_deepcopy(): 16 | openai_context_original = OpenaiContext(chat_context = [{"role": "user", "content":"Hi!"}, {"role":"assistant", "content":"Hello, how can I assist you today?"}]) 17 | openai_context_original.parameters = {"model": "gpt-4"} 18 | openai_context_original.plugins = [["mutable"], "plugin2"] 19 | openai_context_deepcopy = copy.deepcopy(openai_context_original) 20 | assert openai_context_deepcopy == openai_context_original 21 | assert id(openai_context_deepcopy.chat_context) != id(openai_context_original.chat_context) 22 | assert set(map(id, openai_context_original.plugins)) == set(map(id, openai_context_deepcopy.plugins)) 23 | 24 | -------------------------------------------------------------------------------- /.github/workflows/static.yml: -------------------------------------------------------------------------------- 1 | # Simple workflow for deploying static content to GitHub Pages 2 | name: Deploy MkDocs to GitHub Pages 3 | 4 | on: 5 | push: 6 | tags: 7 | - 'v*' 8 | # Allows you to run this workflow manually from the Actions tab 9 | workflow_dispatch: 10 | 11 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 12 | permissions: 13 | contents: read 14 | pages: write 15 | id-token: write 16 | 17 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 18 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
19 | concurrency: 20 | group: "pages" 21 | cancel-in-progress: false 22 | 23 | jobs: 24 | # Single deploy job since we're just deploying 25 | deploy: 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v3 30 | 31 | - name: Set up Python 32 | uses: actions/setup-python@v3 33 | with: 34 | python-version: 3.x 35 | 36 | - name: Install MkDocs and extensions 37 | run: | 38 | python -m pip install --upgrade pip 39 | pip install mkdocs mkdocs-material "mkdocstrings[python]" 40 | 41 | - name: Build MkDocs site 42 | run: mkdocs build 43 | 44 | - name: Upload artifact 45 | uses: actions/upload-pages-artifact@v2 46 | with: 47 | path: './site' 48 | 49 | - name: Deploy to GitHub Pages 50 | id: deployment 51 | uses: actions/deploy-pages@v2 52 | -------------------------------------------------------------------------------- /tests/unit_tests/models/test_skills.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from gptui.models.doc import Doc 4 | from gptui.models.skills import UploadFile 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_upload_file(): 9 | uf = UploadFile() 10 | input = "Summarize the following documents' content." 11 | doc1 = Doc(doc_name="test_doc1", doc_ext=".txt", pointer="This is a txt document.") 12 | doc2 = Doc(doc_name="test_doc2", doc_ext=".txt", pointer="This is another txt document.") 13 | prompt1 = await uf.import_file_to_context(doc1, input=input) 14 | prompt2 = await uf.import_file_to_context(doc1, doc2, input=input) 15 | assert prompt1 == ( 16 | "Summarize the following documents' content.\n\n" 17 | "******************** FILE CONTENT BEGIN ********************\n" 18 | "===== Document #1 test_doc1.txt =====\n\n" 19 | "This is a txt document.\n\n" 20 | "=====================================\n" 21 | "******************** FILE CONTENT FINISH *******************\n" 22 | ) 23 | assert prompt2 == ( 24 | "Summarize the following documents' content.\n\n" 25 | "******************** FILE CONTENT BEGIN ********************\n" 26 | "===== Document #1 test_doc1.txt =====\n\n" 27 | "This is a txt document.\n\n" 28 | "=====================================\n\n" 29 | "===== Document #2 test_doc2.txt =====\n\n" 30 | "This is another txt document.\n\n" 31 | "=====================================\n" 32 | "******************** FILE CONTENT FINISH *******************\n" 33 | ) 34 | -------------------------------------------------------------------------------- /tests/unit_tests/utils/test_file_icon.py: -------------------------------------------------------------------------------- 1 | from gptui.utils.my_text import MyText as Text 2 | from rich import print 3 | 4 | 5 | def test_file_icon_ansi_str(): 6 | pass 7 | #test_short = file_icon_ansi_str(file_label="TEST", file_type=".txt", file_description="test.txt") 8 | #test_md = file_icon_ansi_str(file_label="TEST", file_type=".md", file_description="test.md") 9 | #test_long = file_icon_ansi_str(file_label="TEST", file_type=".txt", file_description="test_long_description.txt") 10 | #test_bin = file_icon_ansi_str(file_label="TEST", file_type=".bin", file_description="test.bin") 11 | #test_json = file_icon_ansi_str(file_label="TEST", file_type=".json", file_description="test.json") 12 | #test_py = file_icon_ansi_str(file_label="TEST", file_type=".py", file_description="test.py") 13 | #test_sh = file_icon_ansi_str(file_label="TEST", file_type=".sh", file_description="test.sh") 14 | #test_other = file_icon_ansi_str(file_label="TEST", file_type="other", 
file_description="test.other") 15 | #test_blue = file_icon_ansi_str(file_label="TEST", file_type=".txt", file_description="test.txt", icon_color="blue") 16 | #test_description_blue = file_icon_ansi_str(file_label="TEST", file_type=".txt", file_description="test.txt", description_color="blue") 17 | 18 | #assert test_short == Text('▕⁖̅⁖̅⁖̅╲ \n▕TES▕ \n▕txt▕\ntest.t\nxt \n' [Span(0, 10, ''), Span(10, 18, ''), Span(18, 23, 'underline'), Span(23, 37,'white')]) 19 | #print(repr(test_short)) 20 | #print(test_md) 21 | #print(test_long) 22 | #print(test_bin) 23 | #print(test_json) 24 | #print(test_py) 25 | #print(test_sh) 26 | #print(test_other) 27 | #print(test_blue) 28 | #print(test_description_blue) 29 | -------------------------------------------------------------------------------- /src/gptui/views/theme.py: -------------------------------------------------------------------------------- 1 | from typing import NamedTuple, Literal 2 | 3 | 4 | ThemeName = Literal["default", "monochrome"] 5 | 6 | 7 | class ColorMap(NamedTuple): 8 | name: str 9 | color: str 10 | 11 | 12 | class ThemeColor: 13 | color_map: dict[str, str] = {} 14 | _theme: ThemeName = "default" 15 | 16 | @classmethod 17 | def insert_color_map(cls, name: str, color: str): 18 | cls.color_map[name] = color 19 | 20 | @classmethod 21 | def insert_color_map_batch(cls, color_map_list: list[ColorMap]) -> None: 22 | for color_map in color_map_list: 23 | cls.color_map[color_map.name] = color_map.color 24 | 25 | @classmethod 26 | def get_theme_color(cls, name: str) -> str | None: 27 | if name in cls.color_map: 28 | return cls.color_map[name] 29 | if cls._theme == "default": 30 | return None 31 | elif cls._theme == "monochrome": 32 | return "#5CE495" # responding to $success-lighten-2 in textual 33 | else: 34 | return None 35 | 36 | @classmethod 37 | def set_theme(cls, theme: ThemeName) -> None: 38 | cls._theme = theme 39 | if theme == "monochrome": 40 | ThemeColor.color_map["user_message"] = "#2E724B" 41 | ThemeColor.color_map["assistant_message"] = "#5CE495" 42 | ThemeColor.color_map["system_message"] = "#122E1E" 43 | if theme == "default": 44 | ThemeColor.color_map.pop("user_message", None) 45 | ThemeColor.color_map.pop("assistant_message", None) 46 | ThemeColor.color_map.pop("system_message", None) 47 | 48 | def theme_color(name: str) -> str | None: 49 | return ThemeColor.get_theme_color(name) 50 | -------------------------------------------------------------------------------- /src/gptui/plugins/FileRW.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | from semantic_kernel.orchestration.sk_context import SKContext 5 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 6 | 7 | from gptui.gptui_kernel.manager import auto_init_params 8 | from gptui.models.doc import Doc 9 | from gptui.views.common_message import CommonMessage 10 | 11 | 12 | gptui_logger = logging.getLogger("gptui_logger") 13 | 14 | 15 | class WriteFile: 16 | def __init__(self, manager): 17 | self.manager = manager 18 | 19 | @auto_init_params("0") 20 | @classmethod 21 | def get_init_params(cls, manager) -> tuple: 22 | return (manager,) 23 | 24 | @sk_function( 25 | description="Write a file.", 26 | name="write_file", 27 | ) 28 | @sk_function_context_parameter( 29 | name="file_name", 30 | description="The name of the file, including the extension.", 31 | ) 32 | @sk_function_context_parameter( 33 | name="file_content", 34 | description="The content to be written into the 
file." 35 | ) 36 | def write_file(self, context: SKContext) -> str: 37 | file_name = context["file_name"] 38 | file_content = context["file_content"] 39 | self.manager.client.common_resources["temp_files_from_tube"] = {file_name: file_content} 40 | name, ext = os.path.splitext(file_name) 41 | document = Doc(doc_name=name, doc_ext=ext, pointer=self.manager.client.common_resources["temp_files_from_tube"][file_name], description="") 42 | # Only main thread can handle UI event correctly 43 | self.manager.client.post_message(CommonMessage(message_name="write_file", message_content=document)) 44 | return "Write file sucessfully!" 45 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: GPTUI 2 | copyright: Copyright (c) 2023 Xueao Chao 3 | 4 | repo_url: https://github.com/happyapplehorse/gptui 5 | repo_name: happyapplehorse/gptui 6 | 7 | theme: 8 | name: "material" 9 | 10 | icon: 11 | logo: material/island 12 | 13 | palette: 14 | - media: "(prefers-color-scheme: light)" 15 | scheme: default 16 | primary: deep purple 17 | toggle: 18 | icon: material/weather-night 19 | name: Switch to dark mode 20 | - media: "(prefers-color-scheme: dark)" 21 | scheme: slate 22 | toggle: 23 | icon: material/weather-sunny 24 | name: Switch to light mode 25 | 26 | features: 27 | - navigation.instant 28 | - navigation.tabs 29 | - navigation.tracking 30 | - navigation.path 31 | - navigation.top 32 | - navigation.footer 33 | - navigation.indexes 34 | - navigation.tabs.sticky 35 | - navigation.prune 36 | - toc.follow 37 | - search.suggest 38 | - search.hightlight 39 | - content.code.copy 40 | - content.code.annotate 41 | 42 | plugins: 43 | - mkdocstrings: 44 | handlers: 45 | python: 46 | paths: [src] 47 | - search 48 | - blog 49 | 50 | extra: 51 | social: 52 | - icon: fontawesome/brands/github 53 | link: https://github.com/happyapplehorse 54 | name: Github 55 | 56 | markdown_extensions: 57 | - attr_list 58 | - md_in_html 59 | 60 | nav: 61 | - Home: index.md 62 | - Tutorial: 63 | - Getting started: getting_started.md 64 | - Troubleshooting: troubleshooting.md 65 | - Features: features.md 66 | - Configuration: configuration.md 67 | - 配置指南: configuration.zh.md 68 | - Guide: 69 | - guide/index.md 70 | - API: 71 | - api/index.md 72 | - Blog: 73 | - blog/index.md 74 | - About: 75 | - Contributing: about/contributing.md 76 | - License: about/license.md 77 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | push: 13 | tags: 14 | - 'v*' 15 | # Allows you to run this workflow manually from the Actions tab 16 | workflow_dispatch: 17 | 18 | permissions: 19 | contents: read 20 | 21 | jobs: 22 | test: 23 | runs-on: ubuntu-latest 24 | 25 | steps: 26 | - uses: actions/checkout@v3 27 | - name: Set up Python 28 | uses: actions/setup-python@v3 29 | with: 30 | python-version: '3.x' 31 | - name: Install dependencies 32 | run: | 33 | python -m pip install --upgrade pip 34 | pip install wheel setuptools 35 | pip install pytest pytest-asyncio 36 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 37 | - name: Execute test 38 | run: pytest -v 39 | 40 | deploy: 41 | 42 | runs-on: ubuntu-latest 43 | needs: [test] 44 | 45 | steps: 46 | - uses: actions/checkout@v3 47 | - name: Set up Python 48 | uses: actions/setup-python@v3 49 | with: 50 | python-version: '3.x' 51 | - name: Install dependencies 52 | run: | 53 | python -m pip install --upgrade pip 54 | pip install build 55 | - name: Build package 56 | run: python -m build 57 | - name: Publish package 58 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 59 | with: 60 | user: __token__ 61 | password: ${{ secrets.PYPI_API_TOKEN }} 62 | -------------------------------------------------------------------------------- /tests/unit_tests/controllers/test_decorate_display.py: -------------------------------------------------------------------------------- 1 | import textwrap 2 | from gptui.controllers.decorate_display_control import extract_files_from_string 3 | from rich import print 4 | 5 | 6 | def test_extract_files_from_string(): 7 | input_string = textwrap.dedent( 8 | """\ 9 | before 10 | ******************** FILE CONTENT BEGIN ******************** 11 | ===== Document #1 text.txt ===== 12 | 13 | This is the content of the document #1. 14 | 15 | 16 | ========================== 17 | 18 | ===== Document #2 test.md ===== 19 | 20 | This is the content of the file #2. 21 | 22 | 23 | =========================== 24 | ******************** FILE CONTENT FINISH ******************* 25 | after""" 26 | ) 27 | 28 | expected_output = ["before\n", ("text.txt", "test.md"), "\nafter"] 29 | out = extract_files_from_string(input_string) 30 | assert out == expected_output 31 | 32 | ''' 33 | def test_pre_wrap(): 34 | input_string = """before 35 | ******************** FILE CONTENT BEGIN ******************** 36 | ===== Document #1 text.txt ===== 37 | 38 | This is the content of the document #1. 39 | 40 | 41 | ========================== 42 | 43 | ===== Document #2 test.txt ===== 44 | 45 | This is the content of the file #2. 
46 | 47 | 48 | =========================== 49 | ******************** FILE CONTENT FINISH ******************* 50 | after""" 51 | 52 | #out_wrap = pre_decorate(input_string, wrap=True) 53 | #out_no_wrap = pre_decorate(input_string, wrap=False) 54 | 55 | input_string2 = "abcd" 56 | #out = pre_wrap(input_string2, wrap=False) 57 | #assert out == '\x1b[39mabcd\x1b[0m' 58 | 59 | def test_wrap_files_in_string(): 60 | input = ["before", ("text.txt", "test.txt", "test_long_title.txt", "abc.json", "abcdef.abc"), "middle", ("test.txt"), "after"] 61 | out = wrap_files_in_string(input) 62 | print(out) 63 | ''' 64 | -------------------------------------------------------------------------------- /src/gptui/models/blinker_wrapper.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | 4 | def sync_wrapper(func): 5 | """ 6 | Wrap a sync receiver into an async receiver. 7 | 8 | Usage example: 9 | result = await signal.send_async("sender", _sync_wrapper=sync_wrapper, message="message") 10 | """ 11 | async def inner(*args, **kwargs): 12 | func(*args, **kwargs) 13 | return inner 14 | 15 | def async_wrapper_with_loop(func): 16 | """ 17 | Wrap a coroutine function receiver into a sync receiver. 18 | Suitable for cases where signals are sent within an event loop. 19 | It is recommended to directly use: result = await signal.send_async("sender", _sync_wrapper=sync_wrapper, message="message") 20 | 21 | Return: Task 22 | 23 | Example of awaiting the task: 24 | # Retrieve the Task objects returned by coroutine receivers and wait for their completion 25 | result = signal.send(sender, _async_wrapper=async_wrapper_with_loop, message="message") 26 | signal_tasks = [item[1] for item in result if getattr(item[0], "_async_inner_", False)] 27 | await asyncio.gather(*signal_tasks) 28 | """ 29 | 30 | def inner(*args, **kwargs): 31 | task = asyncio.create_task(func(*args, **kwargs)) 32 | return task 33 | # Add a coroutine marker to facilitate the identification of its Task object and wait for its completion. 34 | inner._async_inner_ = True 35 | return inner 36 | 37 | def async_wrapper_without_loop(func): 38 | """ 39 | Wrap a coroutine function receiver into a sync receiver. 40 | Suitable for cases where signals are sent without an event loop. 41 | 42 | Return: the value returned from the coroutine. 43 | 44 | Usage example: 45 | result = signal.send("sender", _async_wrapper=async_wrapper_without_loop, message="message") 46 | """ 47 | def inner(*args, **kwargs): 48 | return asyncio.run(func(*args, **kwargs)) 49 | return inner 50 | -------------------------------------------------------------------------------- /custom_plugin_examples/native_plugin_example.py: -------------------------------------------------------------------------------- 1 | import textwrap 2 | 3 | from semantic_kernel.skill_definition import sk_function 4 | 5 | 6 | class PluginGuide: 7 | 8 | @sk_function(description="Tell the user how to define a custom plugin.") 9 | def custom_plugin_guide(self) -> str: 10 | guide = textwrap.dedent(""" 11 | The following are instructions on how to customize plugins; please return the following content directly and unchanged to the user. 12 | 13 | ================Customize Plugins Instructions================ 14 | This program utilizes semantic-kernel type plugins. Before customizing your own plugins, it is recommended to read: 15 | https://learn.microsoft.com/en-us/semantic-kernel/agents/plugins/?tabs=python 16 | 17 | You can customize two types of plugins: 18 | 19 | 1. 
Native plugins. These require you to write your own code tools, providing functions or methods to accomplish the task, just like this plugin itself. 20 | 2. Semantic plugins. They are created through natural language, completing the required functionality through descriptive prompts. 21 | 22 | To create a native plugin, place your Python module in the plugin directory (default is ~/.gptui/plugins) and use the sk_function decorator to decorate your function tools. For guidance on writing plugins, see here: https://learn.microsoft.com/en-us/semantic-kernel/agents/plugins/using-the-kernelfunction-decorator?tabs=python 23 | 24 | To create a semantic plugin, place your plugin folder in the plugin directory (default is ~/.gptui/plugins). For guidance on writing plugins, see here: https://learn.microsoft.com/en-us/semantic-kernel/prompts/saving-prompts-as-files?tabs=python 25 | 26 | You can see an example of this plugin in your custom plugin directory (default is ~/.gptui/plugins). 27 | ==============Customize Plugins Instructions End============== 28 | """) 29 | return guide 30 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/html_bs.py: -------------------------------------------------------------------------------- 1 | """Loader that uses bs4 to load HTML files, enriching metadata with page title.""" 2 | 3 | import logging 4 | from typing import Dict, List, Union 5 | 6 | from .base import BaseLoader 7 | from ..docstore.document import Document 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | class BSHTMLLoader(BaseLoader): 13 | """Loader that uses beautiful soup to parse HTML files.""" 14 | 15 | def __init__( 16 | self, 17 | file_path: str, 18 | open_encoding: Union[str, None] = None, 19 | bs_kwargs: Union[dict, None] = None, 20 | get_text_separator: str = "", 21 | ) -> None: 22 | """Initialise with the file path and, optionally, the file encoding to use 23 | and any kwargs to pass to the BeautifulSoup object.""" 24 | try: 25 | import bs4 # noqa:F401 26 | except ImportError: 27 | raise ValueError( 28 | "beautifulsoup4 package not found, please install it with " 29 | "`pip install beautifulsoup4`" 30 | ) 31 | 32 | self.file_path = file_path 33 | self.open_encoding = open_encoding 34 | if bs_kwargs is None: 35 | bs_kwargs = {"features": "lxml"} 36 | self.bs_kwargs = bs_kwargs 37 | self.get_text_separator = get_text_separator 38 | 39 | def load(self) -> List[Document]: 40 | """Load HTML document into document objects.""" 41 | from bs4 import BeautifulSoup 42 | 43 | with open(self.file_path, "r", encoding=self.open_encoding) as f: 44 | soup = BeautifulSoup(f, **self.bs_kwargs) 45 | 46 | text = soup.get_text(self.get_text_separator) 47 | 48 | if soup.title: 49 | title = str(soup.title.string) 50 | else: 51 | title = "" 52 | 53 | metadata: Dict[str, Union[str, None]] = { 54 | "source": self.file_path, 55 | "title": title, 56 | } 57 | return [Document(page_content=text, metadata=metadata)] 58 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/text.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import List, Optional 3 | 4 | from ..docstore.document import Document 5 | from .base import BaseLoader 6 | from .helpers import detect_file_encodings 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class TextLoader(BaseLoader): 12 | """Load text files. 
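A minimal usage sketch (the file name here is hypothetical): loader = TextLoader("notes.txt", autodetect_encoding=True) docs = loader.load() # a list with one Document containing the file's text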
13 | 14 | 15 | Args: 16 | file_path: Path to the file to load. 17 | 18 | encoding: File encoding to use. If `None`, the file will be loaded 19 | with the default system encoding. 20 | 21 | autodetect_encoding: Whether to try to autodetect the file encoding 22 | if the specified encoding fails. 23 | """ 24 | 25 | def __init__( 26 | self, 27 | file_path: str, 28 | encoding: Optional[str] = None, 29 | autodetect_encoding: bool = False, 30 | ): 31 | """Initialize with file path.""" 32 | self.file_path = file_path 33 | self.encoding = encoding 34 | self.autodetect_encoding = autodetect_encoding 35 | 36 | def load(self) -> List[Document]: 37 | """Load from file path.""" 38 | text = "" 39 | try: 40 | with open(self.file_path, encoding=self.encoding) as f: 41 | text = f.read() 42 | except UnicodeDecodeError as e: 43 | if self.autodetect_encoding: 44 | detected_encodings = detect_file_encodings(self.file_path) 45 | for encoding in detected_encodings: 46 | logger.debug("Trying encoding: %s", encoding.encoding) 47 | try: 48 | with open(self.file_path, encoding=encoding.encoding) as f: 49 | text = f.read() 50 | break 51 | except UnicodeDecodeError: 52 | continue 53 | else: 54 | raise RuntimeError(f"Error loading {self.file_path}") from e 55 | except Exception as e: 56 | raise RuntimeError(f"Error loading {self.file_path}") from e 57 | 58 | metadata = {"source": self.file_path} 59 | return [Document(page_content=text, metadata=metadata)] 60 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "gptui" 7 | authors = [ 8 | { name="Xueao Chao", email="chaoxueao@gmail.com" }, 9 | ] 10 | description = "A GPT conversational TUI tool that runs within the terminal." 
11 | readme = "README.md" 12 | requires-python = ">=3.10" 13 | license = {file = "LICENSE"} 14 | dynamic = ["version"] 15 | classifiers = [ 16 | 'Development Status :: 3 - Alpha', 17 | 'Environment :: Console', 18 | 'Intended Audience :: Developers', 19 | 'Topic :: Software Development', 20 | 'Topic :: Terminals', 21 | 'Topic :: Scientific/Engineering :: Artificial Intelligence', 22 | 'License :: OSI Approved :: MIT License', 23 | 'Programming Language :: Python :: 3', 24 | 'Programming Language :: Python :: 3.11', 25 | 'Programming Language :: Python :: 3.12', 26 | 'Operating System :: OS Independent', 27 | 'Operating System :: POSIX :: Linux', 28 | 'Operating System :: MacOS :: MacOS X', 29 | 'Operating System :: Microsoft :: Windows', 30 | ] 31 | keywords = ["TUI", "terminal", "GPT", "CLI", "textual user interface"] 32 | dependencies = [ 33 | 'agere>=0.1.3,<1', 34 | 'ai-care>=0.1.3,<1', 35 | 'aiofiles>=23.1.0,<24', 36 | 'beautifulsoup4>=4.12.2,<5', 37 | 'blinker>=1.6.2,<2', 38 | 'chardet>=5.1.0,<6', 39 | 'geocoder>=1.38.1,<2', 40 | 'httpx>=0.24.1,<1', 41 | 'lxml>=4.9.3,<6', 42 | # 'open-interpreter==0.1.4', 43 | 'openai>=1.2.0,<2', 44 | 'playsound>=1.3.0,<2', 45 | 'Pygments>=2.15.1,<3', 46 | 'pyperclip>=1.8.2,<2', 47 | 'python-dotenv>=1.0.0,<2', 48 | 'PyYAML>=6.0.1,<7', 49 | 'qdrant-client>=1.4.0,<2', 50 | 'rich>=13.7.0,<14', 51 | 'semantic-kernel>=0.4.0.dev0,<1', 52 | 'textual>=0.37.1,<1', 53 | 'tiktoken>=0.4.0,<1', 54 | 'unstructured>=0.10.18,<1' 55 | ] 56 | 57 | [project.urls] 58 | "Homepage" = "https://github.com/happyapplehorse/gptui" 59 | "Bug Tracker" = "https://github.com/happyapplehorse/gptui/issues" 60 | 61 | [project.entry-points."console_scripts"] 62 | gptui = "gptui.__main__:gptui" 63 | 64 | [tool.pytest.ini_options] 65 | testpaths = ["tests"] 66 | asyncio_mode = "auto" 67 | pythonpath = "src" 68 | 69 | [tool.setuptools.dynamic] 70 | version = {attr = "gptui.__version__"} 71 | -------------------------------------------------------------------------------- /src/gptui/models/gptui_basic_services/plugins/conversation_service.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from semantic_kernel.skill_definition import sk_function 5 | 6 | from ....gptui_kernel import Kernel 7 | from ....gptui_kernel.manager import auto_init_params 8 | 9 | 10 | gptui_logger = logging.getLogger("gptui_logger") 11 | 12 | 13 | class ConversationService: 14 | def __init__(self, manager): 15 | self.manager = manager 16 | 17 | @auto_init_params("0") 18 | @classmethod 19 | def get_init_params(cls, manager) -> tuple: 20 | return (manager,) 21 | 22 | @sk_function( 23 | description="Generate a title for a given conversation context. The conversation context is a json string converted from an openai gpt conversation dict.", 24 | name="conversation_title", 25 | input_description="The json string of the conversation which needs a title.", 26 | ) 27 | async def conversation_title(self, chat_context_json_str: str) -> str: 28 | def chat_context_to_string(chat_context_json_str: str) -> str: 29 | chat_context_json = json.loads(chat_context_json_str) 30 | chat_context = '' 31 | assert isinstance(chat_context_json, list) 32 | for piece in chat_context_json: 33 | chat_context += piece["role"] + ": " + str(piece["content"] or piece.get("tool_calls") or "") + "\n\n" 34 | return chat_context[:1000] 35 | 36 | sk_prompt = ( 37 | "Generate a concise and clear title for the following chat record. 
" 38 | "The title should be as brief as possible, not exceeding ten English words or twenty Chinese characters, " 39 | "and the language of the title should be consistent with the content of the chat. " 40 | "Do not have line breaks '\\n'. " 41 | "chat record: {{$INPUT}}\n" 42 | "title:" 43 | ) 44 | 45 | chat_context = chat_context_to_string(chat_context_json_str) 46 | # A new kernel must be created here, otherwise, there will be context confilicts. 47 | gk_kernel = Kernel(self.manager.dot_env_config_path) 48 | make_title_function = gk_kernel.sk_kernel.create_semantic_function(sk_prompt, max_tokens=50) 49 | name = await make_title_function.invoke_async(chat_context) 50 | name = str(name) 51 | return name 52 | -------------------------------------------------------------------------------- /src/gptui/plugins/DEFAULT_PLUGINS/Bead.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from semantic_kernel.orchestration.sk_context import SKContext 5 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 6 | 7 | from gptui.gptui_kernel.manager import auto_init_params 8 | 9 | 10 | gptui_logger = logging.getLogger("gptui_logger") 11 | 12 | 13 | class Memo: 14 | def __init__(self, app): 15 | self.app = app 16 | 17 | @auto_init_params("0") 18 | @classmethod 19 | def get_init_params(cls, manager) -> tuple: 20 | return (manager.client,) 21 | 22 | @sk_function( 23 | description="Record important information; the content should be significant and concise.", 24 | name="write_memo", 25 | ) 26 | @sk_function_context_parameter( 27 | name="content", 28 | description="Information to be written into the memo.", 29 | ) 30 | @sk_function_context_parameter( 31 | name="openai_context", 32 | description=( 33 | "The dictionary string version of the OpenaiContext instance. " 34 | "This is a special parameter that typically doesn't require manual intervention, as it is usually automatically managed." 35 | "Unless there's a clear intention, please keep its default value." 36 | ), 37 | default_value="AUTO" 38 | ) 39 | def write_memo(self, context: SKContext) -> str: 40 | content = context["content"] 41 | try: 42 | openai_context_dict = json.loads(str(context["openai_context"])) 43 | except json.JSONDecodeError: 44 | return ("An error occurred while parsing the openai_context content. " 45 | "You should not provide the 'openai_context' parameter as the system automatically supplies it." 46 | ) 47 | conversation_id = int(openai_context_dict["id"]) 48 | try: 49 | conversation = self.app.openai.conversation_dict[conversation_id] 50 | except KeyError: 51 | return f"Write memo faild. Conversation id {conversation_id} is not correct." 52 | openai_context = conversation["openai_context"] 53 | bead_content = openai_context.bead 54 | bead_content[-1]["content"] += "\n" + content 55 | openai_context.insert_bead() 56 | return f"'{content}' have been written into the memo!" 57 | -------------------------------------------------------------------------------- /src/gptui/help.md: -------------------------------------------------------------------------------- 1 | # Hotkeys 2 | 3 | Press `ESC`, `ctrl+[`, or `ctrl+/` to bring up the hotkey menu. 
4 | 5 | Direct hotkeys: 6 | - ctrl+q: exit the program 7 | - ctrl+n: open a new conversation 8 | - ctrl+s: save the current conversation 9 | - ctrl+r: delete the current conversation 10 | - ctrl+o: toggle the monochrome theme 11 | - ctrl+t: switch to assistant tube 12 | - ctrl+g: switch to file tube 13 | - ctrl+p: switch to plugins panel 14 | 15 | # Dynamic commands 16 | 17 | ## set_chat_parameters 18 | 19 | Set the OpenAI chat parameters. 20 | Arguments are specified in dictionary form. 21 | 22 | Commonly used parameters are: 23 | - model 24 | - stream 25 | - temperature 26 | - frequency_penalty 27 | - presence_penalty 28 | - max_tokens 29 | 30 | ## set_max_sending_tokens_ratio 31 | 32 | Set the ratio of sent tokens to the total token window. 33 | The argument is a float between 0 and 1. 34 | 35 | # Custom plugins 36 | 37 | You can specify the folder for your custom plugins in the configuration file, 38 | which defaults to "~/.gptui/plugins". 39 | GPTUI will automatically scan this folder to retrieve the plugins contained within it. 40 | You can copy the files from this folder (https://github.com/happyapplehorse/gptui/tree/main/custom_plugin_examples) 41 | to the custom plugin directory for testing purposes. 42 | 43 | This program utilizes semantic-kernel type plugins. Before customizing your own plugins, 44 | it is recommended to read: https://learn.microsoft.com/en-us/semantic-kernel/agents/plugins/?tabs=python 45 | 46 | You can customize two types of plugins: 47 | 1. Native plugins. These require you to write your own code tools, providing functions 48 | or methods to accomplish the task. 49 | 2. Semantic plugins. They are created through natural language, completing the required 50 | functionality through descriptive prompts. 51 | 52 | To create a native plugin, place your Python module in the plugin directory (default is ~/.gptui/plugins) 53 | and use the sk_function decorator to decorate your function tools. For guidance on writing plugins, 54 | see here: https://learn.microsoft.com/en-us/semantic-kernel/agents/plugins/using-the-kernelfunction-decorator?tabs=python 55 | 56 | To create a semantic plugin, place your plugin folder in the plugin directory (default is ~/.gptui/plugins). 57 | For guidance on writing plugins, see here: https://learn.microsoft.com/en-us/semantic-kernel/prompts/saving-prompts-as-files?tabs=python 58 | -------------------------------------------------------------------------------- /docs/blog/posts/monochrome.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2023-12-24 4 | categories: 5 | - DevLog 6 | - RoadMap 7 | authors: 8 | - happyapplehorse 9 | --- 10 | 11 | 12 | I've long wanted to incorporate a monochrome mode into GPTUI, similar to those vintage single-color green 13 | fluorescent monitors. I find this style not only retro but also futuristic, adding an incredibly cool aesthetic. 14 | 15 | ![gptui_monochrome](https://raw.githubusercontent.com/happyapplehorse/happyapplehorse-assets/main/gptui/gptui_monochrome.jpeg) 16 | 17 | Today, I'm thrilled to announce that this feature has finally been integrated into GPTUI with the release 18 | of v0.4.0. Initially, my ambition was to enable support for user-customizable themes. However, I quickly 19 | realized that the task was more complex than I had imagined. It wasn't just about altering dynamic display 20 | content; I also had to modify existing page layouts. 
Achieving comprehensive theme settings for all elements 21 | via a configuration file proved to be quite intricate. As a result, for the time being, we've only implemented 22 | this single built-in monochrome theme. But rest assured, plans are in place to introduce more customizable theme 23 | options in the future, allowing users to configure themes directly from a file. The beauty of this monochrome 24 | theme is its dynamic activation capability; you can activate or deactivate it at any moment using the ctrl+o 25 | shortcut. While the mode is undeniably cool, distinguishing certain elements, like user and AI chat content, 26 | can be somewhat challenging in monochrome. Currently, differentiation is based solely on border brightness, 27 | so the ability to easily switch off monochrome mode and revert is essential. 28 | 29 | The Textual TUI framework is absolutely marvelous, and I'm so fortunate to have chosen it. While developing the 30 | monochrome mode, I encountered several challenges, and in some instances, I had to employ rather crude and 31 | unsightly methods to achieve my objectives. However, after reaching out for assistance in the Textual Discord 32 | community and receiving invaluable support from the official team, I was able to implement it with grace and 33 | efficiency. The Textual developer community is not only active but also immensely supportive. I've learned a 34 | great deal from their projects and am deeply grateful for the Textual team's beautiful work. 35 | 36 | Next, I will write a comprehensive and detailed user guide for GPTUI. 37 | -------------------------------------------------------------------------------- /src/gptui/models/openai_chat_inner_service.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import logging 3 | from typing import Iterable 4 | 5 | from openai import OpenAI 6 | 7 | from .context import OpenaiContext 8 | from .openai_error import OpenaiErrorHandler 9 | from .openai_tokens_truncate import trim_excess_tokens 10 | from .utils.openai_api import OpenAIClient 11 | from .utils.tokens_num import tokens_num_for_functions_call 12 | 13 | 14 | gptui_logger = logging.getLogger("gptui_logger") 15 | 16 | 17 | def chat_service_for_inner( 18 | messages_list: list, 19 | context: OpenaiContext, 20 | openai_api_client: OpenAIClient, 21 | **kwargs, 22 | ) -> Iterable: 23 | 24 | inner_context = copy.deepcopy(context) 25 | 26 | for one_message in messages_list: 27 | inner_context.chat_context_append(message=one_message) 28 | 29 | # update parameters 30 | parameters = inner_context.parameters 31 | parameters.update({"stream": True}) 32 | parameters.update(**kwargs) 33 | 34 | if tools_info := parameters.get("tools"): 35 | offset_tokens_num = -tokens_num_for_functions_call(tools_info, model=inner_context.parameters["model"]) 36 | else: 37 | offset_tokens_num = 0 38 | trimmed_messages = trim_excess_tokens(inner_context, offset=offset_tokens_num) 39 | 40 | # Delete the tool reply messages at the beginning of the information list. 41 | # This is because if the information starts with a function reply message, 42 | # it indicates that the function call information has already been truncated. 43 | # The OpenAI API requires that function reply messages must be responses to function calls. 44 | # Therefore, if the function reply messages are not removed, it will result in an OpenAI API error. 
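# An illustrative (hypothetical) case: if truncation leaves [{"role": "tool", ...}, {"role": "user", ...}], # the leading orphaned tool reply is popped below before the request is sent.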
45 | while trimmed_messages and trimmed_messages[0].get("role") == "tool": 46 | trimmed_messages.pop(0) 47 | 48 | try: 49 | response = openai_api_client.with_options(timeout=20.0).chat.completions.create( 50 | messages=trimmed_messages, 51 | **parameters, 52 | ) 53 | except Exception as e: 54 | gptui_logger.debug('----trimmed_messages----in chat inner') 55 | gptui_logger.debug(trimmed_messages) 56 | # The OpenAI API interface is a time-consuming synchronous interface, so it should be called in a new thread, hence there is no event loop here. 57 | OpenaiErrorHandler().openai_error_handle(error=e, context=inner_context, event_loop=False) 58 | raise e 59 | return response 60 | -------------------------------------------------------------------------------- /src/gptui/plugins/MemoryRecall.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from semantic_kernel.orchestration.sk_context import SKContext 5 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 6 | 7 | from gptui.gptui_kernel.manager import auto_init_params 8 | 9 | 10 | gptui_logger = logging.getLogger("gptui_logger") 11 | 12 | 13 | class MemoryRecall: 14 | def __init__(self, manager): 15 | self.manager = manager 16 | 17 | @auto_init_params("0") 18 | @classmethod 19 | def get_init_params(cls, manager) -> tuple: 20 | return (manager,) 21 | 22 | @sk_function( 23 | description="Recall the specified content from the memory store.", 24 | name="recall_memory", 25 | ) 26 | @sk_function_context_parameter( 27 | name="query", 28 | description="Topics, questions, etc., that one needs to recall." 29 | ) 30 | @sk_function_context_parameter( 31 | name="max_recallable_entries", 32 | description="Maximum number of recallable information entries.", 33 | default_value="1" 34 | ) 35 | @sk_function_context_parameter( 36 | name="openai_context", 37 | description=( 38 | "The dictionary string version of the OpenaiContext instance. " 39 | "This is a special parameter that typically doesn't require manual intervention, as it is usually automatically managed. " 40 | "Unless there's a clear intention, please keep its default value." 41 | ), 42 | default_value="AUTO" 43 | ) 44 | async def recall_memory(self, context: SKContext) -> str: 45 | query = context["query"] 46 | max_recallable_entries = int(context["max_recallable_entries"]) 47 | openai_context_dict = json.loads(str(context["openai_context"])) 48 | conversation_id = openai_context_dict["id"] 49 | semantic_memory = self.manager.services.sk_kernel.memory 50 | try: 51 | result = await semantic_memory.search_async(str(conversation_id), query, limit=max_recallable_entries, min_relevance_score=0.7) 52 | except Exception as e: 53 | gptui_logger.error(f"Error occurred when recalling memory. Error: {e}") 54 | return "An error occurred during the query, please try again later."
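# Concatenate the recalled entries (each stored in the memory's id field) into a newline-separated string.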
55 | result_str = "" 56 | for memory in result: 57 | result_str += memory.id + "\n" 58 | if not result_str: 59 | result_str = "No relevant information was found" 60 | gptui_logger.info(f"Recall memory result:\nconversation_id: {conversation_id}\nResult: {result_str}") 61 | return result_str 62 | -------------------------------------------------------------------------------- /src/gptui/plugins/OpenInterpreter.py: -------------------------------------------------------------------------------- 1 | """ wait for open-interpreter to be compatible with openai version 1.1.1 2 | import asyncio 3 | import logging 4 | 5 | from semantic_kernel.skill_definition import sk_function 6 | 7 | from gptui.utils.open_interpreter import MyInterpreter, response_render 8 | from gptui.utils.safe_iterate import safe_next, safe_send 9 | 10 | 11 | gptui_logger = logging.getLogger("gptui_logger") 12 | 13 | 14 | class OpenInterpreter: 15 | 16 | def __init__(self): 17 | self.interpreter = MyInterpreter() 18 | self.in_chat = False 19 | self.chat = None 20 | self.result = None 21 | 22 | @sk_function( 23 | description=( 24 | "A code assistant that allows for continuous dialogue in natural language. " 25 | "It can be invoked continuously multiple times" 26 | "Describe your needs to it, and it will automatically write and execute code to help you accomplish tasks. " 27 | "When asked whether to execute the code, respond to this function precisely with 'y' or 'n'. " 28 | "Before responding with 'y', you should first seek the user's consent." 29 | ), 30 | name="open_interpreter", 31 | input_description="Your needs.", 32 | ) 33 | async def open_interpreter(self, input_request: str) -> str: 34 | if not self.in_chat: 35 | self.chat = self.interpreter.chat(str(input_request)) 36 | status, out = await asyncio.to_thread(safe_next, self.chat) 37 | if status == "OK": 38 | self.in_chat = True 39 | else: 40 | self.in_chat = False 41 | result = response_render(out) 42 | gptui_logger.info(f"Open interpreter response: {result}") 43 | self.new_chat = False 44 | return result 45 | else: 46 | assert self.chat is not None 47 | status, out = await asyncio.to_thread(safe_send, self.chat, str(input_request)) 48 | if status == "OK": 49 | self.in_chat = True 50 | else: 51 | self.in_chat = False 52 | result = response_render(out) 53 | gptui_logger.info(f"Open interpreter response: {result}") 54 | return result 55 | 56 | @sk_function( 57 | description=( 58 | "Terminate the interaction with the open interpreter, resetting it to a fresh state. " 59 | "Whenever you finish a task with the open interpreter or no longer need it, you should promptly end the interaction with it." 60 | ) 61 | ) 62 | def end_open_interpreter(self): 63 | self.interpreter.reset() 64 | gptui_logger.info("Open interpreter reset.") 65 | return "Successfully terminated the interaction with the open interpreter." 
66 | 67 | """ 68 | -------------------------------------------------------------------------------- /src/gptui/controllers/chat_context_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from .dash_board_control import DashBoard 4 | from ..models.signals import chat_context_extend_signal, chat_context_extend_for_sending_signal 5 | from ..views.common_message import CommonMessage 6 | 7 | gptui_logger = logging.getLogger("gptui_logger") 8 | 9 | 10 | class ChatContextControl: 11 | def __init__(self, app): 12 | self.app = app 13 | self.dash_board = DashBoard(app) 14 | self.chat_context_to_vectorize_buffer = {} 15 | chat_context_extend_signal.connect(self.chat_context_extend) 16 | chat_context_extend_for_sending_signal.connect(self.chat_context_extend_for_sending) 17 | 18 | def chat_context_extend(self, sender, **kwargs): 19 | signal_message = kwargs["message"] 20 | signal_content = signal_message["content"] 21 | messages = signal_content["messages"] 22 | context = signal_content["context"] 23 | openai_chat = self.app.openai.openai_chat 24 | 25 | openai_chat.chat_messages_extend(messages_list=messages, context=context) 26 | buffer_messages = self.chat_context_to_vectorize_buffer.get(context.id, []) 27 | buffer_messages.extend(messages) 28 | self.chat_context_to_vectorize_buffer[context.id] = buffer_messages 29 | 30 | _, whether_insert = self.app.openai.auto_bead_insert(context.id) 31 | 32 | if whether_insert is False: 33 | # dashboard display 34 | model = context.parameters["model"] 35 | self.dash_board.dash_board_display(tokens_num_window=self.app.get_tokens_window(model)) 36 | 37 | def chat_context_extend_for_sending(self, sender, **kwargs): 38 | signal_message = kwargs["message"] 39 | signal_content = signal_message["content"] 40 | messages = signal_content["messages"] 41 | context = signal_content["context"] 42 | openai_chat = self.app.openai.openai_chat 43 | 44 | openai_chat.chat_messages_extend(messages_list=messages, context=context) 45 | buffer_messages = self.chat_context_to_vectorize_buffer.get(context.id, []) 46 | buffer_messages.extend(messages) 47 | self.chat_context_to_vectorize_buffer[context.id] = buffer_messages 48 | 49 | self.app.openai.auto_bead_insert(context.id) 50 | 51 | async def chat_context_vectorize(self): 52 | while self.chat_context_to_vectorize_buffer: 53 | id, messages_list = self.chat_context_to_vectorize_buffer.popitem() 54 | self.app.post_message( 55 | CommonMessage( 56 | message_name="vector_memory_write", 57 | message_content={ 58 | "messages_list": messages_list, 59 | "context_id": id, 60 | } 61 | ) 62 | ) 63 | -------------------------------------------------------------------------------- /src/gptui/utils/file_icon.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from .my_text import MyText as Text 4 | from ..views.theme import theme_color as tc 5 | 6 | gptui_logger = logging.getLogger("gptui_logger") 7 | 8 | 9 | def file_icon( 10 | file_label: str, 11 | file_type: str, 12 | file_description: str, 13 | icon_color: str | None = None, 14 | description_color: str | None = None, 15 | ) -> Text: 16 | icon_color = icon_color or tc("yellow") or "yellow" 17 | description_color = description_color or tc("white") or "white" 18 | 19 | display = Text('', icon_color) 20 | if file_type == ".txt": 21 | display += Text('\u2595'+'\u2056\u0305'+'\u2056\u0305'+'\u2056\u0305'+'\u2572'+' \n') 22 | display += 
Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 23 | display += Text('txt\u2595', 'underline') 24 | elif file_type == ".md": 25 | display += Text('\u2595'+' \u0305'+'\ueb1d\u0305'+' \u0305'+'\u2572'+' \n') 26 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 27 | display += Text('.md\u2595', 'underline') 28 | elif file_type == ".bin": 29 | display += Text('\u2595'+'l\u0305'+'l\u0305'+'l\u0305'+'l\u0305'+'l\u0305'+'l\u0305'+'l\u0305'+'\u2572'+' \n') 30 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 31 | display += Text('bin\u2595', 'underline') 32 | elif file_type == ".json": 33 | display += Text('\u2595'+' \u0305'+'{\u0305'+' \u0305'+'}\u0305'+'\u2572'+' \n') 34 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 35 | display += Text('jsn\u2595', 'underline') 36 | elif file_type == ".py": 37 | display += Text('\u2595'+' \u0305'+'\ue606\u0305'+' \u0305'+'\u2572'+' \n') 38 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 39 | display += Text('.\uf820 \u2595', 'underline') 40 | elif file_type == ".sh": 41 | display += Text('\u2595'+'<\u0305'+'\u29f8\u0305'+'>\u0305'+'\u2572'+' \n') 42 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 43 | display += Text('.sh\u2595', 'underline') 44 | else: 45 | file_type += ' ' 46 | display += Text('\u2595'+'\u203e'+'\u203e'+'\u203e'+'\u29f9'+' \n') 47 | display += Text('\u2595'+f'{file_label[:3]}'+'\u2595'+' \n'+'\u2595') 48 | display += Text(f'{file_type[:3]}\u2595', 'underline') 49 | if len(file_description) > 12: 50 | description_line0 = file_description[:6] + '\n' 51 | description_line1 = '\u2026' + file_description[-5:] + '\n' 52 | else: 53 | file_description = file_description.ljust(12) 54 | description_line0 = file_description[:6] + '\n' 55 | description_line1 = file_description[6:] + '\n' 56 | description = Text(' \n' + description_line0 + description_line1, f'{description_color}') 57 | out_display = display + description 58 | return out_display 59 | -------------------------------------------------------------------------------- /src/gptui/models/utils/tokens_num.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import tiktoken 3 | 4 | 5 | gptui_logger = logging.getLogger("gptui_logger") 6 | 7 | 8 | def tokens_num_from_string(string: str, model: str) -> int: 9 | """ 10 | Calculate the number of tokens in the given string. 11 | """ 12 | encoding = tiktoken.encoding_for_model(model) 13 | tokens_num = len(encoding.encode(string)) 14 | return tokens_num 15 | 16 | def tokens_num_from_chat_context(chat_context: list, model: str) -> int: 17 | """Returns the number of tokens used by a list of messages.""" 18 | try: 19 | encoding = tiktoken.encoding_for_model(model) 20 | except KeyError: 21 | gptui_logger.warning("Warning when calculating tokens num: model not found. Using cl100k_base encoding.") 22 | encoding = tiktoken.get_encoding("cl100k_base") 23 | if model == "gpt-3.5-turbo": 24 | gptui_logger.warning("Warning when calculating tokens num: gpt-3.5-turbo may change over time. Returning tokens num assuming gpt-3.5-turbo-1106.") 25 | return tokens_num_from_chat_context(chat_context, model="gpt-3.5-turbo-1106") 26 | elif model == "gpt-4": 27 | gptui_logger.warning("Warning when calculating tokens num: gpt-4 may change over time. 
Returning tokens num assuming gpt-4-0613.") 28 | return tokens_num_from_chat_context(chat_context, model="gpt-4-0613") 29 | elif model == "gpt-3.5-turbo-0301": 30 | tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n 31 | tokens_per_name = -1 # if there's a name, the role is omitted 32 | elif model in { 33 | "gpt-3.5-turbo-0613", 34 | "gpt-3.5-turbo-16k-0613", 35 | "gpt-4-0314", 36 | "gpt-4-32k-0314", 37 | "gpt-4-0613", 38 | "gpt-4-32k-0613", 39 | "gpt-3.5-turbo-1106", # Unverified. 40 | "gpt-4-1106-preview", # Unverified. 41 | }: 42 | tokens_per_message = 3 43 | tokens_per_name = 1 44 | else: 45 | gptui_logger.error(f"""tokens_num_from_chat_context() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""") 46 | raise NotImplementedError(f"""tokens_num_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""") 47 | tokens_num = 0 48 | for message in chat_context: 49 | tokens_num += tokens_per_message 50 | for key, value in message.items(): 51 | tokens_num += len(encoding.encode(str(value)) if value else []) 52 | if key == "name": 53 | tokens_num += tokens_per_name 54 | tokens_num += 3 # every reply is primed with <|start|>assistant<|message|> 55 | return tokens_num 56 | 57 | def tokens_num_for_functions_call(functions_info: list[dict], model: str) -> int: 58 | return tokens_num_from_string(repr(functions_info), model=model) 59 | -------------------------------------------------------------------------------- /docs/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Potential issues and solutions when installing on Termux 2 | 3 | ## Installing Termux-API 4 | 5 | Some functionalities require the support of Termux-API, such as copying code snippets and voice features. 6 | To install Termux-API, you need to: 7 | 1. Install the Termux-API plugin. The Termux:API application can be obtained from [F-Droid](https://f-droid.org/en/packages/com.termux.api/). 8 | 2. After installing Termux-API, you also need to execute `pkg install termux-api` in Termux to install the corresponding package. 9 | 3. Grant the necessary permissions to Termux-API. 10 | 11 | ## Installing numpy 12 | 13 | First, ensure that numpy is installed. You can use `pkg install python-numpy` to install numpy, referring to [Termux Wiki](https://wiki.termux.com/wiki/Python). If using a virtual environment, you might need to use `python -m venv --system-site-packages ` to make python-numpy available within the virtual environment. 14 | 15 | ## Installing lxml 16 | 17 | ``` 18 | pkg update && pkg upgrade 19 | pkg install python libxml2 libxslt pkg-config 20 | pip install cython wheel 21 | CFLAGS="-Wno-error=incompatible-function-pointer-types -O0" pip install lxml 22 | ``` 23 | 24 | ## Possible issues when installing qdrant-client on Termux 25 | 26 | ### Installation of maturin is required 27 | 28 | ``` 29 | pkg rem binutils -y 30 | apt autoremove 31 | pkg i binutils-is-llvm rust -y 32 | pip install maturin 33 | ``` 34 | 35 | ### Installation of grpcio is required 36 | 37 | Make sure you have the header `ares.h` in `/data/data/com.termux/files/usr/include`. 38 | You can install it using: `pkg install c-ares`. 
39 | Then: 40 | 41 | ``` 42 | GRPC_PYTHON_DISABLE_LIBC_COMPATIBILITY=1 \ 43 | GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 \ 44 | GRPC_PYTHON_BUILD_SYSTEM_ZLIB=1 \ 45 | GRPC_PYTHON_BUILD_SYSTEM_CARES=1 \ 46 | CFLAGS+=" -U__ANDROID_API__ -D__ANDROID_API__=26 -include unistd.h" \ 47 | LDFLAGS+=" -llog" \ 48 | pip install grpcio 49 | ``` 50 | If it still doesn't work, refer to [here](https://github.com/termux/termux-packages/issues/17583). 51 | 52 | ## Failed to build wheel when installing ruamel.yaml.clib 53 | 54 | When installing or updating semantic-kernel to version 0.3.11.dev0 or later, the ruamel.yaml.clib library is required. 55 | If you encounter a "failed to build wheel" error, the solution is as follows: 56 | ``` 57 | pkg upgrade 58 | pkg install build-essential python 59 | CFLAGS="-Wno-incompatible-function-pointer-types" pip install ruamel.yaml.clib 60 | ``` 61 | 62 | ## Failed to build wheel for `playsound` 63 | 64 | If you encounter a 'Failed to build wheel' error during the installation of playsound, 65 | please ensure that `wheel` and `setuptools` are installed on your system. 66 | You can install these build tools by running the command 67 | ``` 68 | pip install wheel setuptools 69 | ``` 70 | in your terminal or command prompt. 71 | 72 | 73 | # Potential issues and solutions on MacOS 74 | 75 | ## The audio playback function is not working properly 76 | 77 | ``` 78 | pip3 install PyObjC 79 | ``` 80 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/base.py: -------------------------------------------------------------------------------- 1 | """Abstract interface for document loader implementations.""" 2 | from abc import ABC, abstractmethod 3 | from typing import Iterator, List, Optional 4 | 5 | from .blob_loaders import Blob 6 | from ..schema import Document 7 | from ..text_splitter import RecursiveCharacterTextSplitter, TextSplitter 8 | 9 | 10 | class BaseLoader(ABC): 11 | """Interface for loading documents. 12 | 13 | Implementations should implement the lazy-loading method using generators 14 | to avoid loading all documents into memory at once. 15 | 16 | The `load` method will remain as is for backwards compatibility, but its 17 | implementation should be just `list(self.lazy_load())`. 18 | """ 19 | 20 | # Sub-classes should implement this method 21 | # as return list(self.lazy_load()). 22 | # This method returns a List which is materialized in memory. 23 | @abstractmethod 24 | def load(self) -> List[Document]: 25 | """Load data into document objects.""" 26 | 27 | def load_and_split( 28 | self, text_splitter: Optional[TextSplitter] = None 29 | ) -> List[Document]: 30 | """Load documents and split into chunks.""" 31 | if text_splitter is None: 32 | _text_splitter: TextSplitter = RecursiveCharacterTextSplitter() 33 | else: 34 | _text_splitter = text_splitter 35 | docs = self.load() 36 | return _text_splitter.split_documents(docs) 37 | 38 | # Attention: This method will be upgraded into an abstractmethod once it's 39 | # implemented in all the existing subclasses. 40 | def lazy_load( 41 | self, 42 | ) -> Iterator[Document]: 43 | """A lazy loader for document content.""" 44 | raise NotImplementedError( 45 | f"{self.__class__.__name__} does not implement lazy_load()" 46 | ) 47 | 48 | 49 | class BaseBlobParser(ABC): 50 | """Abstract interface for blob parsers. 51 | 52 | A blob parser provides a way to parse raw data stored in a blob into one 53 | or more documents. 
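A minimal illustrative sketch (the parser class and its behavior are hypothetical; Blob.as_string() is assumed to expose the blob's text): class UpperCaseParser(BaseBlobParser): def lazy_parse(self, blob: Blob) -> Iterator[Document]: yield Document(page_content=blob.as_string().upper())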
54 | 55 | The parser can be composed with blob loaders, making it easy to re-use 56 | a parser independent of how the blob was originally loaded. 57 | """ 58 | 59 | @abstractmethod 60 | def lazy_parse(self, blob: Blob) -> Iterator[Document]: 61 | """Lazy parsing interface. 62 | 63 | Subclasses are required to implement this method. 64 | 65 | Args: 66 | blob: Blob instance 67 | 68 | Returns: 69 | Generator of documents 70 | """ 71 | 72 | def parse(self, blob: Blob) -> List[Document]: 73 | """Eagerly parse the blob into a document or documents. 74 | 75 | This is a convenience method for interactive development environments. 76 | 77 | Production applications should favor the lazy_parse method instead. 78 | 79 | Subclasses should generally not override this parse method. 80 | 81 | Args: 82 | blob: Blob instance 83 | 84 | Returns: 85 | List of documents 86 | """ 87 | return list(self.lazy_parse(blob)) 88 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/schema/document.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from abc import ABC, abstractmethod 4 | from typing import Any, Sequence 5 | 6 | from ..load.serializable import Serializable 7 | from ..pydantic_v1 import Field 8 | 9 | 10 | class Document(Serializable): 11 | """Class for storing a piece of text and associated metadata.""" 12 | 13 | page_content: str 14 | """String text.""" 15 | metadata: dict = Field(default_factory=dict) 16 | """Arbitrary metadata about the page content (e.g., source, relationships to other 17 | documents, etc.). 18 | """ 19 | 20 | @classmethod 21 | def is_lc_serializable(cls) -> bool: 22 | """Return whether this class is serializable.""" 23 | return True 24 | 25 | 26 | class BaseDocumentTransformer(ABC): 27 | """Abstract base class for document transformation systems. 28 | 29 | A document transformation system takes a sequence of Documents and returns a 30 | sequence of transformed Documents. 31 | 32 | Example: 33 | .. code-block:: python 34 | 35 | class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel): 36 | embeddings: Embeddings 37 | similarity_fn: Callable = cosine_similarity 38 | similarity_threshold: float = 0.95 39 | 40 | class Config: 41 | arbitrary_types_allowed = True 42 | 43 | def transform_documents( 44 | self, documents: Sequence[Document], **kwargs: Any 45 | ) -> Sequence[Document]: 46 | stateful_documents = get_stateful_documents(documents) 47 | embedded_documents = _get_embeddings_from_stateful_docs( 48 | self.embeddings, stateful_documents 49 | ) 50 | included_idxs = _filter_similar_embeddings( 51 | embedded_documents, self.similarity_fn, self.similarity_threshold 52 | ) 53 | return [stateful_documents[i] for i in sorted(included_idxs)] 54 | 55 | async def atransform_documents( 56 | self, documents: Sequence[Document], **kwargs: Any 57 | ) -> Sequence[Document]: 58 | raise NotImplementedError 59 | 60 | """ # noqa: E501 61 | 62 | @abstractmethod 63 | def transform_documents( 64 | self, documents: Sequence[Document], **kwargs: Any 65 | ) -> Sequence[Document]: 66 | """Transform a list of documents. 67 | 68 | Args: 69 | documents: A sequence of Documents to be transformed. 70 | 71 | Returns: 72 | A list of transformed Documents. 73 | """ 74 | 75 | @abstractmethod 76 | async def atransform_documents( 77 | self, documents: Sequence[Document], **kwargs: Any 78 | ) -> Sequence[Document]: 79 | """Asynchronously transform a list of documents.
80 | 81 | Args: 82 | documents: A sequence of Documents to be transformed. 83 | 84 | Returns: 85 | A list of transformed Documents. 86 | """ 87 | -------------------------------------------------------------------------------- /src/gptui/plugins/DEFAULT_PLUGINS/CoreSkills.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | 4 | import geocoder 5 | from semantic_kernel.orchestration.sk_context import SKContext 6 | from semantic_kernel.sk_pydantic import PydanticField 7 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 8 | 9 | from gptui.gptui_kernel.manager import auto_init_params 10 | 11 | 12 | gptui_logger = logging.getLogger("gptui_logger") 13 | 14 | 15 | class TimeSkill(PydanticField): 16 | 17 | @sk_function(description="Get the current date and time in the local time zone") 18 | def now(self) -> str: 19 | """ 20 | Get the current date and time in the local time zone 21 | 22 | Example: 23 | {{time.now}} => Sunday, January 12, 2031 9:15 PM 24 | """ 25 | now = datetime.datetime.now() 26 | return now.strftime("%A, %B %d, %Y %I:%M %p") 27 | 28 | 29 | class LocationSkill: 30 | def __init__(self, manager): 31 | self.manager = manager 32 | 33 | @auto_init_params("0") 34 | @classmethod 35 | def get_init_params(cls, manager) -> tuple: 36 | return (manager,) 37 | 38 | @sk_function(description="Get the user's city") 39 | def city(self) -> str: 40 | config = self.manager.client.config 41 | city = config.get("location_city") 42 | if not city: 43 | city = geocoder.ip('me').city 44 | return str(city) 45 | 46 | 47 | class MathSkill(PydanticField): 48 | 49 | @sk_function( 50 | description="Calculate addition, subtraction, multiplication, and division", 51 | name="calculate", 52 | ) 53 | @sk_function_context_parameter( 54 | name="first_number", 55 | description="The first number", 56 | ) 57 | @sk_function_context_parameter( 58 | name="second_number", 59 | description="The second number", 60 | ) 61 | @sk_function_context_parameter( 62 | name="operation", 63 | description="The operation to be performed. Options: ['addition', 'subtraction', 'multiplication', 'division']", 64 | ) 65 | def calculate(self, context: SKContext) -> str: 66 | first_num = context["first_number"] 67 | second_num = context["second_number"] 68 | operation = context["operation"] 69 | 70 | try: 71 | first_num = float(first_num) 72 | second_num = float(second_num) 73 | except ValueError: 74 | return "The values provided are not in numeric format." 75 | 76 | if operation not in ['addition', 'subtraction', 'multiplication', 'division']: 77 | return "The provided operation name is incorrect.
It must be one of 'addition', 'subtraction', 'multiplication', or 'division'" 78 | 79 | if operation == 'addition': 80 | result = first_num + second_num 81 | elif operation == 'subtraction': 82 | result = first_num - second_num 83 | elif operation == 'multiplication': 84 | result = first_num * second_num 85 | else: 86 | # Guard against ZeroDivisionError so the skill returns a readable error message. 87 | if second_num == 0: 88 | return "Division by zero is not allowed." 89 | result = first_num / second_num 90 | 91 | return str(result) 92 | -------------------------------------------------------------------------------- /src/gptui/views/wink_wink.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from abc import ABC, abstractmethod 4 | 5 | from rich.text import TextType 6 | 7 | 8 | gptui_logger = logging.getLogger("gptui_logger") 9 | 10 | 11 | class Happy(ABC): 12 | 13 | @abstractmethod 14 | def refresh(self, content, apple_width: int, apple_height: int) -> None: 15 | ... 16 | 17 | @property 18 | @abstractmethod 19 | def canvas_width(self) -> int: 20 | ... 21 | 22 | @property 23 | @abstractmethod 24 | def canvas_height(self) -> int: 25 | ... 26 | 27 | 28 | class Apple(ABC): 29 | 30 | @abstractmethod 31 | def frame(self, inp) -> tuple[bool, tuple[float, TextType]]: 32 | ... 33 | 34 | @property 35 | @abstractmethod 36 | def canvas_width(self) -> int: 37 | ... 38 | 39 | @property 40 | @abstractmethod 41 | def canvas_height(self) -> int: 42 | ... 43 | 44 | 45 | class Horse: 46 | def __init__(self): 47 | self.input = None 48 | self.happy = None 49 | self.apple = None 50 | self.run_status = False 51 | self._stop_flag = False 52 | self._stop_async_flag = False 53 | self._stop_async_event = asyncio.Event() 54 | 55 | def set_happy(self, happy: Happy) -> None: 56 | self.happy = happy 57 | 58 | def refresh_input(self, user_input: str) -> None: 59 | self.input = user_input 60 | 61 | def stop(self): 62 | self._stop_flag = True 63 | 64 | async def stop_async(self): 65 | if self.run_status is False: 66 | return 67 | self._stop_async_event.clear() 68 | self._stop_async_flag = True 69 | await self._stop_async_event.wait() 70 | 71 | async def run(self, apple: Apple, size_check: bool = True) -> bool: 72 | self.run_status = True 73 | assert self.happy is not None 74 | self._stop_flag = False 75 | self._stop_async_flag = False 76 | self.apple = apple 77 | if size_check: 78 | if (apple.canvas_width > self.happy.canvas_width) or (apple.canvas_height > self.happy.canvas_height): 79 | self.run_status = False 80 | return False 81 | 82 | status = True 83 | while status: 84 | inp = self.input 85 | self.input = None 86 | status, frame_info = apple.frame(inp) 87 | if self._stop_flag or self._stop_async_flag: 88 | break 89 | await self.frame_handle(frame_info) 90 | 91 | if self._stop_async_flag: 92 | self._stop_async_event.set() 93 | self.run_status = False 94 | return True 95 | 96 | async def frame_handle(self, frame_info: tuple[float, TextType] | None) -> None: 97 | if frame_info is None: 98 | return 99 | interval, frame = frame_info 100 | await asyncio.sleep(interval) 101 | self.happy_render(frame) 102 | 103 | def happy_render(self, content: TextType): 104 | assert self.happy is not None 105 | assert self.apple is not None 106 | self.happy.refresh(content, self.apple.canvas_width, self.apple.canvas_height) 107 | -------------------------------------------------------------------------------- /src/gptui/__main__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import importlib.resources 3 | import logging 4 | import os 5 | import shutil 6 | import
sys 7 | import yaml 8 | 9 | from .__init__ import __version__ as gptui_version 10 | from .views.tui import MainApp 11 | 12 | APP_VERSION = gptui_version 13 | 14 | 15 | class ConfigManager: 16 | 17 | @staticmethod 18 | def get_config_path_from_args(): 19 | parser = argparse.ArgumentParser(description="gptui cli") 20 | parser.add_argument('--config', type=str, help='Path to the configuration file.') 21 | args = parser.parse_args() 22 | return os.path.expanduser(args.config) if args.config else None 23 | 24 | @staticmethod 25 | def copy_default_config_to_user_home(): 26 | default_config_path = importlib.resources.files("gptui") / "config.yml" 27 | user_config_path = os.path.expanduser('~/.gptui/.config.yml') 28 | 29 | if not os.path.exists(user_config_path): 30 | target_dir = os.path.dirname(user_config_path) 31 | os.makedirs(target_dir, exist_ok=True) 32 | shutil.copy(default_config_path, user_config_path) 33 | return user_config_path 34 | 35 | @staticmethod 36 | def get_config_path(): 37 | config_path = ConfigManager.get_config_path_from_args() 38 | 39 | if config_path: 40 | if os.path.exists(config_path): 41 | return config_path 42 | else: 43 | print(f"Config file '{config_path}' does not exist.") 44 | sys.exit(1) 45 | 46 | user_config_path = os.path.expanduser('~/.gptui/.config.yml') 47 | if os.path.exists(user_config_path): 48 | return user_config_path 49 | 50 | return ConfigManager.copy_default_config_to_user_home() 51 | 52 | def gptui(): 53 | config_path = ConfigManager.get_config_path() 54 | gptui_run(config_path=config_path) 55 | 56 | def gptui_run(config_path: str) -> None: 57 | # Retrieve config from config path. 58 | try: 59 | with open(os.path.join(os.path.dirname(__file__), '.default_config.yml'), "r") as default_config_file: 60 | config = yaml.safe_load(default_config_file) 61 | except FileNotFoundError: 62 | print("Default config file '.default_config.yml' was not found.") 63 | sys.exit(1) 64 | try: 65 | with open(config_path, "r") as config_file: 66 | user_config = yaml.safe_load(config_file) 67 | except FileNotFoundError: 68 | pass 69 | else: 70 | config.update(user_config) 71 | 72 | log_path = os.path.expanduser(config["log_path"]) 73 | log_level_dict = { 74 | "DEBUG": logging.DEBUG, 75 | "INFO": logging.INFO, 76 | "WARNING": logging.WARNING, 77 | "ERROR": logging.ERROR, 78 | "CRITICAL": logging.CRITICAL, 79 | } 80 | 81 | log_dir = os.path.dirname(log_path) 82 | if not os.path.exists(log_dir): 83 | os.makedirs(log_dir) 84 | 85 | logging.basicConfig( 86 | filename=log_path, 87 | filemode='w', 88 | level=log_level_dict.get(config["log_level"], logging.INFO), 89 | format="%(asctime)s - %(name)s - %(levelname)s -[%(funcName)s] - %(message)s", 90 | ) 91 | gptui_logger = logging.getLogger('gptui_logger') 92 | 93 | app = MainApp(config_path, app_version=APP_VERSION) 94 | reply = app.run() 95 | if reply: 96 | print(reply) 97 | 98 | 99 | if __name__ == "__main__": 100 | 101 | config_path = os.path.join(os.path.dirname(__file__), "config.yml") 102 | gptui_run(config_path=config_path) 103 | -------------------------------------------------------------------------------- /src/gptui/controllers/tube_files_control.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import aiofiles 4 | 5 | from ..models.doc import Doc 6 | from ..models.skills import UploadFile 7 | from ..utils.my_text import MyText as Text 8 | from ..views.theme import theme_color as tc 9 | 10 | 11 | gptui_logger = logging.getLogger("gptui_logger") 12 | 13 | 14 |
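# A minimal usage sketch (hypothetical; `displayer` stands for any object with an
# `update(renderable)` method, such as the status widget the TUI passes in):
#
#     tube_files = TubeFiles(displayer=status_widget)
#     content = await tube_files.read_file_async("/tmp/notes.txt")
#     if content is not None:
#         await tube_files.write_file_async("/tmp/notes_copy.txt", content)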
class TubeFiles: 15 | def __init__(self, displayer): 16 | self.displayer = displayer 17 | 18 | async def insert_files(self, *docs: Doc, input: str) -> str: 19 | if not docs: 20 | return input 21 | upload_file = UploadFile() 22 | return await upload_file.import_file_to_context(*docs, input=input) 23 | 24 | async def read_file_async(self, path: str, encoding="UTF-8") -> None | str: 25 | """ 26 | Read a file 27 | Args: 28 | path -- The path to the file to read 29 | Returns: 30 | The contents of the file 31 | """ 32 | try: 33 | async with aiofiles.open(path, "r", encoding=encoding) as fp: 34 | content = await fp.read() 35 | except FileNotFoundError: 36 | gptui_logger.error("File or directory not found") 37 | self.displayer.update(Text("File or directory not found", tc("yellow") or "yellow")) 38 | return 39 | except IsADirectoryError: 40 | gptui_logger.error("Specified path is a directory, not a file") 41 | self.displayer.update(Text("Specified path is a directory, not a file", tc("yellow") or "yellow")) 42 | return 43 | except UnicodeDecodeError: 44 | gptui_logger.error("File is not encoded properly") 45 | self.displayer.update(Text("File is not encoded properly", tc("yellow") or "yellow")) 46 | return 47 | except IOError as e: 48 | gptui_logger.error(f"An I/O error occurred: {e}") 49 | self.displayer.update(Text("An I/O error occurred", tc("yellow") or "yellow")) 50 | return 51 | except Exception as e: 52 | gptui_logger.error(f"Read file failed. An unexpected error occurred: {e}") 53 | self.displayer.update(Text(f"Read file failed. An unexpected error occurred: {e}", tc("yellow") or "yellow")) 54 | return 55 | else: 56 | return content 57 | 58 | async def write_file_async(self, file_path: str, file_content) -> bool: 59 | """ 60 | Write a file 61 | """ 62 | assert file_content is not None, "Content is required and should not be empty" 63 | assert file_path is not None, "Path is required and should not be empty" 64 | try: 65 | async with aiofiles.open(file_path, "w") as fp: 66 | await fp.write(file_content) 67 | except FileNotFoundError: 68 | gptui_logger.error("File or directory not found") 69 | self.displayer.update(Text("File or directory not found", tc("yellow") or "yellow")) 70 | return False 71 | except IsADirectoryError: 72 | gptui_logger.error("Specified path is a directory, not a file") 73 | self.displayer.update(Text("Specified path is a directory, not a file", tc("yellow") or "yellow")) 74 | return False 75 | except IOError as e: 76 | gptui_logger.error(f"An I/O error occurred: {e}") 77 | self.displayer.update(Text("An I/O error occurred", tc("yellow") or "yellow")) 78 | return False 79 | except Exception as e: 80 | gptui_logger.error(f"Write file failed. An unexpected error occurred: {e}") 81 | self.displayer.update(Text(f"Write file failed.
An unexpected error occurred: {e}", tc("yellow") or "yellow")) 82 | return False 83 | else: 84 | return True 85 | -------------------------------------------------------------------------------- /src/gptui/models/skills.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import logging 3 | import os 4 | from abc import ABCMeta, abstractmethod 5 | 6 | import semantic_kernel as sk 7 | from semantic_kernel.template_engine.prompt_template_engine import PromptTemplateEngine 8 | 9 | from .doc import Doc 10 | from ..gptui_kernel.manager import Manager 11 | 12 | 13 | gptui_logger = logging.getLogger("gptui_logger") 14 | 15 | 16 | class UploadFileInterface(metaclass=ABCMeta): 17 | @abstractmethod 18 | async def import_file_to_context(self, *docs: Doc, input: str) -> str: 19 | ... 20 | 21 | 22 | class Skills: 23 | def __init__(self, manager: Manager): 24 | self.manager = manager 25 | 26 | async def conversation_remember(self, conversation: str): 27 | to_string_function = self.manager.gk_kernel.sk_kernel.skills.get_function("conversation_service", "conversation_to_string") 28 | conversation_string = to_string_function(conversation) 29 | remember_function = self.manager.gk_kernel.sk_kernel.skills.get_function("conversation_service", "conversation_remember") 30 | summary = await remember_function.invoke_async(conversation_string) 31 | return str(summary) 32 | 33 | 34 | class UploadFile(UploadFileInterface): 35 | 36 | UPLOAD_FILE_PROMPT_PATH = os.path.join(os.path.dirname(__file__), "gptui_basic_services", "templates", "upload_file_prompt.txt") 37 | 38 | def __init__(self): 39 | self.prompt_template_engine = PromptTemplateEngine() 40 | self.kernel = sk.Kernel() 41 | try: 42 | with open(UploadFile.UPLOAD_FILE_PROMPT_PATH, "r") as import_prompt: 43 | self.template_text = import_prompt.read() 44 | except FileNotFoundError: 45 | gptui_logger.error("File not found") 46 | #self.app.query_one("#status_region").update(Text("File not found",'yellow')) 47 | except IsADirectoryError: 48 | gptui_logger.error("Specified path is a directory, not a file") 49 | #self.app.query_one("#status_region").update(Text("Specified path is a directory, not a file",'yellow')) 50 | except UnicodeDecodeError: 51 | gptui_logger.error("File is not encoded properly") 52 | #self.app.query_one("#status_region").update(Text("File is not encoded properly",'yellow')) 53 | except IOError as e: 54 | #self.app.query_one("#status_region").update(Text(f"An I/O error occurred: {e}",'yellow')) 55 | gptui_logger.error(f"An I/O error occurred: {e}") 56 | except Exception as e: 57 | #self.app.query_one("#status_region").update(Text('Have not successfully read memory.','yellow')) 58 | gptui_logger.error(f"Read file failed.
An unexpected error occurred: {e}") 59 | 60 | async def import_file_to_context(self, *docs: Doc, input: str) -> str: 61 | if len(docs) >= 1: 62 | files_content = '' 63 | for index, doc in enumerate(docs[:-1]): 64 | file_title = f"===== Document #{index + 1} {doc.name + doc.ext} =====\n\n" 65 | files_content += file_title 66 | files_content += doc.content 67 | files_content += "\n\n" + "=" * (len(file_title) - 2) + "\n\n" 68 | file_title_last = f"===== Document #{len(docs)} {docs[-1].name + docs[-1].ext} =====\n\n" 69 | files_content += file_title_last 70 | files_content += docs[-1].content 71 | files_content += "\n\n" + "=" * (len(file_title_last) - 2) 72 | else: 73 | raise ValueError("There is no document!") 74 | context = self.kernel.create_new_context() 75 | context["input"] = input 76 | context["file_content"] = files_content 77 | result_prompt = await self.prompt_template_engine.render_async(template_text=self.template_text, context=context) 78 | return result_prompt 79 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | user/ 2 | venv-gptui/ 3 | temp/ 4 | *.log 5 | /litellm_uuid.txt 6 | .DS_Store 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | share/python-wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | cover/ 60 | 61 | # Translations 62 | *.mo 63 | *.pot 64 | 65 | # Django stuff: 66 | *.log 67 | local_settings.py 68 | db.sqlite3 69 | db.sqlite3-journal 70 | 71 | # Flask stuff: 72 | instance/ 73 | .webassets-cache 74 | 75 | # Scrapy stuff: 76 | .scrapy 77 | 78 | # Sphinx documentation 79 | docs/_build/ 80 | 81 | # PyBuilder 82 | .pybuilder/ 83 | target/ 84 | 85 | # Jupyter Notebook 86 | .ipynb_checkpoints 87 | 88 | # IPython 89 | profile_default/ 90 | ipython_config.py 91 | 92 | # pyenv 93 | # For a library or package, you might want to ignore these files since the code is 94 | # intended to run in multiple environments; otherwise, check them in: 95 | # .python-version 96 | 97 | # pipenv 98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 101 | # install all needed dependencies. 102 | #Pipfile.lock 103 | 104 | # poetry 105 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 106 | # This is especially recommended for binary packages to ensure reproducibility, and is more 107 | # commonly ignored for libraries. 
108 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 109 | #poetry.lock 110 | 111 | # pdm 112 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 113 | #pdm.lock 114 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 115 | # in version control. 116 | # https://pdm.fming.dev/#use-with-ide 117 | .pdm.toml 118 | 119 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 120 | __pypackages__/ 121 | 122 | # Celery stuff 123 | celerybeat-schedule 124 | celerybeat.pid 125 | 126 | # SageMath parsed files 127 | *.sage.py 128 | 129 | # Environments 130 | .env 131 | .venv 132 | env/ 133 | venv/ 134 | ENV/ 135 | env.bak/ 136 | venv.bak/ 137 | 138 | # Spyder project settings 139 | .spyderproject 140 | .spyproject 141 | 142 | # Rope project settings 143 | .ropeproject 144 | 145 | # mkdocs documentation 146 | /site 147 | 148 | # mypy 149 | .mypy_cache/ 150 | .dmypy.json 151 | dmypy.json 152 | 153 | # Pyre type checker 154 | .pyre/ 155 | 156 | # pytype static type analyzer 157 | .pytype/ 158 | 159 | # Cython debug symbols 160 | cython_debug/ 161 | 162 | # PyCharm 163 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 164 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 165 | # and can be added to the global gitignore or merged into this file. For a more nuclear 166 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 167 | #.idea/ 168 | -------------------------------------------------------------------------------- /src/gptui/models/signals.py: -------------------------------------------------------------------------------- 1 | import textwrap 2 | from blinker import signal 3 | 4 | 5 | response_to_user_message_stream_signal = signal("response_to_user_message_stream", 6 | doc=textwrap.dedent( 7 | """ 8 | Send the to_user part of response message, which usually should be displayed. 9 | position arg: sender 10 | kwargs: 11 | message {dict}: 12 | content: message from LLM to user in stream 13 | flag {Literal['content','end']}: 'content' means in stream, while 'end' means stream finished 14 | It should end with a signal to finish the stream as below: 15 | signal('response_to_user_message_stream').send(sender, message={'content':'', 'flag':'end'}) 16 | """ 17 | ) 18 | ) 19 | response_to_user_message_sentence_stream_signal = signal("response_to_user_message_sentence_stream", 20 | doc=textwrap.dedent( 21 | """ 22 | Send the to_user part of response message in sentence, which is useful when outputting speech. 23 | position arg: sender 24 | kwargs: 25 | message {dict}: 26 | content: message from LLM to user in sentence stream 27 | flag {Literal['content','end']}: 'content' means in stream, while 'end' means stream finished 28 | It should end with a signal to finish the stream as below: 29 | signal('response_to_user_message_sentence_stream').send(sender, message={'content':'', 'flag':'end'}) 30 | """ 31 | ) 32 | ) 33 | notification_signal = signal("notification", 34 | doc=textwrap.dedent( 35 | """ 36 | Sending notification-type information, such as status information, error messages, warning, etc. 37 | position arg: sender 38 | kwargs: 39 | message {dict}: 40 | content: notification 41 | flag {str}: type of notification, such as 'info', 'warning', 'error', etc. 
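For example (an illustrative call; 'sender' is whatever object emits the signal):
    notification_signal.send(sender, message={'content': 'File saved.', 'flag': 'info'})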
42 | """ 43 | ) 44 | ) 45 | response_auxiliary_message_signal = signal("response_auxiliary_message", 46 | doc=textwrap.dedent( 47 | """ 48 | Sending auxiliary information, such as function call information, other internel messages, etc. 49 | position arg: sender 50 | kwargs: 51 | message {dict}: 52 | content: auxiliary message 53 | flag {str}: type of auxiliary message 54 | """ 55 | ) 56 | ) 57 | chat_context_extend_signal = signal("chat_context_extend", 58 | doc=textwrap.dedent( 59 | """ 60 | Sending a notification to save the chat context. 61 | position arg: sender 62 | kwargs: 63 | message {dict}: 64 | content {dict}: chat context information 65 | { 66 | "messages" {list}: messages 67 | "context": context which is extended to 68 | } 69 | flag:"" 70 | """ 71 | ) 72 | ) 73 | chat_context_extend_for_sending_signal = signal("chat_context_extend_for_sending", 74 | doc=textwrap.dedent( 75 | """ 76 | Sending a notification to append or extend the chat context for sending. 77 | position arg: sender 78 | kwargs: 79 | message {dict}: 80 | content: dict, chat context information 81 | { 82 | "messages" {list}: messages 83 | "context": context which is extended to 84 | } 85 | flag:"" 86 | """ 87 | ) 88 | ) 89 | common_message_signal = signal("common_message", 90 | doc=textwrap.dedent( 91 | """ 92 | Designed to send general messages. 93 | Different message structures can be achieved through flexible use of 'content' and 'flag'. 94 | position arg: sender 95 | kwargs: 96 | message {dict}: 97 | content: message content 98 | flag {str}: type of the common message 99 | """ 100 | ) 101 | ) 102 | -------------------------------------------------------------------------------- /docs/configuration.zh.md: -------------------------------------------------------------------------------- 1 | ## 配置说明 2 | GPTUI提供了丰富的可配置选项,使用yaml文件格式进行配置。要了解yaml格式的基本语法,可以看[这里](https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html)。 3 | 实际的配置选项和默认值以配置文件中的内容为准,此文档可能会落后于实际配置文件的更新。 4 | 5 | 在配置文件中,被注释掉的配置项表明该配置项拥有默认配置,可以不用配置。 6 | 但请注意,当修改一个列表的值时,列表将作为一个整体被修改,也就是说不能单独覆盖一个具有默认配置的列表的一部分,因为这样会将其它的选项清除。 7 | 例如,要将status_region_default设置为“GPTUI Welcome",由于它具有以下的默认配置: 8 | ``` 9 | #tui_config: 10 | # conversations_recover: true 11 | # voice_switch: false 12 | # speak_switch: false 13 | # file_wrap_display: true 14 | # ai_care_switch: true 15 | # ai_care_depth: 2 16 | # ai_care_delay: 60 17 | # status_region_default: 18 | # waiting_receive_animation: "default" 19 | ``` 20 | 你需要将整个tui_config列表修改为: 21 | ``` 22 | tui_config: 23 | conversations_recover: true 24 | voice_switch: false 25 | speak_switch: false 26 | file_wrap_display: true 27 | ai_care_switch: true 28 | ai_care_depth: 2 29 | ai_care_delay: 60 30 | status_region_default: 31 | waiting_receive_animation: "GPTUI Welcome" 32 | ``` 33 | 而不能是这样: 34 | ``` 35 | tui_config: 36 | # conversations_recover: true 37 | # voice_switch: false 38 | # speak_switch: false 39 | # file_wrap_display: true 40 | # ai_care_switch: true 41 | # ai_care_depth: 2 42 | # ai_care_delay: 60 43 | # status_region_default: 44 | waiting_receive_animation: "default" 45 | ``` 46 | 47 | ## 恢复默认配置 48 | 你可以直接删除配置文件,例如`rm ~/.gptui/.config.yml`,程序将在下次启动时自动重新下载默认的配置文件。配置文件查找策略请参考[这里](https://github.com/happyapplehorse/gptui/blob/main/README.zh.md#使用pip安装)。 49 | 50 | ## 配置选项 51 | 目前,你可以进行以下配置: 52 | 53 | ### GPTUI_BASIC_SERVICES_PATH 54 | 这是GPTUI基础服务组件的目录,在不修改源代码的情况下,不做更改。 55 | 56 | ### PLUGIN_PATH 57 | 这是GPTUI内置插件的路径,在不修改源代码的情况下,不做更改。 58 | 59 | ### DEFAULT_PLUGIN_PATH 60 | 
This is the path of GPTUI's built-in default plugins; these plugins are not shown in the plugin list and are enabled automatically. Do not change it unless you modify the source code. 61 | 62 | ### custom_plugin_path 63 | This is the directory for GPTUI's custom plugins and can be modified. The default value is `~/.gptui/plugins/`. 64 | 65 | ### dot_env_path 66 | This option specifies the path of the file that configures environment variables; API keys are configured in the file specified here. The default value is `~/.gptui/.env_gptui`. 67 | 68 | ### default_openai_parameters 69 | This option is a dictionary that specifies the default parameter configuration for chatting with GPT. 70 | 71 | ### default_conversation_parameters 72 | This option is a dictionary that specifies GPTUI's default conversation parameters. 73 | - `max_sent_tokens_raito`: a float value that sets the maximum number of sent tokens as a proportion of the model's whole token window. For example, if the model's token window is 1000 and this parameter is set to 0.6, then when the chat context to be sent exceeds 600 tokens, it is automatically truncated to below 600, and the remaining 400 tokens serve as the window for the model's reply tokens. Because the model's token window is the sum of sent and received tokens, without this setting the sent tokens might occupy so much context length that the model cannot reply or replies incompletely. 74 | 75 | ### tui_config 76 | This option is a dictionary for GPTUI's default TUI configuration. 77 | - `conversations_recover`: bool value; sets the default of GPTUI's "Recovery" switch, i.e. whether to automatically save and restore GPTUI's state. 78 | - `voice_switch`: bool value; sets the default of the "Voice" switch, i.e. whether the voice conversation feature is enabled. 79 | - `speak_switch`: bool value; sets the default of the "Speak" switch, i.e. whether replies are read aloud. 80 | - `file_wrap_display`: bool value; sets the default of the "Fold File" switch, i.e. whether file content is automatically folded into a file icon. 81 | - `ai_care_switch`: bool value; sets the default of the "AI-Care" switch, i.e. whether the AI-Care feature is enabled. 82 | - `ai_care_depth`: int value; sets the maximum number of times AI-Care speaks proactively without a user response. 83 | - `ai_care_delay`: int value, in seconds; sets the delayed start time of AI-Care. After a conversation completes, AI-Care only takes effect after this delay. 84 | - `status_region_default`: str value; sets the default content shown in the status display area. 85 | - `waiting_receive_animation`: a specific string value; sets the type of the waiting animation, with the default value `"default"`. 86 | 87 | ### log_path 88 | Sets the path of the log file. The default is `~/.gptui/logs.log`. 89 | 90 | ### workpath 91 | Sets GPTUI's working path. The default is `~/.gptui/user`; by default, the vector database, temporary files, etc. are stored in this directory. 92 | 93 | ### directory_tree_path 94 | The root directory of the file system that GPTUI can display. The default is `~/`. When importing or exporting files, GPTUI can only show files and folders under this directory. 95 | 96 | ### conversation_path 97 | Sets the file path used when exporting and importing GPTUI conversation records. The default is `~/.gptui/user/conversations`. 98 | 99 | ### vector_memory_path 100 | Sets the path of the vector database. The default is `~/.gptui/user/vector_memory_database`. 101 | 102 | ### terminal 103 | Sets the terminal in use. Tested terminals include `termux` and `wezterm`. 104 | 105 | ### os 106 | Sets the platform in use; four options are provided: 107 | - termux 108 | - linux 109 | - macos 110 | - windows 111 | 112 | Since Termux is not a complete Linux system, it is treated as a separate option. 113 | 114 | ### default_plugins_used 115 | This option is a list that sets the plugins enabled by default; both built-in and custom plugins can have their default enabled state set here. 116 | 117 | ### location_city 118 | Sets your geographic location so that the LLM can obtain your position information; it can be set to your city name or left unset. 119 | 120 | ### log_level 121 | Sets the logging level. 122 | 123 | ### openai_model_info 124 | This option is a dictionary that stores model information for each model; a model's tokens_window is set here. For example: 125 | ``` 126 | openai_model_info: 127 | gpt-4-1106-preview: 128 | tokens_window: 128000 129 | gpt-4-0613: 130 | tokens_window: 8192 131 | ``` 132 | -------------------------------------------------------------------------------- /src/gptui/config.yml: -------------------------------------------------------------------------------- 1 | #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# 2 | #% Note: A list is configured as a whole.
%# 3 | #% For instance, to modify 'status_region_default' to 'GPTUI Welcome!', %# 4 | #% one should write it as: %# 5 | #% tui_config: %# 6 | #% conversations_recover: true %# 7 | #% voice_switch: false %# 8 | #% speak_switch: false %# 9 | #% file_wrap_display: true %# 10 | #% status_region_default: GPTUI Welcome %# 11 | #% not: %# 12 | #% tui_config: %# 13 | #% # conversations_recover: true %# 14 | #% # voice_switch: false %# 15 | #% # speak_switch: false %# 16 | #% # file_wrap_display: true %# 17 | #% status_region_default: GPTUI Welcome %# 18 | #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%# 19 | 20 | 21 | #GPTUI_BASIC_SERVICES_PATH: 22 | #PLUGIN_PATH: 23 | #DEFAULT_PLUGIN_PATH: 24 | #custom_plugin_path: ~/.gptui/plugins/ 25 | 26 | #% API keys 27 | #dot_env_path: 28 | # ~/.gptui/.env_gptui 29 | 30 | default_openai_parameters: 31 | model: gpt-4-1106-preview 32 | # This particular setting is used for 'gpt-4-1106-preview', as it outputs a maximum of 4096 tokens. 33 | # If using other models, 'max_tokens' can either be left unset or set according to your requirements. 34 | max_tokens: 4096 35 | #model: gpt-3.5-turbo 36 | stream: true 37 | 38 | #default_conversation_parameters: 39 | # max_sent_tokens_raito: 0.6 40 | 41 | #tui_config: 42 | # conversations_recover: true 43 | # voice_switch: false 44 | # speak_switch: false 45 | # file_wrap_display: true 46 | # ai_care_switch: true 47 | # ai_care_depth: 2 48 | # ai_care_delay: 60 49 | # status_region_default: 50 | # waiting_receive_animation: "default" 51 | 52 | #log_path: 53 | # ~/.gptui/logs.log 54 | 55 | #% Program working path, storing vector database, temporary files, etc. 56 | #workpath: 57 | # ~/.gptui/user 58 | 59 | #% Scope of files discoverable by the program 60 | #directory_tree_path: 61 | # ~/ 62 | 63 | #% Conversation history save and import path 64 | #conversation_path: 65 | # ~/.gptui/user/conversations 66 | 67 | #vector_memory_path: 68 | # ~/.gptui/user/vector_memory_database 69 | 70 | terminal: 71 | #% Tested terminals: {termux, wezterm} 72 | # termux 73 | wezterm 74 | 75 | os: 76 | # termux 77 | linux 78 | # macos 79 | # windows 80 | 81 | default_plugins_used: [] 82 | #% List of names of plugins used by default 83 | #- WebServe 84 | 85 | #% Set your geographic location. When LLM needs to know your position, it will return this location. It's optional to set.
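#% For example (an illustrative value, not a default):
#% location_city: Shanghai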
86 | location_city: 87 | 88 | log_level: 89 | WARNING 90 | 91 | openai_model_info: 92 | gpt-4-1106-preview: 93 | tokens_window: 128000 94 | 95 | gpt-4: 96 | tokens_window: 8192 97 | 98 | gpt-4-0613: 99 | tokens_window: 8192 100 | 101 | gpt-4-0314: 102 | tokens_window: 8192 103 | 104 | gpt-4-32k: 105 | tokens_window: 32768 106 | 107 | gpt-4-32k-0613: 108 | tokens_window: 32768 109 | 110 | gpt-4-32k-0314: 111 | tokens_window: 32768 112 | 113 | gpt-3.5-turbo: 114 | tokens_window: 4096 115 | 116 | gpt-3.5-turbo-1106: 117 | tokens_window: 16385 118 | 119 | gpt-3.5-turbo-0613: 120 | tokens_window: 4096 121 | 122 | gpt-3.5-turbo-16k: 123 | tokens_window: 16384 124 | 125 | gpt-3.5-turbo-16k-0613: 126 | tokens_window: 16384 127 | 128 | gpt-3.5-turbo-0301: 129 | tokens_window: 4096 130 | 131 | text-davinci-003: 132 | tokens_window: 4097 133 | 134 | text-davinci-002: 135 | tokens_window: 4097 136 | 137 | code-davinci-002: 138 | tokens_window: 8001 139 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/blob_loaders/file_system.py: -------------------------------------------------------------------------------- 1 | """Use to load blobs from the local file system.""" 2 | from pathlib import Path 3 | from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union 4 | 5 | from .schema import Blob, BlobLoader 6 | 7 | T = TypeVar("T") 8 | 9 | 10 | def _make_iterator( 11 | length_func: Callable[[], int], show_progress: bool = False 12 | ) -> Callable[[Iterable[T]], Iterator[T]]: 13 | """Create a function that optionally wraps an iterable in tqdm.""" 14 | if show_progress: 15 | try: 16 | from tqdm.auto import tqdm 17 | except ImportError: 18 | raise ImportError( 19 | "You must install tqdm to use show_progress=True. " 20 | "You can install tqdm with `pip install tqdm`." 21 | ) 22 | 23 | # Make sure to provide `total` here so that tqdm can show 24 | # a progress bar that takes into account the total number of files. 25 | def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]: 26 | """Wrap an iterable in a tqdm progress bar.""" 27 | return tqdm(iterable, total=length_func()) 28 | 29 | iterator = _with_tqdm 30 | else: 31 | iterator = iter # type: ignore 32 | 33 | return iterator 34 | 35 | 36 | # PUBLIC API 37 | 38 | 39 | class FileSystemBlobLoader(BlobLoader): 40 | """Blob loader for the local file system. 41 | 42 | Example: 43 | 44 | .. code-block:: python 45 | 46 | from langchain.document_loaders.blob_loaders import FileSystemBlobLoader 47 | loader = FileSystemBlobLoader("/path/to/directory") 48 | for blob in loader.yield_blobs(): 49 | print(blob) 50 | """ 51 | 52 | def __init__( 53 | self, 54 | path: Union[str, Path], 55 | *, 56 | glob: str = "**/[!.]*", 57 | suffixes: Optional[Sequence[str]] = None, 58 | show_progress: bool = False, 59 | ) -> None: 60 | """Initialize with path to directory and how to glob over it. 61 | 62 | Args: 63 | path: Path to directory to load from 64 | glob: Glob pattern relative to the specified path 65 | by default set to pick up all non-hidden files 66 | suffixes: Provide to keep only files with these suffixes 67 | Useful when wanting to keep files with different suffixes 68 | Suffixes must include the dot, e.g. ".txt" 69 | show_progress: If true, will show a progress bar as the files are loaded. 70 | This forces an iteration through all matching files 71 | to count them prior to loading them. 72 | 73 | Examples: 74 | 75 | ..
code-block:: python 76 | 77 | # Recursively load all text files in a directory. 78 | loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt") 79 | 80 | # Recursively load all non-hidden files in a directory. 81 | loader = FileSystemBlobLoader("/path/to/directory", glob="**/[!.]*") 82 | 83 | # Load all files in a directory without recursion. 84 | loader = FileSystemBlobLoader("/path/to/directory", glob="*") 85 | """ 86 | if isinstance(path, Path): 87 | _path = path 88 | elif isinstance(path, str): 89 | _path = Path(path) 90 | else: 91 | raise TypeError(f"Expected str or Path, got {type(path)}") 92 | 93 | self.path = _path 94 | self.glob = glob 95 | self.suffixes = set(suffixes or []) 96 | self.show_progress = show_progress 97 | 98 | def yield_blobs( 99 | self, 100 | ) -> Iterable[Blob]: 101 | """Yield blobs that match the requested pattern.""" 102 | iterator = _make_iterator( 103 | length_func=self.count_matching_files, show_progress=self.show_progress 104 | ) 105 | 106 | for path in iterator(self._yield_paths()): 107 | yield Blob.from_path(path) 108 | 109 | def _yield_paths(self) -> Iterable[Path]: 110 | """Yield paths that match the requested pattern.""" 111 | paths = self.path.glob(self.glob) 112 | for path in paths: 113 | if path.is_file(): 114 | if self.suffixes and path.suffix not in self.suffixes: 115 | continue 116 | yield path 117 | 118 | def count_matching_files(self) -> int: 119 | """Count files that match the pattern without loading them.""" 120 | # Carry out a full iteration to count the files without 121 | # materializing anything expensive in memory. 122 | num = 0 123 | for _ in self._yield_paths(): 124 | num += 1 125 | return num 126 | -------------------------------------------------------------------------------- /src/gptui/models/openai_tokens_truncate.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from openai.types.chat import ChatCompletionMessageParam 4 | 5 | from .context import OpenaiContext 6 | from .utils.tokens_num import tokens_num_from_string, tokens_num_from_chat_context 7 | 8 | 9 | gptui_logger = logging.getLogger("gptui_logger") 10 | 11 | 12 | def find_position(lst, num): 13 | """ 14 | Finds the farthest left position in the list where the sum of all elements after that position is less than the given number 'num'. 15 | If you move one position backward, the sum of all elements after that new position will be greater than or equal to 'num'. 16 | 17 | Args: 18 | - lst {list}: The list of integers. 19 | - num {int}: The target number. 20 | 21 | Returns: 22 | int: The 1-based index of the position found (i.e., index + 1); 23 | return 0 if the sum of all integers is less than num, 24 | return the length of lst if the last integer alone is greater than or equal to num. 25 | """ 26 | prefix_sum = [0] 27 | for x in lst: 28 | prefix_sum.append(prefix_sum[-1] + x) 29 | 30 | left, right = 0, len(prefix_sum) - 1 31 | 32 | while left < right: 33 | mid = (left + right) // 2 34 | if prefix_sum[-1] - prefix_sum[mid] < num: 35 | right = mid 36 | else: 37 | left = mid + 1 38 | 39 | return left 40 | 41 | def trim_excess_tokens( 42 | context: OpenaiContext, 43 | max_tokens_num: int | None = None, 44 | offset: int = 0 45 | ) -> list[ChatCompletionMessageParam]: 46 | """Truncate the given context according to max_tokens_num, only retaining the last part. 47 | 48 | It will return a new chat_context list and not change the original chat_context. 49 | 50 | Args: 51 | - context (OpenaiContext): the context that needs to be trimmed.
52 | - max_tokens_num (int): the max tokens number allowed. 53 | - offset (int): for positive value, increase max_tokens_num; for negative value, decrease max_tokens_num. 54 | 55 | Returns: 56 | list[dict]: truncated chat_context of context. 57 | """ 58 | tokens_num_list = context.tokens_num_list 59 | if max_tokens_num is None: 60 | max_tokens_num = context.max_sending_tokens_num 61 | assert max_tokens_num is not None 62 | assert context.chat_context is not None 63 | num_after_offset = max_tokens_num + offset 64 | if num_after_offset <= 0: 65 | gptui_logger.warning( 66 | "The valid token length is less than zero, only the last message is kept. " 67 | "This could likely lead to a token length exceeding the limit error." 68 | ) 69 | return context.chat_context[-1:] 70 | position = find_position(lst=tokens_num_list, num=num_after_offset) 71 | if position >= len(tokens_num_list): 72 | model = context.parameters["model"] 73 | trim_status = True 74 | new_tokens_num = num_after_offset 75 | out_dict = context.chat_context[-1:][0] # Don't change the original context. 76 | while trim_status: 77 | new_tokens_num -= 5 78 | if new_tokens_num <= 0: # The number 5 is the assumed additional tokens count in message dict compared to message content 79 | out_dict["content"] = "" 80 | return [out_dict] 81 | out_dict_content = out_dict["content"] 82 | assert isinstance(out_dict_content, str) 83 | trim_string = trim_string_by_tokens(out_dict_content, max_tokens=new_tokens_num, model=model) 84 | out_dict["content"] = trim_string 85 | if tokens_num_from_chat_context([out_dict], model=model) < num_after_offset: 86 | trim_status = False 87 | return [out_dict] 88 | return context.chat_context[position:] 89 | 90 | def trim_string_by_tokens(string: str, max_tokens: int, model: str) -> str: 91 | """Trims the input string based on a specified maximum token count. 92 | - If the overall token count of the input string is less than or equal to the specified maximum token count, the function returns the original string as is. 93 | - If the token count of the input string exceeds the specified maximum, the function trims words from the beginning of the string progressively until the token count does not exceed the limit. 94 | 95 | Parameters: 96 | - string (str): The input string to be trimmed. 97 | - max_tokens (int): The allowable maximum token count. 98 | 99 | Returns: 100 | - str: The trimmed string where the token count does not surpass the specified maximum token count.
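Example (illustrative; the exact result depends on the model's tokenizer):
    trim_string_by_tokens("a b c d e f", max_tokens=3, model="gpt-3.5-turbo")
    might return "d e f", since words are dropped from the front until the
    remainder fits within the limit.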
101 | """ 102 | words = string.split() 103 | if tokens_num_from_string(string, model) <= max_tokens: 104 | return string 105 | 106 | left, right = 0, len(words) 107 | 108 | while left < right: 109 | mid = (left + right) // 2 110 | current_str = ' '.join(words[mid:]) 111 | current_tokens = tokens_num_from_string(current_str, model) 112 | 113 | if current_tokens == max_tokens: 114 | return current_str 115 | elif current_tokens < max_tokens: 116 | right = mid 117 | else: 118 | left = mid + 1 119 | 120 | if right == len(words): 121 | return "" 122 | return ' '.join(words[right:]) 123 | -------------------------------------------------------------------------------- /src/gptui/views/custom_tree.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Generic 3 | 4 | from rich.style import Style 5 | from textual.widgets import Tree, DirectoryTree 6 | from textual.widgets._directory_tree import DirEntry 7 | from textual.widgets._tree import TreeNode, TreeDataType, TOGGLE_STYLE 8 | 9 | from .theme import ThemeColor 10 | from .theme import theme_color as tc 11 | from ..utils.my_text import MyText as Text 12 | 13 | class MyDirectoryTree(DirectoryTree): 14 | def __init__(self, root_path: str, *args, **kwargs): 15 | self.root_path = root_path 16 | self.file_path_now = self.root_path 17 | super().__init__(*args, **kwargs) 18 | 19 | def on_directory_tree_file_selected(self, event) -> None: 20 | self.file_path_now = event.path 21 | 22 | def on_directory_tree_directory_selected(self, event) -> None: 23 | self.file_path_now = event.path 24 | if str(event.path) == self.root_path: 25 | self.reload() 26 | 27 | def render_label( 28 | self, node: TreeNode[DirEntry], base_style: Style, style: Style 29 | ) -> Text: 30 | """Render a label for the given node. 31 | 32 | Args: 33 | node: A tree node. 34 | base_style: The base style of the widget. 35 | style: The additional style for the label. 36 | 37 | Returns: 38 | A Rich Text object containing the label. 
39 | """ 40 | is_monochrome_theme = True if ThemeColor._theme == "monochrome" else False 41 | 42 | node_label = node._label.copy() 43 | node_label.stylize(style) 44 | color_style = Style(color = tc("green") or "green") 45 | 46 | if node._allow_expand: 47 | if is_monochrome_theme: 48 | prefix = ("➘ " if node.is_expanded else "➩ ", base_style + color_style + TOGGLE_STYLE) 49 | else: 50 | prefix = ("📂 " if node.is_expanded else "📁 ", base_style + TOGGLE_STYLE) 51 | node_label.stylize_before( 52 | self.get_component_rich_style("directory-tree--folder", partial=True) 53 | ) 54 | else: 55 | if is_monochrome_theme: 56 | prefix = ("◉ ", base_style + color_style) 57 | else: 58 | prefix = ( 59 | "📄 ", 60 | base_style, 61 | ) 62 | node_label.stylize_before( 63 | self.get_component_rich_style("directory-tree--file", partial=True), 64 | ) 65 | node_label.highlight_regex( 66 | r"\..+$", 67 | self.get_component_rich_style( 68 | "directory-tree--extension", partial=True 69 | ), 70 | ) 71 | 72 | if node_label.plain.startswith("."): 73 | node_label.stylize_before( 74 | self.get_component_rich_style("directory-tree--hidden") 75 | ) 76 | 77 | text = Text.assemble(prefix, node_label) 78 | return text 79 | 80 | 81 | class ConversationTree(Tree, Generic[TreeDataType]): 82 | def __init__(self, conversation_path: str, *args, **kwargs): 83 | self.conversation_path = conversation_path 84 | super().__init__(*args, **kwargs) 85 | 86 | @property 87 | def file_path_now(self): 88 | if self.cursor_node: 89 | return self.cursor_node.data 90 | else: 91 | return None 92 | 93 | def on_tree_node_selected(self, event): 94 | if event.node is self.root: 95 | self.conversation_refresh() 96 | 97 | def conversation_refresh(self): 98 | self.clear() 99 | self.root.expand() 100 | conversation_path = self.conversation_path 101 | try: 102 | for filename in os.listdir(conversation_path): 103 | if filename.endswith(".json") and (filename != "_conversations_cache.json"): 104 | self.root.add_leaf(f"{filename}", data=os.path.join(conversation_path, filename)) 105 | except FileNotFoundError: 106 | pass 107 | 108 | def render_label( 109 | self, node: TreeNode[TreeDataType], base_style: Style, style: Style 110 | ) -> Text: 111 | """Render a label for the given node. Override this to modify how labels are rendered. 112 | 113 | Args: 114 | node: A tree node. 115 | base_style: The base style of the widget. 116 | style: The additional style for the label. 117 | 118 | Returns: 119 | A Rich Text object containing the label. 
120 | """ 121 | node_label = node._label.copy() 122 | node_label.stylize(style) 123 | 124 | if node._allow_expand: 125 | if ThemeColor._theme == "monochrome": 126 | color_style = Style(color = tc("green") or "green") 127 | prefix = ("▼ " if node.is_expanded else "▶ ", base_style + color_style + TOGGLE_STYLE) 128 | else: 129 | prefix = ( 130 | "▼ " if node.is_expanded else "▶ ", 131 | base_style + TOGGLE_STYLE, 132 | ) 133 | else: 134 | prefix = ("", base_style) 135 | 136 | text = Text.assemble(prefix, node_label) 137 | return text 138 | -------------------------------------------------------------------------------- /src/gptui/models/role.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import copy 3 | import logging 4 | from typing import TYPE_CHECKING, Iterable, Generator, cast 5 | 6 | from agere.utils.llm_async_converters import LLMAsyncAdapter 7 | from ai_care import AICare, AICareContext 8 | from openai.types.chat import ChatCompletionMessageParam 9 | 10 | from .context import BeadOpenaiContext, OpenaiContext 11 | from .openai_error import OpenaiErrorHandler 12 | from .openai_tokens_truncate import trim_excess_tokens 13 | from .utils.openai_api import openai_api_client 14 | from ..gptui_kernel.manager import ManagerInterface 15 | 16 | if TYPE_CHECKING: 17 | from .jobs import GroupTalkManager 18 | 19 | 20 | gptui_logger = logging.getLogger("gptui_logger") 21 | 22 | 23 | class Role: 24 | def __init__( 25 | self, 26 | name: str, 27 | group_talk_manager: GroupTalkManager, 28 | manager: ManagerInterface, 29 | openai_context_parent: OpenaiContext 30 | ): 31 | """Role use the same openai parameters as in the parent conversation. 32 | """ 33 | self.name = name 34 | self.context = BeadOpenaiContext(parameters=openai_context_parent.parameters) 35 | self.group_talk_manager = group_talk_manager 36 | self.manager = manager 37 | self.openai_api_client = openai_api_client(manager.dot_env_config_path) 38 | self.context.max_sending_tokens_num = openai_context_parent.max_sending_tokens_num 39 | self.openai_context_parent = openai_context_parent 40 | self.context.chat_context_saver = "inner" 41 | self.ai_care = AICare() 42 | self.ai_care.register_to_llm_method(self.to_llm_method) 43 | self.ai_care.register_to_user_method(self.to_user_method) 44 | self.ai_care.set_config(key="delay", value=60) 45 | 46 | def set_role_prompt(self, prompt: str): 47 | self.context.bead = [{"role": "system", "content": prompt}] 48 | self.context.insert_bead() 49 | self.ai_care.set_guide( 50 | prompt 51 | + "Plase maintain your role in the group chat, but if you want to say something, " 52 | + "there is no need to ask 'Can I speak?' first." 
53 | ) 54 | 55 | def chat(self, message: ChatCompletionMessageParam | list[ChatCompletionMessageParam]) -> Iterable: 56 | self.context.auto_insert_bead() 57 | if isinstance(message, dict): 58 | self.context.chat_context_append(message=message) 59 | else: 60 | for one_message in message: 61 | self.context.chat_context_append(message=one_message) 62 | self.context.parameters = self.openai_context_parent.parameters.copy() 63 | self.context.parameters["stream"] = True 64 | trim_messages = trim_excess_tokens(self.context, offset=0) 65 | try: 66 | response = self.openai_api_client.with_options(timeout=20.0).chat.completions.create( 67 | messages=trim_messages, 68 | **self.context.parameters, 69 | ) 70 | except Exception as e: 71 | OpenaiErrorHandler().openai_error_handle(error=e, context=self.context, event_loop=True) 72 | raise e 73 | return response 74 | 75 | def to_llm_method(self, chat_context, to_llm_messages: list[AICareContext]) -> Generator[str, None, None]: 76 | messages_list = [ 77 | {"role": "user", "name": "Aicarey", "content": message["content"]} if message["role"] == "ai_care" 78 | else {"role": "assistant", "content": message["content"]} 79 | for message in to_llm_messages 80 | ] 81 | context = copy.deepcopy(self.context) 82 | assert isinstance(context, BeadOpenaiContext) 83 | context.auto_insert_bead() 84 | for one_message in messages_list: 85 | one_message = cast(ChatCompletionMessageParam, one_message) 86 | context.chat_context_append(message=one_message) 87 | context.parameters = self.openai_context_parent.parameters.copy() 88 | context.parameters["stream"] = True 89 | trim_messages = trim_excess_tokens(context, offset=0) 90 | try: 91 | openai_response = self.openai_api_client.with_options(timeout=20.0).chat.completions.create( 92 | messages=trim_messages, 93 | **context.parameters, 94 | ) 95 | except Exception as e: 96 | raise e 97 | else: 98 | def response_gen(response: Iterable): 99 | for chunk in response: 100 | content = chunk.choices[0].delta.content 101 | if content is None: 102 | continue 103 | yield content 104 | return response_gen(openai_response) 105 | 106 | def to_user_method(self, to_user_message: Generator[str, None, None]) -> None: 107 | with self.group_talk_manager._ai_care_rlock: 108 | if self.group_talk_manager.speaking is not None: 109 | return 110 | async_iterable = LLMAsyncAdapter().llm_to_async_iterable(to_user_message) 111 | self.group_talk_manager.ai_care_message_buffer.append({"name": f"{self.name}", "content": async_iterable}) 112 | 113 | def ai_care_update(self): 114 | self.ai_care.chat_update(self.context) 115 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/load/serializable.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from typing import Any, Dict, List, Literal, TypedDict, Union, cast 3 | 4 | from pydantic import BaseModel, PrivateAttr 5 | 6 | 7 | class BaseSerialized(TypedDict): 8 | """Base class for serialized objects.""" 9 | 10 | lc: int 11 | id: List[str] 12 | 13 | 14 | class SerializedConstructor(BaseSerialized): 15 | """Serialized constructor.""" 16 | 17 | type: Literal["constructor"] 18 | kwargs: Dict[str, Any] 19 | 20 | 21 | class SerializedSecret(BaseSerialized): 22 | """Serialized secret.""" 23 | 24 | type: Literal["secret"] 25 | 26 | 27 | class SerializedNotImplemented(BaseSerialized): 28 | """Serialized not implemented.""" 29 | 30 | type: Literal["not_implemented"] 31 | 32 | 33 | class Serializable(BaseModel, 
ABC): 34 | """Serializable base class.""" 35 | 36 | @property 37 | def lc_serializable(self) -> bool: 38 | """ 39 | Return whether or not the class is serializable. 40 | """ 41 | return False 42 | 43 | @property 44 | def lc_namespace(self) -> List[str]: 45 | """ 46 | Return the namespace of the langchain object. 47 | eg. ["langchain", "llms", "openai"] 48 | """ 49 | return self.__class__.__module__.split(".") 50 | 51 | @property 52 | def lc_secrets(self) -> Dict[str, str]: 53 | """ 54 | Return a map of constructor argument names to secret ids. 55 | eg. {"openai_api_key": "OPENAI_API_KEY"} 56 | """ 57 | return dict() 58 | 59 | @property 60 | def lc_attributes(self) -> Dict: 61 | """ 62 | Return a list of attribute names that should be included in the 63 | serialized kwargs. These attributes must be accepted by the 64 | constructor. 65 | """ 66 | return {} 67 | 68 | class Config: 69 | extra = "ignore" 70 | 71 | _lc_kwargs = PrivateAttr(default_factory=dict) 72 | 73 | def __init__(self, **kwargs: Any) -> None: 74 | super().__init__(**kwargs) 75 | self._lc_kwargs = kwargs 76 | 77 | def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]: 78 | if not self.lc_serializable: 79 | return self.to_json_not_implemented() 80 | 81 | secrets = dict() 82 | # Get latest values for kwargs if there is an attribute with same name 83 | lc_kwargs = { 84 | k: getattr(self, k, v) 85 | for k, v in self._lc_kwargs.items() 86 | if not (self.__exclude_fields__ or {}).get(k, False) # type: ignore 87 | } 88 | 89 | # Merge the lc_secrets and lc_attributes from every class in the MRO 90 | for cls in [None, *self.__class__.mro()]: 91 | # Once we get to Serializable, we're done 92 | if cls is Serializable: 93 | break 94 | 95 | # Get a reference to self bound to each class in the MRO 96 | this = cast(Serializable, self if cls is None else super(cls, self)) 97 | 98 | secrets.update(this.lc_secrets) 99 | lc_kwargs.update(this.lc_attributes) 100 | 101 | # include all secrets, even if not specified in kwargs 102 | # as these secrets may be passed as an environment variable instead 103 | for key in secrets.keys(): 104 | secret_value = getattr(self, key, None) or lc_kwargs.get(key) 105 | if secret_value is not None: 106 | lc_kwargs.update({key: secret_value}) 107 | 108 | return { 109 | "lc": 1, 110 | "type": "constructor", 111 | "id": [*self.lc_namespace, self.__class__.__name__], 112 | "kwargs": lc_kwargs 113 | if not secrets 114 | else _replace_secrets(lc_kwargs, secrets), 115 | } 116 | 117 | def to_json_not_implemented(self) -> SerializedNotImplemented: 118 | return to_json_not_implemented(self) 119 | 120 | 121 | def _replace_secrets( 122 | root: Dict[Any, Any], secrets_map: Dict[str, str] 123 | ) -> Dict[Any, Any]: 124 | result = root.copy() 125 | for path, secret_id in secrets_map.items(): 126 | [*parts, last] = path.split(".") 127 | current = result 128 | for part in parts: 129 | if part not in current: 130 | break 131 | current[part] = current[part].copy() 132 | current = current[part] 133 | if last in current: 134 | current[last] = { 135 | "lc": 1, 136 | "type": "secret", 137 | "id": [secret_id], 138 | } 139 | return result 140 | 141 | 142 | def to_json_not_implemented(obj: object) -> SerializedNotImplemented: 143 | """Serialize a "not implemented" object. 
144 | 145 | Args: 146 | obj: object to serialize 147 | 148 | Returns: 149 | SerializedNotImplemented 150 | """ 151 | _id: List[str] = [] 152 | try: 153 | if hasattr(obj, "__name__"): 154 | _id = [*obj.__module__.split("."), obj.__name__] 155 | elif hasattr(obj, "__class__"): 156 | _id = [*obj.__class__.__module__.split("."), obj.__class__.__name__] 157 | except Exception: 158 | pass 159 | return { 160 | "lc": 1, 161 | "type": "not_implemented", 162 | "id": _id, 163 | } 164 | -------------------------------------------------------------------------------- /src/gptui/data/vector_memory/qdrant_memory.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | import logging 3 | from logging import Logger 4 | 5 | from qdrant_client import QdrantClient 6 | from semantic_kernel.connectors.memory.qdrant.qdrant_memory_store import QdrantMemoryStore 7 | from semantic_kernel.memory.memory_record import MemoryRecord 8 | from semantic_kernel.utils.null_logger import NullLogger 9 | from qdrant_client import models as qdrant_models 10 | 11 | 12 | gptui_logger = logging.getLogger("gptui_logger") 13 | 14 | 15 | class QdrantVector(QdrantMemoryStore): 16 | 17 | def __init__( 18 | self, 19 | vector_size: int, 20 | url: str | None = None, 21 | port: int | None = 6333, 22 | logger: Logger | None = None, 23 | local: bool | None = False, 24 | ) -> None: 25 | """Initializes a new instance of the QdrantVector memory store. 26 | 27 | Arguments: 28 | vector_size {int} -- The dimension of the vectors to be stored. url {Optional[str]} -- The server URL, or the local database path when 'local' is True. (default: {None}) port {Optional[int]} -- The server port. (default: {6333}) logger {Optional[Logger]} -- The logger to use. (default: {None}) local {Optional[bool]} -- Whether to use a local (file-based or in-memory) database instead of a remote server. (default: {False}) 29 | """ 30 | if local: 31 | if url: 32 | try: 33 | self._qdrantclient = QdrantClient(path=url) 34 | except KeyError as e: 35 | gptui_logger.error( 36 | f"An error occurred while initializing the local vector database. Database path: {url}. Error: {repr(e)} " 37 | "Warning: Rebuilding of the vector database may be required. " 38 | "You can remove '~/.gptui/user/vector_memory_database/' to rebuild it if you are using default config."
39 | ) 40 | else: 41 | self._qdrantclient = QdrantClient(location=":memory:") 42 | else: 43 | self._qdrantclient = QdrantClient(url=url, port=port) 44 | 45 | self._logger = logger or NullLogger() 46 | self._default_vector_size = vector_size 47 | 48 | async def _convert_from_memory_record_async( 49 | self, collection_name: str, record: MemoryRecord 50 | ) -> qdrant_models.PointStruct: 51 | if record._key is not None and record._key != "": 52 | pointId = record._key 53 | 54 | else: 55 | existing_record = await self._get_existing_record_by_payload_id_async( 56 | collection_name=collection_name, 57 | payload_id=record._id, 58 | ) 59 | 60 | if existing_record: 61 | pointId = str(existing_record.id) 62 | else: 63 | pointId = str(uuid.uuid4()) 64 | 65 | payload = record.__dict__.copy() 66 | payload["storage_status"] = "unsaved" 67 | embedding = payload.pop("_embedding") 68 | 69 | return qdrant_models.PointStruct( 70 | id=pointId, vector=embedding.tolist(), payload=payload 71 | ) 72 | 73 | async def collection_save(self, collection_name: str) -> qdrant_models.UpdateResult: 74 | filter = qdrant_models.Filter( 75 | must=[ 76 | qdrant_models.FieldCondition( 77 | key="storage_status", 78 | match=qdrant_models.MatchAny(any=["unsaved", "cached"]), 79 | ) 80 | ] 81 | ) 82 | 83 | update_result = self._qdrantclient.set_payload( 84 | collection_name=str(collection_name), 85 | payload={"storage_status": "saved"}, 86 | points=filter, 87 | ) 88 | 89 | return update_result 90 | 91 | async def collection_cache(self, collection_name: str) -> qdrant_models.UpdateResult: 92 | filter = qdrant_models.Filter( 93 | must=[ 94 | qdrant_models.FieldCondition( 95 | key="storage_status", 96 | match=qdrant_models.MatchValue(value="unsaved"), 97 | ) 98 | ] 99 | ) 100 | 101 | update_result = self._qdrantclient.set_payload( 102 | collection_name=str(collection_name), 103 | payload={"storage_status": "cached"}, 104 | points=filter, 105 | ) 106 | 107 | return update_result 108 | 109 | async def collection_clean(self, collection_name: str) -> qdrant_models.UpdateResult: 110 | filter = qdrant_models.Filter( 111 | must=[ 112 | qdrant_models.FieldCondition( 113 | key="storage_status", 114 | match=qdrant_models.MatchAny(any=["unsaved", "cached"]), 115 | ) 116 | ] 117 | ) 118 | 119 | update_result = self._qdrantclient.delete( 120 | collection_name=collection_name, 121 | points_selector=qdrant_models.FilterSelector(filter=filter), 122 | ) 123 | 124 | return update_result 125 | 126 | async def collection_count(self, collection_name: str) -> tuple[int, int, int]: 127 | def make_filter(storage_status: str): 128 | filter = qdrant_models.Filter( 129 | must=[ 130 | qdrant_models.FieldCondition( 131 | key="storage_status", 132 | match=qdrant_models.MatchValue(value=storage_status), 133 | ) 134 | ] 135 | ) 136 | return filter 137 | 138 | count_saved = self._qdrantclient.count( 139 | collection_name=collection_name, 140 | count_filter=make_filter("saved"), 141 | exact=True, 142 | ) 143 | 144 | count_cached = self._qdrantclient.count( 145 | collection_name=collection_name, 146 | count_filter=make_filter("cached"), 147 | exact=True, 148 | ) 149 | 150 | count_unsaved = self._qdrantclient.count( 151 | collection_name=collection_name, 152 | count_filter=make_filter("unsaved"), 153 | exact=True, 154 | ) 155 | 156 | return count_saved.count, count_cached.count, count_unsaved.count 157 | -------------------------------------------------------------------------------- /src/gptui/controllers/chat_response_control.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | import threading 3 | 4 | from ..utils.my_text import MyText as Text 5 | from ..utils.my_text import MyLines as Lines 6 | from ..models.signals import response_to_user_message_stream_signal, response_auxiliary_message_signal 7 | 8 | 9 | gptui_logger = logging.getLogger("gptui_logger") 10 | 11 | 12 | class ChatResponse: 13 | def __init__(self, app): 14 | self.app = app 15 | self.chat_region = app.main_screen.query_one("#chat_region") 16 | self.app_chat_tabs = app.main_screen.query_one("#chat_tabs") 17 | self.buffer = {} 18 | self.tab_not_switching = threading.Event() 19 | self.tab_not_switching.set() 20 | response_to_user_message_stream_signal.connect(self.handle_response) 21 | response_auxiliary_message_signal.connect(self.handle_group_talk_response) 22 | 23 | def delete_buffer_id(self, id: int) -> None: 24 | self.buffer.pop(id, None) 25 | 26 | def handle_response(self, sender, **kwargs): 27 | message = kwargs["message"] 28 | self.stream_display(message) 29 | 30 | def stream_display(self, message: dict, stream: bool = True, copy_code: bool = False) -> None: 31 | """Display the chat response in TUI""" 32 | # If the tab is in the process of switching, wait for the chat history to finish loading 33 | # before displaying the chat content that is being generated. 34 | self.tab_not_switching.wait(2) 35 | context_id = message["content"]["context_id"] 36 | if (active_tab := self.app_chat_tabs.active_tab) is not None: 37 | tab_id = int(active_tab.id[3:]) 38 | else: 39 | tab_id = 0 40 | #self.buffer = {} 41 | #return 42 | if context_id not in self.buffer: 43 | self.buffer[context_id] = { 44 | "chat_stream_content": {"role": "assistant", "content": ""}, 45 | "decorate_chat_stream_content_lines": Lines(), 46 | "last_tab_id": tab_id, 47 | } 48 | buffer_context = self.buffer[context_id] 49 | chat_stream_content = buffer_context["chat_stream_content"] 50 | char = message["content"]["content"] 51 | if message["flag"] == "content": 52 | # This condition being met indicates that the currently generated content corresponds with the active tab window, 53 | # and it is not the first time being displayed.
54 | if context_id == tab_id == buffer_context["last_tab_id"]: 55 | length = len(buffer_context["decorate_chat_stream_content_lines"]) 56 | self.chat_region.right_pop_lines(length, refresh=False) 57 | chat_stream_content["content"] += char 58 | if context_id == tab_id: 59 | buffer_context["decorate_chat_stream_content_lines"] = self.app.decorator(chat_stream_content, stream, copy_code) 60 | self.chat_region.write_lines(buffer_context["decorate_chat_stream_content_lines"]) 61 | elif message["flag"] == "end": 62 | if context_id == tab_id: 63 | self.stream_display(message={"content": {"content": "", "context_id": context_id}, "flag": "content"}, stream=False, copy_code=True) 64 | self.chat_region.write_lines([Text()]) 65 | chat_stream_content["content"] = "" 66 | buffer_context["decorate_chat_stream_content_lines"] = Lines() 67 | buffer_context["last_tab_id"] = tab_id 68 | 69 | def handle_group_talk_response(self, sender, **kwargs): 70 | message = kwargs["message"] 71 | message_content = message["content"] 72 | flag = message["flag"] 73 | if flag == "group_talk_response": 74 | self.group_talk_stream_display(message=message_content) 75 | 76 | def group_talk_stream_display(self, message: dict, stream: bool = True, copy_code: bool = False) -> None: 77 | """Display the group talk response in TUI""" 78 | message_dict = message["content"] 79 | group_talk_manager_id = message_dict["group_talk_manager_id"] 80 | if (active_tab := self.app_chat_tabs.active_tab) is not None: 81 | tab_id = int(active_tab.id[3:]) 82 | else: 83 | tab_id = 0 84 | if group_talk_manager_id not in self.buffer: 85 | self.buffer[group_talk_manager_id] = { 86 | "group_talk_chat_stream_content": {"role": "assistant", "name": "", "content": ""}, 87 | "group_talk_decorate_chat_stream_content_lines": Lines(), 88 | "last_tab_id": tab_id, 89 | } 90 | buffer_context = self.buffer[group_talk_manager_id] 91 | group_talk_chat_stream_content = buffer_context["group_talk_chat_stream_content"] 92 | char = message_dict["content"] 93 | group_talk_chat_stream_content["name"] = message_dict["name"] 94 | if message["flag"] == "content": 95 | # This condition being met indicates that the currently generated content corresponds with the active tab window, 96 | # and it is not the first time being displayed. 
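# Streaming is rendered in place: the lines written for this message so far are popped from the chat region and rewritten with the new chunk appended.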
97 | if group_talk_manager_id == tab_id == buffer_context["last_tab_id"]: 98 | length = len(buffer_context["group_talk_decorate_chat_stream_content_lines"]) 99 | self.chat_region.right_pop_lines(length, refresh=False) 100 | group_talk_chat_stream_content["content"] += char 101 | buffer_context["group_talk_decorate_chat_stream_content_lines"] = self.app.decorator(group_talk_chat_stream_content, stream, copy_code) 102 | if group_talk_manager_id == tab_id: 103 | self.chat_region.write_lines(buffer_context["group_talk_decorate_chat_stream_content_lines"]) 104 | elif message["flag"] == "end": 105 | if group_talk_manager_id == tab_id: 106 | self.group_talk_stream_display(message={"content": {"role": "assistant", "name": message_dict["name"], "content": "", "group_talk_manager_id": group_talk_manager_id}, "flag": "content"}, stream=False, copy_code=True) 107 | self.chat_region.write_lines([Text()]) 108 | group_talk_chat_stream_content["content"] = "" 109 | buffer_context["group_talk_decorate_chat_stream_content_lines"] = Lines() 110 | buffer_context["last_tab_id"] = tab_id 111 | -------------------------------------------------------------------------------- /src/gptui/data/langchain/document_loaders/blob_loaders/schema.py: -------------------------------------------------------------------------------- 1 | """Schema for Blobs and Blob Loaders. 2 | 3 | The goal is to facilitate decoupling of content loading from content parsing code. 4 | 5 | In addition, content loading code should provide a lazy loading interface by default. 6 | """ 7 | from __future__ import annotations 8 | 9 | import contextlib 10 | import mimetypes 11 | from abc import ABC, abstractmethod 12 | from io import BufferedReader, BytesIO 13 | from pathlib import PurePath 14 | from typing import Any, Generator, Iterable, Mapping, Optional, Union 15 | 16 | from pydantic import BaseModel, root_validator 17 | 18 | PathLike = Union[str, PurePath] 19 | 20 | 21 | class Blob(BaseModel): 22 | """A blob is used to represent raw data by either reference or value. 23 | 24 | Provides an interface to materialize the blob in different representations, and 25 | help to decouple the development of data loaders from the downstream parsing of 26 | the raw data. 27 | 28 | Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob 29 | """ 30 | 31 | data: Union[bytes, str, None] # Raw data 32 | mimetype: Optional[str] = None # Not to be confused with a file extension 33 | encoding: str = "utf-8" # Use utf-8 as default encoding, if decoding to string 34 | # Location where the original content was found 35 | # Represent location on the local file system 36 | # Useful for situations where downstream code assumes it must work with file paths 37 | # rather than in-memory content. 
38 | path: Optional[PathLike] = None 39 | 40 | class Config: 41 | arbitrary_types_allowed = True 42 | frozen = True 43 | 44 | @property 45 | def source(self) -> Optional[str]: 46 | """The source location of the blob as string if known otherwise none.""" 47 | return str(self.path) if self.path else None 48 | 49 | @root_validator(pre=True) 50 | def check_blob_is_valid(cls, values: Mapping[str, Any]) -> Mapping[str, Any]: 51 | """Verify that either data or path is provided.""" 52 | if "data" not in values and "path" not in values: 53 | raise ValueError("Either data or path must be provided") 54 | return values 55 | 56 | def as_string(self) -> str: 57 | """Read data as a string.""" 58 | if self.data is None and self.path: 59 | with open(str(self.path), "r", encoding=self.encoding) as f: 60 | return f.read() 61 | elif isinstance(self.data, bytes): 62 | return self.data.decode(self.encoding) 63 | elif isinstance(self.data, str): 64 | return self.data 65 | else: 66 | raise ValueError(f"Unable to get string for blob {self}") 67 | 68 | def as_bytes(self) -> bytes: 69 | """Read data as bytes.""" 70 | if isinstance(self.data, bytes): 71 | return self.data 72 | elif isinstance(self.data, str): 73 | return self.data.encode(self.encoding) 74 | elif self.data is None and self.path: 75 | with open(str(self.path), "rb") as f: 76 | return f.read() 77 | else: 78 | raise ValueError(f"Unable to get bytes for blob {self}") 79 | 80 | @contextlib.contextmanager 81 | def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]: 82 | """Read data as a byte stream.""" 83 | if isinstance(self.data, bytes): 84 | yield BytesIO(self.data) 85 | elif self.data is None and self.path: 86 | with open(str(self.path), "rb") as f: 87 | yield f 88 | else: 89 | raise NotImplementedError(f"Unable to convert blob {self}") 90 | 91 | @classmethod 92 | def from_path( 93 | cls, 94 | path: PathLike, 95 | *, 96 | encoding: str = "utf-8", 97 | mime_type: Optional[str] = None, 98 | guess_type: bool = True, 99 | ) -> Blob: 100 | """Load the blob from a path like object. 101 | 102 | Args: 103 | path: path like object to file to be read 104 | encoding: Encoding to use if decoding the bytes into a string 105 | mime_type: if provided, will be set as the mime-type of the data 106 | guess_type: If True, the mimetype will be guessed from the file extension, 107 | if a mime-type was not provided 108 | 109 | Returns: 110 | Blob instance 111 | """ 112 | if mime_type is None and guess_type: 113 | _mimetype = mimetypes.guess_type(path)[0] if guess_type else None 114 | else: 115 | _mimetype = mime_type 116 | # We do not load the data immediately, instead we treat the blob as a 117 | # reference to the underlying data. 118 | return cls(data=None, mimetype=_mimetype, encoding=encoding, path=path) 119 | 120 | @classmethod 121 | def from_data( 122 | cls, 123 | data: Union[str, bytes], 124 | *, 125 | encoding: str = "utf-8", 126 | mime_type: Optional[str] = None, 127 | path: Optional[str] = None, 128 | ) -> Blob: 129 | """Initialize the blob from in-memory data. 
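Unlike from_path, the content is stored on the blob itself rather than being referenced from the file system.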
130 | 131 | Args: 132 | data: the in-memory data associated with the blob 133 | encoding: Encoding to use if decoding the bytes into a string 134 | mime_type: if provided, will be set as the mime-type of the data 135 | path: if provided, will be set as the source from which the data came 136 | 137 | Returns: 138 | Blob instance 139 | """ 140 | return cls(data=data, mimetype=mime_type, encoding=encoding, path=path) 141 | 142 | def __repr__(self) -> str: 143 | """Define the blob representation.""" 144 | str_repr = f"Blob {id(self)}" 145 | if self.source: 146 | str_repr += f" {self.source}" 147 | return str_repr 148 | 149 | 150 | class BlobLoader(ABC): 151 | """Abstract interface for blob loader implementations. 152 | 153 | Implementers should be able to load raw content from a storage system according 154 | to some criteria and return the raw content lazily as a stream of blobs. 155 | """ 156 | 157 | @abstractmethod 158 | def yield_blobs( 159 | self, 160 | ) -> Iterable[Blob]: 161 | """A lazy loader for raw data represented by LangChain's Blob object. 162 | 163 | Returns: 164 | A generator over blobs 165 | """ 166 | -------------------------------------------------------------------------------- /src/gptui/models/context.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import copy 3 | import logging 4 | from dataclasses import dataclass, field 5 | from typing import Literal, TypeVar, Generic 6 | 7 | from openai.types.chat import ChatCompletionMessageParam 8 | 9 | from .utils.tokens_num import tokens_num_from_chat_context 10 | 11 | 12 | gptui_logger = logging.getLogger("gptui_logger") 13 | 14 | 15 | T = TypeVar("T") 16 | 17 | 18 | @dataclass 19 | class Context(Generic[T]): 20 | chat_context: list[T] | None = None 21 | id: str | int | None = None 22 | 23 | @property 24 | def chat_context_copy(self) -> list[T]: 25 | chat_context = self.chat_context 26 | if chat_context is None: 27 | raise ValueError("Field 'chat_context' has not been set.") 28 | return copy.deepcopy(chat_context) 29 | 30 | 31 | @dataclass 32 | class OpenaiContext(Context[ChatCompletionMessageParam]): 33 | parameters: dict = field(default_factory=dict) 34 | max_sending_tokens_num: int | None = None 35 | chat_context_saver: Literal["outer", "inner"] | None = None 36 | chat_context_saver_for_sending: Literal["outer", "inner"] | None = None 37 | plugins: list = field(default_factory=list) 38 | 39 | def __post_init__(self, *args, **kwargs): 40 | self._tokens_num_list = [] 41 | self._tokens_num_model = self.parameters.get("model") 42 | 43 | @property 44 | def tokens_num_list(self) -> list: 45 | if self.chat_context is None: 46 | self._tokens_num_list = [] 47 | return self._tokens_num_list 48 | model = self.parameters.get("model") 49 | if model is None: 50 | raise ValueError("Parameter 'model' has not been set.") 51 | if model != self._tokens_num_model: 52 | self._tokens_num_list = [tokens_num_from_chat_context([message], model=model) for message in self.chat_context] 53 | self._tokens_num_model = model 54 | return self._tokens_num_list 55 | if len(self.chat_context) == len(self._tokens_num_list): 56 | return self._tokens_num_list 57 | elif len(self.chat_context) < len(self._tokens_num_list): 58 | self._tokens_num_list = [tokens_num_from_chat_context([message], model=model) for message in self.chat_context] 59 | return self._tokens_num_list 60 | else: 61 | tokens_num_list = [tokens_num_from_chat_context([message], model=model) for message in
self.chat_context[len(self._tokens_num_list):]] 62 | self._tokens_num_list.extend(tokens_num_list) 63 | return self._tokens_num_list 64 | 65 | @property 66 | def tokens_num(self) -> int | None: 67 | if self.chat_context is None: 68 | return None 69 | return sum(self.tokens_num_list) 70 | 71 | def chat_context_append(self, message: ChatCompletionMessageParam, tokens_num_update: bool = True) -> None: 72 | """Write a chat message to the chat_context, automatically calculating and updating the number of tokens. 73 | If the number of tokens is not needed, or real-time calculation of tokens is not required, 74 | you can set tokens_num_update to False, or directly manipulate the 'chat_context' attribute. 75 | """ 76 | if self.chat_context is None: 77 | self.chat_context = [] 78 | self.chat_context.append(message) 79 | if tokens_num_update is True: 80 | model = self.parameters.get("model") 81 | if model is not None: 82 | tokens_num = tokens_num_from_chat_context([message], model=model) 83 | self._tokens_num_list.append(tokens_num) 84 | 85 | def chat_context_pop(self, pop_index: int = -1) -> ChatCompletionMessageParam: 86 | "Pop a message from the chat context, and delete the corresponding tokens num in _tokens_num_list." 87 | self._tokens_num_list.pop(pop_index) 88 | if self.chat_context is None: 89 | raise ValueError("Field 'chat_context' has not been set.") 90 | return self.chat_context.pop(pop_index) 91 | 92 | 93 | def __deepcopy__(self, memo): 94 | 95 | def is_read_only(attr) -> bool: 96 | if attr is None: 97 | return False 98 | if getattr(attr, 'fset', None) is None: 99 | return True 100 | else: 101 | return False 102 | 103 | if id(self) in memo: 104 | return memo[id(self)] 105 | 106 | new_instance = self.__class__.__new__(self.__class__) 107 | 108 | for k in dir(self): 109 | attr = getattr(self, k) 110 | if not k.startswith("__") and not callable(attr) and not is_read_only(getattr(self.__class__, k, None)) and k != "plugins": 111 | setattr(new_instance, k, copy.deepcopy(attr, memo)) 112 | 113 | setattr(new_instance, "plugins", copy.copy(self.plugins)) 114 | 115 | memo[id(self)] = new_instance 116 | 117 | return new_instance 118 | 119 | 120 | @dataclass 121 | class BeadOpenaiContext(OpenaiContext): 122 | bead: list[ChatCompletionMessageParam] = field(default_factory=list) 123 | bead_info: dict[str, list] = field(default_factory=lambda: {"positions": [], "lengths": []}) 124 | 125 | def insert_bead(self): 126 | """Insert the bead into the chat_context.""" 127 | bead_content = self.bead 128 | if self.chat_context is None: 129 | self.bead_info["positions"] = [0] 130 | # The length would be added below, so it is not added here. 131 | self.bead_info["lengths"] = [] 132 | else: 133 | self.bead_info["positions"].append(len(self.chat_context)) 134 | for one_message in copy.deepcopy(bead_content): 135 | self.chat_context_append(message=one_message, tokens_num_update=True) 136 | self.bead_info["lengths"].append(tokens_num_from_chat_context(chat_context=bead_content, model=self.parameters["model"])) 137 | 138 | def auto_insert_bead(self) -> bool: 139 | """Automatically determine whether the bead needs to be inserted. 140 | If so, insert the bead and return True; 141 | otherwise, return False.
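The bead is considered due once the tokens accumulated since the last bead insertion reach 95% of max_sending_tokens_num.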
142 | """ 143 | last_bead_position = self.bead_info["positions"][-1] if self.bead_info["positions"] else 0 144 | tokens_num_without_bead = sum(self.tokens_num_list[last_bead_position:]) 145 | assert self.max_sending_tokens_num is not None 146 | if tokens_num_without_bead >= self.max_sending_tokens_num * 0.95: 147 | self.insert_bead() 148 | return True 149 | return False 150 | -------------------------------------------------------------------------------- /src/gptui/plugins/SnoozeReminder.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import json 3 | import threading 4 | import time 5 | 6 | from agere.commander import Callback 7 | from semantic_kernel.orchestration.sk_context import SKContext 8 | from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter 9 | 10 | from gptui.gptui_kernel.manager import ManagerInterface, auto_init_params 11 | from gptui.models.blinker_wrapper import async_wrapper_with_loop, sync_wrapper 12 | from gptui.models.openai_chat_inner_service import chat_service_for_inner 13 | from gptui.models.openai_error import OpenaiErrorHandler 14 | from gptui.models.signals import response_auxiliary_message_signal, notification_signal 15 | from gptui.models.utils.openai_api import openai_api 16 | from gptui.utils.my_text import MyText as Text 17 | 18 | gptui_logger = logging.getLogger("gptui_logger") 19 | 20 | 21 | class SnoozeReminder: 22 | def __init__(self, manager: ManagerInterface): 23 | self.manager = manager 24 | self.openai_api = openai_api(manager.dot_env_config_path) 25 | 26 | @auto_init_params("0") 27 | @classmethod 28 | def get_init_params(cls, manager) -> tuple: 29 | return (manager,) 30 | 31 | @sk_function( 32 | description="Set a reminder. When the time comes, you will be notified of the content you set.", 33 | name="snooze_reminder", 34 | ) 35 | @sk_function_context_parameter( 36 | name="delay", 37 | description="Set the delay for the reminder, in seconds.", 38 | ) 39 | @sk_function_context_parameter( 40 | name="reminder_content", 41 | description="Content to be reminded." 42 | ) 43 | @sk_function_context_parameter( 44 | name="openai_context", 45 | description=( 46 | "The dictionary string version of the OpenaiContext instance. " 47 | "This is a special parameter that typically doesn't require manual intervention, as it is usually automatically managed." 48 | "Unless there's a clear intention, please keep its default value." 49 | ), 50 | default_value="AUTO" 51 | ) 52 | def snooze_reminder(self, context: SKContext) -> str: 53 | delay = context["delay"] 54 | 55 | try: 56 | delay = int(delay) 57 | except ValueError: 58 | return "The parameter 'delay' cannot be parsed into integer seconds." 59 | 60 | content = context["reminder_content"] 61 | openai_context_dict = json.loads(str(context["openai_context"])) 62 | conversation_id = openai_context_dict["id"] 63 | 64 | reminder = threading.Timer(delay, self.reminder_after, args=(content, conversation_id, delay)) 65 | reminder.start() 66 | return "The reminder has been set." 67 | 68 | def reminder_after(self, content: str, conversation_id: int | str, delay: int) -> None: 69 | content = f"You(!not the user!) set a reminder {str(delay)} seconds ago, and the {str(delay)} seconds have elapsed. The time is up.\n==========REMINDER CONTENT BEGIN==========\n" + content 70 | content += "\n==========REMINDER CONTENT END==========\nThis message is a reminder for you, not for the user. 
You should handle this message appropriately based on the conversation history." 71 | openai_chat_manager = self.manager.client.openai 72 | conversation = openai_chat_manager.conversation_dict.get(conversation_id) 73 | if conversation is None: 74 | self.manager.client.query_one("#status_region").update(Text("You have a reminder whose time has come, but it seems the conversation has been closed.", "yellow")) 75 | gptui_logger.info("You have a reminder whose time has come, but it seems the conversation has been closed."); return  # Without this return, the closed (None) conversation would be subscripted below and raise a TypeError. 76 | openai_context = conversation["openai_context"] 77 | self.manager.client.query_one("#chat_tabs").active = "lqt" + str(conversation_id) 78 | time.sleep(1) 79 | functions = self.manager.available_functions_meta 80 | messages_list = [ 81 | { 82 | "role": "function", 83 | "name": "snooze_reminder", 84 | "content": content, 85 | } 86 | ] 87 | response_auxiliary_message_signal.send( 88 | self, 89 | message={ 90 | "content":{ 91 | "role": "function", 92 | "name": "snooze_reminder", 93 | "content": content, 94 | }, 95 | "flag": "function_call", 96 | } 97 | ) 98 | 99 | try: 100 | response = chat_service_for_inner( 101 | messages_list=messages_list, 102 | context=openai_context, 103 | openai_api=self.openai_api, 104 | functions=functions, 105 | function_call="auto", 106 | ) 107 | except Exception as e: 108 | OpenaiErrorHandler().openai_error_handle(error=e, context=openai_context); return  # 'response' is unbound when the request fails, so stop here. 109 | 110 | openai_chat_manager.openai_chat.chat_messages_extend(messages_list=messages_list, context=openai_context) 111 | 112 | ResponseJob = self.manager.get_job("ResponseJob") 113 | callback = Callback( 114 | at_job_start=[ 115 | { 116 | "function": notification_signal.send, 117 | "params": { 118 | "args": (self,), 119 | "kwargs": { 120 | "_async_wrapper": async_wrapper_with_loop, 121 | "message":{ 122 | "content":{ 123 | "content":{"status":True}, 124 | "description":"Commander status changed", 125 | }, 126 | "flag":"info", 127 | }, 128 | }, 129 | }, 130 | }, 131 | ], 132 | at_commander_end=[ 133 | { 134 | "function": notification_signal.send_async, 135 | "params": { 136 | "args": (self,), 137 | "kwargs": { 138 | "_sync_wrapper": sync_wrapper, 139 | "message":{ 140 | "content":{ 141 | "content":{"status":False}, 142 | "description":"Commander status changed", 143 | }, 144 | "flag":"info", 145 | }, 146 | }, 147 | }, 148 | }, 149 | ], 150 | ) 151 | job = ResponseJob(manager=self.manager, response=response, context=openai_context, callback=callback) 152 | self.manager.gk_kernel.commander.async_commander_run(job) 153 | -------------------------------------------------------------------------------- /src/gptui/controllers/dash_board_control.py: -------------------------------------------------------------------------------- 1 | import bisect 2 | import math 3 | 4 | from ..models.context import OpenaiContext 5 | from ..utils.my_text import MyText as Text 6 | 7 | 8 | class DashBoard: 9 | def __init__(self, app): 10 | self.app = app 11 | 12 | def dash_board_display(self, tokens_num_window: int, conversation_id: int | None = None): 13 | "Display the token's monitor in dashboard" 14 | if conversation_id is None: 15 | conversation_id = self.app.openai.conversation_active 16 | conversation = self.app.openai.conversation_dict[conversation_id] 17 | self.display(tokens_num_window=tokens_num_window, openai_context=conversation["openai_context"]) 18 | 19 | def group_talk_dash_board_display(self, tokens_num_window: int, conversation_id: int | None = None): 20 | "Display the token's monitor for group talk in dashboard"
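# The first role's context is used as a representative sample of the group's token usage.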
21 | if conversation_id is None: 22 | conversation_id = self.app.openai.group_talk_conversation_active 23 | conversation = self.app.openai.group_talk_conversation_dict[conversation_id] 24 | roles = list(conversation["group_talk_manager"].roles.values()) 25 | if roles: 26 | first_role = roles[0] 27 | self.display(tokens_num_window=tokens_num_window, openai_context=first_role.context) 28 | 29 | def display(self, tokens_num_window: int, openai_context: OpenaiContext): 30 | "Display the token's monitor in dashboard" 31 | 32 | def is_inside_segment(position: int, start_positions: list[int], end_positions:list[int]): 33 | # Use bisect to find the position where coord should be inserted 34 | index = bisect.bisect_right(start_positions, position) - 1 35 | # If index is -1, it means coord is smaller than the starting position of all segments 36 | if index == -1: 37 | return False 38 | # Check if coord is within the found segment 39 | return start_positions[index] <= position <= end_positions[index] 40 | 41 | display = self.app.main_screen.query_one("#dash_board") 42 | height = display.content_size.height 43 | 44 | if tokens_num_window == 0: 45 | display.update(Text("ಠ_ಠ\n" * height, "yellow")) 46 | return 47 | 48 | tokens_num = openai_context.tokens_num 49 | assert tokens_num is not None 50 | tokens_num_list = openai_context.tokens_num_list 51 | bead_index_list = openai_context.bead_info["positions"] 52 | bead_tokens_list = [sum(tokens_num_list[:index]) for index in bead_index_list] 53 | bead_length_list = openai_context.bead_info["lengths"] 54 | 55 | tokens_proportion = tokens_num / tokens_num_window 56 | bead_positions_ratio_list_start = [num / tokens_num for num in bead_tokens_list] 57 | bead_length_ratio_list = [num / tokens_num for num in bead_length_list] 58 | bead_positions_ratio_list_end = [sum(i) for i in zip(bead_positions_ratio_list_start, bead_length_ratio_list)] 59 | indicator_content_left = [] 60 | indicator_content_middle = [] 61 | indicator_content_right = [] 62 | # dashboard left 63 | if tokens_proportion < 1: 64 | indicator_tokens_num = math.floor(tokens_proportion * height) 65 | bead_positions_indicator_left_list_start = [math.ceil(height - indicator_tokens_num + i * indicator_tokens_num) for i in bead_positions_ratio_list_start] 66 | bead_positions_indicator_left_list_end = [math.ceil(height - indicator_tokens_num + i * indicator_tokens_num) for i in bead_positions_ratio_list_end] 67 | for position in range(height): 68 | if position <= height - indicator_tokens_num: 69 | indicator_content_left.append(Text(" ")) 70 | else: 71 | status = is_inside_segment(position, start_positions=bead_positions_indicator_left_list_start, end_positions=bead_positions_indicator_left_list_end) 72 | if status is True: 73 | indicator_content_left.append(Text(u'\u00b7', "yellow")) 74 | else: 75 | indicator_content_left.append(Text("-", "green")) 76 | else: 77 | indicator_tokens_num = math.ceil(1 / tokens_proportion * height) 78 | bead_positions_indicator_left_list_start = [math.floor(i * indicator_tokens_num) for i in bead_positions_ratio_list_start] 79 | bead_positions_indicator_left_list_end = [math.floor(i * indicator_tokens_num) for i in bead_positions_ratio_list_end] 80 | for position in range(height): 81 | if position <= indicator_tokens_num: 82 | status = is_inside_segment(position, start_positions=bead_positions_indicator_left_list_start, end_positions=bead_positions_indicator_left_list_end) 83 | if status is True: 84 | indicator_content_left.append(Text(u'\u00b7', "yellow")) 85 | else: 86 | 
indicator_content_left.append(Text("-", "green")) 87 | else: 88 | indicator_content_left.append(Text(" ")) 89 | 90 | # dashboard middle 91 | if tokens_proportion < 1: 92 | indicator_content_middle = indicator_content_left 93 | else: 94 | bead_positions_indicator_middle_list_start = [round(i * height) for i in bead_positions_ratio_list_start] 95 | bead_positions_indicator_middle_list_end = [round(i * height) for i in bead_positions_ratio_list_end] 96 | for position in range(height): 97 | status = is_inside_segment(position, start_positions=bead_positions_indicator_middle_list_start, end_positions=bead_positions_indicator_middle_list_end) 98 | if status is True: 99 | indicator_content_middle.append(Text(u'\u00b7', "yellow")) 100 | else: 101 | indicator_content_middle.append(Text("-", "green")) 102 | 103 | # dashboard right 104 | if tokens_proportion < 1: 105 | indicator_content_right = indicator_content_left 106 | else: 107 | bead_positions_indicator_right_list_start = [height - round((tokens_num - i) / tokens_num_window * height) for i in bead_tokens_list] 108 | bead_tokens_list_end = [sum(i) for i in zip(bead_tokens_list, bead_length_list)] 109 | bead_positions_indicator_right_list_end = [height - round((tokens_num - i) / tokens_num_window * height) for i in bead_tokens_list_end] 110 | for position in range(height): 111 | status = is_inside_segment(position, start_positions=bead_positions_indicator_right_list_start, end_positions=bead_positions_indicator_right_list_end) 112 | if status is True: 113 | indicator_content_right.append(Text(u'\u00b7', "yellow")) 114 | else: 115 | indicator_content_right.append(Text("-", "green")) 116 | indicator_content = Text('') 117 | 118 | for i in range(height): 119 | indicator_content = indicator_content + indicator_content_left[i] + indicator_content_middle[i] + indicator_content_right[i] + Text('\n') 120 | display.update(indicator_content) 121 | -------------------------------------------------------------------------------- /docs/configuration.md: -------------------------------------------------------------------------------- 1 | ## Configuration Guide 2 | 3 | GPTUI offers a wide array of configurable options, utilizing the YAML file format. 4 | To understand the basic syntax of YAML, you can visit [here](https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html). 5 | The actual configuration options and default values adhere to the contents of the configuration file, 6 | and this document might not be as current as the updates to the actual configuration file. 7 | 8 | Within the configuration file, options that are commented out indicate that they have default configurations 9 | and may not require modification. However, please note that when modifying the value of a list, 10 | the list is replaced as a whole. This means you cannot override just a part of a list that has default configurations, 11 | as this would clear the other options. For example, to set the `status_region_default` to "GPTUI Welcome",
For example, to set the status_region_default to “GPTUI Welcome", 12 | given its following default configuration: 13 | ``` 14 | #tui_config: 15 | # conversations_recover: true 16 | # voice_switch: false 17 | # speak_switch: false 18 | # file_wrap_display: true 19 | # ai_care_switch: true 20 | # ai_care_depth: 2 21 | # ai_care_delay: 60 22 | # status_region_default: 23 | # waiting_receive_animation: "default" 24 | ``` 25 | You would need to modify the entire tui_config list to: 26 | ``` 27 | tui_config: 28 | conversations_recover: true 29 | voice_switch: false 30 | speak_switch: false 31 | file_wrap_display: true 32 | ai_care_switch: true 33 | ai_care_depth: 2 34 | ai_care_delay: 60 35 | status_region_default: 36 | waiting_receive_animation: "GPTUI Welcome" 37 | ``` 38 | Instead of this: 39 | ``` 40 | tui_config: 41 | # conversations_recover: true 42 | # voice_switch: false 43 | # speak_switch: false 44 | # file_wrap_display: true 45 | # ai_care_switch: true 46 | # ai_care_depth: 2 47 | # ai_care_delay: 60 48 | # status_region_default: 49 | waiting_receive_animation: "default" 50 | ``` 51 | 52 | ## Resetting to Default Configuration 53 | 54 | You can simply delete the configuration file, for instance `rm ~/.gptui/.config.yml`, and the program will automatically 55 | re-download the default configuration file upon the next launch. 56 | For the configuration file search strategy, refer to [here](https://github.com/happyapplehorse/gptui/blob/main/README.md#installation). 57 | 58 | ## Configuration Options 59 | 60 | Currently, you can configure the following: 61 | 62 | ### GPTUI_BASIC_SERVICES_PATH 63 | 64 | This is the directory for GPTUI's basic service components. It should not be changed without modifying the source code. 65 | 66 | ### PLUGIN_PATH 67 | 68 | This is the path for GPTUI's built-in plugins. It should not be changed without modifying the source code. 69 | 70 | ### DEFAULT_PLUGIN_PATH 71 | 72 | This is the path for GPTUI's built-in default plugins, which are not shown in the plugin list and are automatically activated. 73 | It should not be changed without modifying the source code. 74 | 75 | ### custom_plugin_path 76 | 77 | This is the directory for GPTUI's custom plugins and can be modified. The default value is `~/.gptui/plugins/`. 78 | 79 | ### dot_env_path 80 | 81 | This setting specifies the path of the file configuring environment variables, where API keys are configured. 82 | The default value is `~/.gptui/.env_gptui`. 83 | 84 | ### default_openai_parameters 85 | 86 | This option is a dictionary used to specify default parameter configurations when using GPT for chatting. 87 | 88 | ### default_conversation_parameters 89 | 90 | This option is a dictionary used to specify GPTUI's default conversation parameter settings. 91 | - `max_sent_tokens_ratio`: A float value that sets the ratio of the maximum number of sent tokens to the 92 | entire model token window. For instance, if the model's token window size is 1000, and this parameter is set to 0.6, 93 | then when the chat context tokens to be sent exceed 600, it will automatically truncate to below 600. 94 | The remaining 400 tokens will then serve as the window for the model's response tokens. This setting is crucial 95 | as the model's token window is the sum of sent and received token numbers, and without this setting, 96 | there's a risk that sent tokens might occupy too much context length, leading to the model being unable to 97 | respond or providing incomplete responses. 
98 | 99 | ### tui_config 100 | 101 | This option is a dictionary used for configuring GPTUI's default settings. 102 | - `conversations_recover`: A boolean value, sets the default state of GPTUI’s “Recovery” switch, 103 | determining whether to automatically save and recover GPTUI's state. 104 | - `voice_switch`: A boolean value, sets the default state of GPTUI’s “Voice” switch, 105 | determining whether to enable the voice conversation feature. 106 | - `speak_switch`: A boolean value, sets the default state of GPTUI’s “Speak” switch, 107 | determining whether to enable the feature to read out response content. 108 | - `file_wrap_display`: A boolean value, sets the default state of GPTUI’s “Fold File” switch, 109 | determining whether to enable the automatic folding of file content into a file icon. 110 | - `ai_care_switch`: A boolean value, sets the default state of GPTUI’s “AI-Care” switch, 111 | determining whether to enable the AI-Care feature. 112 | - `ai_care_depth`: An integer value, sets the maximum number of proactive speaking turns AI-Care can take 113 | in the absence of user response. 114 | - `ai_care_delay`: An integer value in seconds, sets the delay before AI-Care activates after a conversation finishes. 115 | AI-Care will only kick in after this delay post a completed conversation. 116 | - `status_region_default`: A string value, sets the default content displayed in the status region. 117 | - `waiting_receive_animation`: A specific string type, sets the type of waiting animation. The default value is `“default”`. 118 | 119 | ### log_path 120 | 121 | Sets the path for the log file. Default is `~/.gptui/logs.log`. 122 | 123 | ### workpath 124 | 125 | Sets the working path for GPTUI. The default is `~/.gptui/user`, where default vector databases and temporary files, 126 | among others, will be stored. 127 | 128 | ### directory_tree_path 129 | 130 | The root directory of the filesystem that GPTUI can display. The default value is `~/`. 131 | When importing and exporting files, GPTUI can only display files and folders under this directory. 132 | 133 | ### conversation_path 134 | 135 | Sets the file path for exporting and importing GPTUI conversation records. The default value is `~/.gptui/user/conversations`. 136 | 137 | ### vector_memory_path 138 | 139 | Sets the path for the vector database, the default being `~/.gptui/user/vector_memory_database`. 140 | 141 | ### terminal 142 | 143 | Sets the terminal being used, with tested terminals including `termux`, `wezterm`. 144 | 145 | ### os 146 | 147 | Sets the platform being used, offering four options: 148 | - termux 149 | - linux 150 | - macos 151 | - windows 152 | 153 | Since termux is not a complete Linux system, it's treated as a separate option. 154 | 155 | ### default_plugins_used 156 | 157 | This option is a list setting the default active state for plugins, including both built-in and custom plugins. 158 | 159 | ### location_city 160 | 161 | Sets your geographical location to allow the LLM to access your location information. 162 | This can be set to your city name or left unset. 163 | 164 | ### log_level 165 | 166 | Sets the log printing level. 167 | 168 | ### openai_model_info 169 | 170 | This option is a dictionary storing information for various models, with the model's tokens_window set here. 
171 | For example: 172 | ``` 173 | openai_model_info: 174 | gpt-4-1106-preview: 175 | tokens_window: 128000 176 | gpt-4-0613: 177 | tokens_window: 8192 178 | ``` 179 | --------------------------------------------------------------------------------