├── .env.example ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── chatgpt_memory ├── __init__.py ├── constants.py ├── datastore │ ├── __init__.py │ ├── config.py │ ├── datastore.py │ └── redis.py ├── environment.py ├── errors.py ├── llm_client │ ├── __init__.py │ ├── config.py │ ├── llm_client.py │ └── openai │ │ ├── __init__.py │ │ ├── conversation │ │ ├── __init__.py │ │ ├── chatgpt_client.py │ │ └── config.py │ │ └── embedding │ │ ├── __init__.py │ │ ├── config.py │ │ └── embedding_client.py ├── memory │ ├── __init__.py │ ├── manager.py │ └── memory.py └── utils │ ├── openai_utils.py │ └── reflection.py ├── examples └── simple_usage.py ├── pyproject.toml ├── rest_api.py ├── tests ├── conftest.py ├── test_llm_embedding_client.py ├── test_memory_manager.py └── test_redis_datastore.py └── ui.py /.env.example: -------------------------------------------------------------------------------- 1 | REMOTE_API_TIMEOUT_SEC=30 2 | REMOTE_API_BACKOFF_SEC=10 3 | REMOTE_API_MAX_RETRIES=5 4 | OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 5 | 6 | # Cloud data store (Redis, Pinecone etc.) 7 | REDIS_HOST=localhost 8 | REDIS_PORT=1234 9 | REDIS_PASSWORD=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | 132 | .DS_Store 133 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | ci: 2 | autofix_prs: true 3 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' 4 | autoupdate_schedule: quarterly 5 | 6 | repos: 7 | - repo: https://github.com/pre-commit/pre-commit-hooks 8 | rev: v4.4.0 9 | hooks: 10 | - id: trailing-whitespace 11 | - id: end-of-file-fixer 12 | - id: check-added-large-files 13 | - id: check-yaml 14 | - id: check-json 15 | - id: check-toml 16 | - id: debug-statements 17 | - id: detect-private-key 18 | - id: requirements-txt-fixer 19 | 20 | - repo: https://github.com/pycqa/isort 21 | rev: 5.12.0 22 | hooks: 23 | - id: isort 24 | name: isort (python) 25 | args: ["--profile", "black"] 26 | 27 | - repo: https://github.com/psf/black 28 | rev: 23.1.0 29 | hooks: 30 | - id: black 31 | # It is recommended to specify the latest version of Python 32 | # supported by your project here, or alternatively use 33 | # pre-commit's default_language_version, see 34 | # https://pre-commit.com/#top_level-default_language_version 35 | language_version: python3.10 36 | 37 | - repo: https://github.com/charliermarsh/ruff-pre-commit 38 | rev: v0.0.255 39 | hooks: 40 | - id: ruff 41 | args: ["--fix"] 42 | 43 | - repo: https://github.com/pre-commit/mirrors-mypy 44 | rev: v1.1.1 45 | hooks: 46 | - id: mypy 47 | args: [--no-strict-optional, --ignore-missing-imports] 48 | additional_dependencies: 49 | - types-requests 50 | - types-redis 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | *Development on this repository has been discontinued. Please check out OpenAI's retrieval plugin instead: https://github.com/openai/chatgpt-retrieval-plugin* 2 | 3 | # ChatGPT Memory 4 | 5 | Scales the ChatGPT API to multiple simultaneous sessions with infinite contextual and adaptive memory powered by GPT and a Redis datastore. This can be visualized as follows: 6 |
7 | *(Architecture diagram omitted: ChatGPT sessions backed by a Redis vector datastore that stores and retrieves conversation memory.)* 8 | 9 | 10 | 11 | 12 | 13 | ## Getting Started 14 | 15 | 1. Create your free `Redis` datastore [here](https://redis.com/try-free/). 16 | 2. Get your `OpenAI` API key [here](https://platform.openai.com/overview). 17 | 3. Install dependencies using `poetry`. 18 | 19 | ```bash 20 | poetry install 21 | ``` 22 | 23 | ### Use with UI 24 | *(UI screenshot omitted.)* 25 | 26 | 27 | 28 | Start the FastAPI webserver. 29 | ```bash 30 | poetry run uvicorn rest_api:app --host 0.0.0.0 --port 8000 31 | ``` 32 | 33 | Run the UI. 34 | ```bash 35 | poetry run streamlit run ui.py 36 | ``` 37 | 38 | ### Use with Terminal 39 | 40 | The library is highly modular. In the following, we describe the usage of each component (visualized above). 41 | 42 | First, set the required environment variables before running your script. 43 | Using a `.env` file for this is optional but recommended; see the `.env.example` file for an example. 44 | 45 | ```python 46 | from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT 47 | ``` 48 | 49 | Create an instance of the `RedisDataStore` class with the `RedisDataStoreConfig` configuration. 50 | 51 | ```python 52 | from chatgpt_memory.datastore import RedisDataStoreConfig, RedisDataStore 53 | 54 | 55 | redis_datastore_config = RedisDataStoreConfig( 56 | host=REDIS_HOST, 57 | port=REDIS_PORT, 58 | password=REDIS_PASSWORD, 59 | ) 60 | redis_datastore = RedisDataStore(config=redis_datastore_config) 61 | ``` 62 | 63 | Create an instance of the `EmbeddingClient` class with the `EmbeddingConfig` configuration. 64 | 65 | ```python 66 | from chatgpt_memory.llm_client import EmbeddingConfig, EmbeddingClient 67 | 68 | embedding_config = EmbeddingConfig(api_key=OPENAI_API_KEY) 69 | embed_client = EmbeddingClient(config=embedding_config) 70 | ``` 71 | 72 | Create an instance of the `MemoryManager` class with the Redis datastore and Embedding client instances, and the `topk` value. 73 | 74 | ```python 75 | from chatgpt_memory.memory.manager import MemoryManager 76 | 77 | memory_manager = MemoryManager(datastore=redis_datastore, embed_client=embed_client, topk=1) 78 | ``` 79 | 80 | Create an instance of the `ChatGPTClient` class with the `ChatGPTConfig` configuration and the `MemoryManager` instance. 81 | 82 | ```python 83 | from chatgpt_memory.llm_client import ChatGPTClient, ChatGPTConfig 84 | 85 | chat_gpt_client = ChatGPTClient( 86 | config=ChatGPTConfig(api_key=OPENAI_API_KEY, verbose=True), memory_manager=memory_manager 87 | ) 88 | ``` 89 | 90 | Start the conversation by providing user messages to the `converse` method of the `ChatGPTClient` instance. 91 | 92 | ```python 93 | conversation_id = None 94 | while True: 95 | user_message = input("\n Please enter your message: ") 96 | response = chat_gpt_client.converse(message=user_message, conversation_id=conversation_id) 97 | conversation_id = response.conversation_id 98 | print(response.chat_gpt_answer) 99 | ``` 100 | 101 | This will allow you to talk to the AI assistant and extend its memory by using an external Redis datastore. 102 | 103 | ### Putting it together 104 | 105 | Here's all of the above put together.
You can also find it under [`examples/simple_usage.py`](examples/simple_usage.py). 106 | 107 | ```python 108 | ## Set the following environment variables before running this script 109 | # Import necessary modules 110 | from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT 111 | from chatgpt_memory.datastore import RedisDataStoreConfig, RedisDataStore 112 | from chatgpt_memory.llm_client import ChatGPTClient, ChatGPTConfig, EmbeddingConfig, EmbeddingClient 113 | from chatgpt_memory.memory import MemoryManager 114 | 115 | # Instantiate an EmbeddingConfig object with the OpenAI API key 116 | embedding_config = EmbeddingConfig(api_key=OPENAI_API_KEY) 117 | 118 | # Instantiate an EmbeddingClient object with the EmbeddingConfig object 119 | embed_client = EmbeddingClient(config=embedding_config) 120 | 121 | # Instantiate a RedisDataStoreConfig object with the Redis connection details 122 | redis_datastore_config = RedisDataStoreConfig( 123 | host=REDIS_HOST, 124 | port=REDIS_PORT, 125 | password=REDIS_PASSWORD, 126 | ) 127 | 128 | # Instantiate a RedisDataStore object with the RedisDataStoreConfig object 129 | redis_datastore = RedisDataStore(config=redis_datastore_config) 130 | 131 | # Instantiate a MemoryManager object with the RedisDataStore object and EmbeddingClient object 132 | memory_manager = MemoryManager(datastore=redis_datastore, embed_client=embed_client, topk=1) 133 | 134 | # Instantiate a ChatGPTConfig object with the OpenAI API key and verbose set to True 135 | chat_gpt_config = ChatGPTConfig(api_key=OPENAI_API_KEY, verbose=True) 136 | 137 | # Instantiate a ChatGPTClient object with the ChatGPTConfig object and MemoryManager object 138 | chat_gpt_client = ChatGPTClient( 139 | config=chat_gpt_config, 140 | memory_manager=memory_manager 141 | ) 142 | 143 | # Initialize conversation_id to None 144 | conversation_id = None 145 | 146 | # Start the chatbot loop 147 | while True: 148 | # Prompt the user for input 149 | user_message = input("\n Please enter your message: ") 150 | 151 | 152 | # Use the ChatGPTClient object to generate a response 153 | response = chat_gpt_client.converse(message=user_message, conversation_id=conversation_id) 154 | 155 | # Update the conversation_id with the conversation_id from the response 156 | conversation_id = response.conversation_id 157 | 158 | 159 | # Print the response generated by the chatbot 160 | print(response.chat_gpt_answer) 161 | ``` 162 |
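### Managing stored memories

The `MemoryManager` also exposes housekeeping methods that the walkthrough above does not cover. The following is a minimal sketch, assuming the `redis_datastore` and `memory_manager` objects constructed earlier; the conversation id used is simply whatever `get_all_conversation_ids` returns.

```python
from chatgpt_memory.memory import Memory

# List the ids of all conversations currently stored in Redis.
conversation_ids = redis_datastore.get_all_conversation_ids()

# Delete a single conversation and all of its indexed messages.
if conversation_ids:
    memory_manager.remove_conversation(Memory(conversation_id=conversation_ids[0]))

# Or drop everything the manager knows about.
memory_manager.clear()
```

Note that `clear()` (and the underlying `RedisDataStore.flush_all_documents()`) calls `flushall` on the Redis connection, so it wipes the entire Redis database, not just this library's index.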
163 | # Acknowledgments 164 | 165 | UI has been added thanks to the awesome work by [avrabyt/MemoryBot](https://github.com/avrabyt/MemoryBot). 166 | -------------------------------------------------------------------------------- /chatgpt_memory/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/continuum-llms/chatgpt-memory/51f57a6dedd21e350012251f633366731972a927/chatgpt_memory/__init__.py -------------------------------------------------------------------------------- /chatgpt_memory/constants.py: -------------------------------------------------------------------------------- 1 | # LLM Config related 2 | """ 3 | If the OpenAI embedding model type is "*-001", set the max sequence length to `2046`; 4 | otherwise, for type "*-002", set it to `8191`. 5 | """ 6 | MAX_ALLOWED_SEQ_LEN_001 = 2046 7 | MAX_ALLOWED_SEQ_LEN_002 = 8191 8 | -------------------------------------------------------------------------------- /chatgpt_memory/datastore/__init__.py: -------------------------------------------------------------------------------- 1 | from chatgpt_memory.datastore.config import DataStoreConfig, RedisDataStoreConfig, RedisIndexType # noqa: F401 2 | from chatgpt_memory.datastore.redis import RedisDataStore # noqa: F401 3 | -------------------------------------------------------------------------------- /chatgpt_memory/datastore/config.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | from pydantic import BaseModel 4 | 5 | 6 | class RedisIndexType(Enum): 7 | hnsw = "HNSW" 8 | flat = "FLAT" 9 | 10 | 11 | class DataStoreConfig(BaseModel): 12 | host: str 13 | port: int 14 | password: str 15 | 16 | 17 | class RedisDataStoreConfig(DataStoreConfig): 18 | index_type: str = RedisIndexType.hnsw.value 19 | vector_field_name: str = "embedding" 20 | vector_dimensions: int = 1024 21 | distance_metric: str = "L2" 22 | number_of_vectors: int = 686 23 | M: int = 40 24 | EF: int = 200 25 |
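A note on the config fields above: `M` and `EF` (written to the index as `EF_CONSTRUCTION`) are HNSW graph-construction parameters, `number_of_vectors` seeds the index capacity (`INITIAL_CAP`), and `distance_metric` selects how RediSearch compares vectors. A minimal sketch of overriding the defaults; the connection values here are illustrative only:

```python
from chatgpt_memory.datastore import RedisDataStoreConfig

config = RedisDataStoreConfig(
    host="localhost",          # illustrative connection details
    port=6379,
    password="",
    vector_dimensions=1024,    # must match the embedding model's output size
    distance_metric="COSINE",  # RediSearch also supports "L2" (the default here) and "IP"
)
```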
-------------------------------------------------------------------------------- /chatgpt_memory/datastore/datastore.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Dict, List 3 | 4 | from chatgpt_memory.datastore.config import DataStoreConfig 5 | 6 | 7 | class DataStore(ABC): 8 | """ 9 | Abstract class for datastores. 10 | """ 11 | 12 | def __init__(self, config: DataStoreConfig): 13 | self.config = config 14 | 15 | @abstractmethod 16 | def connect(self): 17 | raise NotImplementedError 18 | 19 | @abstractmethod 20 | def create_index(self): 21 | raise NotImplementedError 22 | 23 | @abstractmethod 24 | def index_documents(self, documents: List[Dict]): 25 | raise NotImplementedError 26 | 27 | @abstractmethod 28 | def search_documents(self, query_vector: Any, conversation_id: str, topk: int) -> List[Any]: 29 | raise NotImplementedError 30 | -------------------------------------------------------------------------------- /chatgpt_memory/datastore/redis.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any, Dict, List 3 | from uuid import uuid4 4 | 5 | import redis 6 | from redis.commands.search.field import TagField, TextField, VectorField 7 | from redis.commands.search.query import Query 8 | 9 | from chatgpt_memory.datastore.config import RedisDataStoreConfig 10 | from chatgpt_memory.datastore.datastore import DataStore 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class RedisDataStore(DataStore): 16 | def __init__(self, config: RedisDataStoreConfig, do_flush_data: bool = False): 17 | super().__init__(config=config) 18 | self.config = config 19 | self.do_flush_data = do_flush_data 20 | 21 | self.connect() 22 | self.create_index() 23 | 24 | def connect(self): 25 | """ 26 | Connect to the Redis server. 27 | """ 28 | connection_pool = redis.ConnectionPool( 29 | host=self.config.host, port=self.config.port, password=self.config.password 30 | ) 31 | self.redis_connection = redis.Redis(connection_pool=connection_pool) 32 | 33 | # flush data only once after establishing connection 34 | if self.do_flush_data: 35 | self.flush_all_documents() 36 | self.do_flush_data = False 37 | 38 | def flush_all_documents(self): 39 | """ 40 | Removes all documents by flushing the entire Redis database. 41 | """ 42 | self.redis_connection.flushall() 43 | 44 | def create_index(self): 45 | """ 46 | Creates a Redis index with a dense vector field. 47 | """ 48 | try: 49 | self.redis_connection.ft().create_index( 50 | [ 51 | VectorField( 52 | self.config.vector_field_name, 53 | self.config.index_type, 54 | { 55 | "TYPE": "FLOAT32", 56 | "DIM": self.config.vector_dimensions, 57 | "DISTANCE_METRIC": self.config.distance_metric, 58 | "INITIAL_CAP": self.config.number_of_vectors, 59 | "M": self.config.M, 60 | "EF_CONSTRUCTION": self.config.EF, 61 | }, 62 | ), 63 | TextField("text"), # contains the original message 64 | TagField("conversation_id"), # `conversation_id` for each session 65 | ] 66 | ) 67 | logger.info("Created a new Redis index for storing chat history") 68 | except redis.exceptions.ResponseError as redis_error: 69 | logger.info(f"Working with existing Redis index.\nDetails: {redis_error}") 70 | 71 | def index_documents(self, documents: List[Dict]): 72 | """ 73 | Indexes the set of documents. 74 | 75 | Args: 76 | documents (List[Dict]): List of documents to be indexed.
77 | """ 78 | redis_pipeline = self.redis_connection.pipeline(transaction=False) 79 | for document in documents: 80 | assert ( 81 | "text" in document and "conversation_id" in document 82 | ), "Document must include the fields `text` and `conversation_id`" 83 | redis_pipeline.hset(uuid4().hex, mapping=document) 84 | redis_pipeline.execute() 85 | 86 | def search_documents( 87 | self, 88 | query_vector: bytes, 89 | conversation_id: str, 90 | topk: int = 5, 91 | ) -> List[Any]: 92 | """ 93 | Searches the redis index using the query vector. 94 | 95 | Args: 96 | query_vector (bytes): Embedded query vector. 97 | conversation_id (str): Id of the conversation to restrict the search to. 98 | topk (int, optional): Number of results. Defaults to 5. 99 | 100 | 101 | Returns: 102 | List[Any]: Search result documents. 103 | """ 104 | query = ( 105 | Query( 106 | f"""(@conversation_id:{{{conversation_id}}})=>[KNN {topk} \ 107 | @{self.config.vector_field_name} $vec_param AS vector_score]""" 108 | ) 109 | .sort_by("vector_score") 110 | .paging(0, topk) 111 | .return_fields( 112 | # fields to include in each returned document 113 | "conversation_id", 114 | "vector_score", 115 | "text", 116 | ) 117 | .dialect(2) 118 | ) 119 | params_dict = {"vec_param": query_vector} 120 | result_documents = self.redis_connection.ft().search(query, query_params=params_dict).docs 121 | 122 | return result_documents 123 | 124 | def get_all_conversation_ids(self) -> List[str]: 125 | """ 126 | Returns conversation ids of all conversations. 127 | 128 | Returns: 129 | List[str]: List of conversation ids stored in redis. 130 | """ 131 | query = Query("*").return_fields("conversation_id") 132 | result_documents = self.redis_connection.ft().search(query).docs 133 | 134 | conversation_ids: List[str] = [] 135 | conversation_ids = list( 136 | set([getattr(result_document, "conversation_id") for result_document in result_documents]) 137 | ) 138 | 139 | return conversation_ids 140 | 141 | def delete_documents(self, conversation_id: str): 142 | """ 143 | Deletes all documents for a given conversation id. 144 | 145 | Args: 146 | conversation_id (str): Id of the conversation to be deleted. 147 | """ 148 | query = ( 149 | Query(f"""(@conversation_id:{{{conversation_id}}})""") 150 | .return_fields( 151 | "id", 152 | ) 153 | .dialect(2) 154 | ) 155 | for document in self.redis_connection.ft().search(query).docs: 156 | document_id = getattr(document, "id") 157 | deletion_status = self.redis_connection.ft().delete_document(document_id, delete_actual_document=True) 158 | 159 | assert deletion_status, f"Deletion of the document with id {document_id} failed!" 160 |
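The datastore can also be driven directly, without going through the `MemoryManager`. Below is a minimal sketch, assuming a reachable Redis instance with the RediSearch module enabled and illustrative connection details; the random vector merely stands in for a real OpenAI embedding:

```python
import numpy as np

from chatgpt_memory.datastore import RedisDataStore, RedisDataStoreConfig

# Connecting also creates the index (or reuses an existing one).
datastore = RedisDataStore(config=RedisDataStoreConfig(host="localhost", port=6379, password=""))

# Index one message; the embedding must be raw float32 bytes whose length
# matches `vector_dimensions` (1024 by default).
embedding = np.random.rand(1024).astype(np.float32).tobytes()
datastore.index_documents(
    documents=[{"text": "Human: hi\nAssistant: hello!", "conversation_id": "demo", "embedding": embedding}]
)

# KNN search restricted to the same conversation.
results = datastore.search_documents(query_vector=embedding, conversation_id="demo", topk=1)
print([doc.text for doc in results])
```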
-------------------------------------------------------------------------------- /chatgpt_memory/environment.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import dotenv 4 | 5 | # Load environment variables from .env file 6 | _TESTING = os.getenv("CHATGPT_MEMORY_TESTING", False) 7 | if _TESTING: 8 | # for testing we use the .env.example file instead 9 | dotenv.load_dotenv(dotenv.find_dotenv(".env.example")) 10 | else: 11 | dotenv.load_dotenv() 12 | 13 | # Any remote API (OpenAI, Cohere etc.) 14 | OPENAI_TIMEOUT = float(os.getenv("REMOTE_API_TIMEOUT_SEC", 30)) 15 | OPENAI_BACKOFF = float(os.getenv("REMOTE_API_BACKOFF_SEC", 10)) 16 | OPENAI_MAX_RETRIES = int(os.getenv("REMOTE_API_MAX_RETRIES", 5)) 17 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 18 | 19 | # Cloud data store (Redis, Pinecone etc.) 20 | REDIS_HOST = os.getenv("REDIS_HOST") 21 | REDIS_PORT = int(os.getenv("REDIS_PORT")) 22 | REDIS_PASSWORD = os.getenv("REDIS_PASSWORD") 23 | -------------------------------------------------------------------------------- /chatgpt_memory/errors.py: -------------------------------------------------------------------------------- 1 | """Custom Errors for ChatGptMemory""" 2 | 3 | from typing import Optional 4 | 5 | 6 | class ChatGPTMemoryError(Exception): 7 | """ 8 | Any error generated by ChatGptMemory. 9 | 10 | This error wraps its source transparently in such a way that its attributes 11 | can be accessed directly: for example, if the original error has a `message` 12 | attribute. 13 | """ 14 | 15 | def __init__( 16 | self, 17 | message: Optional[str] = None, 18 | ): 19 | super().__init__() 20 | if message: 21 | self.message = message 22 | 23 | def __getattr__(self, attr): 24 | # If self.__cause__ is None, this raises the expected AttributeError 25 | return getattr(self.__cause__, attr) 26 | 27 | def __repr__(self): 28 | return str(self) 29 | 30 | 31 | class OpenAIError(ChatGPTMemoryError): 32 | """Exception for issues that occur in the OpenAI APIs""" 33 | 34 | def __init__( 35 | self, 36 | message: Optional[str] = None, 37 | status_code: Optional[int] = None, 38 | ): 39 | super().__init__(message=message) 40 | self.status_code = status_code 41 | 42 | 43 | class OpenAIRateLimitError(OpenAIError): 44 | """ 45 | Rate limit error for OpenAI API (status code 429). See: 46 | https://help.openai.com/en/articles/5955604-how-can-i-solve-429-too-many-requests-errors 47 | https://help.openai.com/en/articles/5955598-is-api-usage-subject-to-any-rate-limits 48 | """ 49 | 50 | def __init__(self, message: Optional[str] = None): 51 | super().__init__(message=message, status_code=429) 52 | 53 | def __repr__(self): 54 | return f"message={self.message}, status_code={self.status_code}" 55 |
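The `__getattr__` delegation above lets a wrapper exception expose attributes of the exception it was raised from (its `__cause__`). A small hedged sketch of that behavior; `UpstreamError` and its `detail` attribute are hypothetical stand-ins for a real source exception:

```python
from chatgpt_memory.errors import OpenAIError


class UpstreamError(Exception):  # hypothetical low-level failure
    def __init__(self, detail: str):
        super().__init__(detail)
        self.detail = detail


try:
    try:
        raise UpstreamError("connection reset by peer")
    except UpstreamError as source:
        # `raise ... from ...` sets __cause__, which __getattr__ delegates to.
        raise OpenAIError("OpenAI call failed", status_code=502) from source
except OpenAIError as err:
    print(err.status_code)  # 502, set directly on the wrapper
    print(err.detail)       # "connection reset by peer", delegated via __cause__
```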
-------------------------------------------------------------------------------- /chatgpt_memory/llm_client/__init__.py: -------------------------------------------------------------------------------- 1 | from chatgpt_memory.llm_client.openai.conversation.chatgpt_client import ( # noqa: F401 2 | ChatGPTClient, 3 | ChatGPTConfig, 4 | ChatGPTResponse, 5 | ) 6 | from chatgpt_memory.llm_client.openai.embedding.embedding_client import EmbeddingClient # noqa: F401 7 | from chatgpt_memory.llm_client.openai.embedding.embedding_client import EmbeddingConfig # noqa: F401 8 | from chatgpt_memory.llm_client.openai.embedding.embedding_client import EmbeddingModels # noqa: F401 9 | -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/config.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | 4 | class LLMClientConfig(BaseModel): 5 | api_key: str 6 | time_out: float = 30 7 | -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/llm_client.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | 3 | from chatgpt_memory.llm_client.config import LLMClientConfig 4 | 5 | 6 | class LLMClient(ABC): 7 | """ 8 | Wrapper for the HTTP APIs of LLMs, acting as a data container for API configurations. 9 | """ 10 | 11 | def __init__(self, config: LLMClientConfig): 12 | self._api_key = config.api_key 13 | self._time_out = config.time_out 14 | 15 | @property 16 | def api_key(self): 17 | return self._api_key 18 | 19 | @property 20 | def time_out(self): 21 | return self._time_out 22 | -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/openai/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/continuum-llms/chatgpt-memory/51f57a6dedd21e350012251f633366731972a927/chatgpt_memory/llm_client/openai/__init__.py -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/openai/conversation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/continuum-llms/chatgpt-memory/51f57a6dedd21e350012251f633366731972a927/chatgpt_memory/llm_client/openai/conversation/__init__.py -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/openai/conversation/chatgpt_client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import uuid 3 | 4 | from langchain import LLMChain, OpenAI, PromptTemplate 5 | from pydantic import BaseModel 6 | 7 | from chatgpt_memory.llm_client.llm_client import LLMClient 8 | from chatgpt_memory.llm_client.openai.conversation.config import ChatGPTConfig 9 | from chatgpt_memory.memory.manager import MemoryManager 10 | from chatgpt_memory.utils.openai_utils import get_prompt 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class ChatGPTResponse(BaseModel): 16 | conversation_id: str 17 | message: str 18 | chat_gpt_answer: str 19 | 20 | 21 | class ChatGPTClient(LLMClient): 22 | """ 23 | ChatGPT client that lets you interact with the ChatGPT model alongside an infinite contextual and adaptive memory. 24 | 25 | """ 26 | 27 | def __init__(self, config: ChatGPTConfig, memory_manager: MemoryManager): 28 | super().__init__(config=config) 29 | prompt = PromptTemplate(input_variables=["prompt"], template="{prompt}") 30 | self.chatgpt_chain = LLMChain( 31 | llm=OpenAI( 32 | temperature=config.temperature, 33 | openai_api_key=self.api_key, 34 | model_name=config.model_name, 35 | max_retries=config.max_retries, 36 | max_tokens=config.max_tokens, 37 | ), 38 | prompt=prompt, 39 | verbose=config.verbose, 40 | ) 41 | self.memory_manager = memory_manager 42 | 43 | def converse(self, message: str, conversation_id: str = None) -> ChatGPTResponse: 44 | """ 45 | Allows the user to chat with the assistant by leveraging the infinite contextual memory to fetch and 46 | add historical messages to the prompt sent to the ChatGPT model. 47 | 48 | Args: 49 | message (str): Message by the human user. 50 | conversation_id (str, optional): Id of the conversation, if session already exists. Defaults to None. 51 | 52 | Returns: 53 | ChatGPTResponse: Response includes the answer from ChatGPT, the conversation_id, and the human message.
54 | """ 55 | if not conversation_id: 56 | conversation_id = uuid.uuid4().hex 57 | 58 | history = "" 59 | try: 60 | past_messages = self.memory_manager.get_messages(conversation_id=conversation_id, query=message) 61 | history = "\n".join([past_message.text for past_message in past_messages if getattr(past_message, "text")]) 62 | except ValueError as history_not_found_error: 63 | logger.warning( 64 | f"No previous chat history found for conversation_id: {conversation_id}.\nDetails: {history_not_found_error}" 65 | ) 66 | prompt = get_prompt(message=message, history=history) 67 | chat_gpt_answer = self.chatgpt_chain.predict(prompt=prompt) 68 | 69 | if len(message.strip()) and len(chat_gpt_answer.strip()): 70 | self.memory_manager.add_message(conversation_id=conversation_id, human=message, assistant=chat_gpt_answer) 71 | 72 | return ChatGPTResponse(message=message, chat_gpt_answer=chat_gpt_answer, conversation_id=conversation_id) 73 | -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/openai/conversation/config.py: -------------------------------------------------------------------------------- 1 | from chatgpt_memory.llm_client.config import LLMClientConfig 2 | 3 | 4 | class ChatGPTConfig(LLMClientConfig): 5 | temperature: float = 0 6 | model_name: str = "gpt-3.5-turbo" 7 | max_retries: int = 6 8 | max_tokens: int = 256 9 | verbose: bool = False 10 | -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/openai/embedding/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/continuum-llms/chatgpt-memory/51f57a6dedd21e350012251f633366731972a927/chatgpt_memory/llm_client/openai/embedding/__init__.py -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/openai/embedding/config.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | from chatgpt_memory.llm_client.config import LLMClientConfig 4 | 5 | 6 | class EmbeddingModels(Enum): 7 | ada = "*-ada-*-001" 8 | babbage = "*-babbage-*-001" 9 | curie = "*-curie-*-001" 10 | davinci = "*-davinci-*-001" 11 | 12 | 13 | class EmbeddingConfig(LLMClientConfig): 14 | url: str = "https://api.openai.com/v1/embeddings" 15 | batch_size: int = 64 16 | progress_bar: bool = False 17 | model: str = EmbeddingModels.ada.value 18 | max_seq_len: int = 8191 19 | use_tiktoken: bool = False 20 | -------------------------------------------------------------------------------- /chatgpt_memory/llm_client/openai/embedding/embedding_client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any, Dict, List, Union 3 | 4 | import numpy as np 5 | from tqdm import tqdm 6 | 7 | from chatgpt_memory.constants import MAX_ALLOWED_SEQ_LEN_001, MAX_ALLOWED_SEQ_LEN_002 8 | from chatgpt_memory.llm_client.llm_client import LLMClient 9 | from chatgpt_memory.llm_client.openai.embedding.config import EmbeddingConfig, EmbeddingModels 10 | from chatgpt_memory.utils.openai_utils import count_openai_tokens, load_openai_tokenizer, openai_request 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class EmbeddingClient(LLMClient): 16 | def __init__(self, config: EmbeddingConfig): 17 | super().__init__(config=config) 18 | 19 | self.openai_embedding_config = config 20 | model_class: str = 
EmbeddingModels(self.openai_embedding_config.model).name 21 | 22 | tokenizer = self._setup_encoding_models( 23 | model_class, 24 | self.openai_embedding_config.model, 25 | self.openai_embedding_config.max_seq_len, 26 | ) 27 | self._tokenizer = load_openai_tokenizer( 28 | tokenizer_name=tokenizer, 29 | use_tiktoken=self.openai_embedding_config.use_tiktoken, 30 | ) 31 | 32 | def _setup_encoding_models(self, model_class: str, model_name: str, max_seq_len: int): 33 | """ 34 | Set up the encoding models for the client. 35 | 36 | Raises: 37 | ImportError: When the `tiktoken` package is missing. 38 | To use the tiktoken tokenizer install it as follows: 39 | `pip install tiktoken` 40 | """ 41 | 42 | tokenizer_name = "gpt2" 43 | # new generation of embedding models (December 2022), specify the full name 44 | if model_name.endswith("-002"): 45 | self.query_encoder_model = model_name 46 | self.doc_encoder_model = model_name 47 | self.max_seq_len = min(MAX_ALLOWED_SEQ_LEN_002, max_seq_len) 48 | if self.openai_embedding_config.use_tiktoken: 49 | try: 50 | from tiktoken.model import MODEL_TO_ENCODING 51 | 52 | tokenizer_name = MODEL_TO_ENCODING.get(model_name, "cl100k_base") 53 | except ImportError: 54 | raise ImportError( 55 | "The `tiktoken` package not found.", 56 | "To install it use the following:", 57 | "`pip install tiktoken`", 58 | ) 59 | else: 60 | self.query_encoder_model = f"text-search-{model_class}-query-001" 61 | self.doc_encoder_model = f"text-search-{model_class}-doc-001" 62 | self.max_seq_len = min(MAX_ALLOWED_SEQ_LEN_001, max_seq_len) 63 | 64 | return tokenizer_name 65 | 66 | def _ensure_text_limit(self, text: str) -> str: 67 | """ 68 | Ensure that the length of the text is within the maximum length of the model. 69 | OpenAI v1 embedding models have a limit of 2046 tokens, and v2 models have 70 | a limit of 8191 tokens. 71 | 72 | Args: 73 | text (str): Text to be checked against the max token limit 74 | 75 | Returns: 76 | text (str): Trimmed text if it exceeds the max token limit 77 | """ 78 | n_tokens = count_openai_tokens(text, self._tokenizer, self.openai_embedding_config.use_tiktoken) 79 | if n_tokens <= self.max_seq_len: 80 | return text 81 | 82 | logger.warning( 83 | "The prompt has been truncated from %s tokens to %s tokens to fit within the max token limit. " 84 | "Reduce the length of the prompt to prevent it from being cut off.", 85 | n_tokens, 86 | self.max_seq_len, 87 | ) 88 | 89 | if self.openai_embedding_config.use_tiktoken: 90 | tokenized_payload = self._tokenizer.encode(text) 91 | decoded_string = self._tokenizer.decode(tokenized_payload[: self.max_seq_len]) 92 | else: 93 | tokenized_payload = self._tokenizer.tokenize(text) 94 | decoded_string = self._tokenizer.convert_tokens_to_string(tokenized_payload[: self.max_seq_len]) 95 | 96 | return decoded_string 97 | 98 | def embed(self, model: str, text: List[str]) -> np.ndarray: 99 | """ 100 | Embeds the batch of texts using the specified LLM. 101 | 102 | Args: 103 | model (str): LLM model name for embeddings. 104 | text (List[str]): List of documents to be embedded. 105 | 106 | Raises: 107 | ValueError: When the OpenAI API key is missing. 108 | 109 | Returns: 110 | np.ndarray: embeddings for the input documents. 111 | """ 112 | if self.api_key is None: 113 | raise ValueError( 114 | "OpenAI API key is not set. You can set it via the `api_key` parameter of the `LLMClient`."
115 | ) 116 | 117 | generated_embeddings: List[Any] = [] 118 | 119 | headers: Dict[str, str] = {"Content-Type": "application/json"} 120 | payload: Dict[str, Union[List[str], str]] = {"model": model, "input": text} 121 | headers["Authorization"] = f"Bearer {self.api_key}" 122 | 123 | res = openai_request( 124 | url=self.openai_embedding_config.url, 125 | headers=headers, 126 | payload=payload, 127 | timeout=self.time_out, 128 | ) 129 | 130 | unordered_embeddings = [(ans["index"], ans["embedding"]) for ans in res["data"]] 131 | ordered_embeddings = sorted(unordered_embeddings, key=lambda x: x[0]) 132 | 133 | generated_embeddings = [emb[1] for emb in ordered_embeddings] 134 | 135 | return np.array(generated_embeddings) 136 | 137 | def embed_batch(self, model: str, text: List[str]) -> np.ndarray: 138 | all_embeddings = [] 139 | for i in tqdm( 140 | range(0, len(text), self.openai_embedding_config.batch_size), 141 | disable=not self.openai_embedding_config.progress_bar, 142 | desc="Calculating embeddings", 143 | ): 144 | batch = text[i : i + self.openai_embedding_config.batch_size] 145 | batch_limited = [self._ensure_text_limit(content) for content in batch] 146 | generated_embeddings = self.embed(model, batch_limited) 147 | all_embeddings.append(generated_embeddings) 148 | 149 | return np.concatenate(all_embeddings) 150 | 151 | def embed_queries(self, queries: List[str]) -> np.ndarray: 152 | return self.embed_batch(self.query_encoder_model, queries) 153 | 154 | def embed_documents(self, docs: List[Dict]) -> np.ndarray: 155 | return self.embed_batch(self.doc_encoder_model, [d["text"] for d in docs]) 156 |
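The embedding client can also be used on its own. Note that documents and queries are embedded with separate encoder models (`text-search-*-doc-001` vs. `text-search-*-query-001` for the default "ada" model class). A minimal sketch; it requires a valid `OPENAI_API_KEY` and makes real API calls:

```python
import numpy as np

from chatgpt_memory.environment import OPENAI_API_KEY
from chatgpt_memory.llm_client import EmbeddingClient, EmbeddingConfig

client = EmbeddingClient(config=EmbeddingConfig(api_key=OPENAI_API_KEY))

doc_vectors = client.embed_documents(docs=[{"text": "Human: hi\nAssistant: hello!"}])
query_vector = client.embed_queries(["hi there"])[0]

# The datastore expects raw float32 bytes, as in MemoryManager.add_message.
query_bytes = query_vector.astype(np.float32).tobytes()
print(doc_vectors.shape, query_vector.shape)
```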
-------------------------------------------------------------------------------- /chatgpt_memory/memory/__init__.py: -------------------------------------------------------------------------------- 1 | from chatgpt_memory.memory.manager import MemoryManager # noqa: F401 2 | from chatgpt_memory.memory.memory import Memory # noqa: F401 3 | -------------------------------------------------------------------------------- /chatgpt_memory/memory/manager.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List 2 | 3 | import numpy as np 4 | 5 | from chatgpt_memory.datastore.redis import RedisDataStore 6 | from chatgpt_memory.llm_client.openai.embedding.embedding_client import EmbeddingClient 7 | 8 | from .memory import Memory 9 | 10 | 11 | class MemoryManager: 12 | """ 13 | Manages the memory of conversations. 14 | 15 | Attributes: 16 | datastore (DataStore): Datastore to use for storing and retrieving memories. 17 | embed_client (EmbeddingClient): Embedding client to call for embedding conversations. 18 | conversations (List[Memory]): List of memories for the conversations being managed. 19 | """ 20 | 21 | def __init__(self, datastore: RedisDataStore, embed_client: EmbeddingClient, topk: int = 5) -> None: 22 | """ 23 | Initializes the memory manager. 24 | 25 | Args: 26 | datastore (DataStore): Datastore to be used. Assumed to be connected. 27 | embed_client (EmbeddingClient): Embedding client to be used. 28 | topk (int): Number of past messages to be retrieved as context for the current message. 29 | """ 30 | self.datastore = datastore 31 | self.embed_client = embed_client 32 | self.topk = topk 33 | self.conversations: List[Memory] = [ 34 | Memory(conversation_id=conversation_id) for conversation_id in datastore.get_all_conversation_ids() 35 | ] 36 | 37 | def __del__(self) -> None: 38 | """Clear the memory manager when the manager is deleted.""" 39 | self.clear() 40 | 41 | def add_conversation(self, conversation: Memory) -> None: 42 | """ 43 | Adds a conversation to the memory manager to be stored and managed. 44 | 45 | Args: 46 | conversation (Memory): Conversation to be added. 47 | """ 48 | if conversation not in self.conversations: 49 | self.conversations.append(conversation) 50 | 51 | def remove_conversation(self, conversation: Memory) -> None: 52 | """ 53 | Removes a conversation from the memory manager. 54 | 55 | Args: 56 | conversation (Memory): Conversation to be removed containing `conversation_id`. 57 | """ 58 | if conversation not in self.conversations: 59 | return 60 | 61 | conversation_idx = self.conversations.index(conversation) 62 | if conversation_idx >= 0: 63 | del self.conversations[conversation_idx] 64 | self.datastore.delete_documents(conversation_id=conversation.conversation_id) 65 | 66 | def clear(self) -> None: 67 | """ 68 | Clears the memory manager. 69 | """ 70 | self.datastore.flush_all_documents() 71 | self.conversations = [] 72 | 73 | def add_message(self, conversation_id: str, human: str, assistant: str) -> None: 74 | """ 75 | Adds a message to a conversation. 76 | 77 | Args: 78 | conversation_id (str): ID of the conversation to add the message to. 79 | human (str): User message. 80 | assistant (str): Assistant message. 81 | """ 82 | document: Dict = {"text": f"Human: {human}\nAssistant: {assistant}", "conversation_id": conversation_id} 83 | document["embedding"] = self.embed_client.embed_documents(docs=[document])[0].astype(np.float32).tobytes() 84 | self.datastore.index_documents(documents=[document]) 85 | 86 | # optionally check if it is a new conversation 87 | self.add_conversation(Memory(conversation_id=conversation_id)) 88 | 89 | def get_messages(self, conversation_id: str, query: str) -> List[Any]: 90 | """ 91 | Gets the messages of a conversation using the query message. 92 | 93 | Args: 94 | conversation_id (str): ID of the conversation to get the messages of. 95 | query (str): Current user message for which to pull relevant history into the prompt. 96 | (The number of messages returned is controlled by `self.topk`.) 97 | 98 | Returns: 99 | List[Any]: List of messages of the conversation. 100 | """ 101 | if Memory(conversation_id=conversation_id) not in self.conversations: 102 | raise ValueError(f"Conversation id: {conversation_id} is not present in past conversations.") 103 | 104 | query_vector = self.embed_client.embed_queries([query])[0].astype(np.float32).tobytes() 105 | messages = self.datastore.search_documents( 106 | query_vector=query_vector, conversation_id=conversation_id, topk=self.topk 107 | ) 108 | return messages 109 | -------------------------------------------------------------------------------- /chatgpt_memory/memory/memory.py: -------------------------------------------------------------------------------- 1 | """ 2 | Contains the Memory data model. 3 | """ 4 | from pydantic import BaseModel 5 | 6 | 7 | class Memory(BaseModel): 8 | """ 9 | A single conversation's memory record.
10 | """ 11 | 12 | conversation_id: str 13 | """ID of the conversation.""" 14 | -------------------------------------------------------------------------------- /chatgpt_memory/utils/openai_utils.py: -------------------------------------------------------------------------------- 1 | """Utils for using the OpenAI API""" 2 | import json 3 | import logging 4 | from typing import Any, Dict, Tuple, Union 5 | 6 | import requests 7 | from transformers import GPT2TokenizerFast 8 | 9 | from chatgpt_memory.environment import OPENAI_BACKOFF, OPENAI_MAX_RETRIES, OPENAI_TIMEOUT 10 | from chatgpt_memory.errors import OpenAIError, OpenAIRateLimitError 11 | from chatgpt_memory.utils.reflection import retry_with_exponential_backoff 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | def load_openai_tokenizer(tokenizer_name: str, use_tiktoken: bool) -> Any: 17 | """ 18 | Load either the tokenizer from tiktoken (if the library is available) or 19 | fall back to the GPT2TokenizerFast from the transformers library. 20 | 21 | Args: 22 | tokenizer_name (str): The name of the tokenizer to load. 23 | use_tiktoken (bool): Use the tiktoken tokenizer or not. 24 | 25 | Raises: 26 | ImportError: When the `tiktoken` package is missing. 27 | To use the tiktoken tokenizer install it as follows: 28 | `pip install tiktoken` 29 | 30 | Returns: 31 | tokenizer: Tokenizer of either the GPT2 kind or tiktoken based. 32 | """ 33 | tokenizer = None 34 | if use_tiktoken: 35 | try: 36 | import tiktoken # pylint: disable=import-error 37 | 38 | logger.debug("Using tiktoken %s tokenizer", tokenizer_name) 39 | tokenizer = tiktoken.get_encoding(tokenizer_name) 40 | except ImportError: 41 | raise ImportError( 42 | "The `tiktoken` package not found.", 43 | "To install it use the following:", 44 | "`pip install tiktoken`", 45 | ) 46 | else: 47 | logger.warning( 48 | "OpenAI tiktoken module is not available for Python < 3.8, Linux ARM64 and " 49 | "AARCH64. Falling back to GPT2TokenizerFast." 50 | ) 51 | 52 | logger.debug("Using GPT2TokenizerFast tokenizer") 53 | tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_name) 54 | return tokenizer 55 | 56 | 57 | def count_openai_tokens(text: str, tokenizer: Any, use_tiktoken: bool) -> int: 58 | """ 59 | Count the number of tokens in `text` based on the provided OpenAI `tokenizer`. 60 | 61 | Args: 62 | text (str): A string to be tokenized. 63 | tokenizer (Any): An OpenAI tokenizer. 64 | use_tiktoken (bool): Use the tiktoken tokenizer or not. 65 | 66 | Returns: 67 | int: Number of tokens in the text. 68 | """ 69 | 70 | if use_tiktoken: 71 | return len(tokenizer.encode(text)) 72 | else: 73 | return len(tokenizer.tokenize(text)) 74 | 75 | 76 | @retry_with_exponential_backoff( 77 | backoff_in_seconds=OPENAI_BACKOFF, 78 | max_retries=OPENAI_MAX_RETRIES, 79 | errors=(OpenAIRateLimitError, OpenAIError), 80 | ) 81 | def openai_request( 82 | url: str, 83 | headers: Dict, 84 | payload: Dict, 85 | timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT, 86 | ) -> Dict: 87 | """ 88 | Make a request to the OpenAI API given a `url`, `headers`, `payload`, and 89 | `timeout`. 90 | 91 | Args: 92 | url (str): The URL of the OpenAI API. 93 | headers (Dict): Dictionary of HTTP Headers to send with the :class:`Request`. 94 | payload (Dict): The payload to send with the request. 95 | timeout (Union[float, Tuple[float, float]], optional): The timeout length of the request in seconds. 96 | Defaults to OPENAI_TIMEOUT (30s). 97 | 98 | Raises: 99 | openai_error: If the request fails.
100 | 101 | Returns: 102 | Dict: OpenAI Embedding API response. 103 | """ 104 | 105 | response = requests.request("POST", url, headers=headers, data=json.dumps(payload), timeout=timeout) 106 | res = json.loads(response.text) 107 | 108 | # if the request is unsuccessful and `status_code == 429`, 109 | # raise a rate-limiting error; otherwise raise the generic OpenAIError 110 | if response.status_code != 200: 111 | openai_error: OpenAIError 112 | if response.status_code == 429: 113 | openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}") 114 | else: 115 | openai_error = OpenAIError( 116 | f"OpenAI returned an error.\n" 117 | f"Status code: {response.status_code}\n" 118 | f"Response body: {response.text}", 119 | status_code=response.status_code, 120 | ) 121 | raise openai_error 122 | 123 | return res 124 | 125 | 126 | def get_prompt(message: str, history: str) -> str: 127 | """ 128 | Generates the prompt based on the current history and message. 129 | 130 | Args: 131 | message (str): Current message from the user. 132 | history (str): Retrieved history for the current message. 133 | History follows this format, for example: 134 | ``` 135 | Human: hello 136 | Assistant: hello, how are you? 137 | Human: good, you? 138 | Assistant: I am doing good as well. How may I help you? 139 | ``` 140 | Returns: 141 | prompt: Curated prompt for the ChatGPT API based on the current params. 142 | """ 143 | prompt = f"""Assistant is a large language model trained by OpenAI. 144 | 145 | Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. 146 | 147 | Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. 148 | 149 | Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
--------------------------------------------------------------------------------
/chatgpt_memory/utils/reflection.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | import logging
3 | import time
4 | from random import random
5 | from typing import Any, Callable, Dict, Tuple
6 | 
7 | from chatgpt_memory.errors import OpenAIRateLimitError
8 | 
9 | logger = logging.getLogger(__name__)
10 | 
11 | 
12 | def args_to_kwargs(args: Tuple, func: Callable) -> Dict[str, Any]:
13 |     sig = inspect.signature(func)
14 |     arg_names = list(sig.parameters.keys())
15 |     # skip self and cls args for instance and class methods
16 |     if any(arg_names) and arg_names[0] in ["self", "cls"]:
17 |         arg_names = arg_names[1 : 1 + len(args)]
18 |     args_as_kwargs = {arg_name: arg for arg, arg_name in zip(args, arg_names)}
19 |     return args_as_kwargs
20 | 
21 | 
22 | def retry_with_exponential_backoff(
23 |     backoff_in_seconds: float = 1,
24 |     max_retries: int = 10,
25 |     errors: tuple = (OpenAIRateLimitError,),
26 | ):
27 |     """
28 |     Decorator to retry a function with exponential backoff.
29 |     :param backoff_in_seconds: The initial backoff in seconds.
30 |     :param max_retries: The maximum number of retries.
31 |     :param errors: The errors to catch and retry on.
32 |     """
33 | 
34 |     def decorator(function):
35 |         def wrapper(*args, **kwargs):
36 |             # Initialize variables
37 |             num_retries = 0
38 | 
39 |             # Loop until a successful response, max_retries is hit, or an
40 |             # unexpected exception is raised
41 |             while True:
42 |                 try:
43 |                     return function(*args, **kwargs)
44 | 
45 |                 # Retry on the specified errors
46 |                 except errors as e:
47 |                     # Check if the maximum number of retries has been reached
48 |                     if num_retries >= max_retries:
49 |                         raise Exception(f"Maximum number of retries ({max_retries}) exceeded.") from e
50 | 
51 |                     # Increase the delay exponentially, with jitter
52 |                     sleep_time = backoff_in_seconds * 2**num_retries + random()
53 | 
54 |                     # Sleep for the delay
55 |                     logger.warning(
56 |                         "%s - %s, retrying %s in %s seconds...",
57 |                         e.__class__.__name__,
58 |                         e,
59 |                         function.__name__,
60 |                         "{0:.2f}".format(sleep_time),
61 |                     )
62 |                     time.sleep(sleep_time)
63 | 
64 |                     # Increment retries
65 |                     num_retries += 1
66 | 
67 |         return wrapper
68 | 
69 |     return decorator
70 | 
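A small sketch of the decorator in use (the decorated function below is illustrative, not part of the library):

    from chatgpt_memory.errors import OpenAIRateLimitError
    from chatgpt_memory.utils.reflection import retry_with_exponential_backoff

    @retry_with_exponential_backoff(backoff_in_seconds=1, max_retries=3, errors=(OpenAIRateLimitError,))
    def flaky_call() -> str:
        # Stand-in for an OpenAI API call that may be rate limited.
        return "ok"

With these settings, failed attempts wait roughly 1s, 2s, then 4s (each plus jitter from `random()`); once the retry budget is exhausted, the decorator raises.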
5 | """ 6 | from chatgpt_memory.datastore import RedisDataStore, RedisDataStoreConfig 7 | 8 | ## set the following ENVIRONMENT Variables before running this script 9 | # Import necessary modules 10 | from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT 11 | from chatgpt_memory.llm_client import ChatGPTClient, ChatGPTConfig, EmbeddingClient, EmbeddingConfig 12 | from chatgpt_memory.memory import MemoryManager 13 | 14 | # Instantiate an EmbeddingConfig object with the OpenAI API key 15 | embedding_config = EmbeddingConfig(api_key=OPENAI_API_KEY) 16 | 17 | # Instantiate an EmbeddingClient object with the EmbeddingConfig object 18 | embed_client = EmbeddingClient(config=embedding_config) 19 | 20 | # Instantiate a RedisDataStoreConfig object with the Redis connection details 21 | redis_datastore_config = RedisDataStoreConfig( 22 | host=REDIS_HOST, 23 | port=REDIS_PORT, 24 | password=REDIS_PASSWORD, 25 | ) 26 | 27 | # Instantiate a RedisDataStore object with the RedisDataStoreConfig object 28 | redis_datastore = RedisDataStore(config=redis_datastore_config) 29 | 30 | # Instantiate a MemoryManager object with the RedisDataStore object and EmbeddingClient object 31 | memory_manager = MemoryManager(datastore=redis_datastore, embed_client=embed_client, topk=1) 32 | 33 | # Instantiate a ChatGPTConfig object with the OpenAI API key and verbose set to True 34 | chat_gpt_config = ChatGPTConfig(api_key=OPENAI_API_KEY, verbose=False) 35 | 36 | # Instantiate a ChatGPTClient object with the ChatGPTConfig object and MemoryManager object 37 | chat_gpt_client = ChatGPTClient(config=chat_gpt_config, memory_manager=memory_manager) 38 | 39 | 40 | # Initialize conversation_id to None 41 | conversation_id = None 42 | 43 | # Start the chatbot loop 44 | while True: 45 | # Prompt the user for input 46 | user_message = input("\n \033[92m Please enter your message: ") 47 | 48 | # Use the ChatGPTClient object to generate a response 49 | response = chat_gpt_client.converse(message=user_message, conversation_id=None) 50 | 51 | # Update the conversation_id with the conversation_id from the response 52 | conversation_id = response.conversation_id 53 | print("\n \033[96m Assisstant: " + response.chat_gpt_answer) 54 | # Print the response generated by the chatbot 55 | # print(response.chat_gpt_answer) 56 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "chatgpt-memory" 3 | version = "0.0.1" 4 | description = "" 5 | 6 | authors = [ 7 | "Shahrukh Khan ", 8 | "Navdeeppal Singh " 9 | ] 10 | readme = "README.md" 11 | packages = [{include = "chatgpt_memory"}] 12 | 13 | [tool.poetry.dependencies] 14 | python = "^3.10" 15 | pydantic = "^1.10.0" 16 | pytest = "^7.2.2" 17 | numpy = "^1.24.2" 18 | tqdm = "^4.65.0" 19 | requests = "^2.28.2" 20 | transformers = "^4.26.1" 21 | redis = "^4.5.1" 22 | openai = "^0.27.2" 23 | langchain = "^0.0.113" 24 | python-dotenv = "^1.0.0" 25 | streamlit = "^1.21.0" 26 | fastapi = "^0.95.1" 27 | uvicorn = "^0.21.1" 28 | 29 | 30 | [build-system] 31 | requires = ["poetry-core"] 32 | build-backend = "poetry.core.masonry.api" 33 | 34 | [tool.isort] 35 | profile = "black" 36 | filter_files = true 37 | multi_line_output = 3 38 | include_trailing_comma = true 39 | force_grid_wrap = 0 40 | use_parentheses = true 41 | ensure_newline_before_comments = true 42 | line_length = 120 43 | 44 | [tool.black] 45 | line-length 
46 | target-version = ['py310']
47 | 
48 | [tool.ruff]
49 | line-length = 120
50 | select = [
51 |     "E", "W",  # see: https://pypi.org/project/pycodestyle
52 |     "F",  # see: https://pypi.org/project/pyflakes
53 | ]
54 | ignore = [
55 |     "E501",
56 |     "E731",
57 |     "E741",  # Ambiguous variable name: ...
58 |     "E999",  # SyntaxError: invalid syntax. Got unexpected token Newline
59 | ]
60 | exclude = [
61 |     ".eggs",
62 |     ".git",
63 |     ".ruff_cache",
64 |     "__pypackages__",
65 |     "_build",
66 |     "build",
67 |     "dist",
68 |     "docs"
69 | ]
70 | ignore-init-module-imports = true
--------------------------------------------------------------------------------
/rest_api.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | 
3 | from fastapi import FastAPI
4 | from pydantic import BaseModel
5 | 
6 | from chatgpt_memory.datastore import RedisDataStore, RedisDataStoreConfig
7 | from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT
8 | from chatgpt_memory.llm_client import ChatGPTClient, ChatGPTConfig, ChatGPTResponse, EmbeddingClient, EmbeddingConfig
9 | from chatgpt_memory.memory import MemoryManager
10 | 
11 | # Instantiate an EmbeddingConfig object with the OpenAI API key
12 | embedding_config = EmbeddingConfig(api_key=OPENAI_API_KEY)
13 | 
14 | # Instantiate an EmbeddingClient object with the EmbeddingConfig object
15 | embed_client = EmbeddingClient(config=embedding_config)
16 | 
17 | # Instantiate a RedisDataStoreConfig object with the Redis connection details
18 | redis_datastore_config = RedisDataStoreConfig(
19 |     host=REDIS_HOST,
20 |     port=REDIS_PORT,
21 |     password=REDIS_PASSWORD,
22 | )
23 | 
24 | # Instantiate a RedisDataStore object with the RedisDataStoreConfig object
25 | redis_datastore = RedisDataStore(config=redis_datastore_config)
26 | 
27 | # Instantiate a MemoryManager object with the RedisDataStore object and EmbeddingClient object
28 | memory_manager = MemoryManager(datastore=redis_datastore, embed_client=embed_client, topk=1)
29 | 
30 | # Instantiate a ChatGPTConfig object with the OpenAI API key and verbose set to False
31 | chat_gpt_config = ChatGPTConfig(api_key=OPENAI_API_KEY, verbose=False)
32 | 
33 | # Instantiate a ChatGPTClient object with the ChatGPTConfig object and MemoryManager object
34 | chat_gpt_client = ChatGPTClient(config=chat_gpt_config, memory_manager=memory_manager)
35 | 
36 | 
37 | class MessagePayload(BaseModel):
38 |     conversation_id: Optional[str]
39 |     message: str
40 | 
41 | 
42 | app = FastAPI()
43 | 
44 | 
45 | @app.post("/converse/")
46 | async def converse(message_payload: MessagePayload) -> ChatGPTResponse:
47 |     response = chat_gpt_client.converse(**message_payload.dict())
48 |     return response
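Once the app is served (for example with `uvicorn rest_api:app`), a minimal sketch of calling the endpoint looks like the following; the host, port, and message are assumptions for illustration, and the payload mirrors MessagePayload above:

    import requests

    payload = {"message": "Hello!", "conversation_id": None}
    response = requests.post("http://localhost:8000/converse/", json=payload, timeout=30)
    response.raise_for_status()
    body = response.json()
    print(body["conversation_id"], body["chat_gpt_answer"])

Passing `conversation_id=None` starts a new conversation; reusing the returned `conversation_id` on subsequent calls keeps the memory scoped to that conversation.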
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from chatgpt_memory.datastore.config import RedisDataStoreConfig
4 | from chatgpt_memory.datastore.redis import RedisDataStore
5 | from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT
6 | from chatgpt_memory.llm_client.openai.embedding.config import EmbeddingConfig
7 | from chatgpt_memory.llm_client.openai.embedding.embedding_client import EmbeddingClient
8 | 
9 | 
10 | @pytest.fixture(scope="session")
11 | def openai_embedding_client():
12 |     embedding_config = EmbeddingConfig(api_key=OPENAI_API_KEY)
13 |     return EmbeddingClient(config=embedding_config)
14 | 
15 | 
16 | @pytest.fixture(scope="session")
17 | def redis_datastore():
18 |     redis_datastore_config = RedisDataStoreConfig(
19 |         host=REDIS_HOST,
20 |         port=REDIS_PORT,
21 |         password=REDIS_PASSWORD,
22 |     )
23 |     redis_datastore = RedisDataStore(config=redis_datastore_config, do_flush_data=True)
24 | 
25 |     return redis_datastore
26 | 
--------------------------------------------------------------------------------
/tests/test_llm_embedding_client.py:
--------------------------------------------------------------------------------
1 | from chatgpt_memory.llm_client.openai.embedding.embedding_client import EmbeddingClient
2 | 
3 | SAMPLE_QUERIES = ["Where is Berlin?"]
4 | SAMPLE_DOCUMENTS = [{"text": "Berlin is located in Germany."}]
5 | 
6 | EXPECTED_EMBEDDING_DIMENSIONS = (1, 1024)
7 | 
8 | 
9 | def test_openai_embedding_client(openai_embedding_client: EmbeddingClient):
10 |     assert (
11 |         openai_embedding_client.embed_queries(SAMPLE_QUERIES).shape == EXPECTED_EMBEDDING_DIMENSIONS
12 |     ), "Generated query embedding is of inconsistent dimension"
13 | 
14 |     assert (
15 |         openai_embedding_client.embed_documents(SAMPLE_DOCUMENTS).shape == EXPECTED_EMBEDDING_DIMENSIONS
16 |     ), "Generated document(s) embedding is of inconsistent dimension"
17 | 
--------------------------------------------------------------------------------
/tests/test_memory_manager.py:
--------------------------------------------------------------------------------
1 | from chatgpt_memory.datastore.config import RedisDataStoreConfig
2 | from chatgpt_memory.datastore.redis import RedisDataStore
3 | from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT
4 | from chatgpt_memory.llm_client.openai.embedding.config import EmbeddingConfig
5 | from chatgpt_memory.llm_client.openai.embedding.embedding_client import EmbeddingClient
6 | from chatgpt_memory.memory.manager import MemoryManager
7 | from chatgpt_memory.memory.memory import Memory
8 | 
9 | 
10 | class TestMemoryManager:
11 |     def setup_method(self, method):
12 |         # create a redis datastore
13 |         redis_datastore_config = RedisDataStoreConfig(
14 |             host=REDIS_HOST,
15 |             port=REDIS_PORT,
16 |             password=REDIS_PASSWORD,
17 |         )
18 |         self.datastore = RedisDataStore(redis_datastore_config, do_flush_data=True)
19 | 
20 |         # create an openai embedding client
21 |         embedding_client_config = EmbeddingConfig(api_key=OPENAI_API_KEY)
22 |         self.embedding_client = EmbeddingClient(embedding_client_config)
23 | 
24 |     def test_conversation_insertion_and_deletion(self):
25 |         # create a memory manager
26 |         memory_manager = MemoryManager(datastore=self.datastore, embed_client=self.embedding_client)
27 | 
28 |         # assert that the memory manager is initially empty
29 |         assert len(memory_manager.conversations) == 0
30 | 
31 |         # add a conversation to the memory manager
32 |         memory_manager.add_conversation(Memory(conversation_id="1"))
33 | 
34 |         # assert that the memory manager has 1 conversation
35 |         assert len(memory_manager.conversations) == 1
36 | 
37 |         # remove the conversation from the memory manager
38 |         memory_manager.remove_conversation(Memory(conversation_id="1"))
39 | 
40 |         # assert that the memory manager is empty
41 |         assert len(memory_manager.conversations) == 0
42 | 
43 |     def test_adding_messages_to_conversation(self):
44 |         # create a memory manager
45 |         memory_manager = MemoryManager(datastore=self.datastore, embed_client=self.embedding_client)
46 | 
47 |         # add a conversation to the memory manager
48 |         memory_manager.add_conversation(Memory(conversation_id="1"))
49 | 
50 |         # assert that the memory manager has 1 conversation
51 |         assert len(memory_manager.conversations) == 1
52 | 
53 |         # add a message to the conversation
54 |         memory_manager.add_message(conversation_id="1", human="Hello", assistant="Hello. How are you?")
55 | 
56 |         # get messages for that conversation
57 |         messages = memory_manager.get_messages(conversation_id="1", query="Hello")
58 | 
59 |         # assert that the message was added
60 |         assert len(messages) == 1
61 | 
62 |         # assert that the message is correct
63 |         assert messages[0].text == "Human: Hello\nAssistant: Hello. How are you?"
64 |         assert messages[0].conversation_id == "1"
65 | 
--------------------------------------------------------------------------------
/tests/test_redis_datastore.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | from chatgpt_memory.datastore.redis import RedisDataStore
4 | from chatgpt_memory.environment import OPENAI_API_KEY
5 | from chatgpt_memory.llm_client.openai.embedding.config import EmbeddingConfig
6 | from chatgpt_memory.llm_client.openai.embedding.embedding_client import EmbeddingClient
7 | 
8 | SAMPLE_QUERIES = ["Where is Berlin?"]
9 | SAMPLE_DOCUMENTS = [
10 |     {"text": "Berlin is located in Germany.", "conversation_id": "1"},
11 |     {"text": "Vienna is in Austria.", "conversation_id": "1"},
12 |     {"text": "Salzburg is in Austria.", "conversation_id": "2"},
13 | ]
14 | 
15 | 
16 | def test_redis_datastore(redis_datastore: RedisDataStore):
17 |     embedding_config = EmbeddingConfig(api_key=OPENAI_API_KEY)
18 |     openai_embedding_client = EmbeddingClient(config=embedding_config)
19 |     assert (
20 |         redis_datastore.redis_connection.ping()
21 |     ), ("Redis connection failed, "
22 |         "double check your connection parameters")
23 | 
24 |     document_embeddings: np.ndarray = openai_embedding_client.embed_documents(SAMPLE_DOCUMENTS)
25 |     for idx, embedding in enumerate(document_embeddings):
26 |         SAMPLE_DOCUMENTS[idx]["embedding"] = embedding.astype(np.float32).tobytes()
27 |     redis_datastore.index_documents(documents=SAMPLE_DOCUMENTS)
28 | 
29 |     query_embeddings: np.ndarray = openai_embedding_client.embed_queries(SAMPLE_QUERIES)
30 |     query_vector = query_embeddings[0].astype(np.float32).tobytes()
31 |     search_results = redis_datastore.search_documents(query_vector=query_vector, conversation_id="1", topk=1)
32 |     assert len(search_results), "No documents returned, expected 1 document."
33 | 
34 |     assert search_results[0].text == "Berlin is located in Germany.", "Incorrect document returned as search result."
35 | 
36 |     redis_datastore.delete_documents(conversation_id="1")
37 |     assert redis_datastore.get_all_conversation_ids() == [
38 |         "2"
39 |     ], "Document deletion failed, inconsistent documents in redis index"
40 | 
--------------------------------------------------------------------------------
/ui.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from https://github.com/avrabyt/MemoryBot
3 | """
4 | 
5 | import requests
6 | 
7 | # Import necessary libraries
8 | import streamlit as st
9 | 
10 | from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT
11 | 
12 | # Set Streamlit page configuration
13 | st.set_page_config(page_title="🧠MemoryBot🤖", layout="wide")
14 | # Initialize session states
15 | if "generated" not in st.session_state:
16 |     st.session_state["generated"] = []
17 | if "past" not in st.session_state:
18 |     st.session_state["past"] = []
19 | if "input" not in st.session_state:
20 |     st.session_state["input"] = ""
21 | if "stored_session" not in st.session_state:
22 |     st.session_state["stored_session"] = []
23 | if "conversation_id" not in st.session_state:
24 |     st.session_state["conversation_id"] = None
25 | 
26 | 
27 | # Define function to get user input
28 | def get_text():
29 |     """
30 |     Get the user input text.
31 | 
32 |     Returns:
33 |         (str): The text entered by the user
34 |     """
35 |     input_text = st.text_input(
36 |         "You: ",
37 |         st.session_state["input"],
38 |         key="input",
39 |         placeholder="Your AI assistant here! Ask me anything ...",
40 |         label_visibility="hidden",
41 |         on_change=send_text,
42 |     )
43 | 
44 |     return input_text
45 | 
46 | 
47 | def send_text():
48 |     user_input = st.session_state["input"]
49 |     if user_input:
50 |         # Call the REST API, which wraps the ChatGPTClient, to generate a response
51 |         url = "http://localhost:8000/converse"
52 |         payload = {"message": user_input, "conversation_id": st.session_state.conversation_id}
53 | 
54 |         response = requests.post(url, json=payload).json()
55 |         # Update the conversation_id with the conversation_id from the response
56 |         if not st.session_state.conversation_id:
57 |             st.session_state.conversation_id = response["conversation_id"]
58 |         st.session_state.past.insert(0, user_input)
59 |         st.session_state.generated.insert(0, response["chat_gpt_answer"])
60 |         st.session_state["input"] = ""
61 | 
62 | 
63 | # Define function to start a new chat
64 | def new_chat():
65 |     """
66 |     Clears session state and starts a new chat.
67 |     """
68 |     save = []
69 |     for i in range(len(st.session_state["generated"]) - 1, -1, -1):
70 |         save.append("Human: " + st.session_state["past"][i])
71 |         save.append("Assistant: " + st.session_state["generated"][i])
72 |     st.session_state["stored_session"].append(save)
73 |     st.session_state["generated"] = []
74 |     st.session_state["past"] = []
75 |     st.session_state["input"] = ""
76 |     st.session_state["conversation_id"] = None
77 | 
78 | 
79 | # Set up the Streamlit app layout
80 | st.title("🤖 Chat Bot with 🧠")
81 | st.subheader(" Powered by ChatGPT Memory + Redis Search")
82 | 
83 | 
84 | # Warn if the required configuration is missing
85 | if not OPENAI_API_KEY:
86 |     st.sidebar.warning("API key required to try this app. The API key is not stored in any form.")
87 | elif not (REDIS_HOST and REDIS_PASSWORD and REDIS_PORT):
88 |     st.sidebar.warning(
89 |         "The `REDIS_HOST`, `REDIS_PORT`, and `REDIS_PASSWORD` env variables are required to try this app. Please set them properly."
90 |     )
91 | 
92 | 
93 | # Add a button to start a new chat
94 | st.sidebar.button("New Chat", on_click=new_chat, type="primary")
95 | 
96 | # Get the user input
97 | user_input = get_text()
98 | 
99 | # Allow the conversation to be downloaded as well
100 | download_str = []
101 | # Display the conversation history using an expander, and allow the user to download it
102 | with st.expander("Conversation", expanded=True):
103 |     for i in range(len(st.session_state["generated"]) - 1, -1, -1):
104 |         st.info(st.session_state["past"][i], icon="🧐")
105 |         st.success(st.session_state["generated"][i], icon="🤖")
106 |         download_str.append(st.session_state["past"][i])
107 |         download_str.append(st.session_state["generated"][i])
108 | 
109 |     # Only offer the download button when there is something to download
110 |     download_text = "\n".join(download_str)
111 |     if download_text:
112 |         st.download_button("Download", download_text)
113 | 
114 | # Display stored conversation sessions in the sidebar
115 | for i, sublist in enumerate(st.session_state.stored_session):
116 |     with st.sidebar.expander(label=f"Conversation-Session:{i}"):
117 |         st.write(sublist)
118 | 
119 | # Allow the user to clear all stored conversation sessions
120 | if st.session_state.stored_session:
121 |     if st.sidebar.checkbox("Clear-all"):
122 |         del st.session_state.stored_session
123 | 
--------------------------------------------------------------------------------
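To try the full UI locally, the typical flow (an assumption based on the defaults above, not documented in this dump) is to start the REST API first with `uvicorn rest_api:app`, so that it serves on http://localhost:8000, and then launch the front end with `streamlit run ui.py`, which posts each message to the converse endpoint shown in rest_api.py.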