├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── docs ├── _static │ └── custom.css ├── conf.py ├── examples │ ├── base_usage.py │ ├── basic_usage.rst │ ├── complete_usage.py │ └── index.rst ├── index.rst ├── installation.rst └── quickstart.rst ├── mem4ai ├── __init__.py ├── core │ ├── __init__.py │ ├── embedding_manager.py │ ├── memory.py │ └── memory_manager.py ├── memtor.py ├── strategies │ ├── __init__.py │ ├── embedding_strategy.py │ ├── knowledge_extraction │ │ ├── __init__.py │ │ ├── base.py │ │ ├── echo.py │ │ ├── llm.py │ │ └── summary.py │ ├── search_strategy.py │ └── storage_strategy.py └── utils │ ├── __init__.py │ └── config_manager.py ├── playground.py ├── setup.py └── tests └── test_memtor.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | docs/make.bat 74 | docs/Makefile 75 | 76 | 77 | # PyBuilder 78 | .pybuilder/ 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # poetry 101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 102 | # This is especially recommended for binary packages to ensure reproducibility, and is more 103 | # commonly ignored for libraries. 104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 105 | #poetry.lock 106 | 107 | # pdm 108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
109 | #pdm.lock 110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 111 | # in version control. 112 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 113 | .pdm.toml 114 | .pdm-python 115 | .pdm-build/ 116 | 117 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 118 | __pypackages__/ 119 | 120 | # Celery stuff 121 | celerybeat-schedule 122 | celerybeat.pid 123 | 124 | # SageMath parsed files 125 | *.sage.py 126 | 127 | # Environments 128 | .env 129 | .venv 130 | env/ 131 | venv/ 132 | ENV/ 133 | env.bak/ 134 | venv.bak/ 135 | 136 | # Spyder project settings 137 | .spyderproject 138 | .spyproject 139 | 140 | # Rope project settings 141 | .ropeproject 142 | 143 | # mkdocs documentation 144 | /site 145 | 146 | # mypy 147 | .mypy_cache/ 148 | .dmypy.json 149 | dmypy.json 150 | 151 | # Pyre type checker 152 | .pyre/ 153 | 154 | # pytype static type analyzer 155 | .pytype/ 156 | 157 | # Cython debug symbols 158 | cython_debug/ 159 | 160 | # PyCharm 161 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 162 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 163 | # and can be added to the global gitignore or merged into this file. For a more nuclear 164 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 165 | #.idea/ 166 | 167 | .mem4ai 168 | .pytest_cache 169 | 170 | todo.md 171 | tmp/ 172 | 173 | git_changes.md 174 | git_changes.py 175 | git_issues.py 176 | git_issues.md 177 | 178 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## [2024-11-01 - 18:05] Added Initial Modules for Core, Strategies, and Utils 4 | Initialized new files for core, strategies, and utilities modules. 
This update lays the foundation for further development in the project, enabling better organization and modularization of code. 5 | 6 | ## [2024-11-01 - 17:57] Update Package Inclusion in Setup Configuration 7 | Modified the package inclusion to allow only packages starting with 'mem4ai' and added package data to include JSON and YAML configuration files. 8 | 9 | ## [2024-11-01 15:20:40] Update numpy dependency version in setup.py 10 | Changed the numpy dependency from 'numpy>=2.0.1' to 'numpy>=1.26.0,<2.1.1' to ensure compatibility with existing code and prevent potential issues with version 2.0 or higher. 11 | 12 | ## [2024-11-01 - 16:20] Version Update and New Strategy Inclusion 13 | Updated the package version to 0.1.1. Included 'KnowledgeExtractionStrategy' in the package exports to enhance functionality. 14 | 15 | ## [31-Oct-2024] Enhanced Memory Storage and Retrieval System 16 | 17 | ### Added 18 | - Timestamp tracking for all memories 19 | - New indexed storage capabilities: 20 | - Timestamp-based indexing 21 | - Metadata-based indexing 22 | - Multi-dimensional filtering 23 | - New retrieval methods: 24 | - `find_recent`: Get latest memories with optional filters 25 | - `find_by_time`: Query memories within time ranges 26 | - `find_by_meta`: Search using metadata combinations 27 | - Separate LMDB environments for efficient index management 28 | 29 | ### Enhanced 30 | - Memory class with automatic timestamp tracking 31 | - Storage strategy with improved filtering capabilities 32 | - LMDB storage implementation with index management 33 | - Deletion handling with proper index cleanup 34 | 35 | ### Developer Notes 36 | - Added comprehensive test suite for storage and retrieval 37 | - Implemented efficient index updating mechanisms 38 | - Added support for complex queries combining time and metadata filters 39 | - Improved memory cleanup with proper index maintenance 40 | 41 | ## [31-Oct-2024] Knowledge Extraction for Enhanced Memory Context 42 | 43 | ### Added 
44 | - Knowledge extraction system for memories with configurable strategies 45 | - Support for LLM-based and simple extraction methods 46 | - Automatic context generation from user-assistant interactions 47 | - Configurable extraction settings in config manager 48 | - New context field in Memory class to store extracted knowledge 49 | - Extended memory creation to handle both user messages and assistant responses 50 | 51 | ### Changed 52 | - Updated Memory class to support context storage and history 53 | - Modified Memtor initialization to include extraction strategy configuration 54 | - Enhanced memory update system to track context changes in history 55 | - Expanded add_memory method to process combined user-assistant interactions 56 | 57 | ### Configuration 58 | - Added new extraction configuration options: 59 | - `extraction.enabled`: Toggle knowledge extraction 60 | - `extraction.type`: Choose between 'llm' or 'simple' strategies 61 | - `extraction.model`: Specify LLM model for extraction 62 | - `extraction.store_full_response`: Control response storage 63 | - `extraction.extraction_timeout`: Set processing timeout 64 | - `extraction.retries`: Configure retry attempts 65 | 66 | ### Developer Notes 67 | - Added comprehensive test suite for knowledge extraction functionality 68 | - Implemented two test approaches: 69 | - Complex extraction with detailed context structure 70 | - Simple summary-based extraction with keywords -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2023] [Taranjeet Singh] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | recursive-include mem4ai *.py 3 | recursive-include mem4ai/core *.py 4 | recursive-include mem4ai/strategies *.py 5 | recursive-include mem4ai/utils *.py 6 | recursive-include mem4ai/strategies/knowledge_extraction *.py -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 26 | 27 | # 🧠 Mem4AI: An LLM-Friendly memory management library. 28 | 29 | Mem4AI enhances AI assistants and agents with an intelligent memory layer, enabling personalized and context-aware AI interactions. Perfect for building smarter chatbots, AI assistants, and autonomous systems that remember and learn.
30 | 31 | ## ✨ Core Features 32 | 33 | - 🔀 **Multi-Level Memory**: User, Session, and AI Agent memory retention 34 | - 🎯 **Adaptive Personalization**: Continuous improvement based on interactions 35 | - 🚀 **Fast Local Storage**: Efficient on-disk storage with quick access 36 | - 🔍 **Advanced Semantic Search**: Find relevant memories quickly 37 | - 🏷️ **Flexible Metadata**: Tag and filter memories easily 38 | - 🔧 **Customizable Strategies**: Adapt embedding and storage to your needs 39 | - 💾 **Persistent Storage**: Memories that last across sessions 40 | 41 | ## 🚀 Quick Start 42 | 43 | Install Mem4AI: 44 | 45 | ```bash 46 | pip install Mem4AI 47 | ``` 48 | 49 | Here's a simple example of how to use Mem4AI: 50 | 51 | ```python 52 | from mem4ai import Memtor 53 | 54 | # Initialize Memtor 55 | memtor = Memtor() 56 | 57 | # Add a memory 58 | memory_id = memtor.add_memory( 59 | "The user prefers dark mode in all applications", 60 | metadata={"preference": "ui", "mode": "dark"}, 61 | user_id="alice" 62 | ) 63 | 64 | # Search for memories 65 | results = memtor.search_memories("user interface preferences", user_id="alice") 66 | print(results[0].content) # Output: The user prefers dark mode in all applications 67 | 68 | # Update a memory 69 | memtor.update_memory( 70 | memory_id, 71 | "The user prefers dark mode, except for document editing", 72 | metadata={"preference": "ui", "mode": "dark", "exception": "document_editing"} 73 | ) 74 | 75 | # Delete a memory 76 | memtor.delete_memory(memory_id) 77 | ``` 78 | 79 | ## 🛠️ Use Cases 80 | 81 | - 🤖 Chatbots with long-term memory 82 | - 📊 Personalized recommendation systems 83 | - 🧠 Knowledge management systems 84 | - 🎯 Context-aware AI assistants 85 | 86 | ## 📚 Documentation 87 | 88 | Coming soon! 89 | 90 | ## 🤝 Contributing 91 | 92 | We welcome contributions! Check out our [contribution guidelines](CONTRIBUTING.md) for more information. 93 | 94 | ## 📄 License 95 | 96 | Mem4AI is released under the Apache 2.0 license. 
See [LICENSE](LICENSE) for more information. 97 | 98 | --- 99 | 100 | Built with ❤️ by Unclecode. (https://x.com/unclecode) 101 | 102 | Give Mem4AI a star ⭐ if you find it helpful! -------------------------------------------------------------------------------- /docs/_static/custom.css: -------------------------------------------------------------------------------- 1 | /* Dark terminal-like theme */ 2 | body { 3 | background-color: #1e1e1e; 4 | color: #d4d4d4; 5 | font-family: 'Consolas', 'Courier New', monospace; 6 | } 7 | 8 | .wy-nav-content-wrap { 9 | background-color: #1e1e1e; 10 | } 11 | 12 | .wy-nav-content { 13 | background-color: #1e1e1e; 14 | max-width: 900px; 15 | } 16 | 17 | .wy-side-nav-search { 18 | background-color: #252526; 19 | } 20 | 21 | .wy-menu-vertical li.current { 22 | background-color: #252526; 23 | } 24 | 25 | .wy-menu-vertical li.toctree-l2.current > a { 26 | background-color: #2d2d2d; 27 | } 28 | 29 | .rst-content div[class^="highlight"] { 30 | background-color: #252526; 31 | border: 1px solid #373737; 32 | } 33 | 34 | .rst-content code { 35 | background-color: #252526; 36 | border: 1px solid #373737; 37 | color: #d4d4d4; 38 | } 39 | 40 | a { 41 | color: #569cd6; 42 | } 43 | 44 | a:hover { 45 | color: #9cdcfe; 46 | } 47 | 48 | .wy-nav-top { 49 | background-color: #252526; 50 | } 51 | 52 | /* Adjust other elements as needed */ 53 | 54 | /* Add this to your custom.css */ 55 | .rst-content h1::after { 56 | content: '|'; 57 | animation: blink 1s step-end infinite; 58 | margin-left: 5px; 59 | } 60 | 61 | @keyframes blink { 62 | 0% { opacity: 1; } 63 | 50% { opacity: 0; } 64 | 100% { opacity: 1; } 65 | } -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | import sphinx_rtd_theme 2 | # Configuration file for the Sphinx documentation builder. 
3 | # 4 | # For the full list of built-in configuration values, see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Project information ----------------------------------------------------- 8 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 9 | 10 | project = 'memtor' 11 | copyright = '2024, unclecode' 12 | author = 'unclecode' 13 | release = '0.1.0' 14 | 15 | # -- General configuration --------------------------------------------------- 16 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 17 | 18 | extensions = [] 19 | 20 | templates_path = ['_templates'] 21 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 22 | 23 | 24 | 25 | # -- Options for HTML output ------------------------------------------------- 26 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 27 | 28 | html_theme = 'alabaster' 29 | html_static_path = ['_static'] 30 | 31 | 32 | html_theme = 'sphinx_rtd_theme' 33 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 34 | 35 | # Add custom CSS 36 | html_static_path = ['_static'] 37 | html_css_files = ['custom.css'] -------------------------------------------------------------------------------- /docs/examples/base_usage.py: -------------------------------------------------------------------------------- 1 | from mem4ai import Memtor 2 | 3 | def main(): 4 | # Initialize Memtor 5 | memtor = Memtor() 6 | print("Memtor initialized successfully.") 7 | 8 | # Add memories 9 | print("\nAdding memories...") 10 | memory1_id = memtor.add_memory( 11 | "The quick brown fox jumps over the lazy dog", 12 | metadata={"tag": "animals"}, 13 | user_id="user1", 14 | session_id="session1" 15 | ) 16 | print(f"Memory 1 added with ID: {memory1_id}") 17 | 18 | memory2_id = memtor.add_memory( 19 | "To be or not to be, that is the question", 20 | metadata={"tag": "literature"}, 21 | user_id="user1", 22 
| session_id="session1" 23 | ) 24 | print(f"Memory 2 added with ID: {memory2_id}") 25 | 26 | # Search memories 27 | print("\nSearching memories...") 28 | search_results = memtor.search_memories("fox", top_k=1, user_id="user1") 29 | print(f"Search result: {search_results[0].content}") 30 | 31 | # Update memory 32 | print("\nUpdating memory...") 33 | updated = memtor.update_memory( 34 | memory1_id, 35 | "The quick brown fox leaps over the lazy dog", 36 | metadata={"tag": "animals", "updated": True} 37 | ) 38 | if updated: 39 | print("Memory updated successfully") 40 | 41 | # Retrieve updated memory 42 | updated_memory = memtor.get_memory(memory1_id) 43 | print(f"Updated memory content: {updated_memory.content}") 44 | print(f"Updated memory metadata: {updated_memory.metadata}") 45 | 46 | # List memories 47 | print("\nListing memories for user1...") 48 | user_memories = memtor.list_memories(user_id="user1") 49 | for memory in user_memories: 50 | print(f"- {memory.content}") 51 | 52 | # Delete memory 53 | print("\nDeleting memory...") 54 | deleted = memtor.delete_memory(memory2_id) 55 | if deleted: 56 | print("Memory deleted successfully") 57 | 58 | # Verify deletion 59 | remaining_memories = memtor.list_memories(user_id="user1") 60 | print(f"Remaining memories: {len(remaining_memories)}") 61 | 62 | # Clean up (optional) 63 | print("\nCleaning up...") 64 | memtor.delete_memories_by_user("user1") 65 | print("All test memories deleted") 66 | 67 | if __name__ == "__main__": 68 | main() -------------------------------------------------------------------------------- /docs/examples/basic_usage.rst: -------------------------------------------------------------------------------- 1 | Basic Usage Example 2 | =================== 3 | 4 | This example demonstrates the basic usage of Memtor, including how to add, retrieve, update, and delete memories. 5 | 6 | .. 
code-block:: python 7 | 8 | from mem4ai import Memtor 9 | 10 | # Initialize Memtor 11 | memtor = Memtor() 12 | 13 | # Add a memory 14 | memory_id = memtor.add_memory( 15 | "The quick brown fox jumps over the lazy dog", 16 | metadata={"tag": "animals"}, 17 | user_id="user1", 18 | session_id="session1" 19 | ) 20 | print(f"Added memory with ID: {memory_id}") 21 | 22 | # Retrieve the memory 23 | memory = memtor.get_memory(memory_id) 24 | print(f"Retrieved memory: {memory.content}") 25 | 26 | # Update the memory 27 | memtor.update_memory( 28 | memory_id, 29 | "The quick brown fox leaps over the lazy dog", 30 | metadata={"tag": "animals", "updated": True} 31 | ) 32 | 33 | # Search for memories 34 | results = memtor.search_memories("fox", top_k=1, user_id="user1") 35 | print(f"Search result: {results[0].content}") 36 | 37 | # Delete the memory 38 | memtor.delete_memory(memory_id) 39 | print("Memory deleted") 40 | 41 | This script covers the basic operations you can perform with Memtor. It demonstrates how to: 42 | 43 | 1. Initialize a Memtor instance 44 | 2. Add a new memory with content, metadata, user ID, and session ID 45 | 3. Retrieve a memory by its ID 46 | 4. Update an existing memory 47 | 5. Search for memories 48 | 6. Delete a memory 49 | 50 | You can run this example by executing the script in the `examples` directory. -------------------------------------------------------------------------------- /docs/examples/complete_usage.py: -------------------------------------------------------------------------------- 1 | """ 2 | Memtor: Complete Usage Guide 3 | 4 | This script demonstrates all the features and functionalities of the Mem4AI library. 5 | It covers initialization, memory operations, searching, filtering, and advanced usage. 
6 | """ 7 | 8 | from mem4ai import Memtor, Memory 9 | from mem4ai.strategies import CustomEmbeddingStrategy, CustomStorageStrategy, CustomSearchStrategy 10 | import numpy as np 11 | 12 | def main(): 13 | # Initialize Mem4AI Memtor 14 | print("Initializing Memtor...") 15 | memtor = Memtor() 16 | print("Memtor initialized successfully.\n") 17 | 18 | # Basic Memory Operations 19 | print("Performing basic memory operations...") 20 | 21 | # Adding memories 22 | memory1_id = memtor.add_memory( 23 | "The quick brown fox jumps over the lazy dog", 24 | metadata={"tag": "animals", "complexity": "simple"}, 25 | user_id="user1", 26 | session_id="session1" 27 | ) 28 | print(f"Added memory 1 with ID: {memory1_id}") 29 | 30 | memory2_id = memtor.add_memory( 31 | "To be or not to be, that is the question", 32 | metadata={"tag": "literature", "author": "Shakespeare"}, 33 | user_id="user1", 34 | session_id="session1" 35 | ) 36 | print(f"Added memory 2 with ID: {memory2_id}") 37 | 38 | memory3_id = memtor.add_memory( 39 | "E = mc^2", 40 | metadata={"tag": "science", "field": "physics"}, 41 | user_id="user2", 42 | session_id="session2" 43 | ) 44 | print(f"Added memory 3 with ID: {memory3_id}") 45 | 46 | # Retrieving memories 47 | retrieved_memory = memtor.get_memory(memory1_id) 48 | print(f"Retrieved memory: {retrieved_memory.content}") 49 | 50 | # Updating memories 51 | updated = memtor.update_memory( 52 | memory1_id, 53 | "The quick brown fox leaps over the lazy dog", 54 | metadata={"tag": "animals", "complexity": "simple", "action": "leap"} 55 | ) 56 | print(f"Memory updated: {updated}") 57 | 58 | # Deleting memories 59 | deleted = memtor.delete_memory(memory3_id) 60 | print(f"Memory deleted: {deleted}") 61 | 62 | print("Basic memory operations completed.\n") 63 | 64 | # Searching and Filtering 65 | print("Demonstrating search and filter capabilities...") 66 | 67 | # Simple search 68 | results = memtor.search_memories("fox", top_k=1) 69 | print(f"Search result for 'fox': 
{results[0].content}") 70 | 71 | # Search with user filter 72 | user_results = memtor.search_memories("question", top_k=1, user_id="user1") 73 | print(f"Search result for 'question' (user1): {user_results[0].content}") 74 | 75 | # Search with metadata filter 76 | metadata_results = memtor.search_memories( 77 | "animal", 78 | top_k=1, 79 | metadata_filters=[("tag", "==", "animals")] 80 | ) 81 | print(f"Search result with metadata filter: {metadata_results[0].content}") 82 | 83 | print("Search and filter demonstrations completed.\n") 84 | 85 | # Advanced Usage 86 | print("Demonstrating advanced features...") 87 | 88 | # List all memories 89 | all_memories = memtor.list_memories() 90 | print(f"Total memories: {len(all_memories)}") 91 | 92 | # List memories by user 93 | user_memories = memtor.list_memories(user_id="user1") 94 | print(f"Memories for user1: {len(user_memories)}") 95 | 96 | # List memories by session 97 | session_memories = memtor.list_memories(session_id="session1") 98 | print(f"Memories for session1: {len(session_memories)}") 99 | 100 | # Delete memories by user 101 | deleted_count = memtor.delete_memories_by_user("user2") 102 | print(f"Deleted {deleted_count} memories for user2") 103 | 104 | # Delete memories by session 105 | deleted_count = memtor.delete_memories_by_session("session2") 106 | print(f"Deleted {deleted_count} memories for session2") 107 | 108 | # Clear all storage (use with caution!) 
109 | # memtor.clear_all_storage() 110 | # print("All storage cleared") 111 | 112 | print("Advanced feature demonstrations completed.\n") 113 | 114 | # Custom Strategies 115 | print("Demonstrating custom strategies...") 116 | 117 | # Custom Embedding Strategy 118 | class SimpleEmbeddingStrategy(CustomEmbeddingStrategy): 119 | def embed(self, text): 120 | # Simple embedding: use the length of words as features 121 | return [len(word) for word in text.split()] 122 | 123 | # Custom Storage Strategy 124 | class InMemoryStorageStrategy(CustomStorageStrategy): 125 | def __init__(self): 126 | self.storage = {} 127 | 128 | def save(self, memory): 129 | self.storage[memory.id] = memory 130 | 131 | def load(self, memory_id): 132 | return self.storage.get(memory_id) 133 | 134 | def update(self, memory_id, memory): 135 | if memory_id in self.storage: 136 | self.storage[memory_id] = memory 137 | return True 138 | return False 139 | 140 | def delete(self, memory_id): 141 | if memory_id in self.storage: 142 | del self.storage[memory_id] 143 | return True 144 | return False 145 | 146 | def list_all(self): 147 | return list(self.storage.values()) 148 | 149 | def clear_all(self): 150 | self.storage.clear() 151 | 152 | # Custom Search Strategy 153 | class SimpleSearchStrategy(CustomSearchStrategy): 154 | def search(self, query, memories, top_k, keywords, metadata_filters): 155 | # Simple search: rank by number of common words 156 | query_words = set(query.lower().split()) 157 | scored_memories = [ 158 | (memory, len(set(memory.content.lower().split()) & query_words)) 159 | for memory in memories 160 | ] 161 | sorted_memories = sorted(scored_memories, key=lambda x: x[1], reverse=True) 162 | return [memory for memory, score in sorted_memories[:top_k]] 163 | 164 | # Initialize Memtor with custom strategies 165 | custom_memtor = Memtor( 166 | embedding_strategy=SimpleEmbeddingStrategy(), 167 | storage_strategy=InMemoryStorageStrategy(), 168 | search_strategy=SimpleSearchStrategy() 169 | 
) 170 | 171 | # Use custom Memtor 172 | custom_memory_id = custom_memtor.add_memory("Custom strategies are powerful and flexible") 173 | custom_results = custom_memtor.search_memories("powerful strategies", top_k=1) 174 | print(f"Custom search result: {custom_results[0].content}") 175 | 176 | print("Custom strategy demonstrations completed.\n") 177 | 178 | # Configuration 179 | print("Demonstrating configuration...") 180 | 181 | # Update configuration 182 | memtor.configure(embedding={"dimension": 128}, search={"algorithm": "cosine"}) 183 | print("Configuration updated") 184 | 185 | print("Configuration demonstration completed.\n") 186 | 187 | print("Mem4AI complete usage guide finished.") 188 | 189 | if __name__ == "__main__": 190 | main() -------------------------------------------------------------------------------- /docs/examples/index.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | This section contains various examples demonstrating how to use Mem4AI in different scenarios. 5 | 6 | .. toctree:: 7 | :maxdepth: 1 8 | :caption: Available Examples: 9 | 10 | basic_usage 11 | advanced_search 12 | custom_strategies 13 | integration_with_llms 14 | 15 | Basic Usage 16 | ----------- 17 | 18 | The :doc:`basic_usage` example demonstrates the fundamental operations of Mem4AI, including adding, retrieving, updating, and deleting memories. 19 | 20 | Advanced Search 21 | --------------- 22 | 23 | In the :doc:`advanced_search` example, you'll learn how to perform complex searches using metadata filters and keywords. 24 | 25 | Custom Strategies 26 | ----------------- 27 | 28 | The :doc:`custom_strategies` example shows how to create and use custom embedding, storage, and search strategies. 29 | 30 | Integration with LLMs 31 | --------------------- 32 | 33 | :doc:`integration_with_llms` provides an example of how to integrate Mem4AI with Large Language Models for enhanced functionality. 
34 | 35 | Running the Examples 36 | -------------------- 37 | 38 | To run these examples, make sure you have Mem4AI installed and then execute the Python scripts in the `examples` directory. 39 | 40 | For instance, to run the basic usage example: 41 | 42 | .. code-block:: bash 43 | 44 | python examples/basic_usage.py 45 | 46 | Make sure to check the comments in each example file for any specific instructions or requirements. -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to Mem4AI's documentation! 2 | ================================== 3 | 4 | Mem4AI is a powerful memory management library for LLMs and AI systems. 5 | 6 | .. toctree:: 7 | :maxdepth: 2 8 | :caption: Contents: 9 | 10 | installation 11 | quickstart 12 | examples/index 13 | 14 | Indices and tables 15 | ================== 16 | 17 | * :ref:`genindex` 18 | * :ref:`modindex` 19 | * :ref:`search` -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | This section covers the installation process for Mem4AI. 5 | 6 | Requirements 7 | ------------ 8 | 9 | - Python 3.7+ 10 | - pip 11 | 12 | Installing Mem4AI 13 | ----------------- 14 | 15 | You can install Mem4AI using pip: 16 | 17 | .. code-block:: bash 18 | 19 | pip install mem4ai 20 | 21 | Or, if you're installing from source: 22 | 23 | .. code-block:: bash 24 | 25 | git clone https://github.com/unclecode/mem4ai.git 26 | cd mem4ai 27 | pip install -e . 28 | 29 | Verifying Installation 30 | ---------------------- 31 | 32 | After installation, you can verify that Mem4AI is installed correctly by running: 33 | 34 | .. code-block:: python 35 | 36 | import mem4ai 37 | print(mem4ai.__version__) 38 | 39 | This should print the version number of Mem4AI. 
-------------------------------------------------------------------------------- /docs/quickstart.rst: -------------------------------------------------------------------------------- 1 | Quick Start Guide 2 | ================= 3 | 4 | This guide will help you get started with Memtor quickly. 5 | 6 | Creating a Memtor Instance 7 | -------------------------- 8 | 9 | First, import Memtor and create an instance: 10 | 11 | .. code-block:: python 12 | 13 | from mem4ai import Memtor 14 | 15 | memtor = Memtor() 16 | 17 | Adding Memories 18 | --------------- 19 | 20 | You can add memories like this: 21 | 22 | .. code-block:: python 23 | 24 | memory_id = memtor.add_memory( 25 | "The quick brown fox jumps over the lazy dog", 26 | metadata={"tag": "animals"}, 27 | user_id="user1", 28 | session_id="session1" 29 | ) 30 | 31 | Searching Memories 32 | ------------------ 33 | 34 | To search for memories: 35 | 36 | .. code-block:: python 37 | 38 | results = memtor.search_memories("fox", top_k=1, user_id="user1") 39 | print(results[0].content) 40 | 41 | Updating and Deleting Memories 42 | ------------------------------ 43 | 44 | Update a memory: 45 | 46 | .. code-block:: python 47 | 48 | memtor.update_memory( 49 | memory_id, 50 | "The quick brown fox leaps over the lazy dog", 51 | metadata={"tag": "animals", "updated": True} 52 | ) 53 | 54 | Delete a memory: 55 | 56 | .. code-block:: python 57 | 58 | memtor.delete_memory(memory_id) 59 | 60 | For more detailed information, check out the full documentation and API reference. -------------------------------------------------------------------------------- /mem4ai/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mem4AI: A memory management library for LLMs and AI systems. 3 | 4 | This library provides functionality for storing, retrieving, and searching memories, 5 | with support for embedding-based similarity search and metadata filtering. 
6 | """ 7 | 8 | from .memtor import Memtor 9 | from .core.memory import Memory 10 | from .strategies.embedding_strategy import EmbeddingStrategy 11 | from .strategies.storage_strategy import StorageStrategy 12 | from .strategies.search_strategy import SearchStrategy 13 | from .strategies.knowledge_extraction import LLMExtractionStrategy, SummaryExtractionStrategy, EchoKnowledgeStrategy, SummaryExtractionStrategy 14 | from .strategies.knowledge_extraction import KnowledgeExtractionStrategy 15 | from .utils.config_manager import config_manager 16 | 17 | # Version of the memtor package 18 | __version__ = "0.1.1" 19 | 20 | # Define what should be importable from the package 21 | __all__ = ['Memtor', 'Memory', 'EmbeddingStrategy', 'StorageStrategy', 'SearchStrategy', 'config_manager', 'LLMExtractionStrategy', 'SummaryExtractionStrategy', 'EchoKnowledgeStrategy', 'KnowledgeExtractionStrategy'] 22 | 23 | # Package level initialization code (if any) 24 | def initialize(): 25 | """ 26 | Perform any necessary package-level initialization. 27 | This function is called when the package is imported. 
28 | """ 29 | # For now, we don't need any initialization, but we can add code here if needed in the future 30 | pass 31 | 32 | # Call the initialize function 33 | initialize() -------------------------------------------------------------------------------- /mem4ai/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unclecode/mem4ai/5b6eb4c79210a51507fa86b148fe7fb68cdf541f/mem4ai/core/__init__.py -------------------------------------------------------------------------------- /mem4ai/core/embedding_manager.py: -------------------------------------------------------------------------------- 1 | from ..strategies.embedding_strategy import get_embedding_strategy, EmbeddingStrategy 2 | from ..utils.config_manager import config_manager 3 | import numpy as np 4 | 5 | class EmbeddingManager: 6 | def __init__(self, embedding_strategy: EmbeddingStrategy = None): 7 | self.embedding_strategy = embedding_strategy or get_embedding_strategy() 8 | 9 | def embed(self, text) -> np.ndarray: 10 | return self.embedding_strategy.embed(text) 11 | 12 | @property 13 | def dimension(self) -> int: 14 | return self.embedding_strategy.dimension -------------------------------------------------------------------------------- /mem4ai/core/memory.py: -------------------------------------------------------------------------------- 1 | import uuid, json 2 | from typing import Any, Dict, List, Optional 3 | from datetime import datetime 4 | 5 | class Memory: 6 | def __init__(self, content: str, metadata: Optional[Dict[str, Any]] = None, 7 | embedding: Optional[Any] = None, context: Optional[Dict[str, Any]] = None, 8 | user_id: Optional[str] = None, session_id: Optional[str] = None, 9 | agent_id: Optional[str] = None): 10 | self.id = str(uuid.uuid4()) 11 | self.content = content 12 | self.embedding = embedding 13 | self.context = context or {} # New field for extracted knowledge 14 | self.metadata = metadata or {} 15 | 
self.update_history = [] 16 | self.timestamp = datetime.now() 17 | 18 | if user_id: 19 | self.metadata['user_id'] = user_id 20 | if session_id: 21 | self.metadata['session_id'] = session_id 22 | if agent_id: 23 | self.metadata['agent_id'] = agent_id 24 | 25 | def update(self, new_content: str, new_metadata: Dict[str, Any] = None, 26 | new_context: Dict[str, Any] = None): 27 | self.update_history.append({ 28 | "content": self.content, 29 | "metadata": self.metadata.copy(), 30 | "context": self.context.copy() # Also store context history 31 | }) 32 | self.content = new_content 33 | self.timestamp = datetime.now() 34 | if new_metadata: 35 | self.metadata.update(new_metadata) 36 | if new_context: 37 | self.context.update(new_context) 38 | 39 | 40 | def dumps(self) -> Dict[str, Any]: 41 | return json.dumps({ 42 | "id": self.id, 43 | "content": self.content, 44 | # "embedding": self.embedding, 45 | "context": self.context, 46 | "metadata": self.metadata, 47 | # "update_history": self.update_history, 48 | "timestamp": self.timestamp.isoformat() 49 | }) 50 | 51 | def __repr__(self): 52 | return f"Memory(id={self.id}, content={self.content[:50]}...)" -------------------------------------------------------------------------------- /mem4ai/core/memory_manager.py: -------------------------------------------------------------------------------- 1 | from typing import List, Dict, Any, Optional 2 | from .memory import Memory 3 | from ..strategies.embedding_strategy import * 4 | from ..strategies.storage_strategy import * 5 | from ..strategies.search_strategy import * 6 | from ..utils.config_manager import config_manager 7 | 8 | class MemoryManager: 9 | def __init__(self, embedding_strategy: EmbeddingStrategy, 10 | storage_strategy: StorageStrategy, 11 | search_strategy: SearchStrategy): 12 | self.embedding_strategy = embedding_strategy or self._get_default_embedding_strategy() 13 | self.storage_strategy = storage_strategy or self._get_default_storage_strategy() 14 | 
self.search_strategy = search_strategy or self._get_default_search_strategy() 15 | self.max_history = config_manager.get('memory.max_history', 5) 16 | 17 | def _get_default_embedding_strategy(self): 18 | return get_embedding_strategy() 19 | 20 | def _get_default_storage_strategy(self): 21 | return get_storage_strategy() 22 | 23 | def _get_default_search_strategy(self): 24 | return get_search_strategy() 25 | 26 | def add_memory(self, content: str, user_id: Optional[str] = None, 27 | session_id: Optional[str] = None, agent_id: Optional[str] = None, 28 | metadata: Dict[str, Any] = None) -> str: 29 | metadata = metadata or {} 30 | if user_id: 31 | metadata['user_id'] = user_id 32 | if session_id: 33 | metadata['session_id'] = session_id 34 | if agent_id: 35 | metadata['agent_id'] = agent_id 36 | 37 | memory = Memory(content, metadata) 38 | memory.embedding = self.embedding_strategy.embed(content) 39 | self.storage_strategy.save(memory) 40 | return memory.id 41 | 42 | def get_memory(self, memory_id: str) -> Optional[Memory]: 43 | return self.storage_strategy.load(memory_id) 44 | 45 | def update_memory(self, memory_id: str, new_content: str, metadata: Dict[str, Any] = None) -> bool: 46 | memory = self.get_memory(memory_id) 47 | if memory: 48 | memory.update(new_content, metadata) 49 | memory.embedding = self.embedding_strategy.embed(new_content) 50 | self.storage_strategy.update(memory_id, memory) 51 | return True 52 | return False 53 | 54 | def delete_memory(self, memory_id: str) -> bool: 55 | return self.storage_strategy.delete(memory_id) 56 | 57 | def list_memories(self, user_id: Optional[str] = None, session_id: Optional[str] = None, 58 | agent_id: Optional[str] = None, metadata_filters: List[tuple] = None) -> List[Memory]: 59 | all_memories = self.storage_strategy.list_all() 60 | filtered_memories = self.storage_strategy.apply_filters(all_memories, metadata_filters or []) 61 | 62 | return [mem for mem in filtered_memories 63 | if (not user_id or 
mem.metadata.get('user_id') == user_id) and 64 | (not session_id or mem.metadata.get('session_id') == session_id) and 65 | (not agent_id or mem.metadata.get('agent_id') == agent_id)] 66 | 67 | def search_memories(self, query: str, top_k: int = 10, user_id: Optional[str] = None, 68 | session_id: Optional[str] = None, agent_id: Optional[str] = None, 69 | keywords: List[str] = None, metadata_filters: List[Tuple[str, str, Any]] = None) -> List[Memory]: 70 | top_k = top_k or config_manager.get('search.top_k', 10) 71 | all_memories = self.list_memories(user_id, session_id, agent_id, metadata_filters) 72 | return self.search_strategy.search(query, all_memories, top_k, keywords or [], metadata_filters or []) -------------------------------------------------------------------------------- /mem4ai/memtor.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional, Dict, Any, Tuple 2 | from .core.embedding_manager import EmbeddingManager 3 | from .strategies.embedding_strategy import get_embedding_strategy, EmbeddingStrategy 4 | from .strategies.storage_strategy import get_storage_strategy, StorageStrategy 5 | from .strategies.search_strategy import get_search_strategy, SearchStrategy 6 | from .strategies.knowledge_extraction import get_extraction_strategy 7 | from .core.memory import Memory 8 | from .utils.config_manager import config_manager 9 | from datetime import datetime 10 | 11 | class Memtor: 12 | def __init__(self, embedding_strategy=None, storage_strategy=None, 13 | search_strategy=None, extraction_strategy=None): 14 | """ 15 | Initialize the Memtor instance with the specified strategies. 16 | 17 | :param embedding_strategy: Strategy for creating embeddings 18 | :param storage_strategy: Strategy for storing memories 19 | :param search_strategy: Strategy for searching memories 20 | :param extraction_strategy: Strategy for knowledge extraction. 
21 | Can be None to disable extraction 22 | """ 23 | self.embedding_manager = EmbeddingManager(embedding_strategy) 24 | self.storage_strategy = storage_strategy or get_storage_strategy() 25 | self.search_strategy = search_strategy or get_search_strategy(self.embedding_manager) 26 | 27 | # Handle extraction strategy like other strategies 28 | if extraction_strategy is None: 29 | # Check config to see if extraction should be enabled 30 | if config_manager.get('extraction.enabled', True): 31 | self.extraction_strategy = get_extraction_strategy() 32 | else: 33 | self.extraction_strategy = None 34 | else: 35 | # If strategy is provided directly, use it 36 | self.extraction_strategy = extraction_strategy 37 | 38 | def add_memory(self, user_message: str, assistant_response: str, 39 | metadata: Optional[Dict[str, Any]] = None, 40 | user_id: Optional[str] = None, session_id: Optional[str] = None, 41 | agent_id: Optional[str] = None) -> str: 42 | """ 43 | Add a new memory with both user message and assistant response. 
44 | """ 45 | if not isinstance(user_message, str) or not isinstance(assistant_response, str): 46 | raise TypeError("Messages must be strings") 47 | 48 | # Combine messages for embedding 49 | combined_content = f"User: {user_message}\nAssistant: {assistant_response}" 50 | 51 | # Extract knowledge context if strategy exists 52 | context = None 53 | if self.extraction_strategy is not None: 54 | try: 55 | context = self.extraction_strategy.extract_knowledge(user_message, assistant_response) 56 | except Exception as e: 57 | print(f"Warning: Knowledge extraction failed: {str(e)}") 58 | context = None 59 | 60 | # Create embedding 61 | embedding = self.embedding_manager.embed(combined_content) 62 | 63 | # Create memory 64 | metadata = metadata or {} 65 | memory = Memory( 66 | content=combined_content, 67 | metadata=metadata, 68 | embedding=embedding, 69 | context=context, 70 | user_id=user_id, 71 | session_id=session_id, 72 | agent_id=agent_id 73 | ) 74 | 75 | self.storage_strategy.save(memory) 76 | return memory.id 77 | 78 | def get_memory(self, memory_id: str) -> Optional[Memory]: 79 | """ 80 | Retrieve a memory by its ID. 81 | 82 | :param memory_id: The ID of the memory to retrieve. 83 | :return: The Memory object if found, None otherwise. 84 | """ 85 | if not isinstance(memory_id, str): 86 | raise TypeError("Memory ID must be a string") 87 | 88 | return self.storage_strategy.load(memory_id) 89 | 90 | def update_memory(self, memory_id: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> bool: 91 | """ 92 | Update an existing memory. 93 | 94 | :param memory_id: The ID of the memory to update. 95 | :param content: The new content for the memory. 96 | :param metadata: New metadata to merge with existing metadata. 97 | :return: True if the update was successful, False otherwise. 
98 | """ 99 | if not isinstance(memory_id, str) or not isinstance(content, str): 100 | raise TypeError("Memory ID and content must be strings") 101 | 102 | memory = self.get_memory(memory_id) 103 | if memory: 104 | memory.update(content, metadata) 105 | memory.embedding = self.embedding_manager.embed(content) 106 | return self.storage_strategy.update(memory_id, memory) 107 | return False 108 | 109 | def delete_memory(self, memory_id: str) -> bool: 110 | """ 111 | Delete a memory by its ID. 112 | 113 | :param memory_id: The ID of the memory to delete. 114 | :return: True if the deletion was successful, False otherwise. 115 | """ 116 | if not isinstance(memory_id, str): 117 | raise TypeError("Memory ID must be a string") 118 | 119 | return self.storage_strategy.delete(memory_id) 120 | 121 | def delete_memories_by_user(self, user_id: str) -> int: 122 | """ 123 | Delete all memories associated with a specific user. 124 | 125 | :param user_id: The ID of the user whose memories should be deleted. 126 | :return: The number of memories deleted. 127 | """ 128 | memories = self.list_memories(user_id=user_id) 129 | deleted_count = 0 130 | for memory in memories: 131 | if self.delete_memory(memory.id): 132 | deleted_count += 1 133 | return deleted_count 134 | 135 | def delete_memories_by_session(self, session_id: str) -> int: 136 | """ 137 | Delete all memories associated with a specific session. 138 | 139 | :param session_id: The ID of the session whose memories should be deleted. 140 | :return: The number of memories deleted. 141 | """ 142 | memories = self.list_memories(session_id=session_id) 143 | deleted_count = 0 144 | for memory in memories: 145 | if self.delete_memory(memory.id): 146 | deleted_count += 1 147 | return deleted_count 148 | 149 | def clear_all_storage(self) -> bool: 150 | """ 151 | Clear all memories from storage. 152 | 153 | :return: True if the operation was successful, False otherwise. 
154 | """ 155 | try: 156 | self.storage_strategy.clear_all() 157 | return True 158 | except Exception as e: 159 | print(f"Error clearing storage: {str(e)}") 160 | return False 161 | 162 | def list_memories(self, user_id: Optional[str] = None, session_id: Optional[str] = None, 163 | agent_id: Optional[str] = None, metadata_filters: Optional[List[Tuple[str, str, Any]]] = None) -> List[Memory]: 164 | """ 165 | List memories based on optional filters. 166 | 167 | :param user_id: Optional user ID to filter memories. 168 | :param session_id: Optional session ID to filter memories. 169 | :param agent_id: Optional agent ID to filter memories. 170 | :param metadata_filters: Optional list of metadata filters. 171 | :return: List of Memory objects that match the filters. 172 | """ 173 | all_memories = self.storage_strategy.list_all() 174 | filtered_memories = [ 175 | mem for mem in all_memories 176 | if (not user_id or mem.metadata.get('user_id') == user_id) and 177 | (not session_id or mem.metadata.get('session_id') == session_id) and 178 | (not agent_id or mem.metadata.get('agent_id') == agent_id) 179 | ] 180 | 181 | if metadata_filters: 182 | filtered_memories = self.storage_strategy.apply_filters(filtered_memories, metadata_filters) 183 | 184 | return filtered_memories 185 | 186 | def search_memories(self, query: Optional[str] = None, top_k: int = 10, 187 | start_time: Optional[datetime] = None, end_time: Optional[datetime] = None, 188 | user_id: Optional[str] = None, session_id: Optional[str] = None, 189 | agent_id: Optional[str] = None, keywords: Optional[List[str]] = None, 190 | metadata_filters: Optional[List[Tuple[str, str, Any]]] = None, 191 | sort_by: str = 'relevance') -> List[Memory]: 192 | """ 193 | Enhanced search function that supports multiple search modes and combinations. 
194 | 195 | :param query: Optional search query for semantic search 196 | :param top_k: Maximum number of results to return 197 | :param start_time: Optional start time for time-range filtering 198 | :param end_time: Optional end time for time-range filtering 199 | :param user_id: Optional user ID filter 200 | :param session_id: Optional session ID filter 201 | :param agent_id: Optional agent ID filter 202 | :param keywords: Optional list of keywords to boost in search 203 | :param metadata_filters: Optional list of metadata filters as (key, op, value) tuples 204 | :param sort_by: How to sort results ('relevance', 'time_desc', 'time_asc') 205 | :return: List of Memory objects matching the search criteria 206 | """ 207 | # Input validation 208 | if sort_by not in ('relevance', 'time_desc', 'time_asc'): 209 | raise ValueError("sort_by must be one of: 'relevance', 'time_desc', 'time_asc'") 210 | 211 | # Build metadata filters dictionary 212 | meta_dict = {} 213 | if user_id: 214 | meta_dict['user_id'] = user_id 215 | if session_id: 216 | meta_dict['session_id'] = session_id 217 | if agent_id: 218 | meta_dict['agent_id'] = agent_id 219 | 220 | # Determine search mode and get initial results 221 | if query: 222 | # Semantic search mode 223 | if start_time or end_time: 224 | # Get time-filtered memories first 225 | memories = self.storage_strategy.find_by_time( 226 | start_time or datetime.min, 227 | end_time or datetime.max, 228 | **meta_dict 229 | ) 230 | # Then apply semantic search 231 | results = self.search_strategy.search( 232 | query=query, 233 | memories=memories, 234 | top_k=top_k, 235 | keywords=keywords or [], 236 | metadata_filters=metadata_filters or [] 237 | ) 238 | else: 239 | # Get filtered memories and apply semantic search 240 | memories = self.storage_strategy.find_by_meta(meta_dict) if meta_dict else self.storage_strategy.list_all() 241 | if metadata_filters: 242 | memories = self.storage_strategy.apply_filters(memories, metadata_filters) 243 | 
results = self.search_strategy.search( 244 | query=query, 245 | memories=memories, 246 | top_k=top_k, 247 | keywords=keywords or [], 248 | metadata_filters=[] # Already applied 249 | ) 250 | else: 251 | # Non-semantic search mode 252 | if start_time or end_time: 253 | # Time-based search 254 | results = self.storage_strategy.find_by_time( 255 | start_time or datetime.min, 256 | end_time or datetime.max, 257 | **meta_dict 258 | ) 259 | elif meta_dict or metadata_filters: 260 | # Metadata-based search 261 | results = self.storage_strategy.find_by_meta(meta_dict) 262 | if metadata_filters: 263 | results = self.storage_strategy.apply_filters(results, metadata_filters) 264 | else: 265 | # Recent memories 266 | results = self.storage_strategy.find_recent(top_k, **meta_dict) 267 | 268 | # Sort results if needed 269 | if sort_by != 'relevance' or not query: 270 | if sort_by == 'time_desc' or not query: 271 | results = sorted(results, key=lambda x: x.timestamp, reverse=True) 272 | elif sort_by == 'time_asc': 273 | results = sorted(results, key=lambda x: x.timestamp) 274 | 275 | return results[:top_k] 276 | 277 | def __repr__(self): 278 | return f"Memtor(embedding_strategy={self.embedding_manager.embedding_strategy.__class__.__name__}, " \ 279 | f"storage_strategy={self.storage_strategy.__class__.__name__}, " \ 280 | f"search_strategy={self.search_strategy.__class__.__name__})" 281 | 282 | if __name__ == "__main__": 283 | # Usage example 284 | memtor = Memtor() 285 | 286 | # Add some memories 287 | memory1_id = memtor.add_memory("The quick brown fox jumps over the lazy dog", {"tag": "animals"}) 288 | memory2_id = memtor.add_memory("To be or not to be, that is the question", {"tag": "literature"}) 289 | 290 | # Search memories 291 | results = memtor.search_memories("animal fox", top_k=1) 292 | if results: 293 | print(f"Top search result: {results[0].content}") 294 | 295 | # Update a memory 296 | memtor.update_memory(memory1_id, "The quick brown fox leaps over the lazy dog", 
{"tag": "animals", "updated": True}) 297 | 298 | # List memories with a filter 299 | filtered_memories = memtor.list_memories(metadata_filters=[("tag", "==", "animals")]) 300 | print(f"Number of animal-related memories: {len(filtered_memories)}") 301 | 302 | # Delete a memory 303 | memtor.delete_memory(memory2_id) 304 | 305 | print("Memtor usage example completed successfully!") -------------------------------------------------------------------------------- /mem4ai/strategies/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unclecode/mem4ai/5b6eb4c79210a51507fa86b148fe7fb68cdf541f/mem4ai/strategies/__init__.py -------------------------------------------------------------------------------- /mem4ai/strategies/embedding_strategy.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import List, Union 3 | import os 4 | from litellm import embedding 5 | from ..utils.config_manager import config_manager 6 | import numpy as np 7 | 8 | class EmbeddingStrategy(ABC): 9 | @abstractmethod 10 | def embed(self, input: Union[str, List[str]]) -> List[List[float]]: 11 | pass 12 | 13 | @property 14 | @abstractmethod 15 | def dimension(self) -> int: 16 | pass 17 | 18 | class LiteLLMEmbeddingStrategy(EmbeddingStrategy): 19 | def __init__(self): 20 | self.model = config_manager.get('embedding.model', 'text-embedding-3-small') 21 | self._dimension = config_manager.get('embedding.dimension', 1536) # Default for text-embedding-3-small 22 | self.api_key = config_manager.get('embedding.api_key', os.getenv('OPENAI_API_KEY')) 23 | self.input_type = config_manager.get('embedding.input_type', None) 24 | 25 | if 'openai' in self.model.lower(): 26 | os.environ['OPENAI_API_KEY'] = self.api_key 27 | elif 'huggingface' in self.model.lower(): 28 | os.environ['HUGGINGFACE_API_KEY'] = self.api_key 29 | 30 | def embed(self, input: 
Union[str, List[str]]) -> np.ndarray: 31 | if isinstance(input, str): 32 | input = [input] 33 | 34 | kwargs = { 35 | "model": self.model, 36 | "input": input, 37 | } 38 | 39 | if 'text-embedding-3' in self.model: 40 | kwargs["dimensions"] = self._dimension 41 | 42 | if self.input_type and self.model.startswith('huggingface'): 43 | kwargs["input_type"] = self.input_type 44 | 45 | response = embedding(**kwargs) 46 | data = [item['embedding'] for item in response['data']] 47 | return np.array(data) 48 | 49 | @property 50 | def dimension(self) -> int: 51 | return self._dimension 52 | 53 | def get_embedding_strategy() -> EmbeddingStrategy: 54 | strategy_name = config_manager.get('embedding.strategy', 'litellm') 55 | if strategy_name == 'litellm': 56 | return LiteLLMEmbeddingStrategy() 57 | else: 58 | raise ValueError(f"Unknown embedding strategy: {strategy_name}") -------------------------------------------------------------------------------- /mem4ai/strategies/knowledge_extraction/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import KnowledgeExtractionStrategy 2 | from .llm import LLMExtractionStrategy 3 | from .summary import SummaryExtractionStrategy 4 | from .echo import EchoKnowledgeStrategy 5 | from ...utils.config_manager import config_manager 6 | 7 | def get_extraction_strategy() -> KnowledgeExtractionStrategy: 8 | """ 9 | Factory function to get the appropriate knowledge extraction strategy based on configuration. 
10 | """ 11 | strategy_name = config_manager.get('extraction.strategy', 'llm') 12 | 13 | if strategy_name == 'llm': 14 | return LLMExtractionStrategy() 15 | elif strategy_name == 'simple': 16 | return SummaryExtractionStrategy() 17 | elif strategy_name == 'none': 18 | return None 19 | else: 20 | raise ValueError(f"Unknown extraction strategy: {strategy_name}") 21 | 22 | # Make these accessible when importing from knowledge_extraction 23 | __all__ = [ 24 | 'KnowledgeExtractionStrategy', 25 | 'LLMExtractionStrategy', 26 | 'SummaryExtractionStrategy', 27 | 'get_extraction_strategy', 28 | 'EchoKnowledgeStrategy', 29 | ] -------------------------------------------------------------------------------- /mem4ai/strategies/knowledge_extraction/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Dict, Any 3 | 4 | class KnowledgeExtractionStrategy(ABC): 5 | @abstractmethod 6 | def extract_knowledge(self, user_message: str, assistant_response: str) -> Dict[str, Any]: 7 | """Extract knowledge from a conversation exchange.""" 8 | pass -------------------------------------------------------------------------------- /mem4ai/strategies/knowledge_extraction/echo.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import List, Dict, Any, Literal 3 | from datetime import datetime 4 | from pydantic import BaseModel, Field, ConfigDict 5 | from litellm import completion 6 | import os, json 7 | from .base import KnowledgeExtractionStrategy 8 | from ...utils.config_manager import config_manager 9 | 10 | class EchoKnowledgeStrategy(KnowledgeExtractionStrategy): 11 | def __init__(self): 12 | pass 13 | 14 | def extract_knowledge( 15 | self, user_message: str, assistant_response: str 16 | ) -> Dict[str, Any]: 17 | return { 18 | # "user_message": user_message, 19 | # "assistant_response": assistant_response, 
20 | "timestamp": datetime.now().isoformat(), 21 | } -------------------------------------------------------------------------------- /mem4ai/strategies/knowledge_extraction/llm.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List, Literal, Optional 2 | from datetime import datetime 3 | from pydantic import BaseModel, Field, ConfigDict 4 | from litellm import completion 5 | import os 6 | from .base import KnowledgeExtractionStrategy 7 | from ...utils.config_manager import config_manager 8 | import json 9 | 10 | # Pydantic models for structured knowledge extraction 11 | class ActionDetails(BaseModel): 12 | type: Literal['list_modification', 'search', 'creation', 'deletion'] = Field( 13 | ..., # Make it required 14 | description="The type of action performed" 15 | ) 16 | target: Literal['favorites_list', 'watch_later', 'custom_list'] = Field( 17 | ..., 18 | description="The target of the action" 19 | ) 20 | status: Literal['completed', 'failed', 'partial'] = Field( 21 | ..., 22 | description="The status of the action" 23 | ) 24 | 25 | class Config: 26 | extra = 'forbid' 27 | 28 | class ModifiedElements(BaseModel): 29 | lists: List[str] = Field( 30 | ..., # Changed from default_factory=list to required 31 | description="Names of modified lists" 32 | ) 33 | movies: List[str] = Field( 34 | ..., # Changed from default_factory=list to required 35 | description="Titles of modified movies" 36 | ) 37 | preferences: List[str] = Field( 38 | ..., # Changed from default_factory=list to required 39 | description="Modified preferences" 40 | ) 41 | class Config: 42 | extra = 'forbid' 43 | 44 | class ActionInfo(BaseModel): 45 | primary_action: ActionDetails = Field( 46 | ..., 47 | description="Primary action details" 48 | ) 49 | modified_elements: ModifiedElements = Field( 50 | ..., 51 | description="Elements modified by the action" 52 | ) 53 | class Config: 54 | extra = 'forbid' 55 | 56 | class 
ReferencedValues(BaseModel): 57 | movies: List[str] = Field( 58 | ..., # Changed from default_factory=list to required 59 | description="Movie titles referenced" 60 | ) 61 | ratings: List[str] = Field( 62 | ..., # Changed from default_factory=list to required 63 | description="Ratings referenced" 64 | ) 65 | dates: List[str] = Field( 66 | ..., # Changed from default_factory=list to required 67 | description="Dates referenced" 68 | ) 69 | other_values: List[str] = Field( 70 | ..., # Changed from default_factory=list to required 71 | description="Other referenced values" 72 | ) 73 | class Config: 74 | extra = 'forbid' 75 | 76 | class KeyInformation(BaseModel): 77 | explicit_mentions: List[str] = Field( 78 | ..., 79 | description="Explicitly mentioned terms" 80 | ) 81 | implicit_context: List[str] = Field( 82 | ..., 83 | description="Implicitly derived context" 84 | ) 85 | referenced_values: ReferencedValues = Field( 86 | ..., 87 | description="Values referenced in the conversation" 88 | ) 89 | class Config: 90 | extra = 'forbid' 91 | 92 | class ConversationDetails(BaseModel): 93 | intent: Literal['movie_organization', 'information_query', 'general_discussion'] = Field( 94 | ..., 95 | description="The intent of the conversation" 96 | ) 97 | topic: str = Field( 98 | ..., 99 | description="The main topic of conversation" 100 | ) 101 | key_information: KeyInformation = Field( 102 | ..., 103 | description="Key information extracted from the conversation" 104 | ) 105 | user_message: str = Field( 106 | ..., 107 | description="Original user message" 108 | ) 109 | assistant_response: str = Field( 110 | ..., 111 | description="Original assistant response" 112 | ) 113 | class Config: 114 | extra = 'forbid' 115 | 116 | class Summary(BaseModel): 117 | request_essence: str = Field( 118 | ..., 119 | description="Essence of the user's request" 120 | ) 121 | response_essence: str = Field( 122 | ..., 123 | description="Essence of the assistant's response" 124 | ) 125 | key_points: 
List[str] = Field( 126 | ..., 127 | description="Key points from the conversation" 128 | ) 129 | class Config: 130 | extra = 'forbid' 131 | 132 | class MemoryContext(BaseModel): 133 | timestamp: str = Field( # Changed from datetime to str for easier serialization 134 | ..., 135 | description="Timestamp of the interaction" 136 | ) 137 | interaction_type: Literal['action_based', 'conversational', 'information_seeking'] = Field( 138 | ..., 139 | description="Type of interaction" 140 | ) 141 | action_details: Optional[ActionInfo] = Field( 142 | None, 143 | description="Details of any actions performed" 144 | ) 145 | conversation_details: ConversationDetails = Field( 146 | ..., 147 | description="Details of the conversation" 148 | ) 149 | summary: Summary = Field( 150 | ..., 151 | description="Summary of the interaction" 152 | ) 153 | 154 | class Config: 155 | extra = 'forbid' 156 | 157 | class LLMExtractionStrategy(KnowledgeExtractionStrategy): 158 | def __init__(self): 159 | self.model = config_manager.get('extraction.model', 'gpt-4o-mini') 160 | self.api_key = config_manager.get('extraction.api_key', os.getenv('OPENAI_API_KEY')) 161 | 162 | if not self.api_key: 163 | raise ValueError("No API key found for LLM extraction") 164 | 165 | os.environ['OPENAI_API_KEY'] = self.api_key 166 | 167 | def _create_system_prompt(self) -> str: 168 | return """You are a knowledge extraction system for a movie application assistant. Your task is to analyze conversations between users and the assistant, extracting structured information about both action-based interactions (like creating lists or modifying preferences) and general conversations about movies. 169 | 170 | Given a conversation, you must: 171 | 1. Identify if this is an action-based interaction (e.g., creating lists, adding movies) or a conversational interaction (e.g., discussing movies, asking opinions) 172 | 2. Extract all relevant information including explicit mentions and implicit context 173 | 3. 
Preserve important referenced values (movies, ratings, dates) for future use 174 | 4. Create a concise summary capturing the essence of the interaction 175 | 176 | For action-based interactions, focus on: 177 | - Specific actions performed (creation, modification, deletion) 178 | - Changes made to lists, movies, or preferences 179 | - Status of the actions performed 180 | 181 | For conversational interactions, focus on: 182 | - Main topics and themes discussed 183 | - Movie-related information shared 184 | - User preferences and opinions expressed 185 | 186 | Example 1 (Action-based): 187 | User message: "Create a new list called 'Horror Nights' and add some recent psychological horror movies with good ratings." 188 | Assistant response: {"action":"create_list", "list_id":"hn_123", "movies":[{"title":"Talk to Me","year":2023,"rating":7.1},{"title":"Hereditary","year":2018,"rating":7.3}]} I've created your 'Horror Nights' list and added some highly-rated psychological horror films. Would you like to see more options? 
189 | 190 | { 191 | "timestamp": "2024-10-31T14:30:00Z", 192 | "interaction_type": "action_based", 193 | "action_details": { 194 | "primary_action": { 195 | "type": "creation", 196 | "target": "custom_list", 197 | "status": "completed" 198 | }, 199 | "modified_elements": { 200 | "lists": ["Horror Nights"], 201 | "movies": ["Talk to Me", "Hereditary"], 202 | "preferences": ["psychological horror"] 203 | } 204 | }, 205 | "conversation_details": { 206 | "intent": "movie_organization", 207 | "topic": "horror movies", 208 | "key_information": { 209 | "explicit_mentions": ["horror", "psychological", "recent"], 210 | "implicit_context": ["user prefers quality over quantity"], 211 | "referenced_values": { 212 | "movies": ["Talk to Me", "Hereditary"], 213 | "ratings": ["7.1", "7.3"], 214 | "dates": ["2023", "2018"], 215 | "other_values": [] 216 | } 217 | }, 218 | "user_message": "Create a new list called 'Horror Nights' and add some recent psychological horror movies with good ratings.", 219 | "assistant_response": "I've created your 'Horror Nights' list and added some highly-rated psychological horror films. Would you like to see more options?" 220 | }, 221 | "summary": { 222 | "request_essence": "Create new horror movie list with recent, well-rated films", 223 | "response_essence": "Created list with two psychological horror movies", 224 | "key_points": ["List creation", "Focus on psychological horror", "Recent releases", "High ratings"] 225 | } 226 | } 227 | 228 | Example 2 (Conversational): 229 | User message: "What did you think about the ending of Hereditary? Was it too disturbing?" 230 | Assistant response: "The ending of Hereditary is particularly impactful because it completes the family's tragic arc. The final treehouse scene reveals the cult's true purpose. While disturbing, it serves the story's themes of family trauma and fate perfectly." 
231 | 232 | { 233 | "timestamp": "2024-10-31T14:35:00Z", 234 | "interaction_type": "conversational", 235 | "action_details": null, 236 | "conversation_details": { 237 | "intent": "information_query", 238 | "topic": "movie analysis", 239 | "key_information": { 240 | "explicit_mentions": ["Hereditary", "ending", "disturbing"], 241 | "implicit_context": ["user seeking opinion", "concerned about content intensity"], 242 | "referenced_values": { 243 | "movies": ["Hereditary"], 244 | "ratings": [], 245 | "dates": [], 246 | "other_values": ["treehouse scene", "family trauma", "cult"] 247 | } 248 | }, 249 | "user_message": "What did you think about the ending of Hereditary? Was it too disturbing?", 250 | "assistant_response": "The ending of Hereditary is particularly impactful because it completes the family's tragic arc. The final treehouse scene reveals the cult's true purpose. While disturbing, it serves the story's themes of family trauma and fate perfectly." 251 | }, 252 | "summary": { 253 | "request_essence": "Opinion request about Hereditary's ending", 254 | "response_essence": "Analysis of ending's significance and themes", 255 | "key_points": ["Movie analysis", "Thematic discussion", "Content intensity"] 256 | } 257 | } 258 | 259 | Required Response Format: 260 | Your response must strictly follow this schema: 261 | { 262 | "timestamp": string (ISO format), 263 | "interaction_type": "action_based" | "conversational" | "information_seeking", 264 | "action_details": { // Optional, null for non-action interactions 265 | "primary_action": { 266 | "type": "list_modification" | "search" | "creation" | "deletion", 267 | "target": "favorites_list" | "watch_later" | "custom_list", 268 | "status": "completed" | "failed" | "partial" 269 | }, 270 | "modified_elements": { 271 | "lists": [string], // Required, can be empty 272 | "movies": [string], // Required, can be empty 273 | "preferences": [string] // Required, can be empty 274 | } 275 | }, 276 | "conversation_details": { 
277 | "intent": "movie_organization" | "information_query" | "general_discussion", 278 | "topic": string, 279 | "key_information": { 280 | "explicit_mentions": [string], // Required 281 | "implicit_context": [string], // Required 282 | "referenced_values": { 283 | "movies": [string], // Required 284 | "ratings": [string], // Required 285 | "dates": [string], // Required 286 | "other_values": [string] // Required 287 | } 288 | }, 289 | "user_message": string, // Original message 290 | "assistant_response": string // Original response 291 | }, 292 | "summary": { 293 | "request_essence": string, 294 | "response_essence": string, 295 | "key_points": [string] // Required 296 | } 297 | }""" 298 | 299 | 300 | def extract_knowledge(self, user_message: str, assistant_response: str) -> Dict[str, Any]: 301 | messages = [ 302 | {"role": "system", "content": self._create_system_prompt()}, 303 | {"role": "user", "content": f"User message: {user_message}\nAssistant response: {assistant_response}"} 304 | ] 305 | 306 | try: 307 | response = completion( 308 | model=self.model, 309 | messages=messages, 310 | response_format=MemoryContext 311 | ) 312 | 313 | return json.loads(response.model_dump()['choices'][0]['message']['content']) 314 | except Exception as e: 315 | print(f"Error in knowledge extraction: {str(e)}") 316 | # Return a basic context on error 317 | return MemoryContext( 318 | timestamp=datetime.now(), 319 | interaction_type="conversational", 320 | conversation_details=ConversationDetails( 321 | intent="general_discussion", 322 | topic="general", 323 | original_exchange={ 324 | "user_message": user_message, 325 | "assistant_response": assistant_response 326 | } 327 | ), 328 | summary=Summary( 329 | request_essence="Error in extraction", 330 | response_essence="Error in extraction", 331 | key_points=["Error in knowledge extraction"] 332 | ) 333 | ).model_dump() -------------------------------------------------------------------------------- 
/mem4ai/strategies/knowledge_extraction/summary.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import List, Dict, Any, Literal 3 | from datetime import datetime 4 | from pydantic import BaseModel, Field, ConfigDict 5 | from litellm import completion 6 | import os, json 7 | from .base import KnowledgeExtractionStrategy 8 | from ...utils.config_manager import config_manager 9 | 10 | 11 | class SimpleSummaryContext(BaseModel): 12 | model_config = ConfigDict(extra="forbid") 13 | 14 | timestamp: str = Field(..., description="Timestamp of the interaction") 15 | summary: str = Field(..., description="Concise summary of the interaction") 16 | keywords: List[str] = Field( 17 | ..., description="Key terms and concepts from the conversation" 18 | ) 19 | interaction_type: Literal["task", "discussion", "query"] = Field( 20 | ..., description="Basic type of the interaction" 21 | ) 22 | 23 | 24 | class SummaryExtractionStrategy(KnowledgeExtractionStrategy): 25 | def __init__(self): 26 | self.model = config_manager.get("extraction.model", "gpt-4o-mini") 27 | self.api_key = config_manager.get( 28 | "extraction.api_key", os.getenv("OPENAI_API_KEY") 29 | ) 30 | 31 | if not self.api_key: 32 | raise ValueError("No API key found for LLM extraction") 33 | 34 | os.environ["OPENAI_API_KEY"] = self.api_key 35 | 36 | def _create_system_prompt(self) -> str: 37 | return """You are a knowledge extraction system focusing on creating concise summaries and extracting keywords from conversations. Your task is to analyze conversations and produce a simple, focused summary of what was discussed or accomplished. 38 | 39 | Given a conversation between a user and an assistant, you must: 40 | 1. Create a brief, informative summary of the interaction 41 | 2. Extract relevant keywords that capture the main concepts 42 | 3. 
Determine the basic type of interaction (task, discussion, or query) 43 | 44 | Example 1: 45 | User: "Can you recommend some classic noir films from the 1940s?" 46 | Assistant: "Here are some essential film noir classics from the 1940s: 'Double Indemnity' (1944), 'The Maltese Falcon' (1941), and 'The Big Sleep' (1946). These films defined the genre with their dark themes, cynical characters, and stylish cinematography." 47 | 48 | { 49 | "timestamp": "2024-10-31T14:30:00Z", 50 | "summary": "User requested recommendations for 1940s film noir classics, receiving suggestions of three defining movies from the genre", 51 | "keywords": ["film noir", "1940s", "movie recommendations", "Double Indemnity", "Maltese Falcon", "Big Sleep", "classics"], 52 | "interaction_type": "query" 53 | } 54 | 55 | Example 2: 56 | User: "Add 'The Godfather' and 'Goodfellas' to my 'Crime Classics' watchlist" 57 | Assistant: "I've added both 'The Godfather' and 'Goodfellas' to your 'Crime Classics' watchlist. Would you like me to suggest similar crime films?" 
58 | 59 | { 60 | "timestamp": "2024-10-31T14:35:00Z", 61 | "summary": "User requested addition of two classic crime films to their 'Crime Classics' watchlist", 62 | "keywords": ["The Godfather", "Goodfellas", "watchlist", "crime movies", "list management"], 63 | "interaction_type": "task" 64 | } 65 | 66 | Required Response Format: 67 | { 68 | "timestamp": string (ISO format), 69 | "summary": string (concise description of the interaction), 70 | "keywords": [string] (list of relevant terms and concepts), 71 | "interaction_type": "task" | "discussion" | "query" 72 | }""" 73 | 74 | def extract_knowledge( 75 | self, user_message: str, assistant_response: str 76 | ) -> Dict[str, Any]: 77 | messages = [ 78 | {"role": "system", "content": self._create_system_prompt()}, 79 | { 80 | "role": "user", 81 | "content": f"User message: {user_message}\nAssistant response: {assistant_response}", 82 | }, 83 | ] 84 | 85 | try: 86 | response = completion( 87 | model=self.model, 88 | messages=messages, 89 | response_format=SimpleSummaryContext, 90 | ) 91 | 92 | return json.loads(response.model_dump()["choices"][0]["message"]["content"]) 93 | except Exception as e: 94 | print(f"Error in simple knowledge extraction: {str(e)}") 95 | # Return a basic context on error 96 | return SimpleSummaryContext( 97 | timestamp=datetime.now().isoformat(), 98 | summary="Error occurred during extraction", 99 | keywords=["error"], 100 | interaction_type="discussion", 101 | ).model_dump() 102 | -------------------------------------------------------------------------------- /mem4ai/strategies/search_strategy.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import List, Union, Dict, Any, Tuple 3 | import numpy as np 4 | from sklearn.feature_extraction.text import TfidfVectorizer 5 | from sklearn.metrics.pairwise import cosine_similarity 6 | from ..core.memory import Memory 7 | from ..utils.config_manager import 
config_manager
from ..core.embedding_manager import EmbeddingManager

class SearchStrategy(ABC):
    """Interface for memory-search strategies."""

    @abstractmethod
    def search(self, query: Union[str, np.ndarray], memories: List[Memory], top_k: int,
               keywords: List[str], metadata_filters: List[Tuple[str, str, Any]]) -> List[Memory]:
        pass

class DefaultSearchStrategy(SearchStrategy):
    """Two-stage search: embedding-similarity shortlist, then BM25 re-ranking.

    Fix: now actually subclasses SearchStrategy -- it implements the interface
    and is returned where a SearchStrategy is expected by get_search_strategy().
    """

    def __init__(self, embedding_manager: EmbeddingManager):
        self.embedding_manager = embedding_manager
        self.tfidf_vectorizer = TfidfVectorizer(stop_words='english')
        # Standard BM25 tuning parameters.
        self.k1: float = config_manager.get('search.bm25_k1', 1.5)
        self.b: float = config_manager.get('search.bm25_b', 0.75)

    def search(self, query: Union[str, np.ndarray], memories: List[Memory], top_k: int,
               keywords: List[str], metadata_filters: List[Tuple[str, str, Any]]) -> List[Memory]:
        """Return up to `top_k` memories ranked for `query`.

        Raises TypeError/ValueError on malformed arguments; returns [] when no
        memory passes the metadata filters or similarity computation fails.
        """
        if not isinstance(memories, list) or not all(isinstance(m, Memory) for m in memories):
            raise TypeError("memories must be a list of Memory objects")
        if not memories:
            return []

        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError("top_k must be a positive integer")

        if not isinstance(keywords, list) or not all(isinstance(k, str) for k in keywords):
            raise TypeError("keywords must be a list of strings")

        if not isinstance(metadata_filters, list):
            raise TypeError("metadata_filters must be a list")

        filtered_memories = self._apply_metadata_filters(memories, metadata_filters)
        if not filtered_memories:
            return []

        # Stage 1: embedding-similarity shortlist.
        try:
            if isinstance(query, str):
                query_embedding = self.embedding_manager.embed(query)
            elif isinstance(query, np.ndarray) and np.issubdtype(query.dtype, np.floating):
                # Fix: accept any float dtype; the old `query.dtype == float`
                # check only matched float64 and rejected float32 embeddings.
                query_embedding = query
            else:
                raise TypeError("query must be a string or a numpy array of floats")

            cosine_scores = self._calculate_cosine_similarity(query_embedding, filtered_memories)
            top_k = min(top_k, len(filtered_memories))
            top_cosine_indices = np.argsort(cosine_scores)[-top_k:][::-1]
            top_memories = [filtered_memories[i] for i in top_cosine_indices]
        except Exception as e:
            print(f"Error during cosine similarity calculation: {str(e)}")
            return []

        # Stage 2: BM25 re-ranking (only for string queries).
        if isinstance(query, str):
            try:
                bm25_scores = self._calculate_bm25_scores(query, top_memories, keywords)
                # Fix: the old code evaluated `final_indices == [0]`, which
                # compares a numpy index array against a list -- ambiguous
                # truth value for more than one memory, so re-ranking silently
                # fell into the except branch and never applied.
                final_indices = np.argsort(bm25_scores)[::-1]
                return [top_memories[i] for i in final_indices]
            except Exception as e:
                print(f"Error during BM25 re-ranking: {str(e)}")
                return top_memories  # Fall back to cosine similarity results if BM25 fails
        else:
            return top_memories

    def _apply_metadata_filters(self, memories, filters):
        """Keep only memories passing every (key, operator, value) filter tuple."""
        if not filters:
            return memories

        def passes_filter(memory, filter_tuple):
            key, op, value = filter_tuple
            # Missing key means the memory cannot satisfy the filter.
            if key not in memory.metadata:
                return False
            mem_value = memory.metadata[key]
            if op == '==':
                return mem_value == value
            elif op == '!=':
                return mem_value != value
            elif op == '>':
                return mem_value > value
            elif op == '>=':
                return mem_value >= value
            elif op == '<':
                return mem_value < value
            elif op == '<=':
                return mem_value <= value
            else:
                raise ValueError(f"Unknown operator: {op}")

        return [mem for mem in memories if all(passes_filter(mem, f) for f in filters)]

    def _calculate_cosine_similarity(self, query_embedding: np.ndarray, memories: List[Memory]) -> np.ndarray:
        """One similarity score per memory, via dot product with the query vector."""
        # Memory.embedding appears to be stored as a (1, dim) array (row 0 is
        # taken here). NOTE(review): dot product equals cosine similarity only
        # if embeddings are L2-normalized -- confirm upstream normalization.
        memory_embeddings = np.array([memory.embedding[0] for memory in memories])

        if memory_embeddings.shape[0] == 0:
            return np.array([])

        # Ensure both operands are 2-D.
        if query_embedding.ndim == 1:
            query_embedding = query_embedding.reshape(1, -1)
        if memory_embeddings.ndim == 1:
            memory_embeddings = memory_embeddings.reshape(1, -1)

        # return cosine_similarity(query_embedding, memory_embeddings)[0]
        return np.dot(memory_embeddings, query_embedding.T).flatten()

    def _calculate_bm25_scores(self, query: str, memories: List[Memory], keywords: List[str]) -> np.ndarray:
        """BM25-style score per memory, plus a TF-IDF boost for explicit keywords.

        Fix: the old implementation returned `final_scores.tolist()[0]` -- for
        a 1-D score array that is a single float, not per-memory scores, so the
        caller could never rank by it. All intermediates are now forced to 1-D
        arrays of length len(memories).
        """
        try:
            corpus = [memory.content for memory in memories]
            tfidf_matrix = self.tfidf_vectorizer.fit_transform(corpus)
            # Sparse .sum(axis=1) yields an np.matrix; asarray+ravel gives 1-D.
            doc_lens = np.asarray(tfidf_matrix.sum(axis=1)).ravel()
            avg_doc_len = doc_lens.mean() or 1.0  # guard against division by zero

            query_vec = self.tfidf_vectorizer.transform([query])
            query_terms = query_vec.indices

            keyword_boost = np.zeros(len(memories))
            for keyword in keywords:
                if keyword in self.tfidf_vectorizer.vocabulary_:
                    keyword_idx = self.tfidf_vectorizer.vocabulary_[keyword]
                    keyword_boost += tfidf_matrix[:, keyword_idx].toarray().ravel()

            scores = np.zeros(len(memories))
            for idx in query_terms:
                qi = query_vec[0, idx]
                fi = tfidf_matrix[:, idx].toarray().ravel()
                numerator = fi * (self.k1 + 1)
                denominator = fi + self.k1 * (1 - self.b + self.b * doc_lens / avg_doc_len)
                scores = scores + qi * (numerator / denominator)

            return np.asarray(scores + keyword_boost).ravel()
        except Exception as e:
            print(f"Error in BM25 calculation: {str(e)}")
            return np.zeros(len(memories))  # Neutral scores if calculation fails

def get_search_strategy(embedding_manager=None):
    """Factory: build the search strategy named in the configuration."""
    strategy_name = config_manager.get('search.strategy', 'default')
    if strategy_name == 'default':
        return DefaultSearchStrategy(embedding_manager or EmbeddingManager())
    else:
        raise ValueError(f"Unknown search strategy: {strategy_name}")
-------------------------------------------------------------------------------- /mem4ai/strategies/storage_strategy.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import List, Optional, Dict, Any, Tuple 3 | import os 4 | import lmdb 5 | import pickle 6 | from datetime import datetime, timedelta 7 | from ..core.memory import Memory 8 | from ..utils.config_manager import config_manager 9 | 10 | class StorageStrategy(ABC): 11 | @abstractmethod 12 | def save(self, memory: Memory) -> None: 13 | pass 14 | 15 | @abstractmethod 16 | def load(self, memory_id: str) -> Optional[Memory]: 17 | pass 18 | 19 | @abstractmethod 20 | def update(self, memory_id: str, memory: Memory) -> bool: 21 | pass 22 | 23 | @abstractmethod 24 | def delete(self, memory_id: str) -> bool: 25 | pass 26 | 27 | @abstractmethod 28 | def list_all(self) -> List[Memory]: 29 | pass 30 | 31 | @abstractmethod 32 | def apply_filters(self, memories: List[Memory], filters: List[tuple]) -> List[Memory]: 33 | pass 34 | 35 | @abstractmethod 36 | def clear_all(self) -> None: 37 | pass 38 | 39 | @abstractmethod 40 | def find_recent(self, limit: int, **kwargs) -> List[Memory]: 41 | """Get the most recent memories with optional filters""" 42 | pass 43 | 44 | @abstractmethod 45 | def find_by_time(self, start_time: datetime, end_time: datetime, **kwargs) -> List[Memory]: 46 | """Get memories within a time range with optional filters""" 47 | pass 48 | 49 | @abstractmethod 50 | def find_by_meta(self, metadata_filters: Dict[str, Any]) -> List[Memory]: 51 | """Get memories by metadata filters""" 52 | pass 53 | 54 | class LMDBStorageStrategy(StorageStrategy): 55 | METADATA_KEYS = ['user_id', 'session_id', 'agent_id'] 56 | 57 | def __init__(self): 58 | self.path: str = config_manager.get('storage.path', './mem4ai_storage') 59 | self.map_size: int = config_manager.get('storage.map_size', 10 * 1024 * 1024 * 1024) # 10GB default 60 | 
self._ensure_directory() 61 | 62 | # Main environment for storing memories 63 | self.env = lmdb.open(self.path, map_size=self.map_size) 64 | 65 | # Separate environments for indices 66 | self.timestamp_env = lmdb.open(f"{self.path}_timestamp_index", map_size=1024 * 1024 * 1024) 67 | self.metadata_env = lmdb.open(f"{self.path}_metadata_index", map_size=1024 * 1024 * 1024) 68 | 69 | self._init_indices() 70 | 71 | def _ensure_directory(self) -> None: 72 | if not os.path.exists(self.path): 73 | os.makedirs(self.path) 74 | 75 | def _init_indices(self) -> None: 76 | """Initialize index structures if they don't exist""" 77 | with self.timestamp_env.begin(write=True) as txn: 78 | if not txn.get(b'timestamp_index'): 79 | txn.put(b'timestamp_index', pickle.dumps({})) 80 | 81 | with self.metadata_env.begin(write=True) as txn: 82 | for key in self.METADATA_KEYS: 83 | if not txn.get(key.encode()): 84 | txn.put(key.encode(), pickle.dumps({})) 85 | 86 | def save(self, memory: Memory) -> None: 87 | if not isinstance(memory, Memory): 88 | raise TypeError(f"Expected Memory object, got {type(memory)}") 89 | 90 | with self.env.begin(write=True) as txn: 91 | txn.put(memory.id.encode(), pickle.dumps(memory)) 92 | 93 | # Update indices 94 | with self.timestamp_env.begin(write=True) as txn: 95 | self._update_timestamp_index(memory, txn) 96 | 97 | with self.metadata_env.begin(write=True) as txn: 98 | self._update_metadata_index(memory, txn) 99 | 100 | def load(self, memory_id: str) -> Optional[Memory]: 101 | if not isinstance(memory_id, str): 102 | raise TypeError(f"Expected string for memory_id, got {type(memory_id)}") 103 | 104 | with self.env.begin() as txn: 105 | data = txn.get(memory_id.encode()) 106 | if data is None: 107 | return None 108 | return pickle.loads(data) 109 | 110 | def update(self, memory_id: str, memory: Memory) -> bool: 111 | if not isinstance(memory_id, str) or not isinstance(memory, Memory): 112 | raise TypeError("Invalid types for update operation") 113 | 114 | 
with self.env.begin(write=True) as txn: 115 | if txn.get(memory_id.encode()) is None: 116 | return False 117 | txn.put(memory_id.encode(), pickle.dumps(memory)) 118 | 119 | # Update indices 120 | with self.timestamp_env.begin(write=True) as ts_txn: 121 | self._update_timestamp_index(memory, ts_txn) 122 | 123 | with self.metadata_env.begin(write=True) as meta_txn: 124 | self._update_metadata_index(memory, meta_txn) 125 | 126 | return True 127 | 128 | def delete(self, memory_id: str) -> bool: 129 | if not isinstance(memory_id, str): 130 | raise TypeError(f"Expected string for memory_id, got {type(memory_id)}") 131 | 132 | memory = self.load(memory_id) 133 | if not memory: 134 | return False 135 | 136 | # Remove from main storage and indices 137 | with self.env.begin(write=True) as txn: 138 | txn.delete(memory_id.encode()) 139 | 140 | self._remove_from_indices(memory) 141 | return True 142 | 143 | def list_all(self) -> List[Memory]: 144 | memories = [] 145 | with self.env.begin() as txn: 146 | cursor = txn.cursor() 147 | for _, value in cursor: 148 | memories.append(pickle.loads(value)) 149 | return memories 150 | 151 | def apply_filters(self, memories: List[Memory], filters: List[tuple]) -> List[Memory]: 152 | if not isinstance(memories, list) or not isinstance(filters, list): 153 | raise TypeError("Invalid types for apply_filters operation") 154 | 155 | def passes_filter(memory: Memory, filter_tuple: tuple) -> bool: 156 | key, op, value = filter_tuple 157 | if key not in memory.metadata: 158 | return False 159 | mem_value = memory.metadata[key] 160 | if op == '==': 161 | return mem_value == value 162 | elif op == '!=': 163 | return mem_value != value 164 | elif op == '>': 165 | return mem_value > value 166 | elif op == '>=': 167 | return mem_value >= value 168 | elif op == '<': 169 | return mem_value < value 170 | elif op == '<=': 171 | return mem_value <= value 172 | else: 173 | raise ValueError(f"Unknown operator: {op}") 174 | 175 | return [mem for mem in memories 
if all(passes_filter(mem, f) for f in filters)] 176 | 177 | def clear_all(self) -> None: 178 | with self.env.begin(write=True) as txn: 179 | txn.drop(self.env.open_db()) 180 | 181 | with self.timestamp_env.begin(write=True) as txn: 182 | txn.drop(self.timestamp_env.open_db()) 183 | 184 | with self.metadata_env.begin(write=True) as txn: 185 | txn.drop(self.metadata_env.open_db()) 186 | 187 | self._init_indices() 188 | 189 | def _update_timestamp_index(self, memory: Memory, txn) -> None: 190 | timestamp_key = memory.timestamp.isoformat().encode() 191 | txn.put(f"ts:{memory.id}".encode(), timestamp_key) 192 | 193 | index = pickle.loads(txn.get(b'timestamp_index') or pickle.dumps({})) 194 | if timestamp_key not in index: 195 | index[timestamp_key] = set() 196 | index[timestamp_key].add(memory.id) 197 | txn.put(b'timestamp_index', pickle.dumps(index)) 198 | 199 | def _update_metadata_index(self, memory: Memory, txn) -> None: 200 | for key in self.METADATA_KEYS: 201 | if key in memory.metadata: 202 | value = memory.metadata[key] 203 | index_key = f"{key}:{value}".encode() 204 | index = pickle.loads(txn.get(index_key) or pickle.dumps(set())) 205 | index.add(memory.id) 206 | txn.put(index_key, pickle.dumps(index)) 207 | 208 | def _remove_from_indices(self, memory: Memory) -> None: 209 | # Remove from timestamp index 210 | with self.timestamp_env.begin(write=True) as txn: 211 | index = pickle.loads(txn.get(b'timestamp_index')) 212 | timestamp_key = memory.timestamp.isoformat().encode() 213 | if timestamp_key in index: 214 | index[timestamp_key].discard(memory.id) 215 | if not index[timestamp_key]: 216 | del index[timestamp_key] 217 | txn.put(b'timestamp_index', pickle.dumps(index)) 218 | 219 | # Remove from metadata indices 220 | with self.metadata_env.begin(write=True) as txn: 221 | for key in self.METADATA_KEYS: 222 | if key in memory.metadata: 223 | index_key = f"{key}:{memory.metadata[key]}".encode() 224 | index = pickle.loads(txn.get(index_key) or pickle.dumps(set())) 
225 | index.discard(memory.id) 226 | if index: 227 | txn.put(index_key, pickle.dumps(index)) 228 | else: 229 | txn.delete(index_key) 230 | 231 | def find_recent(self, limit: int, **kwargs) -> List[Memory]: 232 | memories = [] 233 | metadata_filters = {k: v for k, v in kwargs.items() 234 | if k in self.METADATA_KEYS and v is not None} 235 | 236 | with self.timestamp_env.begin() as ts_txn: 237 | index = pickle.loads(ts_txn.get(b'timestamp_index')) 238 | sorted_timestamps = sorted(index.keys(), reverse=True) 239 | 240 | with self.env.begin() as mem_txn: 241 | for ts_key in sorted_timestamps: 242 | memory_ids = index[ts_key] 243 | if metadata_filters: 244 | memory_ids = self._filter_by_metadata(memory_ids, metadata_filters) 245 | 246 | for memory_id in memory_ids: 247 | memory = pickle.loads(mem_txn.get(memory_id.encode())) 248 | if memory: 249 | memories.append(memory) 250 | if len(memories) >= limit: 251 | return memories[:limit] 252 | return memories 253 | 254 | def find_by_time(self, start_time: datetime, end_time: datetime, **kwargs) -> List[Memory]: 255 | memories = [] 256 | start_key = start_time.isoformat().encode() 257 | end_key = end_time.isoformat().encode() 258 | metadata_filters = {k: v for k, v in kwargs.items() 259 | if k in self.METADATA_KEYS and v is not None} 260 | 261 | with self.timestamp_env.begin() as ts_txn: 262 | index = pickle.loads(ts_txn.get(b'timestamp_index')) 263 | 264 | with self.env.begin() as mem_txn: 265 | for ts_key in index: 266 | if start_key <= ts_key <= end_key: 267 | memory_ids = index[ts_key] 268 | if metadata_filters: 269 | memory_ids = self._filter_by_metadata(memory_ids, metadata_filters) 270 | 271 | for memory_id in memory_ids: 272 | memory = pickle.loads(mem_txn.get(memory_id.encode())) 273 | if memory: 274 | memories.append(memory) 275 | 276 | return sorted(memories, key=lambda x: x.timestamp) 277 | 278 | def _filter_by_metadata(self, memory_ids: set, metadata_filters: Dict[str, Any]) -> set: 279 | result_ids = memory_ids 
280 | for key, value in metadata_filters.items(): 281 | with self.metadata_env.begin() as txn: 282 | index_key = f"{key}:{value}".encode() 283 | current_ids = pickle.loads(txn.get(index_key) or pickle.dumps(set())) 284 | result_ids = result_ids.intersection(current_ids) 285 | if not result_ids: # Short circuit if no matches 286 | break 287 | return result_ids 288 | 289 | def find_by_meta(self, metadata_filters: Dict[str, Any]) -> List[Memory]: 290 | valid_filters = {k: v for k, v in metadata_filters.items() 291 | if k in self.METADATA_KEYS and v is not None} 292 | 293 | if not valid_filters: 294 | return [] 295 | 296 | key, value = next(iter(valid_filters.items())) 297 | with self.metadata_env.begin() as meta_txn: 298 | memory_ids = pickle.loads(meta_txn.get(f"{key}:{value}".encode()) or pickle.dumps(set())) 299 | 300 | if len(valid_filters) > 1: 301 | memory_ids = self._filter_by_metadata(memory_ids, valid_filters) 302 | 303 | memories = [] 304 | with self.env.begin() as mem_txn: 305 | for memory_id in memory_ids: 306 | memory_data = mem_txn.get(memory_id.encode()) 307 | if memory_data: 308 | memories.append(pickle.loads(memory_data)) 309 | 310 | return sorted(memories, key=lambda x: x.timestamp, reverse=True) 311 | 312 | 313 | def get_storage_strategy() -> StorageStrategy: 314 | strategy_name: str = config_manager.get('storage.strategy', 'lmdb') 315 | if strategy_name == 'lmdb': 316 | return LMDBStorageStrategy() 317 | else: 318 | raise ValueError(f"Unknown storage strategy: {strategy_name}") 319 | 320 | -------------------------------------------------------------------------------- /mem4ai/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/unclecode/mem4ai/5b6eb4c79210a51507fa86b148fe7fb68cdf541f/mem4ai/utils/__init__.py -------------------------------------------------------------------------------- /mem4ai/utils/config_manager.py: 
import copy
import os
import yaml
from typing import Any, Dict, Optional

class ConfigManager:
    """Loads mem4ai configuration, layering a user YAML file over built-in defaults.

    The user file (default ``~/.mem4ai/config.yaml``) is deep-merged onto
    ``DEFAULT_CONFIG``; values are read with dotted paths via :meth:`get`.
    """

    DEFAULT_CONFIG = {
        'embedding': {
            'model': 'text-embedding-3-small',
            'dimension': 768,
        },
        'storage': {
            'type': 'lmdb',
            'path': './memtor_storage',
        },
        'search': {
            'algorithm': 'cosine_bm25',
            'top_k': 10,
        },
        'memory': {
            'max_history': 5,
        },
        'extraction': {
            'enabled': True,                # Whether to use knowledge extraction
            'type': 'llm',                  # 'llm' or 'simple'
            'model': 'gpt-4o-mini',         # Model for LLM-based strategies
            'store_full_response': True,
            'extraction_timeout': 30,
            'retries': 2,
        }
    }

    def __init__(self, config_path: Optional[str] = None):
        """Resolve the config file path and load the effective configuration."""
        self.config_path = config_path or os.path.expanduser('~/.mem4ai/config.yaml')
        self.config = self.load_config()

    def load_config(self) -> Dict[str, Any]:
        """Return defaults merged with the user's YAML file, if one exists.

        Fixes over the previous version:
        - the defaults are deep-copied before merging, so the class-level
          ``DEFAULT_CONFIG`` is never mutated (previously a user override
          permanently polluted the shared defaults for all later instances);
        - an empty YAML file (``safe_load`` -> None) no longer crashes.
        """
        defaults = copy.deepcopy(self.DEFAULT_CONFIG)
        if os.path.exists(self.config_path):
            with open(self.config_path, 'r') as f:
                user_config = yaml.safe_load(f) or {}
            return self.merge_configs(defaults, user_config)
        return defaults

    def merge_configs(self, default: Dict[str, Any], user: Dict[str, Any]) -> Dict[str, Any]:
        """Recursively overlay *user* onto *default* and return *default*.

        NOTE: mutates *default* in place by design; callers must pass a copy
        if the original mapping should be preserved.
        """
        for key, value in user.items():
            if isinstance(value, dict) and key in default:
                default[key] = self.merge_configs(default[key], value)
            else:
                default[key] = value
        return default

    def get(self, key: str, default: Any = None) -> Any:
        """Look up a dotted path such as ``'storage.path'``; return *default* if absent.

        Fix: also return *default* (instead of raising TypeError) when a path
        segment lands on a non-dict value, e.g. ``get('embedding.model.x')``.
        """
        value: Any = self.config
        for k in key.split('.'):
            if isinstance(value, dict) and k in value:
                value = value[k]
            else:
                return default
        return value

    def save(self) -> None:
        """Persist the current configuration back to the YAML config file."""
        os.makedirs(os.path.dirname(self.config_path), exist_ok=True)
        with open(self.config_path, 'w') as f:
            yaml.dump(self.config, f)

# Module-level singleton used across the package.
config_manager = ConfigManager()
-------------------------------------------------------------------------------- /playground.py: -------------------------------------------------------------------------------- 1 | from mem4ai.strategies.embedding_strategy import LiteLLMEmbeddingStrategy 2 | from mem4ai.strategies.storage_strategy import LMDBStorageStrategy 3 | from mem4ai.strategies.search_strategy import DefaultSearchStrategy 4 | from mem4ai.core.embedding_manager import EmbeddingManager 5 | from mem4ai.core.memory import Memory 6 | from mem4ai.memtor import Memtor 7 | import pytest 8 | from mem4ai import Memtor, Memory 9 | 10 | def test_embedding(): 11 | # Test the LiteLLMEmbeddingStrategy 12 | embedding_strategy = LiteLLMEmbeddingStrategy() 13 | 14 | # Test single string embedding 15 | test_string = "This is a test sentence for embedding." 16 | embedding = embedding_strategy.embed(test_string) 17 | print(f"Single string embedding shape: {len(embedding)}") 18 | assert len(embedding[0]) == embedding_strategy.dimension, "Embedding dimension mismatch" 19 | 20 | # Test list of strings embedding 21 | test_strings = [ 22 | "The quick brown fox jumps over the lazy dog.", 23 | "To be or not to be, that is the question.", 24 | "I think, therefore I am." 
25 | ] 26 | embeddings = embedding_strategy.embed(test_strings) 27 | print(f"List of strings embedding shape: {len(embeddings)}x{len(embeddings[0])}") 28 | assert all(len(emb) == embedding_strategy.dimension for emb in embeddings), "Embedding dimension mismatch" 29 | 30 | # Test embedding similarity 31 | from sklearn.metrics.pairwise import cosine_similarity 32 | similarity = cosine_similarity([embeddings[0]], [embeddings[1]])[0][0] 33 | print(f"Cosine similarity between first two sentences: {similarity}") 34 | 35 | print("All tests passed successfully!") 36 | 37 | def test_storage(): 38 | # Test the LMDBStorageStrategy 39 | storage = LMDBStorageStrategy() 40 | 41 | # Test save and load 42 | test_memory = Memory("Test content", {"tag": "test"}) 43 | storage.save(test_memory) 44 | loaded_memory = storage.load(test_memory.id) 45 | assert loaded_memory is not None, "Failed to load saved memory" 46 | assert loaded_memory.content == test_memory.content, "Loaded memory content does not match" 47 | 48 | # Test update 49 | test_memory.content = "Updated content" 50 | assert storage.update(test_memory.id, test_memory), "Failed to update memory" 51 | updated_memory = storage.load(test_memory.id) 52 | assert updated_memory is not None, "Failed to load updated memory" 53 | assert updated_memory.content == "Updated content", "Memory content was not updated" 54 | 55 | # Test delete 56 | assert storage.delete(test_memory.id), "Failed to delete memory" 57 | assert storage.load(test_memory.id) is None, "Memory was not deleted" 58 | 59 | # Test list_all and apply_filters 60 | memory1 = Memory("Content 1", {"value": 10}) 61 | memory2 = Memory("Content 2", {"value": 20}) 62 | storage.save(memory1) 63 | storage.save(memory2) 64 | all_memories = storage.list_all() 65 | assert len(all_memories) == 2, "list_all did not return all memories" 66 | filtered_memories = storage.apply_filters(all_memories, [("value", ">", 15)]) 67 | assert len(filtered_memories) == 1, "apply_filters did not 
filter correctly" 68 | assert filtered_memories[0].metadata["value"] == 20, "apply_filters returned wrong memory" 69 | 70 | print("All tests passed successfully!") 71 | 72 | def test_search_strategy(): 73 | # Create EmbeddingManager 74 | embedding_manager = EmbeddingManager() 75 | 76 | # Create SearchStrategy with EmbeddingManager 77 | search_strategy = DefaultSearchStrategy(embedding_manager) 78 | 79 | # Create test memories 80 | test_memories = [ 81 | Memory("The quick brown fox jumps over the lazy dog", {"tag": "animals", "year": 2020}), 82 | Memory("A journey of a thousand miles begins with a single step", {"tag": "motivation", "year": 2019}), 83 | Memory("To be or not to be, that is the question", {"tag": "literature", "year": 2021}), 84 | Memory("I think, therefore I am", {"tag": "philosophy", "year": 2018}) 85 | ] 86 | 87 | # Assign embeddings to test memories using EmbeddingManager 88 | for memory in test_memories: 89 | memory.embedding = embedding_manager.embed(memory.content) 90 | 91 | # Test search with string query and metadata filter 92 | query = "What animal jumps?" 
93 | metadata_filters = [("year", ">=", 2020)] 94 | results = search_strategy.search(query, test_memories, top_k=2, keywords=["animal"], metadata_filters=metadata_filters) 95 | assert len(results) == 2, f"Expected 2 results, got {len(results)}" 96 | assert "fox" in results[0].content.lower(), f"Expected 'fox' in top result, got: {results[0].content}" 97 | assert all(m.metadata["year"] >= 2020 for m in results), "Metadata filter not applied correctly" 98 | 99 | # Test search with embedding query and metadata filter 100 | query_embedding = embedding_manager.embed("A philosophical question") 101 | metadata_filters = [("tag", "==", "philosophy")] 102 | results = search_strategy.search(query_embedding, test_memories, top_k=1, keywords=[], metadata_filters=metadata_filters) 103 | assert len(results) == 1, f"Expected 1 result, got {len(results)}" 104 | assert results[0].metadata["tag"] == "philosophy", "Metadata filter not applied correctly" 105 | 106 | # Test error handling 107 | with pytest.raises(TypeError, match="Query must be a string or a list of floats"): 108 | search_strategy.search(123, test_memories, top_k=2, keywords=[], metadata_filters=[]) 109 | 110 | print("All tests passed successfully!") 111 | print("All tests passed successfully!") 112 | 113 | def test_memtor(): 114 | # Initialize Memtor 115 | memtor = Memtor() 116 | print("Memtor initialized successfully.") 117 | 118 | # Remove previous memroy for users and sessions 119 | memtor.storage_strategy.clear_all() 120 | 121 | # Add memories 122 | memory1_id = memtor.add_memory("The quick brown fox jumps over the lazy dog", 123 | metadata={"tag": "animals"}, 124 | user_id="user1", 125 | session_id="session1") 126 | memory2_id = memtor.add_memory("To be or not to be, that is the question", 127 | metadata={"tag": "literature"}, 128 | user_id="user1", 129 | session_id="session1") 130 | memory3_id = memtor.add_memory("E = mc^2", 131 | metadata={"tag": "science"}, 132 | user_id="user2", 133 | session_id="session2") 
134 | print("Three memories added successfully.") 135 | 136 | # List memories 137 | all_memories = memtor.list_memories() 138 | assert len(all_memories) == 3, f"Expected 3 memories, got {len(all_memories)}" 139 | print("List memories successful.") 140 | 141 | # Search memories 142 | search_results = memtor.search_memories("fox", top_k=1, user_id="user1") 143 | assert len(search_results) == 1, f"Expected 1 search result, got {len(search_results)}" 144 | assert "fox" in search_results[0].content, "Search result doesn't contain 'fox'" 145 | print("Search memories successful.") 146 | 147 | # Update memory 148 | updated = memtor.update_memory(memory1_id, "The quick brown fox leaps over the lazy dog", 149 | metadata={"tag": "animals", "updated": True}) 150 | assert updated, "Memory update failed" 151 | updated_memory = memtor.get_memory(memory1_id) 152 | assert "leaps" in updated_memory.content, "Memory content not updated correctly" 153 | assert updated_memory.metadata.get("updated") == True, "Memory metadata not updated correctly" 154 | print("Update memory successful.") 155 | 156 | # Delete memory 157 | deleted = memtor.delete_memory(memory3_id) 158 | assert deleted, "Memory deletion failed" 159 | remaining_memories = memtor.list_memories() 160 | assert len(remaining_memories) == 2, f"Expected 2 memories after deletion, got {len(remaining_memories)}" 161 | print("Delete memory successful.") 162 | 163 | # Test metadata filtering 164 | filtered_memories = memtor.list_memories(user_id="user1", metadata_filters=[("tag", "==", "literature")]) 165 | assert len(filtered_memories) == 1, f"Expected 1 filtered memory, got {len(filtered_memories)}" 166 | assert filtered_memories[0].metadata["tag"] == "literature", "Metadata filtering failed" 167 | print("Metadata filtering successful.") 168 | 169 | print("All tests passed successfully!") 170 | 171 | def test_knowledge_extraction(): 172 | """Test the knowledge extraction functionality of Memtor""" 173 | # Initialize Memtor 174 | 
memtor = Memtor() 175 | 176 | # Clear previous memories 177 | memtor.storage_strategy.clear_all() 178 | 179 | # Sample user message asking to create a horror movie list 180 | user_message = """Can you create a new favorites list called 'Horror Nights' and add some top-rated recent horror movies? I prefer psychological horror over gore.""" 181 | 182 | # Sample assistant response with both JSON data and natural language 183 | assistant_response = """{ 184 | "action": "create_and_populate_list", 185 | "list": { 186 | "name": "Horror Nights", 187 | "id": "hn_123", 188 | "movies": [ 189 | {"id": "m1", "title": "Talk to Me", "year": 2023, "rating": 7.1}, 190 | {"id": "m2", "title": "Hereditary", "year": 2018, "rating": 7.3}, 191 | {"id": "m3", "title": "The Black Phone", "year": 2021, "rating": 7.0} 192 | ] 193 | } 194 | } 195 | I've created a new list called 'Horror Nights' and added some highly-rated psychological horror movies. I included 'Talk to Me' (2023) which deals with supernatural communication, 'Hereditary' (2018) which is a masterpiece of psychological horror, and 'The Black Phone' (2021) which blends supernatural elements with psychological tension. 
Would you like me to add more movies or adjust the selection based on your preferences?""" 196 | 197 | # Add memory with the conversation 198 | memory_id = memtor.add_memory(user_message, assistant_response) 199 | 200 | # Retrieve the memory to check the extracted context 201 | memory = memtor.get_memory(memory_id) 202 | assert memory is not None, "Failed to retrieve memory" 203 | 204 | # Verify context structure 205 | assert 'context' in memory.__dict__, "Memory doesn't have context field" 206 | context = memory.context 207 | 208 | # Basic structure checks 209 | assert 'timestamp' in context, "Context missing timestamp" 210 | assert 'interaction_type' in context, "Context missing interaction_type" 211 | assert context['interaction_type'] == 'action_based', "Incorrect interaction type" 212 | 213 | # Check action details 214 | assert 'action_details' in context, "Context missing action_details" 215 | action_details = context['action_details'] 216 | assert action_details['primary_action']['type'] == 'creation', "Incorrect action type" 217 | assert action_details['primary_action']['target'] == 'favorite_list', "Incorrect action target" 218 | assert 'Horror Nights' in action_details['modified_elements']['lists'], "Created list not in modified elements" 219 | 220 | # Check conversation details 221 | assert 'conversation_details' in context, "Context missing conversation_details" 222 | conv_details = context['conversation_details'] 223 | assert conv_details['intent'] == 'movie_organization', "Incorrect conversation intent" 224 | assert 'horror' in conv_details['key_information']['explicit_mentions'], "Missing explicit mention of horror" 225 | 226 | # Check referenced values 227 | assert 'Talk to Me' in conv_details['referenced_values']['movies'], "Missing referenced movie" 228 | assert '7.1' in conv_details['referenced_values']['ratings'], "Missing referenced rating" 229 | assert '2023' in conv_details['referenced_values']['dates'], "Missing referenced date" 230 | 231 | 
# Check summary 232 | assert 'summary' in context, "Context missing summary" 233 | assert len(context['summary']['key_points']) > 0, "Summary missing key points" 234 | 235 | # Test searching with extracted knowledge 236 | search_results = memtor.search_memories("psychological horror movies") 237 | assert len(search_results) > 0, "Failed to find memory using extracted knowledge" 238 | assert memory_id == search_results[0].id, "Search didn't return the correct memory" 239 | 240 | # Test another type of interaction (conversational) 241 | user_message_2 = "What did you think about the ending of Hereditary?" 242 | assistant_response_2 = "The ending of Hereditary is particularly haunting because it shows the culmination of the family's tragic fate. The final scene in the treehouse reveals that the entire series of events was orchestrated by the cult of Paimon. The possession of Peter and his ritualistic crowning as Paimon's vessel creates a deeply disturbing conclusion that fits perfectly with the film's themes of family trauma and predestination." 
243 | 244 | memory_id_2 = memtor.add_memory(user_message_2, assistant_response_2) 245 | memory_2 = memtor.get_memory(memory_id_2) 246 | 247 | # Verify conversational context 248 | context_2 = memory_2.context 249 | assert context_2['interaction_type'] == 'conversational', "Incorrect interaction type for conversation" 250 | assert 'Hereditary' in context_2['conversation_details']['referenced_values']['movies'], "Missing movie reference" 251 | assert context_2['conversation_details']['intent'] == 'information_query', "Incorrect conversation intent" 252 | 253 | print("All knowledge extraction tests passed successfully!") 254 | 255 | def test_summary_knowledge_extraction(): 256 | """Test the simple summary knowledge extraction functionality of Memtor""" 257 | # Initialize Memtor with simple extraction strategy 258 | from mem4ai.strategies.knowledge_extraction import SummaryExtractionStrategy 259 | memtor = Memtor(extraction_strategy=SummaryExtractionStrategy()) 260 | 261 | # Clear previous memories 262 | memtor.storage_strategy.clear_all() 263 | 264 | # Sample user message asking to create a horror movie list 265 | user_message = """Can you create a new favorites list called 'Horror Nights' and add some top-rated recent horror movies? I prefer psychological horror over gore.""" 266 | 267 | # Sample assistant response with both JSON data and natural language 268 | assistant_response = """{ 269 | "action": "create_and_populate_list", 270 | "list": { 271 | "name": "Horror Nights", 272 | "id": "hn_123", 273 | "movies": [ 274 | {"id": "m1", "title": "Talk to Me", "year": 2023, "rating": 7.1}, 275 | {"id": "m2", "title": "Hereditary", "year": 2018, "rating": 7.3}, 276 | {"id": "m3", "title": "The Black Phone", "year": 2021, "rating": 7.0} 277 | ] 278 | } 279 | } 280 | I've created a new list called 'Horror Nights' and added some highly-rated psychological horror movies. 
I included 'Talk to Me' (2023) which deals with supernatural communication, 'Hereditary' (2018) which is a masterpiece of psychological horror, and 'The Black Phone' (2021) which blends supernatural elements with psychological tension. Would you like me to add more movies or adjust the selection based on your preferences?""" 281 | 282 | # Add memory with the conversation 283 | memory_id = memtor.add_memory(user_message, assistant_response) 284 | 285 | # Retrieve the memory to check the extracted context 286 | memory = memtor.get_memory(memory_id) 287 | assert memory is not None, "Failed to retrieve memory" 288 | 289 | # Verify context structure 290 | assert 'context' in memory.__dict__, "Memory doesn't have context field" 291 | context = memory.context 292 | 293 | # Basic structure checks 294 | assert 'timestamp' in context, "Context missing timestamp" 295 | assert 'interaction_type' in context, "Context missing interaction_type" 296 | assert context['interaction_type'] == 'task', "Incorrect interaction type" 297 | 298 | # Check summary 299 | assert 'summary' in context, "Context missing summary" 300 | assert isinstance(context['summary'], str), "Summary should be a string" 301 | assert 'Horror Nights' in context['summary'], "Summary should mention the created list" 302 | 303 | # Check keywords 304 | assert 'keywords' in context, "Context missing keywords" 305 | assert isinstance(context['keywords'], list), "Keywords should be a list" 306 | assert any('horror' in keyword.lower() for keyword in context['keywords']), "Keywords should include 'horror'" 307 | assert any('psychological' in keyword.lower() for keyword in context['keywords']), "Keywords should include 'psychological'" 308 | 309 | # Test conversational interaction 310 | user_message_2 = "What did you think about the ending of Hereditary?" 311 | assistant_response_2 = "The ending of Hereditary is particularly haunting because it shows the culmination of the family's tragic fate. 
The final scene in the treehouse reveals that the entire series of events was orchestrated by the cult of Paimon. The possession of Peter and his ritualistic crowning as Paimon's vessel creates a deeply disturbing conclusion that fits perfectly with the film's themes of family trauma and predestination." 312 | 313 | memory_id_2 = memtor.add_memory(user_message_2, assistant_response_2) 314 | memory_2 = memtor.get_memory(memory_id_2) 315 | 316 | # Verify conversational context 317 | context_2 = memory_2.context 318 | assert context_2['interaction_type'] == 'discussion', "Incorrect interaction type for conversation" 319 | assert isinstance(context_2['summary'], str), "Summary should be a string" 320 | assert 'Hereditary' in context_2['summary'], "Summary should mention the movie" 321 | assert any('ending' in keyword.lower() for keyword in context_2['keywords']), "Keywords should include 'ending'" 322 | assert any('hereditary' in keyword.lower() for keyword in context_2['keywords']), "Keywords should include 'hereditary'" 323 | 324 | # Test searching with extracted knowledge 325 | search_results = memtor.search_memories("psychological horror") 326 | assert len(search_results) > 0, "Failed to find memory using extracted knowledge" 327 | assert memory_id == search_results[0].id, "Search didn't return the correct memory" 328 | 329 | print("All summary knowledge extraction tests passed successfully!") 330 | 331 | def test_enhanced_storage_retrieval(): 332 | """Test the enhanced storage retrieval capabilities""" 333 | from datetime import datetime, timedelta 334 | from mem4ai.strategies.storage_strategy import LMDBStorageStrategy 335 | from mem4ai.core.memory import Memory 336 | 337 | # Initialize storage 338 | storage = LMDBStorageStrategy() 339 | storage.clear_all() # Start with clean storage 340 | 341 | print("\nTesting Enhanced Storage Retrieval...") 342 | 343 | # Create test data spanning different times, users, sessions, and agents 344 | base_time = datetime(2024, 1, 
1, 12, 0) # Start from noon on Jan 1, 2024 345 | test_data = [ 346 | # User 1, Session 1, Agent 1 347 | { 348 | "content": "Memory 1 for User 1", 349 | "metadata": {"user_id": "user1", "session_id": "session1", "agent_id": "agent1"}, 350 | "time_offset": timedelta(hours=1) 351 | }, 352 | { 353 | "content": "Memory 2 for User 1", 354 | "metadata": {"user_id": "user1", "session_id": "session1", "agent_id": "agent1"}, 355 | "time_offset": timedelta(hours=2) 356 | }, 357 | # User 1, Session 2, Agent 2 358 | { 359 | "content": "Memory 3 for User 1", 360 | "metadata": {"user_id": "user1", "session_id": "session2", "agent_id": "agent2"}, 361 | "time_offset": timedelta(hours=3) 362 | }, 363 | # User 2, Session 3, Agent 1 364 | { 365 | "content": "Memory 4 for User 2", 366 | "metadata": {"user_id": "user2", "session_id": "session3", "agent_id": "agent1"}, 367 | "time_offset": timedelta(hours=4) 368 | }, 369 | # User 2, Session 3, Agent 2 370 | { 371 | "content": "Memory 5 for User 2", 372 | "metadata": {"user_id": "user2", "session_id": "session3", "agent_id": "agent2"}, 373 | "time_offset": timedelta(hours=5) 374 | } 375 | ] 376 | 377 | # Store test memories 378 | stored_ids = [] 379 | for data in test_data: 380 | memory = Memory( 381 | content=data["content"], 382 | metadata=data["metadata"] 383 | ) 384 | memory.timestamp = base_time + data["time_offset"] 385 | storage.save(memory) 386 | stored_ids.append(memory.id) 387 | 388 | print("Test data stored successfully.") 389 | 390 | # Test 1: Recent memories with no filters 391 | print("\nTest 1: Recent memories (no filters)") 392 | recent_memories = storage.find_recent(limit=3) 393 | assert len(recent_memories) == 3, f"Expected 3 recent memories, got {len(recent_memories)}" 394 | assert recent_memories[0].content == "Memory 5 for User 2", "Most recent memory should be Memory 5" 395 | print("✓ Recent memories test passed") 396 | 397 | # Test 2: Recent memories for specific user 398 | print("\nTest 2: Recent memories for User 
1") 399 | user1_memories = storage.find_recent(limit=5, user_id="user1") 400 | assert len(user1_memories) == 3, f"Expected 3 memories for user1, got {len(user1_memories)}" 401 | assert all(m.metadata["user_id"] == "user1" for m in user1_memories), "All memories should be from user1" 402 | print("✓ User-specific recent memories test passed") 403 | 404 | # Test 3: Time range queries 405 | print("\nTest 3: Time range queries") 406 | time_range_start = base_time + timedelta(hours=2) 407 | time_range_end = base_time + timedelta(hours=4) 408 | time_range_memories = storage.find_by_time( 409 | start_time=time_range_start, 410 | end_time=time_range_end 411 | ) 412 | assert len(time_range_memories) == 3, f"Expected 3 memories in time range, got {len(time_range_memories)}" 413 | print("✓ Time range query test passed") 414 | 415 | # Test 4: Time range with user filter 416 | print("\nTest 4: Time range for specific user") 417 | user_time_memories = storage.find_by_time( 418 | start_time=base_time, 419 | end_time=base_time + timedelta(hours=6), 420 | user_id="user2" 421 | ) 422 | assert len(user_time_memories) == 2, f"Expected 2 memories for user2 in time range, got {len(user_time_memories)}" 423 | assert all(m.metadata["user_id"] == "user2" for m in user_time_memories), "All memories should be from user2" 424 | print("✓ Time range with user filter test passed") 425 | 426 | # Test 5: Metadata combination queries 427 | print("\nTest 5: Multiple metadata filters") 428 | filtered_memories = storage.find_by_meta({ 429 | "user_id": "user1", 430 | "session_id": "session1", 431 | "agent_id": "agent1" 432 | }) 433 | assert len(filtered_memories) == 2, f"Expected 2 memories with combined filters, got {len(filtered_memories)}" 434 | print("✓ Multiple metadata filters test passed") 435 | 436 | # Test 6: Partial metadata queries 437 | print("\nTest 6: Partial metadata filters") 438 | session_agent_memories = storage.find_by_meta({ 439 | "session_id": "session3", 440 | "agent_id": "agent1" 
441 | }) 442 | assert len(session_agent_memories) == 1, f"Expected 1 memory with session3/agent1, got {len(session_agent_memories)}" 443 | print("✓ Partial metadata filters test passed") 444 | 445 | # Test 7: Recent memories with multiple filters 446 | print("\nTest 7: Recent memories with multiple filters") 447 | recent_filtered = storage.find_recent( 448 | limit=5, 449 | user_id="user1", 450 | agent_id="agent1" 451 | ) 452 | assert len(recent_filtered) == 2, f"Expected 2 recent filtered memories, got {len(recent_filtered)}" 453 | assert all(m.metadata["user_id"] == "user1" and m.metadata["agent_id"] == "agent1" 454 | for m in recent_filtered), "Incorrect filter application" 455 | print("✓ Recent memories with multiple filters test passed") 456 | 457 | # Test 8: Edge cases 458 | print("\nTest 8: Edge cases") 459 | # Test empty time range 460 | empty_time_range = storage.find_by_time( 461 | start_time=base_time - timedelta(days=1), 462 | end_time=base_time - timedelta(hours=1) 463 | ) 464 | assert len(empty_time_range) == 0, "Should get no memories for empty time range" 465 | 466 | # Test non-existent metadata 467 | no_results = storage.find_by_meta({"user_id": "nonexistent"}) 468 | assert len(no_results) == 0, "Should get no memories for non-existent metadata" 469 | 470 | # Test future time range 471 | future_memories = storage.find_by_time( 472 | start_time=base_time + timedelta(days=1), 473 | end_time=base_time + timedelta(days=2) 474 | ) 475 | assert len(future_memories) == 0, "Should get no memories for future time range" 476 | print("✓ Edge cases test passed") 477 | 478 | # Clean up 479 | storage.clear_all() 480 | print("\nAll enhanced storage retrieval tests passed successfully!") 481 | 482 | if __name__ == "__main__": 483 | # Existing tests... 
484 | # test_embedding() 485 | # test_storage() 486 | # test_search_strategy() 487 | # test_memtor() 488 | # test_knowledge_extraction() 489 | # test_summary_knowledge_extraction() 490 | 491 | # New enhanced storage test 492 | test_enhanced_storage_retrieval() -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup, find_packages 3 | 4 | # Read the contents of your README file 5 | this_directory = os.path.abspath(os.path.dirname(__file__)) 6 | with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f: 7 | long_description = f.read() 8 | 9 | # Read the project version from package __init__.py 10 | def read_version(): 11 | with open(os.path.join('mem4ai', '__init__.py'), 'r') as f: 12 | for line in f: 13 | if line.startswith('__version__'): 14 | return line.split('=')[1].strip().strip("'").strip('"') 15 | raise RuntimeError('Unable to find version string.') 16 | 17 | setup( 18 | name='mem4ai', 19 | version=read_version(), 20 | author='unclecode', 21 | author_email='unclecode@kidocode.com', 22 | description='A powerful memory management library for LLMs and AI systems', 23 | long_description=long_description, 24 | long_description_content_type='text/markdown', 25 | url='https://github.com/unclecode/mem4ai', 26 | packages=find_packages(include=['mem4ai*']), # Modified this line 27 | package_data={'': ['*.json', '*.yaml', '*.yml']}, # Added this line for config files if any 28 | classifiers=[ 29 | 'Development Status :: 3 - Alpha', 30 | 'Intended Audience :: Developers', 31 | 'License :: OSI Approved :: MIT License', 32 | 'Programming Language :: Python :: 3', 33 | 'Programming Language :: Python :: 3.7', 34 | 'Programming Language :: Python :: 3.8', 35 | 'Programming Language :: Python :: 3.9', 36 | 'Programming Language :: Python :: 3.10', 37 | 'Programming Language :: Python :: 3.11', 38 
| 'Programming Language :: Python :: 3.12', 39 | ], 40 | python_requires='>=3.7', 41 | install_requires=[ 42 | 'numpy>=1.26.0,<2.1.1', 43 | 'scikit-learn>=1.5.1', 44 | 'lmdb>=1.5.1', 45 | 'litellm>=1.43.4', # Adjust version as needed 46 | # Add any other core dependencies your project needs 47 | ], 48 | extras_require={ 49 | 'dev': [ 50 | 'pytest>=6.0', 51 | 'pytest-cov>=2.0', 52 | 'flake8>=3.9', 53 | 'black>=21.5b1', 54 | ], 55 | 'docs': [ 56 | 'sphinx>=4.0', 57 | 'sphinx_rtd_theme>=0.5', 58 | ], 59 | }, 60 | include_package_data=True, 61 | keywords='memory management llm ai', 62 | project_urls={ 63 | 'Bug Reports': 'https://github.com/unclecode/mem4ai/issues', 64 | 'Source': 'https://github.com/unclecode/mem4ai/', 65 | 'Documentation': 'https://mem4ai.readthedocs.io/', 66 | }, 67 | ) -------------------------------------------------------------------------------- /tests/test_memtor.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) 3 | import pytest 4 | from mem4ai import Memtor, Memory 5 | from typing import List, Dict, Any 6 | 7 | @pytest.fixture 8 | def memtor(): 9 | return Memtor() 10 | 11 | @pytest.fixture 12 | def clean_memtor(): 13 | m = Memtor() 14 | # Clear memories for test users and sessions 15 | m.delete_memories_by_user("test_user") 16 | m.delete_memories_by_session("test_session") 17 | return m 18 | 19 | @pytest.fixture 20 | def sample_memories(): 21 | return [ 22 | {"content": "The quick brown fox jumps over the lazy dog", "metadata": {"tag": "animals", "year": 2020}}, 23 | {"content": "To be or not to be, that is the question", "metadata": {"tag": "literature", "year": 2021}}, 24 | {"content": "I think, therefore I am", "metadata": {"tag": "philosophy", "year": 2019}}, 25 | {"content": "The only way to do great work is to love what you do", "metadata": {"tag": "motivation", "year": 2022}} 26 | ] 27 | 28 | def 
test_add_memory(clean_memtor: Memtor): 29 | memory_id = clean_memtor.add_memory("Test content", {"tag": "test"}, user_id="test_user") 30 | assert isinstance(memory_id, str) 31 | assert len(memory_id) > 0 32 | 33 | def test_get_memory(clean_memtor: Memtor): 34 | content = "Test content for get" 35 | memory_id = clean_memtor.add_memory(content, {"tag": "test"}, user_id="test_user") 36 | retrieved_memory = clean_memtor.get_memory(memory_id) 37 | assert isinstance(retrieved_memory, Memory) 38 | assert retrieved_memory.content == content 39 | assert retrieved_memory.metadata["tag"] == "test" 40 | 41 | def test_update_memory(clean_memtor: Memtor): 42 | memory_id = clean_memtor.add_memory("Original content", {"tag": "original"}, user_id="test_user") 43 | updated = clean_memtor.update_memory(memory_id, "Updated content", {"tag": "updated"}) 44 | assert updated is True 45 | updated_memory = clean_memtor.get_memory(memory_id) 46 | assert updated_memory.content == "Updated content" 47 | assert updated_memory.metadata["tag"] == "updated" 48 | 49 | def test_delete_memory(clean_memtor: Memtor): 50 | memory_id = clean_memtor.add_memory("Content to delete", {"tag": "delete"}, user_id="test_user") 51 | deleted = clean_memtor.delete_memory(memory_id) 52 | assert deleted is True 53 | assert clean_memtor.get_memory(memory_id) is None 54 | 55 | def test_list_memories(clean_memtor: Memtor, sample_memories: List[Dict[str, Any]]): 56 | for mem in sample_memories: 57 | clean_memtor.add_memory(mem["content"], mem["metadata"], user_id="test_user", session_id="test_session") 58 | 59 | all_memories = clean_memtor.list_memories(user_id="test_user") 60 | assert len(all_memories) == len(sample_memories) 61 | 62 | animal_memories = clean_memtor.list_memories(user_id="test_user", metadata_filters=[("tag", "==", "animals")]) 63 | assert len(animal_memories) == 1 64 | assert animal_memories[0].metadata["tag"] == "animals" 65 | 66 | recent_memories = clean_memtor.list_memories(user_id="test_user", 
metadata_filters=[("year", ">=", 2021)]) 67 | assert len(recent_memories) == 2 68 | assert all(mem.metadata["year"] >= 2021 for mem in recent_memories) 69 | 70 | def test_search_memories(clean_memtor: Memtor, sample_memories: List[Dict[str, Any]]): 71 | for mem in sample_memories: 72 | clean_memtor.add_memory(mem["content"], mem["metadata"], user_id="test_user", session_id="test_session") 73 | 74 | results = clean_memtor.search_memories("fox", top_k=1, user_id="test_user") 75 | assert len(results) == 1 76 | assert "fox" in results[0].content.lower() 77 | 78 | results = clean_memtor.search_memories("philosophy", top_k=2, user_id="test_user") 79 | assert len(results) == 2 80 | assert any("think" in mem.content.lower() for mem in results) 81 | 82 | results = clean_memtor.search_memories("work", top_k=1, user_id="test_user", metadata_filters=[("year", ">", 2020)]) 83 | assert len(results) == 1 84 | assert "work" in results[0].content.lower() 85 | assert results[0].metadata["year"] > 2020 86 | 87 | def test_error_handling(clean_memtor: Memtor): 88 | with pytest.raises(TypeError): 89 | clean_memtor.add_memory(123) # type: ignore 90 | 91 | with pytest.raises(TypeError): 92 | clean_memtor.get_memory(123) # type: ignore 93 | 94 | with pytest.raises(TypeError): 95 | clean_memtor.update_memory(123, "content") # type: ignore 96 | 97 | with pytest.raises(TypeError): 98 | clean_memtor.delete_memory(123) # type: ignore 99 | 100 | with pytest.raises(TypeError): 101 | clean_memtor.search_memories(123) # type: ignore 102 | 103 | def test_memory_persistence(clean_memtor: Memtor): 104 | memory_id = clean_memtor.add_memory("Persistent content", {"tag": "persist"}, user_id="test_user") 105 | 106 | # Create a new Memtor instance to check if the memory persists 107 | new_memtor = Memtor() 108 | retrieved_memory = new_memtor.get_memory(memory_id) 109 | assert retrieved_memory is not None 110 | assert retrieved_memory.content == "Persistent content" 111 | assert 
retrieved_memory.metadata["tag"] == "persist" 112 | 113 | def test_delete_memories_by_user(clean_memtor: Memtor): 114 | for i in range(5): 115 | clean_memtor.add_memory(f"Test content {i}", {"tag": "test"}, user_id="test_user") 116 | 117 | deleted_count = clean_memtor.delete_memories_by_user("test_user") 118 | assert deleted_count == 5 119 | 120 | remaining_memories = clean_memtor.list_memories(user_id="test_user") 121 | assert len(remaining_memories) == 0 122 | 123 | def test_delete_memories_by_session(clean_memtor: Memtor): 124 | for i in range(3): 125 | clean_memtor.add_memory(f"Test content {i}", {"tag": "test"}, session_id="test_session") 126 | 127 | deleted_count = clean_memtor.delete_memories_by_session("test_session") 128 | assert deleted_count == 3 129 | 130 | remaining_memories = clean_memtor.list_memories(session_id="test_session") 131 | assert len(remaining_memories) == 0 132 | 133 | if __name__ == "__main__": 134 | pytest.main([__file__]) --------------------------------------------------------------------------------