├── .gitignore ├── LICENSE.md ├── README.md ├── imgs ├── planning.png └── reaction.png ├── log.txt ├── main_agent.ipynb └── server ├── __init__.py ├── generativeAgent.py ├── gptq ├── __init__.py ├── gptq.py ├── modelutils.py └── quant.py ├── model.py ├── prompt.py └── time_weighted_retriever.py /.gitignore: -------------------------------------------------------------------------------- 1 | main_agent_demo.ipynb 2 | # Created by https://www.toptal.com/developers/gitignore/api/python 3 | # Edit at https://www.toptal.com/developers/gitignore?templates=python 4 | 5 | ### Python ### 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | share/python-wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .nox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | *.py,cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | cover/ 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | local_settings.py 66 | db.sqlite3 67 | db.sqlite3-journal 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | .pybuilder/ 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | # For a library or package, you might want to ignore these files since the code is 92 | # intended to run in multiple environments; otherwise, check them in: 93 | # .python-version 94 | 95 | # pipenv 96 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 97 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 98 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 99 | # install all needed dependencies. 100 | #Pipfile.lock 101 | 102 | # poetry 103 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 104 | # This is especially recommended for binary packages to ensure reproducibility, and is more 105 | # commonly ignored for libraries. 106 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 107 | #poetry.lock 108 | 109 | # pdm 110 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 111 | #pdm.lock 112 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 113 | # in version control. 114 | # https://pdm.fming.dev/#use-with-ide 115 | .pdm.toml 116 | 117 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 118 | __pypackages__/ 119 | 120 | # Celery stuff 121 | celerybeat-schedule 122 | celerybeat.pid 123 | 124 | # SageMath parsed files 125 | *.sage.py 126 | 127 | # Environments 128 | .env 129 | .venv 130 | env/ 131 | venv/ 132 | ENV/ 133 | env.bak/ 134 | venv.bak/ 135 | 136 | # Spyder project settings 137 | .spyderproject 138 | .spyproject 139 | 140 | # Rope project settings 141 | .ropeproject 142 | 143 | # mkdocs documentation 144 | /site 145 | 146 | # mypy 147 | .mypy_cache/ 148 | .dmypy.json 149 | dmypy.json 150 | 151 | # Pyre type checker 152 | .pyre/ 153 | 154 | # pytype static type analyzer 155 | .pytype/ 156 | 157 | # Cython debug symbols 158 | cython_debug/ 159 | 160 | # PyCharm 161 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 162 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 163 | # and can be added to the global gitignore or merged into this file. For a more nuclear 164 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 165 | #.idea/ 166 | 167 | ### Python Patch ### 168 | # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration 169 | poetry.toml 170 | 171 | # ruff 172 | .ruff_cache/ 173 | 174 | # LSP config files 175 | pyrightconfig.json 176 | 177 | # End of https://www.toptal.com/developers/gitignore/api/python 178 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # Attribution-NonCommercial 4.0 International 2 | 3 | > *Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.* 4 | > 5 | > ### Using Creative Commons Public Licenses 6 | > 7 | > Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 8 | > 9 | > * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. 
[More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors). 10 | > 11 | > * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees). 12 | 13 | ## Creative Commons Attribution-NonCommercial 4.0 International Public License 14 | 15 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 16 | 17 | ### Section 1 – Definitions. 18 | 19 | a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 20 | 21 | b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 22 | 23 | c. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 24 | 25 | d. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 26 | 27 | e. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 28 | 29 | f. 
__Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 30 | 31 | g. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 32 | 33 | h. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License. 34 | 35 | i. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. 36 | 37 | j. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 38 | 39 | k. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 40 | 41 | l. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. 42 | 43 | ### Section 2 – Scope. 44 | 45 | a. ___License grant.___ 46 | 47 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 48 | 49 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and 50 | 51 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 52 | 53 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 54 | 55 | 3. __Term.__ The term of this Public License is specified in Section 6(a). 56 | 57 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 58 | 59 | 5. __Downstream recipients.__ 60 | 61 | A. 
__Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 62 | 63 | B. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 64 | 65 | 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 66 | 67 | b. ___Other rights.___ 68 | 69 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 70 | 71 | 2. Patent and trademark rights are not licensed under this Public License. 72 | 73 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. 74 | 75 | ### Section 3 – License Conditions. 76 | 77 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 78 | 79 | a. ___Attribution.___ 80 | 81 | 1. If You Share the Licensed Material (including in modified form), You must: 82 | 83 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 84 | 85 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 86 | 87 | ii. a copyright notice; 88 | 89 | iii. a notice that refers to this Public License; 90 | 91 | iv. a notice that refers to the disclaimer of warranties; 92 | 93 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 94 | 95 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 96 | 97 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 98 | 99 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 100 | 101 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 102 | 103 | 4. 
If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License. 104 | 105 | ### Section 4 – Sui Generis Database Rights. 106 | 107 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 108 | 109 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; 110 | 111 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and 112 | 113 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 114 | 115 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 116 | 117 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability. 118 | 119 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__ 120 | 121 | b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__ 122 | 123 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 124 | 125 | ### Section 6 – Term and Termination. 126 | 127 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 128 | 129 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 130 | 131 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 132 | 133 | 2. upon express reinstatement by the Licensor. 134 | 135 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 136 | 137 | c. 
For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 138 | 139 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 140 | 141 | ### Section 7 – Other Terms and Conditions. 142 | 143 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 144 | 145 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 146 | 147 | ### Section 8 – Interpretation. 148 | 149 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 150 | 151 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 152 | 153 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 154 | 155 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 156 | 157 | > Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 158 | > 159 | > Creative Commons may be contacted at creativecommons.org 160 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Generative Agents with Guidance, Langchain, and local LLMs 2 | This is the implementation of paper ["Generative Agents: Interactive Simulacra of Human Behavior"](https://arxiv.org/pdf/2304.03442.pdf). This is based on the [Langchain implementation](https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html). We improve and add more features to make it like the original paper as much as possible. 
3 | For more explanation, please check [my medium post](https://medium.com/@gartist/implement-generative-agent-with-local-llm-guidance-and-langchain-full-features-fa57655f3de1).
4 | 
5 | Note: I just fixed a conflict between the current prompts and Guidance. I recommend guidance==0.0.63 for stability.
6 | 
7 | ### Supported Features:
8 | - [x] Works with local LLMs
9 | - [x] Memory and Retrieval
10 | - [x] Reflection
11 | - [x] Planning (needs improvement)
12 | - [x] Reacting and re-planning
13 | - [x] Dialogue generation (needs improvement)
14 | - [x] Agent summary
15 | - [x] Interview
16 | - [ ] Web UI (Gradio)
17 | 
18 | # How to use
19 | ### Install
20 | Python packages:
21 | - [Guidance](https://github.com/microsoft/guidance) `pip install guidance==0.0.63`
22 | - [GPTQ-for-LLaMa](https://github.com/oobabooga/GPTQ-for-LLaMa.git)
23 | - [Langchain](https://github.com/hwchase17/langchain) `pip install langchain==0.0.190`
24 | - [Faiss](https://github.com/facebookresearch/faiss) (used as the VectorStore; feel free to swap in your own from [this list](https://python.langchain.com/en/latest/modules/indexes/vectorstores.html))
25 | 
26 | The GPTQ-for-LLaMa build I used is oobabooga's fork. You can install it by following [these instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md#step-1-install-gptq-for-llama).
27 | 
28 | ### Run
29 | Please check the notebook file. I use the [wizard-mega-13B-GPTQ](https://huggingface.co/TheBloke/wizard-mega-13B-GPTQ) model. Feel free to try others.
30 | 
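The notebook first builds the two objects every example below depends on: a Guidance LLM binding and a LangChain embeddings model. Below is a minimal sketch of one possible setup. `load_quantized_model` is a hypothetical placeholder for the GPTQ loading code (see the notebook and `server/model.py` for the actual loader), and the embedding model is an assumption chosen to match the 384-dimensional FAISS index built in `server/generativeAgent.py`:

```python
import guidance
from langchain.embeddings import HuggingFaceEmbeddings

# Hypothetical helper: load the 4-bit GPTQ checkpoint and its tokenizer.
# The real loading code lives in the notebook / server/model.py.
model, tokenizer = load_quantized_model("TheBloke/wizard-mega-13B-GPTQ")
guidance.llm = guidance.llms.Transformers(model=model, tokenizer=tokenizer)

# The agent builds a FAISS index with embedding_size = 384, which matches
# sentence-transformers/all-MiniLM-L6-v2.
embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
```

With this in place, the `guidance` module itself and `embeddings_model` are what get passed to `GenerativeAgent` below.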
31 | # Notebook
32 | ### Define Generative Agent
33 | ```python
34 | description = "Sam is a Ph.D student, his major is CS;Sam likes computers;Sam lives with his friend, Bob;Sam's father is a doctor;Sam has a dog named Max"
35 | sam = GenerativeAgent(guidance=guidance,
36 |                       name='Sam',
37 |                       age=23,
38 |                       des=description,
39 |                       trails='funny, likes football, plays CSGO',
40 |                       embeddings_model=embeddings_model)
41 | ```
42 | ### Add memories
43 | ```python
44 | sam_observations = [
45 |     "Sam wakes up in the morning",
46 |     "Sam feels tired because of playing games",
47 |     "Sam has an assignment for the AI course",
48 |     "Sam sees that Max is sick",
49 |     "Bob says hello to Sam",
50 |     "Bob leaves the room",
51 |     "Sam says goodbye to Bob",
52 | ]
53 | sam.add_memories(sam_observations)
54 | ```
55 | ### Summary
56 | ```python
57 | summary = sam.get_summary(force_refresh=True)
58 | print(summary)
59 | """
60 | Name: Sam (age: 23)
61 | Summary: Sam can be described as a Ph.D student who is interested in computer science and has a dog named Max. He is also a student of AI course and has a father who is a doctor. Sam is also a gamer and lives with his friend Bob. Additionally, Sam is a caring person who feels tired due to playing games and says goodbye to his friend Bob.. Sam is a Ph.D student majoring in Computer Science. He wakes up in the morning and lives with his friend Bob. Sam has a dog named Max and he is currently feeling tired due to playing games. Sam also has an assignment for his AI course.. it is difficult to determine Sam's feeling about his recent progress in life. However, if we assume that Sam is satisfied with his progress, we can describe his feeling as content or fulfilled.
62 | """
63 | ```
64 | ### Planning and update status
65 | ```python
66 | status = sam.update_status()
67 | ```
68 | ![alt text](https://github.com/QuangBK/generativeAgent_LLM/blob/main/imgs/planning.png?raw=true)
69 | 
70 | ### Reaction
71 | ```python
72 | bool_react, reaction, context = sam.react(observation='The dog bowl is empty',
73 |                                           observed_entity='Dog bowl',
74 |                                           entity_status='The dog bowl is empty')
75 | print(f"{bool_react}\nReaction: {reaction}\nContext: {context}")
76 | """
77 | Yes
78 | Reaction: Sam could put food in the dog's bowl and then call Max over to eat.
79 | Context: Sam has a dog named Max, and he is a Ph.D student majoring in CS. Sam's father is a doctor, and Sam lives with his friend Bob. Sam likes computers and is currently taking an AI course. Sam is tired because of playing games. Bob left the room and said hello to Sam. Sam woke up in the morning and saw that the dog bowl was empty.
80 | """
81 | ```
82 | ![alt text](https://github.com/QuangBK/generativeAgent_LLM/blob/main/imgs/reaction.png?raw=true)
83 | 
84 | ### Dialogue generation
85 | ```python
86 | bool_react, reaction, context = sam.react(observation='Bob comes into the room with a new PC',
87 |                                           observed_entity=bob,
88 |                                           entity_status='Bob is setting up his new PC')
89 | 
90 | print(sam.dialogue_list[0])
91 | """
92 | Friday June 02, 2023, 18:15
93 | 
94 | Bob: Hey Sam, check this out! I got a new PC and it's amazing.
95 | Sam: That's great, Bob. Do you need any help setting it up?
96 | Bob: No, I got it all set up already. It's just for gaming, but I'm really excited.
97 | Sam: That's awesome. I'm always interested in trying out new hardware. Maybe I'll stop by and check it out.
98 | Bob: Yeah, of course. I was just thinking, maybe you could help me with a few settings. I'm not that great at this stuff.
99 | Sam: Sure, I'd be happy to help. When do you want to get started?
100 | Bob: How about later on tonight? I'll call you when I'm ready to get going.
101 | Sam: No problem. Let me know when you're ready and I'll head over.
102 | """
103 | ```
104 | 
105 | ### Interview
106 | ```python
107 | response = sam.interview('Friend', 'Who do you live with?')
108 | print(response)
109 | """
110 | I live with a friend of mine, his name is Bob.
111 | """
112 | ```
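### Controlling the simulated time
`GenerativeAgent` keeps its own simulated clock: `set_current_time` pins it (otherwise `datetime.now()` is used), `update_status` selects or plans the task covering the current time, and `next_task` advances the clock to the end of the current task before picking the next one. A short sketch based on the methods in `server/generativeAgent.py` (the chosen date is an assumption):

```python
from datetime import datetime

# Pin the simulated clock before planning.
sam.set_current_time(datetime(2023, 6, 2, 8, 0))

# Pick (or plan) the task covering the current time.
status = sam.update_status()
print(status['from'], status['to'], status['task'])

# Advance the clock to the end of the current task and move on.
status = sam.next_task()
print(status['task'])
```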
113 | 
114 | # License
115 | 
116 | Creative Commons Attribution-NonCommercial (CC BY-NC-4.0)
117 | 
--------------------------------------------------------------------------------
/imgs/planning.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/QuangBK/generativeAgent_LLM/eba48cfe172ba16614c26426a26d38a3e4e803e4/imgs/planning.png
--------------------------------------------------------------------------------
/imgs/reaction.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/QuangBK/generativeAgent_LLM/eba48cfe172ba16614c26426a26d38a3e4e803e4/imgs/reaction.png
--------------------------------------------------------------------------------
/log.txt:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/QuangBK/generativeAgent_LLM/eba48cfe172ba16614c26426a26d38a3e4e803e4/log.txt
--------------------------------------------------------------------------------
/server/__init__.py:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/QuangBK/generativeAgent_LLM/eba48cfe172ba16614c26426a26d38a3e4e803e4/server/__init__.py
--------------------------------------------------------------------------------
/server/generativeAgent.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from langchain.docstore import InMemoryDocstore
3 | # from langchain.retrievers import TimeWeightedVectorStoreRetriever
4 | from langchain.vectorstores import FAISS
5 | from langchain.schema import Document
6 | 
7 | import math
8 | import faiss
9 | from .prompt import *
10 | from .time_weighted_retriever import TimeWeightedVectorStoreRetrieverModified
11 | 
12 | import numpy as np
13 | 
14 | def score_normalizer(val: float) -> float:
15 |     return 1 - 1 / (1 + np.exp(val))  # equals the logistic sigmoid 1/(1 + e^-val); squashes the raw score into (0, 1)
16 | 
17 | # def score_normalizer(val: float) -> float:
18 | #     return val
19 | 
20 | def get_text_from_docs(list_docs, include_time = False):
21 |     texts = ""
22 |     for i, doc in enumerate(list_docs):
23 |         if include_time:
24 |             time_t = doc.metadata['created_at'].strftime('%A %B %d, %Y, %H:%M') + ": "
25 |         else:
26 |             time_t = ""
27 |         if i == 0:
28 |             texts += "- " + time_t + doc.page_content
29 |         else:
30 |             texts += "\n- " + time_t + doc.page_content
31 |     return texts
32 | 
33 | def merge_docs(docs1, docs2):
34 |     list_index1 = []
35 |     docs_merged = []
36 |     for doc_t in docs1:
37 |         list_index1.append(doc_t.metadata['buffer_idx'])
38 |         docs_merged.append(doc_t)
39 |     for doc_t in docs2:
40 |         if not (doc_t.metadata['buffer_idx'] in list_index1):
41 |             docs_merged.append(doc_t)
42 |     return docs_merged
43 | 
44 | # Based on
45 | # https://github.com/hwchase17/langchain/blob/master/langchain/experimental/generative_agents/generative_agent.py
46 | class GenerativeAgent:
47 |     def __init__(self, guidance, name, age, des, trails, embeddings_model, current_time=None):
48 |         self.guidance = guidance
49 |         self.name = name
50 |         self.age = str(age)
51 |         self.des = des.split(';')
52 |         self.trails = trails
53 |         self.summary = trails
54 |         self.plan = []
55 |         self.status = None
56 |         embedding_size = 384
57 |         index = faiss.IndexFlatL2(embedding_size)
58 |         vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}, relevance_score_fn=score_normalizer)
59 |         self.retriever = TimeWeightedVectorStoreRetrieverModified(vectorstore=vectorstore, other_score_keys=["importance"], k=10, decay_rate=0.01)
60 |         self.current_time = current_time
61 |         if self.current_time is None:
62 |             self.last_refreshed = datetime.now()
63 |         else:
64 |             self.last_refreshed = current_time
65 |         self.summary_refresh_seconds = 3600
66 |         self.aggregate_importance = 0
67 |         self.reflecting = False
68 |         self.reflection_threshold = 25
69 |         self.dialogue_list = []
70 |         self.relevant_memories = ''
71 |         self.silent = False
72 | 
73 |         self.add_memories(self.des)
74 | 
75 |     def set_current_time(self, time):
76 |         self.current_time = time
77 | 
78 |     def get_current_time(self,):
79 |         if self.current_time is not None:
80 |             return self.current_time
81 |         else:
82 |             return datetime.now()
83 | 
84 |     def next_task(self,):
85 |         self.set_current_time(self.status['to'])
86 |         return self.update_status()
87 | 
88 |     def update_status(self,):
89 |         current_time = self.get_current_time()
90 |         need_replan = True
91 |         for task_temp in self.plan:
92 |             # task_to_temp = datetime.strptime(task_temp['to'], '%H:%M')
93 |             task_to_temp = task_temp['to']
94 |             if task_to_temp > current_time:
95 |                 self.status = task_temp
96 |                 need_replan = False
97 |                 break
98 |         if need_replan:
99 |             new_plan = self.make_plan()
100 |             self.status = new_plan[0]
101 |         return self.status
102 | 
103 |     def add_memories(self, list_mem):
104 |         for mem_temp in list_mem:
105 |             if isinstance(mem_temp, tuple):  # a (description, created_at) pair; a bare string uses the current time
106 |                 mem_des, mem_time = mem_temp
107 |             else:
108 |                 mem_des = mem_temp
109 |                 mem_time = self.get_current_time()
110 | 
111 |             prompt = self.guidance(PROMPT_ADDMEM, silent=self.silent)
112 |             result = prompt(mem=mem_des)
113 |             # importance_score_temp = int(result['rate'])*self.importance_weight
114 |             importance_score_temp = int(result['rate'])
115 |             self.retriever.add_documents([Document(page_content=mem_des, metadata={"importance": importance_score_temp, "created_at": mem_time})], current_time=mem_time)
116 |             self.aggregate_importance += int(result['rate'])
117 | 
118 | 
119 | 
120 |         if not self.reflecting and self.aggregate_importance > self.reflection_threshold:
121 |             self.reflecting = True
122 |             self._reflection()
123 |             self.aggregate_importance = 0.0
124 |             self.reflecting = False
125 | 
126 |     def _get_salient(self,):
127 |         # number of recent memories
128 |         last_k = 20
129 |         recent_memories_list = self.retriever.memory_stream[-last_k:]
130 |         recent_memories_text = get_text_from_docs(recent_memories_list, include_time = True)
131 | 
132 |         prompt = self.guidance(PROMPT_SALIENT, silent=self.silent)
133 |         result = prompt(recent_memories=recent_memories_text)
134 |         return result['items']
135 | 
136 |     def _get_insights(self, list_docs):
137 |         docs = list_docs
138 |         statements = get_text_from_docs(docs, include_time = False)
139 |         prompt = self.guidance(PROMPT_INSIGHTS, silent=self.silent)
140 |         result = prompt(statements=statements)
141 |         return result['items']
142 | 
143 |     def _reflection(self,):
144 |         list_salient = self._get_salient()
145 |         list_docs = []
146 |         for salient_temp in list_salient:
147 |             docs = self.retriever.get_relevant_documents(salient_temp, self.get_current_time())
148 |             list_docs = merge_docs(list_docs, docs)
149 |         list_insights = self._get_insights(list_docs)
150 |         self.add_memories(list_insights)
151 | 
152 |     def get_summary(self, force_refresh=False, now=None):
153 |         current_time = self.get_current_time() if now is None else now
154 |         since_refresh = (current_time - self.last_refreshed).seconds
155 | 
156 |         if (
157 |             not self.summary
158 |             or since_refresh
>= self.summary_refresh_seconds 159 | or force_refresh 160 | ): 161 | core_characteristics = self._run_characteristics() 162 | daily_occupation = self._run_occupation() 163 | feeling = self._run_feeling() 164 | 165 | description = core_characteristics + '. ' + daily_occupation + '. ' + feeling 166 | self.summary = (f"Name: {self.name} (age: {self.age})" + f"\nTrails: {self.trails}" + f"\nSummary: {description}") 167 | self.last_refreshed = current_time 168 | return self.summary 169 | 170 | def _run_characteristics(self,): 171 | docs = self.retriever.get_relevant_documents(self.name + "'s core characteristics", self.get_current_time()) 172 | statements = get_text_from_docs(docs, include_time = False) 173 | 174 | prompt = self.guidance(PROMPT_CHARACTERISTICS, silent=self.silent) 175 | result = prompt(statements=statements, name=self.name) 176 | return result['res'] 177 | 178 | def _run_occupation(self,): 179 | docs = self.retriever.get_relevant_documents(self.name + "'s current daily occupation", self.get_current_time()) 180 | statements = get_text_from_docs(docs, include_time = False) 181 | 182 | prompt = self.guidance(PROMPT_OCCUPATION, silent=self.silent) 183 | result = prompt(statements=statements, name=self.name) 184 | return result['res'] 185 | 186 | def _run_feeling(self,): 187 | docs = self.retriever.get_relevant_documents(self.name + "'s feeling about his recent progress in life", self.get_current_time()) 188 | statements = get_text_from_docs(docs, include_time = False) 189 | 190 | prompt = self.guidance(PROMPT_FEELING, silent=self.silent) 191 | result = prompt(statements=statements, name=self.name) 192 | return result['res'] 193 | 194 | def make_plan(self,): 195 | now = self.get_current_time().strftime('%H:%M') 196 | prompt = self.guidance(PROMPT_PLAN, silent=self.silent) 197 | result = prompt(summary=self.summary, 198 | name=self.name, 199 | now=now, 200 | current_time=self.get_current_time().strftime('%A %B %d, %Y, %H:%M') 201 | ) 202 | 203 | current_date = self.get_current_time() 204 | list_task = result['items'] 205 | list_task.insert(0, {'from': now, 'to': result['to'], 'task': result['task']}) 206 | list_task_time = [] 207 | for i, task_temp in enumerate(list_task): 208 | t_from = datetime.strptime(task_temp['from'], '%H:%M') 209 | t_from = current_date.replace(hour=t_from.hour, minute=t_from.minute) 210 | t_to = datetime.strptime(task_temp['to'], '%H:%M') 211 | t_to = current_date.replace(hour=t_to.hour, minute=t_to.minute) 212 | delta_time = (t_to - t_from) 213 | if delta_time.total_seconds() < 0: 214 | t_to += timedelta(days=1) 215 | list_task_time.append({'from': t_from, 'to': t_to, 'task': task_temp['task']}) 216 | 217 | self.plan = list_task_time 218 | return list_task_time 219 | 220 | def react(self, observation, observed_entity, entity_status): 221 | self.add_memories([observation]) 222 | if isinstance(observed_entity, str): 223 | name_observed_entity = observed_entity 224 | else: 225 | name_observed_entity = observed_entity.name 226 | 227 | bool_react, reaction, context = self._check_reaction(observation, name_observed_entity, entity_status) 228 | if bool_react == 'Yes': 229 | if isinstance(observed_entity, GenerativeAgent): 230 | self._start_dialogue(observation, name_observed_entity, entity_status, context, reaction) 231 | new_plan = self._replan(observation, reaction) 232 | self.plan = new_plan 233 | self.update_status() 234 | return bool_react, reaction, context 235 | 236 | def _start_dialogue(self, observation, name_observed_entity, entity_status, context, 
reaction): 237 | prompt = self.guidance(PROMPT_DIALOGUE, silent=self.silent) 238 | result = prompt(summary=self.summary, 239 | name=self.name, 240 | status=self.status['task'], 241 | observation=observation, 242 | reaction=reaction, 243 | observed_entity=name_observed_entity, 244 | context=context, 245 | current_time=self.get_current_time().strftime('%A %B %d, %Y, %H:%M') 246 | ) 247 | self.dialogue_list.append(f"{self.get_current_time().strftime('%A %B %d, %Y, %H:%M')}\n{result['dialogue']}") 248 | return result['dialogue'] 249 | 250 | def _get_relevant_context(self, observed_entity, entity_status): 251 | docs1 = self.retriever.get_relevant_documents(f"What is {self.name}'s relationship with {observed_entity}?", self.get_current_time()) 252 | docs2 = self.retriever.get_relevant_documents(entity_status, self.get_current_time()) 253 | 254 | docs = merge_docs(docs1, docs2) 255 | statements = get_text_from_docs(docs, include_time = False) 256 | self.relevant_memories = statements 257 | prompt = self.guidance(PROMPT_CONTEXT, silent=self.silent) 258 | result = prompt(statements=statements, name=self.name, observed_entity=observed_entity, entity_status=entity_status) 259 | return result['context'] 260 | 261 | def _check_reaction(self, observation, observed_entity, entity_status): 262 | context = self._get_relevant_context(observed_entity, entity_status) 263 | prompt = self.guidance(PROMPT_REACT, silent=self.silent) 264 | result = prompt(summary=self.summary, 265 | name=self.name, 266 | status=self.status['task'], 267 | observation=observation, 268 | observed_entity=observed_entity, 269 | context=context, 270 | current_time=self.get_current_time().strftime('%A %B %d, %Y, %H:%M'), 271 | valid_opts=['Yes', 'No'] 272 | ) 273 | return result['reaction'], result['result'], context 274 | 275 | def _replan(self, observation, reaction): 276 | now = self.get_current_time().strftime('%H:%M') 277 | prompt = self.guidance(PROMPT_REPLAN, silent=self.silent) 278 | result = prompt(summary=self.summary, 279 | name=self.name, 280 | status=self.status['task'], 281 | observation=observation, 282 | reaction=reaction, 283 | now=now, 284 | current_time=self.get_current_time().strftime('%A %B %d, %Y, %H:%M') 285 | ) 286 | list_task = result['items'] 287 | list_task.insert(0, {'from': now, 'to': result['to'], 'task': result['task']}) 288 | 289 | current_date = self.get_current_time() 290 | list_task_time = [] 291 | for i, task_temp in enumerate(list_task): 292 | t_from = datetime.strptime(task_temp['from'], '%H:%M') 293 | t_from = current_date.replace(hour=t_from.hour, minute=t_from.minute) 294 | t_to = datetime.strptime(task_temp['to'], '%H:%M') 295 | t_to = current_date.replace(hour=t_to.hour, minute=t_to.minute) 296 | delta_time = (t_to - t_from) 297 | if delta_time.total_seconds() < 0: 298 | t_to += timedelta(days=1) 299 | list_task_time.append({'from': t_from, 'to': t_to, 'task': task_temp['task']}) 300 | return list_task_time 301 | 302 | def interview(self, user, question): 303 | # context = self._get_relevant_context(user, question) 304 | docs = self.retriever.get_relevant_documents(question, self.get_current_time()) 305 | context = get_text_from_docs(docs, include_time = False) 306 | self.relevant_memories = context 307 | 308 | prompt = self.guidance(PROMPT_INTERVIEW, silent=self.silent) 309 | result = prompt(summary=self.summary, 310 | name=self.name, 311 | status=self.status['task'], 312 | user=user, 313 | context=context, 314 | question=question, 315 | current_time=self.get_current_time().strftime('%A %B %d, 
%Y, %H:%M') 316 | ) 317 | return result['response'] 318 | -------------------------------------------------------------------------------- /server/gptq/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuangBK/generativeAgent_LLM/eba48cfe172ba16614c26426a26d38a3e4e803e4/server/gptq/__init__.py -------------------------------------------------------------------------------- /server/gptq/gptq.py: -------------------------------------------------------------------------------- 1 | import math 2 | import time 3 | 4 | import torch 5 | import torch.nn as nn 6 | import transformers 7 | 8 | from .quant import * 9 | 10 | 11 | DEBUG = False 12 | 13 | torch.backends.cuda.matmul.allow_tf32 = False 14 | torch.backends.cudnn.allow_tf32 = False 15 | 16 | 17 | class GPTQ: 18 | def __init__(self, layer): 19 | self.layer = layer 20 | self.dev = self.layer.weight.device 21 | W = layer.weight.data.clone() 22 | if isinstance(self.layer, nn.Conv2d): 23 | W = W.flatten(1) 24 | if isinstance(self.layer, transformers.Conv1D): 25 | W = W.t() 26 | self.rows = W.shape[0] 27 | self.columns = W.shape[1] 28 | self.H = torch.zeros((self.columns, self.columns), device=self.dev) 29 | self.nsamples = 0 30 | 31 | def add_batch(self, inp, out): 32 | if DEBUG: 33 | self.inp1 = inp 34 | self.out1 = out 35 | if len(inp.shape) == 2: 36 | inp = inp.unsqueeze(0) 37 | tmp = inp.shape[0] 38 | if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D): 39 | if len(inp.shape) == 3: 40 | inp = inp.reshape((-1, inp.shape[-1])) 41 | inp = inp.t() 42 | if isinstance(self.layer, nn.Conv2d): 43 | unfold = nn.Unfold( 44 | self.layer.kernel_size, 45 | dilation=self.layer.dilation, 46 | padding=self.layer.padding, 47 | stride=self.layer.stride 48 | ) 49 | inp = unfold(inp) 50 | inp = inp.permute([1, 0, 2]) 51 | inp = inp.flatten(1) 52 | self.H *= self.nsamples / (self.nsamples + tmp) 53 | self.nsamples += tmp 54 | # inp = inp.float() 55 | inp = math.sqrt(2 / self.nsamples) * inp.float() 56 | # self.H += 2 / self.nsamples * inp.matmul(inp.t()) 57 | self.H += inp.matmul(inp.t()) 58 | 59 | def fasterquant( 60 | self, blocksize=128, percdamp=.01, groupsize=-1, actorder=False 61 | ): 62 | W = self.layer.weight.data.clone() 63 | if isinstance(self.layer, nn.Conv2d): 64 | W = W.flatten(1) 65 | if isinstance(self.layer, transformers.Conv1D): 66 | W = W.t() 67 | W = W.float() 68 | 69 | tick = time.time() 70 | 71 | if not self.quantizer.ready(): 72 | self.quantizer.find_params(W, weight=True) 73 | 74 | H = self.H 75 | del self.H 76 | dead = torch.diag(H) == 0 77 | H[dead, dead] = 1 78 | W[:, dead] = 0 79 | 80 | if actorder: 81 | perm = torch.argsort(torch.diag(H), descending=True) 82 | W = W[:, perm] 83 | H = H[perm][:, perm] 84 | 85 | Losses = torch.zeros_like(W) 86 | Q = torch.zeros_like(W) 87 | 88 | damp = percdamp * torch.mean(torch.diag(H)) 89 | diag = torch.arange(self.columns, device=self.dev) 90 | H[diag, diag] += damp 91 | H = torch.linalg.cholesky(H) 92 | H = torch.cholesky_inverse(H) 93 | H = torch.linalg.cholesky(H, upper=True) 94 | Hinv = H 95 | 96 | scale = [] 97 | zero = [] 98 | now_idx = 1 99 | 100 | for i1 in range(0, self.columns, blocksize): 101 | i2 = min(i1 + blocksize, self.columns) 102 | count = i2 - i1 103 | 104 | W1 = W[:, i1:i2].clone() 105 | Q1 = torch.zeros_like(W1) 106 | Err1 = torch.zeros_like(W1) 107 | Losses1 = torch.zeros_like(W1) 108 | Hinv1 = Hinv[i1:i2, i1:i2] 109 | 110 | for i in range(count): 111 | w = W1[:, i] 112 
| d = Hinv1[i, i] 113 | 114 | if groupsize != -1: 115 | if (i1 + i) % groupsize == 0: 116 | self.quantizer.find_params(W[:, (i1 + i):(i1 + i + groupsize)], weight=True) 117 | 118 | if ((i1 + i) // groupsize) - now_idx == -1: 119 | scale.append(self.quantizer.scale) 120 | zero.append(self.quantizer.zero) 121 | now_idx += 1 122 | 123 | q = quantize( 124 | w.unsqueeze(1), self.quantizer.scale, self.quantizer.zero, self.quantizer.maxq 125 | ).flatten() 126 | Q1[:, i] = q 127 | Losses1[:, i] = (w - q) ** 2 / d ** 2 128 | 129 | err1 = (w - q) / d 130 | W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0)) 131 | Err1[:, i] = err1 132 | 133 | Q[:, i1:i2] = Q1 134 | Losses[:, i1:i2] = Losses1 / 2 135 | 136 | W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:]) 137 | 138 | if DEBUG: 139 | self.layer.weight.data[:, :i2] = Q[:, :i2] 140 | self.layer.weight.data[:, i2:] = W[:, i2:] 141 | print(torch.sum((self.layer(self.inp1) - self.out1) ** 2)) 142 | print(torch.sum(Losses)) 143 | 144 | torch.cuda.synchronize() 145 | print('time %.2f' % (time.time() - tick)) 146 | print('error', torch.sum(Losses).item()) 147 | 148 | if actorder: 149 | invperm = torch.argsort(perm) 150 | Q = Q[:, invperm] 151 | 152 | if isinstance(self.layer, transformers.Conv1D): 153 | Q = Q.t() 154 | self.layer.weight.data = Q.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype) 155 | if DEBUG: 156 | print(torch.sum((self.layer(self.inp1) - self.out1) ** 2)) 157 | 158 | if scale == []: 159 | scale.append(self.quantizer.scale) 160 | zero.append(self.quantizer.zero) 161 | scale = torch.cat(scale,dim=1) 162 | zero = torch.cat(zero,dim=1) 163 | return scale,zero 164 | 165 | def free(self): 166 | if DEBUG: 167 | self.inp1 = None 168 | self.out1 = None 169 | self.H = None 170 | self.Losses = None 171 | self.Trace = None 172 | torch.cuda.empty_cache() 173 | -------------------------------------------------------------------------------- /server/gptq/modelutils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | DEV = torch.device('cuda:0') 6 | 7 | 8 | def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''): 9 | if type(module) in layers: 10 | return {name: module} 11 | res = {} 12 | for name1, child in module.named_children(): 13 | res.update(find_layers( 14 | child, layers=layers, name=name + '.' 
+ name1 if name != '' else name1 15 | )) 16 | return res 17 | -------------------------------------------------------------------------------- /server/gptq/quant.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import math 5 | 6 | def quantize(x, scale, zero, maxq): 7 | if maxq < 0: 8 | return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero 9 | q = torch.clamp(torch.round(x / scale) + zero, 0, maxq) 10 | return scale * (q - zero) 11 | 12 | class Quantizer(nn.Module): 13 | 14 | def __init__(self, shape=1): 15 | super(Quantizer, self).__init__() 16 | self.register_buffer('maxq', torch.tensor(0)) 17 | self.register_buffer('scale', torch.zeros(shape)) 18 | self.register_buffer('zero', torch.zeros(shape)) 19 | 20 | def configure( 21 | self, 22 | bits, perchannel=False, sym=True, 23 | mse=False, norm=2.4, grid=100, maxshrink=.8, 24 | trits=False 25 | ): 26 | 27 | self.maxq = torch.tensor(2 ** bits - 1) 28 | self.perchannel = perchannel 29 | self.sym = sym 30 | self.mse = mse 31 | self.norm = norm 32 | self.grid = grid 33 | self.maxshrink = maxshrink 34 | if trits: 35 | self.maxq = torch.tensor(-1) 36 | 37 | def find_params(self, x, weight=False): 38 | dev = x.device 39 | self.maxq = self.maxq.to(dev) 40 | 41 | shape = x.shape 42 | if self.perchannel: 43 | if weight: 44 | x = x.flatten(1) 45 | else: 46 | if len(shape) == 4: 47 | x = x.permute([1, 0, 2, 3]) 48 | x = x.flatten(1) 49 | if len(shape) == 3: 50 | x = x.reshape((-1, shape[-1])).t() 51 | if len(shape) == 2: 52 | x = x.t() 53 | else: 54 | x = x.flatten().unsqueeze(0) 55 | 56 | tmp = torch.zeros(x.shape[0], device=dev) 57 | xmin = torch.minimum(x.min(1)[0], tmp) 58 | xmax = torch.maximum(x.max(1)[0], tmp) 59 | 60 | if self.sym: 61 | xmax = torch.maximum(torch.abs(xmin), xmax) 62 | tmp = xmin < 0 63 | if torch.any(tmp): 64 | xmin[tmp] = -xmax[tmp] 65 | tmp = (xmin == 0) & (xmax == 0) 66 | xmin[tmp] = -1 67 | xmax[tmp] = +1 68 | 69 | if self.maxq < 0: 70 | self.scale = xmax 71 | self.zero = xmin 72 | else: 73 | self.scale = (xmax - xmin) / self.maxq 74 | if self.sym: 75 | self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2) 76 | else: 77 | self.zero = torch.round(-xmin / self.scale) 78 | 79 | if self.mse: 80 | best = torch.full([x.shape[0]], float('inf'), device=dev) 81 | for i in range(int(self.maxshrink * self.grid)): 82 | p = 1 - i / self.grid 83 | xmin1 = p * xmin 84 | xmax1 = p * xmax 85 | scale1 = (xmax1 - xmin1) / self.maxq 86 | zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero 87 | q = quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq) 88 | q -= x 89 | q.abs_() 90 | q.pow_(self.norm) 91 | err = torch.sum(q, 1) 92 | tmp = err < best 93 | if torch.any(tmp): 94 | best[tmp] = err[tmp] 95 | self.scale[tmp] = scale1[tmp] 96 | self.zero[tmp] = zero1[tmp] 97 | if not self.perchannel: 98 | if weight: 99 | tmp = shape[0] 100 | else: 101 | tmp = shape[1] if len(shape) != 3 else shape[2] 102 | self.scale = self.scale.repeat(tmp) 103 | self.zero = self.zero.repeat(tmp) 104 | 105 | if weight: 106 | shape = [-1] + [1] * (len(shape) - 1) 107 | self.scale = self.scale.reshape(shape) 108 | self.zero = self.zero.reshape(shape) 109 | return 110 | if len(shape) == 4: 111 | self.scale = self.scale.reshape((1, -1, 1, 1)) 112 | self.zero = self.zero.reshape((1, -1, 1, 1)) 113 | if len(shape) == 3: 114 | self.scale = self.scale.reshape((1, 1, -1)) 115 | self.zero = self.zero.reshape((1, 1, -1)) 116 | 
if len(shape) == 2: 117 | self.scale = self.scale.unsqueeze(0) 118 | self.zero = self.zero.unsqueeze(0) 119 | 120 | def quantize(self, x): 121 | if self.ready(): 122 | return quantize(x, self.scale, self.zero, self.maxq) 123 | return x 124 | 125 | def enabled(self): 126 | return self.maxq > 0 127 | 128 | def ready(self): 129 | return torch.all(self.scale != 0) 130 | 131 | 132 | try: 133 | import quant_cuda 134 | except: 135 | print('CUDA extension not installed.') 136 | 137 | # Assumes layer is perfectly divisible into 256 * 256 blocks 138 | class QuantLinear(nn.Module): 139 | def __init__(self, bits, groupsize, infeatures, outfeatures, faster=False, kernel_switch_threshold=128): 140 | super().__init__() 141 | if bits not in [2,3,4,8]: 142 | raise NotImplementedError("Only 2,3,4,8 bits are supported.") 143 | self.infeatures = infeatures 144 | self.outfeatures = outfeatures 145 | self.bits = bits 146 | if groupsize != -1 and groupsize < 32 and groupsize != int(math.pow(2,int(math.log2(groupsize)))): 147 | raise NotImplementedError("groupsize supports powers of 2 greater than 32. (e.g. : 32,64,128,etc)") 148 | groupsize = groupsize if groupsize != -1 else infeatures 149 | self.groupsize = groupsize 150 | self.register_buffer('qzeros', torch.zeros((math.ceil(infeatures/groupsize),outfeatures // 256 * (bits * 8)), dtype=torch.int)) 151 | self.register_buffer('scales', torch.zeros((math.ceil(infeatures/groupsize),outfeatures))) 152 | self.register_buffer('bias', torch.zeros(outfeatures)) 153 | self.register_buffer( 154 | 'qweight', torch.zeros((infeatures // 32 * bits, outfeatures), dtype=torch.int) 155 | ) 156 | self.half_indim = self.infeatures // 2 157 | self._initialized_quant_state = False 158 | self.faster = faster 159 | # kernel_switch_threshold is the cutoff input size after which matmul 160 | # is performed by unpacking the weights and using torch.matmul 161 | self.kernel_switch_threshold = kernel_switch_threshold 162 | if isinstance(self.kernel_switch_threshold, bool): 163 | self.kernel_switch_threshold = 128 if self.kernel_switch_threshold else None 164 | if not self.kernel_switch_threshold is None: 165 | # Buffers for bit shifting weight unpacking 166 | if self.bits == 2: 167 | self.register_buffer( 168 | 'wf1', 169 | torch.tensor([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], dtype=torch.int32).unsqueeze(0).unsqueeze(2), 170 | persistent=False 171 | ) 172 | self.register_buffer( 173 | 'wf2', 174 | torch.tensor([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], dtype=torch.int32).unsqueeze(0).unsqueeze(0), 175 | persistent=False 176 | ) 177 | elif self.bits == 3: 178 | self.register_buffer('wf1', torch.tensor([ 179 | [0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 0], 180 | [0, 1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31], 181 | [0, 2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 0], 182 | ], dtype=torch.int32).reshape(1,3,12,1), persistent=False) 183 | self.register_buffer('wf2', torch.tensor([ 184 | [0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 0], 185 | [0, 1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31], 186 | [0, 2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 0], 187 | ], dtype=torch.int32).reshape(1,1,3,12), persistent=False) 188 | elif self.bits == 4: 189 | self.register_buffer( 190 | 'wf1', 191 | torch.tensor([0, 4, 8, 12, 16, 20, 24, 28], dtype=torch.int32).unsqueeze(0).unsqueeze(2), 192 | persistent=False 193 | ) 194 | self.register_buffer( 195 | 'wf2', 196 | torch.tensor([0, 4, 8, 12, 16, 20, 24, 28], dtype=torch.int32).unsqueeze(0).unsqueeze(0), 197 | persistent=False 198 | ) 199 | elif 
self.bits == 8: 200 | self.register_buffer( 201 | 'wf1', 202 | torch.tensor([0, 8, 16, 24], dtype=torch.int32).unsqueeze(0).unsqueeze(2), 203 | persistent=False 204 | ) 205 | self.register_buffer( 206 | 'wf2', 207 | torch.tensor([0, 8, 16, 24], dtype=torch.int32).unsqueeze(0).unsqueeze(0), 208 | persistent=False 209 | ) 210 | 211 | def pack(self, linear, scales, zeros): 212 | scales = scales.t().contiguous() 213 | zeros = zeros.t().contiguous() 214 | scale_zeros = zeros * scales 215 | self.scales = scales.clone() 216 | if linear.bias is not None: 217 | self.bias = linear.bias.clone() 218 | 219 | intweight = [] 220 | for idx in range(self.infeatures): 221 | g_idx = idx // self.groupsize 222 | intweight.append(torch.round((linear.weight.data[:,idx] + scale_zeros[g_idx]) / self.scales[g_idx]).to(torch.int)[:,None]) 223 | intweight = torch.cat(intweight,dim=1) 224 | intweight = intweight.t().contiguous() 225 | intweight = intweight.numpy().astype(np.uint32) 226 | qweight = np.zeros( 227 | (intweight.shape[0] // 32 * self.bits, intweight.shape[1]), dtype=np.uint32 228 | ) 229 | i = 0 230 | row = 0 231 | while row < qweight.shape[0]: 232 | if self.bits in [2,4,8]: 233 | for j in range(i, i + (32//self.bits)): 234 | qweight[row] |= intweight[j] << (self.bits * (j - i)) 235 | i += 32//self.bits 236 | row += 1 237 | elif self.bits == 3: 238 | for j in range(i, i + 10): 239 | qweight[row] |= intweight[j] << (3 * (j - i)) 240 | i += 10 241 | qweight[row] |= intweight[i] << 30 242 | row += 1 243 | qweight[row] |= (intweight[i] >> 2) & 1 244 | i += 1 245 | for j in range(i, i + 10): 246 | qweight[row] |= intweight[j] << (3 * (j - i) + 1) 247 | i += 10 248 | qweight[row] |= intweight[i] << 31 249 | row += 1 250 | qweight[row] |= (intweight[i] >> 1) & 0x3 251 | i += 1 252 | for j in range(i, i + 10): 253 | qweight[row] |= intweight[j] << (3 * (j - i) + 2) 254 | i += 10 255 | row += 1 256 | else: 257 | raise NotImplementedError("Only 2,3,4,8 bits are supported.") 258 | 259 | qweight = qweight.astype(np.int32) 260 | self.qweight = torch.from_numpy(qweight) 261 | 262 | zeros -= 1; 263 | zeros = zeros.numpy().astype(np.uint32) 264 | qzeros = np.zeros((zeros.shape[0], zeros.shape[1] // 256 * (self.bits * 8)), dtype=np.uint32) 265 | i = 0 266 | col = 0 267 | while col < qzeros.shape[1]: 268 | if self.bits in [2,4,8]: 269 | for j in range(i, i + (32//self.bits)): 270 | qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i)) 271 | i += 32//self.bits 272 | col += 1 273 | elif self.bits == 3: 274 | for j in range(i, i + 10): 275 | qzeros[:, col] |= zeros[:, j] << (3 * (j - i)) 276 | i += 10 277 | qzeros[:, col] |= zeros[:, i] << 30 278 | col += 1 279 | qzeros[:, col] |= (zeros[:, i] >> 2) & 1 280 | i += 1 281 | for j in range(i, i + 10): 282 | qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 1) 283 | i += 10 284 | qzeros[:, col] |= zeros[:, i] << 31 285 | col += 1 286 | qzeros[:, col] |= (zeros[:, i] >> 1) & 0x3 287 | i += 1 288 | for j in range(i, i + 10): 289 | qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 2) 290 | i += 10 291 | col += 1 292 | else: 293 | raise NotImplementedError("Only 2,3,4,8 bits are supported.") 294 | 295 | qzeros = qzeros.astype(np.int32) 296 | self.qzeros = torch.from_numpy(qzeros) 297 | 298 | def forward(self, x): 299 | if not self._initialized_quant_state: 300 | # Do we even have a bias? Check for at least one non-zero element. 301 | if self.bias is not None and bool(torch.any(self.bias != 0)): 302 | # Then make sure it's the right type. 
303 |                 self.bias.data = self.bias.data.to(torch.float32)
304 |             else:
305 |                 self.bias = None
306 | 
307 |         if self.kernel_switch_threshold is not None and (x.shape[0] * x.shape[1]) >= self.kernel_switch_threshold:
308 |             if self.bits == 2:
309 |                 # Unpack 2bit weights
310 | 
311 |                 weight = torch.bitwise_right_shift(torch.unsqueeze(self.qweight, 1).expand(-1, 16, -1), self.wf1).to(torch.int8)
312 |                 torch.bitwise_and(weight, 0x00000003, out=weight)
313 |                 weight = weight.reshape(-1, self.groupsize, weight.shape[2])
314 | 
315 |                 zeros = torch.bitwise_right_shift(torch.unsqueeze(self.qzeros, 2).expand(-1, -1, 16), self.wf2).to(torch.int8)
316 |                 torch.bitwise_and(zeros, 0x00000003, out=zeros)
317 |                 zeros = zeros + 1
318 |                 zeros = zeros.reshape(-1, 1, zeros.shape[1] * zeros.shape[2])
319 | 
320 |                 scales = self.scales
321 |                 scales = scales.reshape(-1, 1, scales.shape[-1])
322 | 
323 |                 weights = (scales * (weight - zeros))
324 |                 weights = weights.reshape(weights.shape[0] * weight.shape[1], weights.shape[2])
325 |                 x = torch.matmul(x, weights.to(x.dtype))
326 |                 x = x + self.bias if self.bias is not None else x
327 |                 return x
328 | 
329 |             elif self.bits == 3:
330 | 
331 |                 # Unpack 3bit weights
332 |                 weight = self.qweight.reshape(self.qweight.shape[0]//3, 3, 1, self.qweight.shape[1]).expand(-1, -1, 12, -1)
333 |                 weight = (weight >> self.wf1)&0x7
334 |                 weight[:,0,10] = (weight[:,0,10]&0x3) | ((weight[:,1,0] << 2)&0x4)
335 |                 weight[:,1,11] = (weight[:,1,11]&0x1) | ((weight[:,2,0] << 1)&0x6)
336 |                 weight = weight & 0x7
337 |                 weight = torch.cat([weight[:,0,:11], weight[:,1,1:12], weight[:,2,1:11]], dim=1)
338 |                 weight = weight.reshape(-1, self.groupsize, weight.shape[2])
339 | 
340 |                 zeros = self.qzeros.reshape(self.qzeros.shape[0], self.qzeros.shape[1]//3, 3, 1).expand(-1, -1, -1, 12)
341 |                 zeros = (zeros >> self.wf2)
342 |                 zeros[:,:,0,10] = (zeros[:,:,0,10]&0x3) | ((zeros[:,:,1,0] << 2)&0x4)
343 |                 zeros[:,:,1,11] = (zeros[:,:,1,11]&0x1) | ((zeros[:,:,2,0] << 1)&0x6)
344 |                 zeros = zeros & 0x7
345 |                 zeros = torch.cat([zeros[:,:,0,:11], zeros[:,:,1,1:12], zeros[:,:,2,1:11]], dim=2)
346 |                 zeros = zeros.reshape(-1, 1, zeros.shape[1] * zeros.shape[2])
347 |                 zeros = zeros + 1
348 | 
349 |                 scales = self.scales
350 |                 scales = scales.reshape(-1, 1, scales.shape[-1])
351 | 
352 |                 weights = (scales * (weight - zeros))
353 |                 weights = weights.reshape(weights.shape[0] * weight.shape[1], weights.shape[2])
354 |                 x = torch.matmul(x, weights.to(x.dtype))
355 |                 x = x + self.bias if self.bias is not None else x
356 |                 return x
357 | 
358 |             elif self.bits == 4:
359 |                 # Unpack 4bit weights
360 |                 weight = torch.bitwise_right_shift(torch.unsqueeze(self.qweight, 1).expand(-1, 8, -1), self.wf1).to(torch.int8)
361 |                 torch.bitwise_and(weight, 0x0000000F, out=weight)
362 |                 weight = weight.reshape(-1, self.groupsize, weight.shape[2])
363 | 
364 |                 zeros = torch.bitwise_right_shift(torch.unsqueeze(self.qzeros, 2).expand(-1, -1, 8), self.wf2).to(torch.int8)
365 |                 torch.bitwise_and(zeros, 0x0000000F, out=zeros)
366 |                 zeros = zeros + 1
367 |                 zeros = zeros.reshape(-1, 1, zeros.shape[1] * zeros.shape[2])
368 | 
369 |                 scales = self.scales
370 |                 scales = scales.reshape(-1, 1, scales.shape[-1])
371 | 
372 |                 weights = (scales * (weight - zeros))
373 |                 weights = weights.reshape(weights.shape[0] * weight.shape[1], weights.shape[2])
374 |                 x = torch.matmul(x, weights.to(x.dtype))
375 |                 x = x + self.bias if self.bias is not None else x
376 |                 return x
377 | 
378 |             elif self.bits == 8:
379 |                 # Unpack 8bit weights
380 |                 weight = torch.bitwise_right_shift(torch.unsqueeze(self.qweight, 1).expand(-1, 4, -1), self.wf1).to(torch.int8)
381 |                 torch.bitwise_and(weight, 0x000000FF, out=weight)
382 |                 weight = weight.reshape(-1, self.groupsize, weight.shape[2])
383 | 
384 |                 zeros = torch.bitwise_right_shift(torch.unsqueeze(self.qzeros, 2).expand(-1, -1, 4), self.wf2).to(torch.int8)
385 |                 torch.bitwise_and(zeros, 0x000000FF, out=zeros)
386 |                 zeros = zeros + 1
387 |                 zeros = zeros.reshape(-1, 1, zeros.shape[1] * zeros.shape[2])
388 | 
389 |                 scales = self.scales
390 |                 scales = scales.reshape(-1, 1, scales.shape[-1])
391 | 
392 |                 weights = (scales * (weight - zeros))
393 |                 weights = weights.reshape(weights.shape[0] * weight.shape[1], weights.shape[2])
394 |                 x = torch.matmul(x, weights.to(x.dtype))
395 |                 x = x + self.bias if self.bias is not None else x
396 |                 return x
397 |             else:
398 |                 raise NotImplementedError("Only 2,3,4,8 bits are supported.")
399 | 
400 |         outshape = list(x.shape)
401 |         outshape[-1] = self.outfeatures
402 |         x = x.reshape(-1, x.shape[-1])
403 |         if self.bias is None:
404 |             y = torch.zeros(x.shape[0], outshape[-1], dtype=torch.float32, device=x.device)
405 |         else:
406 |             y = self.bias.clone().repeat(x.shape[0], 1)
407 | 
408 |         output_dtype = x.dtype
409 |         if self.faster:
410 |             x = x.half()
411 |             if self.bits == 2:
412 |                 quant_cuda.vecquant2matmul_faster(x, self.qweight, y, self.scales, self.qzeros, self.groupsize, self.half_indim)
413 |             elif self.bits == 3:
414 |                 quant_cuda.vecquant3matmul_faster(x, self.qweight, y, self.scales, self.qzeros, self.groupsize, self.half_indim)
415 |             elif self.bits == 4:
416 |                 quant_cuda.vecquant4matmul_faster(x, self.qweight, y, self.scales, self.qzeros, self.groupsize, self.half_indim)
417 |             else:
418 |                 raise NotImplementedError("Only 2,3,4 bits are supported in faster mode.")
419 |         else:
420 |             x = x.float()
421 |             if self.bits == 2:
422 |                 quant_cuda.vecquant2matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)
423 |             elif self.bits == 3:
424 |                 quant_cuda.vecquant3matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)
425 |             elif self.bits == 4:
426 |                 quant_cuda.vecquant4matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)
427 |             elif self.bits == 8:
428 |                 quant_cuda.vecquant8matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)
429 |             else:
430 |                 raise NotImplementedError("Only 2,3,4,8 bits are supported.")
431 |         y = y.to(output_dtype)
432 |         return y.reshape(outshape)
433 | 
434 | def make_quant(module, names, bits, groupsize, faster=False, name='', kernel_switch_threshold=128):
435 |     if isinstance(module, QuantLinear):
436 |         return
437 |     for attr in dir(module):
438 |         tmp = getattr(module, attr)
439 |         name1 = name + '.' + attr if name != '' else attr
440 |         if name1 in names:
441 |             delattr(module, attr)
442 |             setattr(
443 |                 module, attr, QuantLinear(bits, groupsize, tmp.in_features, tmp.out_features, faster=faster, kernel_switch_threshold=kernel_switch_threshold)
444 |             )
445 |     for name1, child in module.named_children():
446 |         make_quant(child, names, bits, groupsize, faster, name + '.' + name1 if name != '' else name1, kernel_switch_threshold=kernel_switch_threshold)
447 | 
--------------------------------------------------------------------------------
/server/model.py:
--------------------------------------------------------------------------------
1 | import time
2 | import torch
3 | import torch.nn as nn
4 | from .gptq.gptq import *
5 | from .gptq.modelutils import *
6 | from .gptq.quant import *
7 | 
8 | from transformers import AutoTokenizer
9 | import os
10 | 
11 | def get_llama(model):
12 |     import torch
13 |     def skip(*args, **kwargs):
14 |         pass
15 |     torch.nn.init.kaiming_uniform_ = skip
16 |     torch.nn.init.uniform_ = skip
17 |     torch.nn.init.normal_ = skip
18 |     from transformers import LlamaForCausalLM
19 |     model = LlamaForCausalLM.from_pretrained(model, torch_dtype='auto')
20 |     model.seqlen = 2048
21 |     return model
22 | 
23 | def load_quant(model, checkpoint, wbits, groupsize):
24 |     from transformers import LlamaConfig, LlamaForCausalLM
25 |     config = LlamaConfig.from_pretrained(model)
26 |     def noop(*args, **kwargs):
27 |         pass
28 |     torch.nn.init.kaiming_uniform_ = noop
29 |     torch.nn.init.uniform_ = noop
30 |     torch.nn.init.normal_ = noop
31 | 
32 |     import transformers
33 |     transformers.modeling_utils._init_weights = False
34 |     torch.set_default_dtype(torch.half)
35 |     model = LlamaForCausalLM(config)
36 |     torch.set_default_dtype(torch.float)
37 |     model = model.eval()
38 |     layers = find_layers(model)
39 |     for name in ['lm_head']:
40 |         if name in layers:
41 |             del layers[name]
42 |     make_quant(model, layers, wbits, groupsize)
43 | 
44 |     print('Loading model ...')
45 |     if checkpoint.endswith('.safetensors'):
46 |         from safetensors.torch import load_file as safe_load
47 |         model.load_state_dict(safe_load(checkpoint))
48 |     else:
49 |         model.load_state_dict(torch.load(checkpoint))
50 |     model.seqlen = 2048
51 |     print('Done.')
52 | 
53 |     return model
54 | 
55 | def load_model_main(model_para, checkpoint_para, device):
56 |     model = load_quant(model_para, checkpoint_para, 4, 128)
57 |     model.to(device)
58 |     tokenizer = AutoTokenizer.from_pretrained(model_para)
59 |     return model, tokenizer
60 | 
61 | 
62 | 
--------------------------------------------------------------------------------
/server/prompt.py:
--------------------------------------------------------------------------------
1 | PROMPT_ADDMEM = """### Instruction:
2 | On a scale of 1 to 10, where 1 is purely mundane (e.g., brushing teeth, making bed) and 10 is extremely poignant (e.g., a break up, college acceptance), rate the likely poignancy of the following piece of memory. Respond with a single integer.
3 | 
4 | ### Input:
5 | Memory: {{mem}}
6 | 
7 | ### Response:
8 | Rating: {{gen 'rate' pattern='[0-9]+' stop='\n'}}"""
9 | 
10 | PROMPT_SALIENT = """### Instruction:
11 | {{recent_memories}}
12 | 
13 | ### Input:
14 | Given only the information above, what are the 3 most salient high-level questions we can answer about the subjects in the statements?
15 | 
16 | ### Response:
17 | {{#geneach 'items' num_iterations=3}}{{gen 'this' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}
18 | {{/geneach}}"""
19 | 
20 | PROMPT_INSIGHTS = """### Instruction:
21 | {{statements}}
22 | 
23 | ### Input:
24 | What 3 high-level insights can you infer from the above statements?
25 | 
26 | ### Response:
27 | {{#geneach 'items' num_iterations=3}}{{gen 'this' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}
28 | {{/geneach}}"""
29 | 
30 | PROMPT_CHARACTERISTICS = """### Instruction:
31 | {{statements}}
32 | 
33 | ### Input:
34 | How would one describe {{name}}'s core characteristics given the following statements?
35 | 
36 | ### Response:
37 | Based on the given statements, {{gen 'res' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}"""
38 | 
39 | PROMPT_OCCUPATION = """### Instruction:
40 | {{statements}}
41 | 
42 | ### Input:
43 | How would one describe {{name}}'s current daily occupation given the following statements?
44 | 
45 | ### Response:
46 | Based on the given statements, {{gen 'res' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}"""
47 | 
48 | PROMPT_FEELING = """### Instruction:
49 | {{statements}}
50 | 
51 | ### Input:
52 | How would one describe {{name}}'s feelings about his/her recent progress in life given the following statements?
53 | 
54 | ### Response:
55 | Based on the given statements, {{gen 'res' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}"""
56 | 
57 | PROMPT_PLAN = """### Instruction:
58 | 
59 | Example of a plan:
60 | Here is {{name}}'s plan from now at 7:14:
61 | [From 7:14 to 7:45]: Wake up and complete the morning routine
62 | [From 7:45 to 8:35]: Eat breakfast
63 | [From 8:35 to 17:10]: Go to school and study
64 | [From 17:10 to 22:30]: Play CSGO
65 | [From 22:30 to 7:30]: Go to sleep
66 | 
67 | ### Input:
68 | Today is {{current_time}}. Please make a plan for today for {{name}} in broad strokes, given the summary:
69 | {{summary}}
70 | 
71 | ### Response:
72 | Here is {{name}}'s plan from now at {{current_time}}:
73 | [From {{now}} to {{gen 'to' pattern='[0-9]+:[0-9][0-9]' stop=']'}}]: {{gen 'task' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}
74 | {{#geneach 'items' num_iterations=3}}[From {{gen 'this.from' pattern='[0-9]+:[0-9][0-9]' stop=' '}} to {{gen 'this.to' pattern='[0-9]+:[0-9][0-9]' stop=']'}}]: {{gen 'this.task' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}
75 | {{/geneach}}"""
76 | 
77 | PROMPT_CONTEXT = """### Instruction:
78 | Summarize these statements.
79 | 
80 | Example:
81 | Given statements:
82 | - Gosun has power, but he is struggling to deal with living costs
83 | - Gosun sees Max is sick
84 | - Gosun has a dog named Max
85 | - Bob is in danger
86 | 
87 | Focus on Gosun and Max and the statement: "Max is sick".
88 | 
89 | Summary: Gosun has a dog named Max, who is sick. Gosun has power, but he is struggling to deal with living costs. His friend, Bob, is in danger.
90 | 
91 | ### Input:
92 | Given statements:
93 | {{statements}}
94 | 
95 | Summarize these statements, focusing on {{name}} and {{observed_entity}} and the statement: "{{entity_status}}".
96 | 
97 | ### Response:
98 | Summary: {{gen 'context' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 max_tokens=300 stop='\n'}}"""
99 | 
100 | PROMPT_REACT = """### Instruction:
101 | {{summary}}
102 | 
103 | It is {{current_time}}.
104 | {{name}}'s status: {{status}}
105 | Observation: {{observation}}
106 | 
107 | Summary of relevant context from {{name}}'s memory: {{context}}
108 | 
109 | ### Input:
110 | Should {{name}} react to the observation, and if so, what would be an appropriate reaction?
111 | 
112 | ### Response:
113 | Reaction: {{select 'reaction' options=valid_opts}}.
114 | Appropriate reaction: {{gen 'result' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}"""
115 | 
116 | PROMPT_REPLAN = """### Instruction:
117 | 
118 | Example of a plan for Tim:
119 | It is Friday, June 09, 2023, 20:07 now
120 | Tim's status: Tim is at home
121 | Observation: Tim's mom is sick
122 | Tim's reaction: Tim should check whether his mother is okay and give her some medicine if needed.
123 | Here is Tim's plan from now at 20:07:
124 | [From 20:07 to 20:45]: Check whether Tim's mother is okay, find some medicine
125 | [From 20:45 to 22:30]: Make some food
126 | [From 22:30 to 7:30]: Go to sleep
127 | 
128 | ### Input:
129 | {{summary}}
130 | 
131 | It is {{current_time}} now. Please make a plan from now for {{name}} in broad strokes, given his/her reaction.
132 | 
133 | It is {{current_time}} now.
134 | {{name}}'s status: {{status}}
135 | Observation: {{observation}}
136 | {{name}}'s reaction: {{reaction}}
137 | 
138 | ### Response:
139 | Here is {{name}}'s plan from now at {{current_time}}:
140 | [From {{now}} to {{gen 'to' pattern='[0-9]+:[0-9][0-9]' stop=']'}}]: {{gen 'task' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}
141 | {{#geneach 'items' num_iterations=3}}[From {{gen 'this.from' pattern='[0-9]+:[0-9][0-9]' stop=' '}} to {{gen 'this.to' pattern='[0-9]+:[0-9][0-9]' stop=']'}}]: {{gen 'this.task' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='\n'}}
142 | {{/geneach}}"""
143 | 
144 | PROMPT_DIALOGUE = """### Instruction:
145 | {{summary}}
146 | 
147 | It is {{current_time}}.
148 | {{name}}'s status: {{status}}
149 | Observation: {{observation}}
150 | 
151 | Summary of relevant context from {{name}}'s memory: {{context}}
152 | 
153 | Example of dialogue:
154 | A: Wow, that is a nice haircut
155 | B: Thank you! How is your school project?
156 | A: I'm still trying.
157 | B: Good luck.
158 | 
159 | ### Input:
160 | {{name}}'s reaction: {{reaction}}
161 | What would {{name}} say to {{observed_entity}}? Make a short dialogue.
162 | 
163 | ### Response:
164 | Here is the short dialogue: {{gen 'dialogue' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99}}"""
165 | 
166 | PROMPT_INTERVIEW = """### Instruction:
167 | {{summary}}
168 | 
169 | It is {{current_time}} now.
170 | {{name}}'s status: {{status}}
171 | 
172 | Summary of relevant context from {{name}}'s memory:
173 | {{context}}
174 | 
175 | ### Input:
176 | The {{user}} says "{{question}}". How should {{name}} respond?
177 | 
178 | ### Response:
179 | Here is the response from {{name}}: "{{gen 'response' top_k=30 top_p=0.18 repetition_penalty=1.15 temperature=1.99 stop='"'}}\""""
--------------------------------------------------------------------------------
/server/time_weighted_retriever.py:
--------------------------------------------------------------------------------
1 | """Retriever that combines embedding similarity with recency in retrieving values."""
2 | import datetime
3 | from copy import deepcopy
4 | from typing import Any, Dict, List, Optional, Tuple
5 | 
6 | from pydantic import BaseModel, Field
7 | 
8 | from langchain.schema import BaseRetriever, Document
9 | from langchain.vectorstores.base import VectorStore
10 | import numpy as np
11 | 
12 | def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
13 |     """Get the hours passed between two datetime objects."""
14 |     return (time - ref_time).total_seconds() / 3600
15 | 
16 | 
17 | class TimeWeightedVectorStoreRetrieverModified(BaseRetriever, BaseModel):
18 |     """Retriever combining embedding similarity with recency."""
19 | 
20 |     vectorstore: VectorStore
21 |     """The vectorstore to store documents and determine salience."""
22 | 
23 |     search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
24 |     """Keyword arguments to pass to the vectorstore similarity search."""
25 | 
26 |     # TODO: abstract as a queue
27 |     memory_stream: List[Document] = Field(default_factory=list)
28 |     """The memory_stream of documents to search through."""
29 | 
30 |     decay_rate: float = Field(default=0.01)
31 |     """The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""
32 | 
33 |     k: int = 4
34 |     """The maximum number of documents to retrieve in a given call."""
35 | 
36 |     other_score_keys: List[str] = []
37 |     """Other keys in the metadata to factor into the score, e.g. 'importance'."""
38 | 
39 |     default_salience: Optional[float] = None
40 |     """The salience to assign memories not retrieved from the vector store.
41 | 
42 |     None assigns no salience to documents not fetched from the vector store.
43 | """ 44 | 45 | class Config: 46 | """Configuration for this pydantic object.""" 47 | 48 | arbitrary_types_allowed = True 49 | 50 | def _get_combined_score( 51 | self, 52 | document: Document, 53 | vector_relevance: Optional[float], 54 | current_time: datetime.datetime, 55 | ) -> float: 56 | """Return the combined score for a document.""" 57 | hours_passed = _get_hours_passed( 58 | current_time, 59 | document.metadata["last_accessed_at"], 60 | ) 61 | score = (1.0 - self.decay_rate) ** hours_passed 62 | for key in self.other_score_keys: 63 | if key in document.metadata: 64 | score += document.metadata[key] 65 | if vector_relevance is not None: 66 | score += vector_relevance 67 | return score 68 | 69 | def _get_combined_score_list( 70 | self, 71 | document: Document, 72 | vector_relevance: Optional[float], 73 | current_time: datetime.datetime, 74 | ) -> float: 75 | """Return the combined score for a document.""" 76 | hours_passed = _get_hours_passed( 77 | current_time, 78 | document.metadata["last_accessed_at"], 79 | ) 80 | if hours_passed < 0: 81 | hours_passed = 0 82 | score_time = (1.0 - self.decay_rate) ** hours_passed 83 | if score_time > 1: 84 | score_time = 1 85 | list_scores = [] 86 | list_scores.append(score_time) 87 | for key in self.other_score_keys: 88 | if key in document.metadata: 89 | # score += document.metadata[key] 90 | list_scores.append(document.metadata[key]) 91 | if vector_relevance is not None: 92 | # score += vector_relevance 93 | list_scores.append(1-vector_relevance) 94 | return list_scores 95 | 96 | def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]: 97 | """Return documents that are salient to the query.""" 98 | docs_and_scores: List[Tuple[Document, float]] 99 | docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores( 100 | query, **self.search_kwargs 101 | ) 102 | results = {} 103 | for fetched_doc, relevance in docs_and_scores: 104 | if "buffer_idx" in fetched_doc.metadata: 105 | buffer_idx = fetched_doc.metadata["buffer_idx"] 106 | doc = self.memory_stream[buffer_idx] 107 | results[buffer_idx] = (doc, relevance) 108 | return results 109 | 110 | def get_relevant_documents(self, query: str, current_time: Optional[Any]) -> List[Document]: 111 | """Return documents that are relevant to the query.""" 112 | if current_time is None: 113 | current_time = datetime.datetime.now() 114 | docs_and_scores = { 115 | doc.metadata["buffer_idx"]: (doc, self.default_salience) 116 | for doc in self.memory_stream[-self.k :] 117 | } 118 | # If a doc is considered salient, update the salience score 119 | docs_and_scores.update(self.get_salient_docs(query)) 120 | rescored_docs = [ 121 | (doc, self._get_combined_score_list(doc, relevance, current_time)) 122 | for doc, relevance in docs_and_scores.values() 123 | ] 124 | 125 | score_array = [b for a,b in rescored_docs] 126 | score_array_np = np.array(score_array) 127 | delta_np = score_array_np.max(axis=0)-score_array_np.min(axis=0) 128 | delta_np = np.where(delta_np == 0, 1, delta_np) 129 | x_norm = (score_array_np-score_array_np.min(axis=0))/delta_np 130 | # Weight importance score less 131 | x_norm[:,0] = x_norm[:,0]*0.9 132 | x_norm[:,1] = x_norm[:,1]*0.9 133 | x_norm_sum = x_norm.sum(axis=1) 134 | rescored_docs = [ 135 | (doc, score) 136 | for (doc, _), score in zip(rescored_docs,x_norm_sum) 137 | ] 138 | 139 | rescored_docs.sort(key=lambda x: x[1], reverse=True) 140 | result = [] 141 | # Ensure frequently accessed memories aren't forgotten 142 | for doc, _ in rescored_docs[: self.k]: 
143 |             # TODO: Update vector store doc once `update` method is exposed.
144 |             buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
145 |             buffered_doc.metadata["last_accessed_at"] = current_time
146 |             result.append(buffered_doc)
147 |         return result
148 | 
149 |     async def aget_relevant_documents(self, query: str) -> List[Document]:
150 |         """Return documents that are relevant to the query."""
151 |         raise NotImplementedError
152 | 
153 |     def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
154 |         """Add documents to vectorstore."""
155 |         current_time = kwargs.get("current_time")
156 |         if current_time is None:
157 |             current_time = datetime.datetime.now()
158 |         # Avoid mutating input documents
159 |         dup_docs = [deepcopy(d) for d in documents]
160 |         for i, doc in enumerate(dup_docs):
161 |             if "last_accessed_at" not in doc.metadata:
162 |                 doc.metadata["last_accessed_at"] = current_time
163 |             if "created_at" not in doc.metadata:
164 |                 doc.metadata["created_at"] = current_time
165 |             doc.metadata["buffer_idx"] = len(self.memory_stream) + i
166 |         self.memory_stream.extend(dup_docs)
167 |         return self.vectorstore.add_documents(dup_docs, **kwargs)
168 | 
169 |     async def aadd_documents(
170 |         self, documents: List[Document], **kwargs: Any
171 |     ) -> List[str]:
172 |         """Add documents to vectorstore."""
173 |         current_time = kwargs.get("current_time")
174 |         if current_time is None:
175 |             current_time = datetime.datetime.now()
176 |         # Avoid mutating input documents
177 |         dup_docs = [deepcopy(d) for d in documents]
178 |         for i, doc in enumerate(dup_docs):
179 |             if "last_accessed_at" not in doc.metadata:
180 |                 doc.metadata["last_accessed_at"] = current_time
181 |             if "created_at" not in doc.metadata:
182 |                 doc.metadata["created_at"] = current_time
183 |             doc.metadata["buffer_idx"] = len(self.memory_stream) + i
184 |         self.memory_stream.extend(dup_docs)
185 |         return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
--------------------------------------------------------------------------------
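For reference, a minimal sketch of the per-group affine quantization that server/gptq/quant.py's QuantLinear inverts with `scales * (weight - zeros)` before its fallback matmul. The min/max scale and zero-point calibration below is an assumption for illustration only (the repo's actual calibration lives in the Quantizer class), and all tensor names are hypothetical:

import torch

bits = 4
maxq = 2 ** bits - 1                  # 4-bit integer grid: 0..15
w = torch.randn(8)                    # one illustrative group of weights

# Asymmetric quantization: map [w.min(), w.max()] onto the integer grid.
scale = (w.max() - w.min()) / maxq
zero = torch.round(-w.min() / scale)  # integer zero-point
q = torch.clamp(torch.round(w / scale) + zero, 0, maxq)

# Dequantization mirrors `scales * (weight - zeros)` in QuantLinear.forward.
w_hat = scale * (q - zero)
print((w - w_hat).abs().max())        # rounding error on the order of scale / 2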
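The templates in server/prompt.py use the {{gen}}/{{select}}/{{#geneach}} syntax of the guidance library. A hedged usage sketch, assuming the pre-0.1 guidance API and purely illustrative model paths; load_model_main is the loader defined in server/model.py above:

import guidance
from server.model import load_model_main
from server.prompt import PROMPT_ADDMEM

# Illustrative paths; substitute a real LLaMA model dir and GPTQ checkpoint.
model, tokenizer = load_model_main("path/to/llama-hf", "path/to/llama-4bit-128g.safetensors", "cuda")
guidance.llm = guidance.llms.Transformers(model=model, tokenizer=tokenizer)

# Executing the template fills {{mem}} and constrains 'rate' to pattern='[0-9]+'.
program = guidance(PROMPT_ADDMEM)
out = program(mem="Started a new job at the research lab")
print(out['rate'])  # e.g. '7'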
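A small worked example of the recency component computed in _get_combined_score_list, before the per-column min-max normalization in get_relevant_documents; the timestamps are illustrative. With the default decay_rate of 0.01, a memory keeps roughly 79% of its recency score after a day and roughly 19% after a week:

import datetime

decay_rate = 0.01  # the retriever's default

def recency_score(last_accessed: datetime.datetime, now: datetime.datetime) -> float:
    hours = max((now - last_accessed).total_seconds() / 3600, 0.0)  # clamped as in the retriever
    return (1.0 - decay_rate) ** hours

now = datetime.datetime.now()
print(recency_score(now - datetime.timedelta(hours=1), now))   # ~0.990
print(recency_score(now - datetime.timedelta(hours=24), now))  # ~0.786
print(recency_score(now - datetime.timedelta(days=7), now))    # ~0.185

Each candidate document then contributes a score vector [recency term, metadata scores such as importance, 1 - vector relevance]; the columns are min-max normalized across candidates, the first two are scaled by 0.9, and the row sums determine the top-k memories returned.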