├── .gitignore ├── LICENSE ├── README.md ├── README_en.md ├── act.py ├── call_llm.py ├── fix_hypothesis.py ├── hypothesis.py ├── judge_finish.py ├── main.py ├── make_memory.py ├── requirements.txt └── word.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Sudy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AutoMATA (Auto Massively Parallel Thought Agent system) 2 | 3 | ![automata](https://github.com/sudy-super/AutoMATA/assets/128252727/14e00e91-e0ef-43f2-b679-3240f16e0c03) 4 | 5 | 6 | LLMに能動的推論と疑似意識を実装するツール 7 | 8 | [English ver.](https://github.com/sudy-super/AutoMATA/blob/main/README_en.md) 9 | 10 | # クイックスタート 11 | 12 | 1. レポジトリをクローン 13 | ``` 14 | git clone https://github.com/sudy-super/AutoMATA.git 15 | ``` 16 | 17 | 2. 該当ディレクトリに移動 18 | 19 | ``` 20 | cd AutoMATA 21 | ``` 22 | 23 | 3. 依存ライブラリをインストール 24 | 25 | ``` 26 | pip install -r requirements.txt 27 | ``` 28 | 29 | 4. call_llm.pyの6行目にOpenAI APIキーを入力 30 | 31 | 5. 
実行
32 | 
33 | ```
34 | python main.py
35 | ```
36 | 
37 | # アーキテクチャ
38 | 
39 | **全体アーキテクチャ**
40 | 
41 | ![スクリーンショット (254)](https://github.com/sudy-super/AutoMATA/assets/128252727/c36f314b-a722-476a-a257-52378204c53e)
42 | 
43 | 
44 | **仮説作成/修正・行動生成モジュール**
45 | 
46 | ![スクリーンショット (255)](https://github.com/sudy-super/AutoMATA/assets/128252727/78c8dd9f-c0c6-4aa3-943b-b63ed1e184f2)
47 | 
48 | 
49 | # 動作例
50 | 
51 | ![スクリーンショット (263)](https://github.com/sudy-super/AutoMATA/assets/128252727/10d0c4a6-fd82-4c0f-b156-83483cf80133)
52 | 
53 | 
54 | # TODO
55 | 
56 | □応答のパース構造の改良
57 | 
58 | □複数のLLMで議決を取る機能を排した軽量モードの実装
59 | 
60 | □ワーキングメモリ(過去の経験の蓄積)を参照する機能の実装
61 | 
62 | □脳内会議メンバーにPaLM2, LLaMA2-70bの追加
63 | 
64 | □マルチモーダル対応
65 | 
--------------------------------------------------------------------------------
/README_en.md:
--------------------------------------------------------------------------------
1 | # AutoMATA (Auto Massively Parallel Thought Agent system)
2 | 
3 | A tool to implement active inference and pseudo-consciousness in LLMs
4 | 
5 | ![automata](https://github.com/sudy-super/AutoMATA/assets/128252727/85717201-23c8-4679-b826-b22c44103179)
6 | 
7 | 
8 | [Japanese ver.](https://github.com/sudy-super/AutoMATA/blob/main/README.md)
9 | 
10 | # Quick Start
11 | 
12 | 1. Clone the repository
13 | 
14 | ```
15 | git clone https://github.com/sudy-super/AutoMATA.git
16 | ```
17 | 
18 | 2. Move into the directory
19 | 
20 | ```
21 | cd AutoMATA
22 | ```
23 | 
24 | 3. Install the requirements
25 | 
26 | ```
27 | pip install -r requirements.txt
28 | ```
29 | 
30 | 4. Enter your OpenAI API key on line 6 of call_llm.py
31 | 
32 | 5. Run
33 | 
34 | ```
35 | python main.py
36 | ```
37 | 
38 | # Architecture
39 | 
40 | **Overview of the Architecture**
41 | 
42 | ![スクリーンショット (254)](https://github.com/sudy-super/AutoMATA/assets/128252727/0c454e0d-a556-41c8-aca4-36294d5cdf1a)
43 | 
44 | 
45 | **Hypothesis Making/Fixing and Action Generation Module**
46 | 
47 | ![スクリーンショット (255)](https://github.com/sudy-super/AutoMATA/assets/128252727/45766039-3bed-48bf-ad1c-cc0111d37d06)
48 | 
49 | 
50 | # Example
51 | 
52 | ![スクリーンショット (263)](https://github.com/sudy-super/AutoMATA/assets/128252727/de030fe5-f250-4da3-845f-4f508098fc8a)
53 | 
54 | 
55 | # TODO
56 | 
57 | □Improvement of the response parsing structure
58 | 
59 | □Implementation of a lightweight mode that eliminates the function of voting by multiple LLMs
60 | 
61 | □Implementation of a function to refer to working memory (accumulated past experience)
62 | 
63 | □Add PaLM2, LLaMA2-70b, etc. to the voting members
64 | 
65 | □Multimodal support
--------------------------------------------------------------------------------
/act.py:
--------------------------------------------------------------------------------
1 | from call_llm import CallLLM
2 | import json
3 | import random
4 | 
5 | class MakeAction:
6 |     def __init__(self):
7 |         self.call_llm = CallLLM()
8 |         self.prompt = """The Hypothesis in User's input is based on the Input. Please take these into account and consider an utterance that can validate the Hypothesis."""
9 |         """
10 |         ユーザー入力の仮説は入力に基づいています。これらを考慮し、仮説を検証できる発言を考えてください。
11 |         """
12 | 
13 |     def making_action(self, input_t, hypothesis):
14 |         prompt = self.prompt
15 | 
16 |         random_llm = random.randint(1, 4)  # Select the brain-conference member (persona) who will propose the utterance
17 |         angel_prompt = """You are an angel. You always try to be positive and tolerant. You are also sincere, ascetic and optimistic about things."""
18 |         devil_prompt = """You are the devil. You constantly try to be critical and intolerant. You are also dishonest, hedonistic, and pessimistic about things."""
19 |         hardboiled_prompt = """You are a hard-boiled person. You are ruthless, not driven by emotions or circumstances, but because you are ruthless, you keep your promises and are dependable."""
20 |         emotional_prompt = """You are an emotional person. You tend to rely on passion and momentum, and you tend to be intense in your joy, anger, and sorrow."""
21 | 
22 |         if random_llm == 1:  # angel
23 |             system_prompt = angel_prompt
24 |         elif random_llm == 2:  # devil
25 |             system_prompt = devil_prompt
26 |         elif random_llm == 3:  # hard-boiled
27 |             system_prompt = hardboiled_prompt
28 |         else:  # emotional
29 |             system_prompt = emotional_prompt
30 | 
31 |         while True:  # Overall loop that keeps gathering feedback for as long as the utterance is voted down
32 |             try:
33 |                 action = action  # Defined as the pre-feedback action only once the members have given feedback
34 |             except NameError:
35 |                 action = None
36 | 
37 |             try:
38 |                 feedback1 = feedback_count[0]  # Define each member's feedback
39 |             except NameError:
40 |                 feedback1 = None
41 |             try:
42 |                 feedback2 = feedback_count[1]
43 |             except NameError:
44 |                 feedback2 = None
45 |             try:
46 |                 feedback3 = feedback_count[2]
47 |             except NameError:
48 |                 feedback3 = None
49 |             if (feedback1 == None) and (feedback2 == None) and (feedback3 == None):
50 |                 feedback = "None"
51 |             else:
52 |                 feedback_list = [feedback1, feedback2, feedback3]
53 |                 # Drop the entries that hold None
54 |                 filtered_feedback = ["- " + one_of_feedbacks for one_of_feedbacks in feedback_list if one_of_feedbacks is not None]
55 |                 # Build a newline-separated string
56 |                 feedback = "\n".join(filtered_feedback)
57 | 
58 |             sys_prompt = system_prompt + "\n\n" + prompt + f"Also, if an Utterance and Feedback exist, please modify the Hypothesis according to the Feedback.\n\n##ExistingUtterance\n{action}\n\n##Feedback\n{feedback}"  # + '\n\n##Example\n{{"action": "From this input, it may be said that the woman was lonely."}}'
59 |             main_prompt = f"##Hypothesis\n{hypothesis}\n\n##Input\n{input_t}"
60 |             action = self.call_llm.call_llms(sys_prompt, main_prompt)  # The utterance is generated as plain text, not JSON, so no parse-retry loop is needed
61 | 
62 |             # The user's input here is the utterance meant to validate the hypothesis; the other members vote on it and give feedback, following the Examples.
63 |             vote_prompt = f'''User's input is an utterance to validate the Hypothesis. Please output your approval or disapproval of this utterance and your feedback with reference to Examples.
64 | 
65 | Please output with JSON format.
66 | 
67 | ##Hypothesis
68 | {hypothesis}
69 | 
70 | ##Examples
71 | {{"vote": "agree", "feedback": "The utterance is appropriate and consistent with the situation."}}
72 | {{"vote": "disagree", "feedback": "That utterance misses the point. We should consider an utterance that is more faithful to the hypothesis."}}'''
73 | 
74 |             if random_llm == 1:
75 |                 while True:  # Retry loop in case JSON parsing fails
76 |                     dev_prompt = devil_prompt + "\n\n" + vote_prompt
77 |                     response1 = self.call_llm.call_llms(dev_prompt, action)
78 | 
79 |                     try:
80 |                         response1 = json.loads(response1)
81 |                         break
82 |                     except Exception as e:
83 |                         # print(e)
84 |                         print("[INFO] 3-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
85 | 
86 |                 while True:  # Retry loop in case JSON parsing fails
87 |                     hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt
88 |                     response2 = self.call_llm.call_llms(hard_prompt, action)
89 |                     try:
90 |                         response2 = json.loads(response2)
91 |                         break
92 |                     except Exception as e:
93 |                         # print(e)
94 |                         print("[INFO] 3-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
95 | 
96 |                 while True:  # Retry loop in case JSON parsing fails
97 |                     emo_prompt = emotional_prompt + "\n\n" + vote_prompt
98 |                     response3 = self.call_llm.call_llms(emo_prompt, action)
99 | 
100 |                     try:
101 |                         response3 = json.loads(response3)
102 |                         break
103 |                     except Exception as e:
104 |                         # print(e)
105 |                         print("[INFO] 3-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
106 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
107 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
108 |                 agree_count = 0
109 |                 disagree_count = 0
110 |                 for item in vote_count:
111 |                     if item == "agree":
112 |                         agree_count += 1
113 |                     elif item == "disagree":
114 |                         disagree_count += 1
115 | 
116 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
117 |                 if agree_count >= 2:
118 |                     print("[Resolution] Approval")
119 |                     break
120 |                 else:
121 |                     print("[Resolution] Rejection")
122 |             elif random_llm == 2:
123 |                 while True:
124 |                     ang_prompt = angel_prompt + "\n\n" + vote_prompt
125 |                     response1 = self.call_llm.call_llms(ang_prompt, action)
126 |                     try:
127 |                         response1 = json.loads(response1)
128 |                         break
129 |                     except Exception as e:
130 |                         # print(e)
131 |                         print("[INFO] 3-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
132 | 
133 |                 while True:
134 |                     hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt
135 |                     response2 = self.call_llm.call_llms(hard_prompt, action)
136 | 
137 |                     try:
138 |                         response2 = json.loads(response2)
139 |                         break
140 |                     except Exception as e:
141 |                         # print(e)
142 |                         print("[INFO] 3-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
143 | 
144 |                 while True:
145 |                     emo_prompt = emotional_prompt + "\n\n" + vote_prompt
146 |                     response3 = self.call_llm.call_llms(emo_prompt, action)
147 | 
148 |                     try:
149 |                         response3 = json.loads(response3)
150 |                         break
151 |                     except Exception as e:
152 |                         # print(e)
153 |                         print("[INFO] 3-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
154 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
155 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
156 |                 agree_count = 0
157 |                 disagree_count = 0
158 |                 for item in vote_count:
159 |                     if item == "agree":
160 |                         agree_count += 1
161 |                     elif item == "disagree":
162 |                         disagree_count += 1
163 | 
164 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
165 |                 if agree_count >= 2:
166 |                     print("[Resolution] Approval")
167 |                     break
168 |                 else:
169 |                     print("[Resolution] Rejection")
170 |             elif random_llm == 3:
171 |                 while True:
172 |                     ang_prompt = angel_prompt + "\n\n" + vote_prompt
173 |                     response1 = self.call_llm.call_llms(ang_prompt, action)
174 | 
175 |                     try:
176 |                         response1 = json.loads(response1)
177 |                         break
178 |                     except Exception as e:
179 |                         # print(e)
180 |                         print("[INFO] 3-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
181 | 
182 |                 while True:
183 |                     dev_prompt = devil_prompt + "\n\n" + vote_prompt
184 |                     response2 = self.call_llm.call_llms(dev_prompt, action)
185 | 
186 |                     try:
187 |                         response2 = json.loads(response2)
188 |                         break
189 |                     except Exception as e:
190 |                         # print(e)
191 |                         print("[INFO] 3-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
192 | 
193 |                 while True:
194 |                     emo_prompt = emotional_prompt + "\n\n" + vote_prompt
195 |                     response3 = self.call_llm.call_llms(emo_prompt, action)
196 | 
197 |                     try:
198 |                         response3 = json.loads(response3)
199 |                         break
200 |                     except Exception as e:
201 |                         # print(e)
202 |                         print("[INFO] 3-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
203 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
204 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
205 |                 agree_count = 0
206 |                 disagree_count = 0
207 |                 for item in vote_count:
208 |                     if item == "agree":
209 |                         agree_count += 1
210 |                     elif item == "disagree":
211 |                         disagree_count += 1
212 | 
213 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
214 |                 if agree_count >= 2:
215 |                     print("[Resolution] Approval")
216 |                     break
217 |                 else:
218 |                     print("[Resolution] Rejection")
219 |             elif random_llm == 4:
220 |                 while True:
221 |                     ang_prompt = angel_prompt + "\n\n" + vote_prompt
222 |                     response1 = self.call_llm.call_llms(ang_prompt, action)
223 | 
224 |                     try:
225 |                         response1 = json.loads(response1)
226 |                         break
227 |                     except Exception as e:
228 |                         # print(e)
229 |                         print("[INFO] 3-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
230 | 
231 |                 while True:
232 |                     dev_prompt = devil_prompt + "\n\n" + vote_prompt
233 |                     response2 = self.call_llm.call_llms(dev_prompt, action)
234 | 
235 |                     try:
236 |                         response2 = json.loads(response2)
237 |                         break
238 |                     except Exception as e:
239 |                         # print(e)
240 |                         print("[INFO] 3-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
241 | 
242 |                 while True:
243 |                     hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt
244 |                     response3 = self.call_llm.call_llms(hard_prompt, action)
245 | 
246 |                     try:
247 |                         response3 = json.loads(response3)
248 |                         break
249 |                     except Exception as e:
250 |                         # print(e)
251 |                         print("[INFO] 3-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
252 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
253 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
254 |                 agree_count = 0
255 |                 disagree_count = 0
256 |                 for item in vote_count:
257 |                     if item == "agree":
258 |                         agree_count += 1
259 |                     elif item == "disagree":
260 |                         disagree_count += 1
261 | 
262 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
263 |                 if agree_count >= 2:
264 |                     print("[Resolution] Approval")
265 |                     break
266 |                 else:
267 |                     print("[Resolution] Rejection")
268 |         return action
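269 | 
270 | # All four persona branches above settle the vote the same way: a simple
271 | # majority, i.e. at least 2 of the 3 reviewing personas must answer "agree".
272 | # A minimal standalone sketch of that tally (illustrative only, not called
273 | # by this module):
274 | #
275 | #   def majority_approves(votes):
276 | #       return sum(1 for vote in votes if vote == "agree") >= 2
277 | #
278 | #   majority_approves(["agree", "disagree", "agree"])  # -> True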
--------------------------------------------------------------------------------
/call_llm.py:
--------------------------------------------------------------------------------
1 | import openai
2 | import os
3 | 
4 | class CallLLM:
5 |     def __init__(self):
6 |         self.openai_api_key = "sk-replaced_me"  # os.environ["OPENAI_API_KEY"]
7 |         self.llm_kind = "gpt"
8 | 
9 |     def call_llms(self, system_prompt, prompt):
10 |         if self.llm_kind == "gpt":
11 |             res = self.call_gpt(system_prompt, prompt)
12 |             print("[INFO] Requested OpenAI API")
13 | 
14 |         return res
15 | 
16 |     def call_gpt(self, system_prompt, prompt):
17 |         openai.api_key = self.openai_api_key
18 | 
19 |         response = openai.ChatCompletion.create(
20 |             model="gpt-3.5-turbo-0613",
21 |             messages=[
22 |                 {"role": "system", "content": system_prompt},
23 |                 {"role": "user", "content": prompt}
24 |             ],
25 |             # temperature=1
26 |         )
27 |         response = response["choices"][0]["message"]["content"]
28 | 
29 |         return response
30 | 
--------------------------------------------------------------------------------
/fix_hypothesis.py:
--------------------------------------------------------------------------------
1 | from call_llm import CallLLM
2 | import json
3 | import random
4 | 
5 | class FixHypothesis:
6 |     def __init__(self):
7 |         self.call_llm = CallLLM()
8 | 
9 |     def fixing_hypothesis(self, input_t, tool, hypothesis, input_t_n):
10 |         random_llm = random.randint(1, 4)  # Select the brain-conference member (persona) who will propose the revised hypothesis
11 |         angel_prompt = """You are an angel. You always try to be positive and tolerant. You are also sincere, ascetic and optimistic about things."""
12 |         devil_prompt = """You are the devil. You constantly try to be critical and intolerant. You are also dishonest, hedonistic, and pessimistic about things."""
13 |         hardboiled_prompt = """You are a hard-boiled person. You are ruthless, not driven by emotions or circumstances, but because you are ruthless, you keep your promises and are dependable."""
14 |         emotional_prompt = """You are an emotional person. You tend to rely on passion and momentum, and you tend to be intense in your joy, anger, and sorrow."""
15 | 
16 |         if random_llm == 1:  # angel
17 |             system_prompt = angel_prompt
18 |         elif random_llm == 2:  # devil
19 |             system_prompt = devil_prompt
20 |         elif random_llm == 3:  # hard-boiled
21 |             system_prompt = hardboiled_prompt
22 |         else:  # emotional
23 |             system_prompt = emotional_prompt
24 | 
25 | 
26 |         if tool == "Random Idea method":
27 |             prompt = """From the User input, please select a thing at random (or look up a dictionary and select a noun at random) and expand your ideas and a hypothesis in relation to your area of interest."""
28 |         elif tool == "Stimulating Ideas":
29 |             prompt = """From the User input, please make a list of what you would like it to be like, what would happen if you exaggerated certain parts, reversed it, eliminated it, put it together with something, etc., and choose the most outlandish of these to modify the ExistingHypothesis."""
30 |         elif tool == "Challenging Ideas":
31 |             prompt = """Please think about why it exists or what it is for and modify the ExistingHypothesis for the User's input."""
32 |         elif tool == "Conceptual Diffusion Ideation":
33 |             prompt = """For the User's input, please consider whether this concept can be applied broadly to other things and modify the ExistingHypothesis."""
34 |         elif tool == "Rebuttal Ideation":
35 |             prompt = """For User input, please modify the ExistingHypothesis by debunking widely held beliefs, questioning seemingly obvious assumptions, and attempting to convincingly disprove them."""
36 |         elif tool == "Linear thinking":
37 |             prompt = """For the User's input, please focus on one thing and modify the ExistingHypothesis by inferring a causal relationship."""
38 |         elif tool == "Critical thinking":
39 |             prompt = """Please do not uncritically accept things and information in response to User's input, but rather consider them from various angles, understand them logically and objectively, and modify the ExistingHypothesis by meta-analyzing them from a single higher standpoint."""
40 |         elif tool == "Integrated thinking":
41 |             prompt = """Please modify the ExistingHypothesis by looking at things from a short, medium, and long term perspective for the User's input."""
42 | 
43 |         while True:  # Overall loop that keeps gathering feedback for as long as the hypothesis is voted down
44 |             try:
45 |                 feedback1 = feedback_count[0]  # Define each member's feedback
46 |             except NameError:
47 |                 feedback1 = None
48 |             try:
49 |                 feedback2 = feedback_count[1]
50 |             except NameError:
51 |                 feedback2 = None
52 |             try:
53 |                 feedback3 = feedback_count[2]
54 |             except NameError:
55 |                 feedback3 = None
56 |             if (feedback1 == None) and (feedback2 == None) and (feedback3 == None):
57 |                 feedback = "None"
58 |             else:
59 |                 feedback_list = [feedback1, feedback2, feedback3]
60 |                 # Drop the entries that hold None
61 |                 filtered_feedback = ["- " + one_of_feedbacks for one_of_feedbacks in feedback_list if one_of_feedbacks is not None]
62 |                 # Build a newline-separated string
63 |                 feedback = "\n".join(filtered_feedback)
64 | 
65 |             while True:
66 |                 sys_prompt = system_prompt + "\n\n" + prompt + f"Also, if a Hypothesis and Feedback exist, please modify the Hypothesis according to the Feedback.\n\nPlease output in JSON format referring to Example.\n\n##ExistingHypothesis\n{hypothesis}\n\n##Feedback\n{feedback}" + '\n\n##Example\n{{"hypothesis": "From this feedback, it may be said that the woman was lonely."}}'
67 |                 hypothesis = self.call_llm.call_llms(sys_prompt, input_t)
68 | 
69 |                 try:
70 |                     hypothesis = json.loads(hypothesis)
71 |                     hypothesis = hypothesis["hypothesis"]
72 |                     break
73 |                 except Exception as e:
74 |                     # print(e)
75 |                     print("[INFO] 2-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
76 | 
77 |             vote_prompt = f'''User's input is the hypothesis for the Proposition. Please output your approval or disapproval of the hypothesis and feedback with reference to Examples.
78 | 
79 | Please output with JSON format.
80 | 
81 | ##Proposition
82 | {input_t}
83 | 
84 | ##Examples
85 | {{"vote": "agree", "feedback": "The hypothesis is appropriate and consistent with the situation."}}
86 | {{"vote": "disagree", "feedback": "That hypothesis misses the point. The likelihood of that phenomenon occurring in general is infinitesimally small, so it can be ignored."}}'''
87 | 
88 |             if random_llm == 1:
89 |                 while True:  # Retry loop in case JSON parsing fails
90 |                     dev_prompt = devil_prompt + "\n\n" + vote_prompt
91 |                     response1 = self.call_llm.call_llms(dev_prompt, hypothesis)
92 | 
93 |                     try:
94 |                         response1 = json.loads(response1)
95 |                         break
96 |                     except Exception as e:
97 |                         # print(e)
98 |                         print("[INFO] 6-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
99 | 
100 |                 while True:  # Retry loop in case JSON parsing fails
101 |                     hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt
102 |                     response2 = self.call_llm.call_llms(hard_prompt, hypothesis)
103 | 
104 |                     try:
105 |                         response2 = json.loads(response2)
106 |                         break
107 |                     except Exception as e:
108 |                         # print(e)
109 |                         print("[INFO] 6-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
110 | 
111 |                 while True:  # Retry loop in case JSON parsing fails
112 |                     emo_prompt = emotional_prompt + "\n\n" + vote_prompt
113 |                     response3 = self.call_llm.call_llms(emo_prompt, hypothesis)
114 | 
115 |                     try:
116 |                         response3 = json.loads(response3)
117 |                         break
118 |                     except Exception as e:
119 |                         # print(e)
120 |                         print("[INFO] 6-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
121 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
122 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
123 |                 agree_count = 0
124 |                 disagree_count = 0
125 |                 for item in vote_count:
126 |                     if item == "agree":
127 |                         agree_count += 1
128 |                     elif item == "disagree":
129 |                         disagree_count += 1
130 | 
131 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
132 |                 if agree_count >= 2:
133 |                     print("[Resolution] Approval")
134 |                     break
135 |                 else:
136 |                     print("[Resolution] Rejection")
137 |             elif random_llm == 2:
138 |                 while True:
139 |                     ang_prompt = angel_prompt + "\n\n" + vote_prompt
140 |                     response1 = self.call_llm.call_llms(ang_prompt, hypothesis)
141 | 
142 |                     try:
143 |                         response1 = json.loads(response1)
144 |                         break
145 |                     except Exception as e:
146 |                         # print(e)
147 |                         print("[INFO] 6-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
148 | 
149 |                 while True:
150 |                     hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt
151 |                     response2 = self.call_llm.call_llms(hard_prompt, hypothesis)
152 | 
153 |                     try:
154 |                         response2 = json.loads(response2)
155 |                         break
156 |                     except Exception as e:
157 |                         # print(e)
158 |                         print("[INFO] 6-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
159 | 
160 |                 while True:
161 |                     emo_prompt = emotional_prompt + "\n\n" + vote_prompt
162 |                     response3 = self.call_llm.call_llms(emo_prompt, hypothesis)
163 | 
164 |                     try:
165 |                         response3 = json.loads(response3)
166 |                         break
167 |                     except Exception as e:
168 |                         # print(e)
169 |                         print("[INFO] 6-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
170 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
171 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
172 |                 agree_count = 0
173 |                 disagree_count = 0
174 |                 for item in vote_count:
175 |                     if item == "agree":
176 |                         agree_count += 1
177 |                     elif item == "disagree":
178 |                         disagree_count += 1
179 | 
180 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
181 |                 if agree_count >= 2:
182 |                     print("[Resolution] Approval")
183 |                     break
184 |                 else:
185 |                     print("[Resolution] Rejection")
186 |             elif random_llm == 3:
187 |                 while True:
188 |                     ang_prompt = angel_prompt + "\n\n" + vote_prompt
189 |                     response1 = self.call_llm.call_llms(ang_prompt, hypothesis)
190 | 
191 |                     try:
192 |                         response1 = json.loads(response1)
193 |                         break
194 |                     except Exception as e:
195 |                         # print(e)
196 |                         print("[INFO] 6-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
197 | 
198 |                 while True:
199 |                     dev_prompt = devil_prompt + "\n\n" + vote_prompt
200 |                     response2 = self.call_llm.call_llms(dev_prompt, hypothesis)
201 | 
202 |                     try:
203 |                         response2 = json.loads(response2)
204 |                         break
205 |                     except Exception as e:
206 |                         # print(e)
207 |                         print("[INFO] 6-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
208 | 
209 |                 while True:
210 |                     emo_prompt = emotional_prompt + "\n\n" + vote_prompt
211 |                     response3 = self.call_llm.call_llms(emo_prompt, hypothesis)
212 | 
213 |                     try:
214 |                         response3 = json.loads(response3)
215 |                         break
216 |                     except Exception as e:
217 |                         # print(e)
218 |                         print("[INFO] 6-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
219 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
220 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
221 |                 agree_count = 0
222 |                 disagree_count = 0
223 |                 for item in vote_count:
224 |                     if item == "agree":
225 |                         agree_count += 1
226 |                     elif item == "disagree":
227 |                         disagree_count += 1
228 | 
229 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
230 |                 if agree_count >= 2:
231 |                     print("[Resolution] Approval")
232 |                     break
233 |                 else:
234 |                     print("[Resolution] Rejection")
235 |             elif random_llm == 4:
236 |                 while True:
237 |                     ang_prompt = angel_prompt + "\n\n" + vote_prompt
238 |                     response1 = self.call_llm.call_llms(ang_prompt, hypothesis)
239 | 
240 |                     try:
241 |                         response1 = json.loads(response1)
242 |                         break
243 |                     except Exception as e:
244 |                         # print(e)
245 |                         print("[INFO] 6-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
246 | 
247 |                 while True:
248 |                     dev_prompt = devil_prompt + "\n\n" + vote_prompt
249 |                     response2 = self.call_llm.call_llms(dev_prompt, hypothesis)
250 | 
251 |                     try:
252 |                         response2 = json.loads(response2)
253 |                         break
254 |                     except Exception as e:
255 |                         # print(e)
256 |                         print("[INFO] 6-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
257 | 
258 |                 while True:
259 |                     hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt
260 |                     response3 = self.call_llm.call_llms(hard_prompt, hypothesis)
261 | 
262 |                     try:
263 |                         response3 = json.loads(response3)
264 |                         break
265 |                     except Exception as e:
266 |                         # print(e)
267 |                         print("[INFO] 6-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
268 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
269 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
270 |                 agree_count = 0
271 |                 disagree_count = 0
272 |                 for item in vote_count:
273 |                     if item == "agree":
274 |                         agree_count += 1
275 |                     elif item == "disagree":
276 |                         disagree_count += 1
277 | 
278 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
279 |                 if agree_count >= 2:
280 |                     print("[Resolution] Approval")
281 |                     break
282 |                 else:
283 |                     print("[Resolution] Rejection")
284 |         return hypothesis
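285 | 
286 | # The four persona branches above differ only in which three personas review
287 | # the hypothesis. A compact sketch of the same review round in loop form
288 | # (illustrative refactor, omitting the parse-retry loops; not used by this
289 | # module):
290 | #
291 | #   personas = {1: angel_prompt, 2: devil_prompt, 3: hardboiled_prompt, 4: emotional_prompt}
292 | #   reviewers = [p for k, p in personas.items() if k != random_llm]
293 | #   responses = [json.loads(self.call_llm.call_llms(p + "\n\n" + vote_prompt, hypothesis))
294 | #                for p in reviewers]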
response2["vote"], response3["vote"]] 269 | feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]] 270 | agree_count = 0 271 | disagree_count = 0 272 | for item in vote_count: 273 | if item == "agree": 274 | agree_count += 1 275 | elif item == "disagree": 276 | disagree_count += 1 277 | 278 | print("Agree/Disagree: ", agree_count, "/", disagree_count) 279 | if agree_count >= 2: 280 | print("[Resolution] Approval") 281 | break 282 | else: 283 | print("[Resolution] Rejection") 284 | return hypothesis -------------------------------------------------------------------------------- /hypothesis.py: -------------------------------------------------------------------------------- 1 | from call_llm import CallLLM 2 | import json 3 | import random 4 | 5 | class MakeHypothesis: 6 | def __init__(self): 7 | self.call_llm = CallLLM() 8 | self.prompt = '''The following options are given as tools for generating a hypothesis. 9 | Please select the tools you need to make a hypothesis about the situation inferred from the User's input, and output them in JSON format referring to Example. 10 | 11 | - Lateral thinking: Generate intuitive ideas by looking at things from a variety of perspectives. 12 | - Linear thinking: focus on one thing to infer causal relationships. 13 | - Critical thinking: Examine things and information from diverse angles and understand them logically and objectively, rather than accepting them uncritically. Examine the thoughts of oneself and others without assuming that one's own beliefs are correct. Think meta-advantageously and from one higher standpoint. 14 | - Integrated thinking: See and think about things from short-, medium-, and long-term perspectives. 15 | 16 | ##Example 17 | {"tool": "Lateral thinking"}''' 18 | 19 | def making_thinking_tool(self, input_t): 20 | prompt = self.prompt 21 | 22 | while True: # 思考方法選択のパース失敗に備えたループ 23 | response = self.call_llm.call_llms(prompt, input_t) 24 | try: 25 | response = json.loads(response) 26 | break 27 | except Exception as e: 28 | # print(e) 29 | print("[INFO] 1: The response from OpenAI API didn't follow the specified format, so it is re-running now.") 30 | # pass 31 | 32 | if response["tool"]=="Lateral thinking": # 水平思考が選択された場合 33 | lateral_prompt = '''The following options are given as tools for generating a hypothesis. 34 | Please select the tools you need to make a hypothesis about the situation inferred from the User's input, and output them in JSON format referring to Example. 35 | 36 | - Random Idea method: A random selection of things (or a random selection of nouns from a dictionary) is used to expand the idea by associating it with an area of interest. 37 | - Stimulating Ideas: This method involves making a list of things that you wish were this way, exaggerating certain parts, reversing, eliminating, or combining with other things, etc., and then selecting the most outlandish of these ideas as the basis for a new idea. 38 | - Challenging Ideas: This is a method of generating new ideas by considering why something exists or why it is the way it is. 39 | - Conceptual Diffusion Ideation: This method generates ideas by considering whether a concept can be applied broadly to other things. 40 | - Rebuttal Ideation: Generating ideas by questioning what is considered obvious and needless to say and attempting to disprove it persuasively, considering widely supported ideas to be wrong. 
41 | 42 | ##Example 43 | {"tool": "Stimulating Ideas"}''' 44 | 45 | while True: # 水平思考の選択のパース失敗に備えたループ 46 | response = self.call_llm.call_llms(lateral_prompt, input_t) 47 | try: 48 | response = json.loads(response) 49 | break 50 | except Exception as e: 51 | # print(e) 52 | print("[This is an Expected Error1-2] The response from OpenAI API didn't follow the specified format, so it is re-running now.") 53 | # pass 54 | 55 | return response["tool"] 56 | 57 | def making_hypothesis(self, input_t, tool): 58 | random_llm = random.randint(1, 4) # 思考方法を入力として仮説を提案する脳内会議メンバーを選択 59 | angel_prompt = """You are an angel. You always try to be positive and tolerant. You are also sincere, ascetic and optimistic about things.""" 60 | devil_prompt = """You are the devil. You constantly try to be critical and intolerant. You are also dishonest, hedonistic, and pessimistic about things.""" 61 | hardboiled_prompt = """You are a hard-boiled person. You are ruthless, not driven by emotions or circumstances, but because you are ruthless, you keep your promises and are dependable.""" 62 | emotional_prompt = """You are an emotional person. You tend to rely on passion and momentum, and you tend to be intense in your joy, anger, and sorrow.""" 63 | 64 | if random_llm == 1: # 天使 65 | system_prompt = angel_prompt 66 | elif random_llm == 2: # 悪魔 67 | system_prompt = devil_prompt 68 | elif random_llm == 3: # ハードボイルド 69 | system_prompt = hardboiled_prompt 70 | else: # 悲観的 71 | system_prompt = emotional_prompt 72 | 73 | 74 | if tool == "Random Idea method": 75 | prompt = """From the User input, please select a thing at random (or look up a dictionary and select a noun at random) and expand your ideas and a hypothesis in relation to your area of interest.""" 76 | elif tool == "Stimulating Ideas": 77 | prompt = """From the User input, please make a list of what you would like it to be like, what would happen if you exaggerated certain parts, reversed it, eliminated it, put it together with something, etc., and choose the most outlandish of these to form a hypothesis.""" 78 | elif tool == "Challenging Ideas": 79 | prompt = """Please think about why it exists or what it is for and formulate a hypothesis for the User's input.""" 80 | elif tool == "Conceptual Diffusion Ideation": 81 | prompt = """For the User's input, please consider whether this concept can be applied broadly to other things and formulate a hypothesis.""" 82 | elif tool == "Rebuttal Ideation": 83 | prompt = """For User input, please formulate a hypothesis by debunking widely held beliefs, questioning obvious and obvious assumptions, and attempting to convincingly disprove them.""" 84 | elif tool == "Linear thinking": 85 | prompt = """For the User's input, please focus on one thing and make a hypothesis by inferring a causal relationship.""" 86 | elif tool == "Critical thinking": 87 | prompt = """Please do not uncritically accept things and information in response to User's input, but rather consider them from various angles, understand them logically and objectively, and formulate a hypothesis by meta-analyzing them from a single higher standpoint.""" 88 | elif tool == "Integrated thinking": 89 | prompt = """Please make a hypothesis by looking at things from a short, medium, and long term perspective for the User's input.""" 90 | 91 | while True: # 否決された場合永遠にフィードバックをするための全体ループ 92 | try: 93 | hypothesis = hypothesis # メンバーからのフィードバック時のみフィードバック前の仮説として定義 94 | except NameError: 95 | hypothesis = None 96 | 97 | try: 98 | feedback1 = feedback_count[0] # 
各メンバーのフィードバックを定義 99 | except NameError: 100 | feedback1 = None 101 | try: 102 | feedback2 = feedback_count[1] 103 | except NameError: 104 | feedback2 = None 105 | try: 106 | feedback3 = feedback_count[2] 107 | except NameError: 108 | feedback3 = None 109 | if (feedback1 == None) and (feedback2 == None) and (feedback3 == None): 110 | feedback = "None" 111 | else: 112 | feedback_list = [feedback1, feedback2, feedback3] 113 | # Noneが格納されている変数を排除 114 | filtered_feedback = ["- " + one_of_feedbacks for one_of_feedbacks in feedback_list if one_of_feedbacks is not None] 115 | # 改行で区切った文字列を生成 116 | feedback = "\n".join(filtered_feedback) 117 | 118 | while True: 119 | sys_prompt = system_prompt + "\n\n" + prompt + f"Also, if a Hypothesis and Feedback exist, please modify the Hypothesis according to the Feedback.\n\nPlease output in JSON format referring to Example.\n\n##ExistingHypothesis\n{hypothesis}\n\n##Feedback\n{feedback}" + '\n\n##Example\n{{"hypothesis": "From this input, it may be said that the woman was lonely."}}' 120 | hypothesis = self.call_llm.call_llms(sys_prompt, input_t) 121 | 122 | try: 123 | hypothesis = json.loads(hypothesis) 124 | hypothesis = hypothesis["hypothesis"] 125 | break 126 | except Exception as e: 127 | # print(e) 128 | print("[INFO] 2-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.") 129 | 130 | vote_prompt = f'''User's input is the hypothesis for the Proposition. Please output your approval or disapproval of the hypothesis and feedback with reference to Examples. 131 | 132 | Please outout with JSON format. 133 | 134 | ##Proposition 135 | {input_t} 136 | 137 | ##Examples 138 | {{"vote": "agree", "feedback": "The hypothesis is appropriate and consistent with the situation."}} 139 | {{"vote": "disagree", "feedback": "That hypothesis misses the point. 
The likelihood of that phenomenon occurring in general is infinitesimally small, so it can be ignored."}}''' 140 | 141 | if random_llm == 1: 142 | while True: # パース失敗に備えたループ 143 | dev_prompt = devil_prompt + "\n\n" + vote_prompt 144 | response1 = self.call_llm.call_llms(dev_prompt, hypothesis) 145 | 146 | try: 147 | response1 = json.loads(response1) 148 | break 149 | except Exception as e: 150 | # print(e) 151 | print("[INFO] 2-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.") 152 | 153 | while True: # パース失敗に備えたループ 154 | hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt 155 | response2 = self.call_llm.call_llms(hard_prompt, hypothesis) 156 | 157 | try: 158 | response2 = json.loads(response2) 159 | break 160 | except Exception as e: 161 | # print(e) 162 | print("[INFO] 2-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.") 163 | 164 | while True: # パース失敗に備えたループ 165 | emo_prompt = emotional_prompt + "\n\n" + vote_prompt 166 | response3 = self.call_llm.call_llms(emo_prompt, hypothesis) 167 | 168 | try: 169 | response3 = json.loads(response3) 170 | break 171 | except Exception as e: 172 | # print(e) 173 | print("[INFO] 2-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.") 174 | vote_count = [response1["vote"], response2["vote"], response3["vote"]] 175 | feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]] 176 | agree_count = 0 177 | disagree_count = 0 178 | for item in vote_count: 179 | if item == "agree": 180 | agree_count += 1 181 | elif item == "disagree": 182 | disagree_count += 1 183 | 184 | print("Agree/Disagree: ", agree_count, "/", disagree_count) 185 | if agree_count >= 2: 186 | print("[Resolution] Approval") 187 | break 188 | else: 189 | print("[Resolution] Rejection") 190 | 191 | elif random_llm == 2: 192 | while True: 193 | ang_prompt = angel_prompt + "\n\n" + vote_prompt 194 | response1 = self.call_llm.call_llms(ang_prompt, hypothesis) 195 | 196 | try: 197 | response1 = json.loads(response1) 198 | break 199 | except Exception as e: 200 | # print(e) 201 | print("[INFO] 2-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.") 202 | 203 | while True: 204 | hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt 205 | response2 = self.call_llm.call_llms(hard_prompt, hypothesis) 206 | 207 | try: 208 | response2 = json.loads(response2) 209 | break 210 | except Exception as e: 211 | # print(e) 212 | print("[INFO] 2-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.") 213 | 214 | while True: 215 | emo_prompt = emotional_prompt + "\n\n" + vote_prompt 216 | response3 = self.call_llm.call_llms(emo_prompt, hypothesis) 217 | 218 | try: 219 | response3 = json.loads(response3) 220 | break 221 | except Exception as e: 222 | # print(e) 223 | print("[INFO] 2-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.") 224 | vote_count = [response1["vote"], response2["vote"], response3["vote"]] 225 | feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]] 226 | agree_count = 0 227 | disagree_count = 0 228 | for item in vote_count: 229 | if item == "agree": 230 | agree_count += 1 231 | elif item == "disagree": 232 | disagree_count += 1 233 | 234 | print("Agree/Disagree: ", agree_count, "/", disagree_count) 235 | if agree_count >= 2: 236 | print("[Resolution] Approval") 237 | break 238 | 
238 |                 else:
239 |                     print("[Resolution] Rejection")
240 |             elif random_llm == 3:
241 |                 while True:
242 |                     ang_prompt = angel_prompt + "\n\n" + vote_prompt
243 |                     response1 = self.call_llm.call_llms(ang_prompt, hypothesis)
244 | 
245 |                     try:
246 |                         response1 = json.loads(response1)
247 |                         break
248 |                     except Exception as e:
249 |                         # print(e)
250 |                         print("[INFO] 2-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
251 | 
252 |                 while True:
253 |                     dev_prompt = devil_prompt + "\n\n" + vote_prompt
254 |                     response2 = self.call_llm.call_llms(dev_prompt, hypothesis)
255 | 
256 |                     try:
257 |                         response2 = json.loads(response2)
258 |                         break
259 |                     except Exception as e:
260 |                         # print(e)
261 |                         print("[INFO] 2-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
262 | 
263 |                 while True:
264 |                     emo_prompt = emotional_prompt + "\n\n" + vote_prompt
265 |                     response3 = self.call_llm.call_llms(emo_prompt, hypothesis)
266 | 
267 |                     try:
268 |                         response3 = json.loads(response3)
269 |                         break
270 |                     except Exception as e:
271 |                         # print(e)
272 |                         print("[INFO] 2-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
273 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
274 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
275 |                 agree_count = 0
276 |                 disagree_count = 0
277 |                 for item in vote_count:
278 |                     if item == "agree":
279 |                         agree_count += 1
280 |                     elif item == "disagree":
281 |                         disagree_count += 1
282 | 
283 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
284 |                 if agree_count >= 2:
285 |                     print("[Resolution] Approval")
286 |                     break
287 |                 else:
288 |                     print("[Resolution] Rejection")
289 |             elif random_llm == 4:
290 |                 while True:
291 |                     ang_prompt = angel_prompt + "\n\n" + vote_prompt
292 |                     response1 = self.call_llm.call_llms(ang_prompt, hypothesis)
293 | 
294 |                     try:
295 |                         response1 = json.loads(response1)
296 |                         break
297 |                     except Exception as e:
298 |                         # print(e)
299 |                         print("[INFO] 2-1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
300 | 
301 |                 while True:
302 |                     dev_prompt = devil_prompt + "\n\n" + vote_prompt
303 |                     response2 = self.call_llm.call_llms(dev_prompt, hypothesis)
304 | 
305 |                     try:
306 |                         response2 = json.loads(response2)
307 |                         break
308 |                     except Exception as e:
309 |                         # print(e)
310 |                         print("[INFO] 2-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
311 | 
312 |                 while True:
313 |                     hard_prompt = hardboiled_prompt + "\n\n" + vote_prompt
314 |                     response3 = self.call_llm.call_llms(hard_prompt, hypothesis)
315 | 
316 |                     try:
317 |                         response3 = json.loads(response3)
318 |                         break
319 |                     except Exception as e:
320 |                         # print(e)
321 |                         print("[INFO] 2-3: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
322 |                 vote_count = [response1["vote"], response2["vote"], response3["vote"]]
323 |                 feedback_count = [response1["feedback"], response2["feedback"], response3["feedback"]]
324 |                 agree_count = 0
325 |                 disagree_count = 0
326 |                 for item in vote_count:
327 |                     if item == "agree":
328 |                         agree_count += 1
329 |                     elif item == "disagree":
330 |                         disagree_count += 1
331 | 
332 |                 print("Agree/Disagree: ", agree_count, "/", disagree_count)
333 |                 if agree_count >= 2:
334 |                     print("[Resolution] Approval")
335 |                     break
336 |                 else:
337 |                     print("[Resolution] Rejection")
338 |         return hypothesis
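339 | 
340 | # Every call site in this repo wraps call_llms in a while/try loop until the
341 | # model returns parseable JSON. A reusable helper for that pattern might look
342 | # like this (a sketch, assuming only JSON errors should be retried):
343 | #
344 | #   def call_llm_json(call_llm, system_prompt, prompt, tag="1"):
345 | #       while True:
346 | #           try:
347 | #               return json.loads(call_llm.call_llms(system_prompt, prompt))
348 | #           except json.JSONDecodeError:
349 | #               print(f"[INFO] {tag}: The response from OpenAI API didn't follow the specified format, so it is re-running now.")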
--------------------------------------------------------------------------------
/judge_finish.py:
--------------------------------------------------------------------------------
1 | from call_llm import CallLLM
2 | 
3 | class JudgeFinish:
4 |     def __init__(self):
5 |         self.call_llm = CallLLM()
6 |         self.system_prompt = "The Hypothesis in User's input is derived from the Input, the UtteredContent is the utterance made to verify the Hypothesis, and the NewInput is the reply to the UtteredContent. You are now in a loop that verifies the Hypothesis derived from the Input by uttering the UtteredContent, receiving the NewInput, and modifying the Hypothesis. If there is a surprise (a discrepancy) between the Hypothesis and the NewInput, output False, because the loop must continue; if you determine that there is no surprise, output True, because the loop doesn't need to continue."
7 |         # User's inputのHypothesisはInputから導かれ、UtteredContentはHypothesisを検証するためにした発言で、NewInputはUtteredContentの返事です。今はInputから導かれたHypothesisをUtteredContentを発話することで検証し、NewInputを受け取ってHypothesisを修正するループをしています。User's inputを考慮して、HypothesisとNewInputの間に乖離があると判断したらループを続ける必要があるのでFalse、一致していると判断したらループを続ける必要はないのでTrueと出力してください。
8 | 
9 |     def judging_finish(self, input_t, hypothesis, word, input_t_n):
10 |         while True:
11 |             sys_prompt = self.system_prompt
12 |             main_prompt = f"##Input\n{input_t}\n\n##Hypothesis\n{hypothesis}\n\n##UtteredContent\n{word}\n\n##NewInput\n{input_t_n}"
13 |             response = self.call_llm.call_llms(sys_prompt, main_prompt)
14 | 
15 |             if (response == "True") or (response == "False"):
16 |                 break
17 |             print("[INFO] 5: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
18 | 
19 |         return response
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | from hypothesis import MakeHypothesis
2 | from act import MakeAction
3 | from word import MakeWord
4 | from judge_finish import JudgeFinish
5 | from make_memory import MakeMemory
6 | from fix_hypothesis import FixHypothesis
7 | 
8 | class AutoMATA:
9 |     def __init__(self):
10 |         self.make_hypothesis = MakeHypothesis()
11 |         self.make_action = MakeAction()
12 |         self.make_word = MakeWord()
13 |         self.judge_finish = JudgeFinish()
14 |         self.make_memory = MakeMemory()
15 |         self.fix_hypothesis = FixHypothesis()
16 | 
17 |     def main(self):
18 |         # parallel_type_hypothesis = input("Parallel Mode in Hypothesis Generation:(y/n):")
19 |         # parallel_type_action = input("Parallel Mode in Action Generation:(y/n):")
20 |         input_t = input("User:")
21 |         all_list = []
22 |         tool = self.make_hypothesis.making_thinking_tool(input_t)
23 |         print(f"[TOOL]{tool}")
24 |         hypothesis = self.make_hypothesis.making_hypothesis(input_t, tool)
25 |         print(f"[HYPOTHESIS]{hypothesis}")
26 | 
27 |         while True:
28 |             action = self.make_action.making_action(input_t, hypothesis)
29 |             print(f"[ACTION]{action}")
30 |             word = self.make_word.making_word(action)
31 |             print("System:" + word)
32 |             input_t_n = input("User:")
33 |             all_dict = {
34 |                 "input_t": input_t,
35 |                 "hypothesis": hypothesis,
36 |                 "input_t_n": input_t_n
37 |             }
38 | 
39 |             judge_result = self.judge_finish.judging_finish(input_t, hypothesis, word, input_t_n)
40 |             if judge_result == "True":
41 |                 self.make_memory.making_memory(all_dict)
42 |                 break
43 |             hypothesis = self.fix_hypothesis.fixing_hypothesis(input_t, tool, hypothesis, input_t_n)
44 |             print(f"[FIX_HYPOTHESIS]{hypothesis}")
45 |         print("----Finish----")
46 | 
47 | if __name__ == "__main__":
48 |     automata = AutoMATA()
49 |     automata.main()
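50 | 
51 | # Illustrative shape of one session (labels match the prints above; the
52 | # actual texts depend on the user and the LLM):
53 | #
54 | #   User:<initial input>
55 | #   [TOOL]Lateral thinking
56 | #   [HYPOTHESIS]<hypothesis approved by a 2-of-3 vote>
57 | #   [ACTION]<plan for an utterance that tests the hypothesis>
58 | #   System:<utterance>
59 | #   User:<reply>
60 | #   [FIX_HYPOTHESIS]<revised hypothesis>   (loop until judge_finish returns "True")
61 | #   ----Finish----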
--------------------------------------------------------------------------------
/make_memory.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | 
4 | class MakeMemory:
5 |     def __init__(self):
6 |         script_directory = os.path.dirname(os.path.abspath(__file__))
7 |         self.memory_store_path = os.path.join(script_directory, "memory_store.json")
8 | 
9 |     def making_memory(self, content):
10 |         # The store is a JSON list of records; read it first (starting a fresh
11 |         # list if it doesn't exist yet), append the new record with a sequential
12 |         # id, and write the whole list back.
13 |         try:
14 |             with open(self.memory_store_path, "r", encoding="utf-8") as f:
15 |                 load_data = json.load(f)
16 |         except (FileNotFoundError, json.JSONDecodeError):
17 |             load_data = []
18 |         content["id"] = str(len(load_data))
19 |         load_data.append(content)
20 |         with open(self.memory_store_path, "w", encoding="utf-8") as f:
21 |             json.dump(load_data, f, ensure_ascii=False, indent=4)
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | openai<1.0  # the code uses the pre-1.0 openai.ChatCompletion API
2 | 
--------------------------------------------------------------------------------
/word.py:
--------------------------------------------------------------------------------
1 | from call_llm import CallLLM
2 | 
3 | class MakeWord:
4 |     def __init__(self):
5 |         self.call_llm = CallLLM()
6 |         self.system_prompt = "User's input is an utterance you should make. Please generate the utterance based on it in Japanese."
7 |         """
8 |         User's inputはあなたがするべき発言です。それに基づいて発言を日本語で生成してください。
9 |         """
10 | 
11 |     def making_word(self, action):
12 |         sys_prompt = self.system_prompt
13 |         response = self.call_llm.call_llms(sys_prompt, action)
14 | 
15 |         return response
--------------------------------------------------------------------------------
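For reference, after one finished session memory_store.json holds a JSON list of records written by make_memory.py (illustrative values; the texts depend on the session):

[
    {
        "input_t": "...",
        "hypothesis": "...",
        "input_t_n": "...",
        "id": "0"
    }
]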