├── .gitignore
├── GenerativeAgentForApplaud.ipynb
├── GenerativeAgentForDoq.ipynb
├── LICENSE
├── README.md
├── applaud_data
├── documents
│   ├── dbs
│   │   ├── chap01
│   │   └── chap02
│   └── document.list
├── documents_data
│   └── data_model
│   │   ├── chap01.json
│   │   └── chap02.json
├── query_dir
│   ├── q_1
│   │   ├── result
│   │   │   ├── critical_thinking.json
│   │   │   ├── memory.json
│   │   │   ├── plan.json
│   │   │   ├── plan_result.json
│   │   │   ├── plan_result_org.json
│   │   │   ├── reflection.json
│   │   │   ├── reflection_org.json
│   │   │   ├── reply.json
│   │   │   ├── result.txt
│   │   │   └── updated_plan.json
│   │   └── strategy_history.json
│   ├── q_2
│   │   ├── result
│   │   │   ├── critical_thinking.json
│   │   │   ├── memory.json
│   │   │   ├── plan.json
│   │   │   ├── plan_result.json
│   │   │   ├── plan_result_org.json
│   │   │   ├── reflection.json
│   │   │   ├── reflection_org.json
│   │   │   ├── reply.json
│   │   │   ├── result.txt
│   │   │   └── updated_plan.json
│   │   └── strategy_history.json
│   └── query.txt
└── reflections_data
│   └── data_model
│   ├── Analysis_Skills.json
│   ├── Members.json
│   ├── appreciating_words.json
│   ├── commont_knowledge.json
│   ├── conversation_log_analysis.json
│   ├── kanetugu2018_personality.json
│   ├── kanetugu2018_unnoticed_characteristics.json
│   └── unnoticed_traits.json
├── applaud_prompt
├── ptemplate_critical_thinking.txt
├── ptemplate_evaluate.txt
├── ptemplate_mission.txt
├── ptemplate_query.txt
├── ptemplate_query_plan.txt
├── ptemplate_reflection.txt
├── ptemplate_reflection_addterms.txt
├── ptemplate_strategy.txt
└── ptemplate_subq_detail.txt
├── check_recover_json.py
├── critical_thinking.py
├── data_model
├── data_model.py
├── data_model_accessor.py
├── data_model_storage.py
├── document_contents_similarity_merge.py
├── document_data_cleaner.py
├── document_data_model.py
├── document_data_persistentor.py
├── document_similarity_extractor.py
├── openai_libs.py
├── reflection_contents_similarity_merge.py
├── reflection_data_cleaner.py
├── reflection_data_model.py
├── reflection_data_persistentor.py
├── reflection_similarity_extractor.py
└── similarity_extractor.py
├── db_manager.py
├── document_db.py
├── evaluate_applaud.py
├── evaluate_results.py
├── evaluator.py
├── history_selector.py
├── json_utils.py
├── memory_stream.py
├── noidea.txt
├── param_templates
├── applaud_params.json
└── document_params.json
├── params.py
├── plan.py
├── planner.py
├── prompt_template.py
├── prompt_templates
├── ptemplate_critical_thinking.txt
├── ptemplate_evaluate.txt
├── ptemplate_mission.txt
├── ptemplate_query.txt
├── ptemplate_query_plan.txt
├── ptemplate_reflection.txt
├── ptemplate_reflection_addterms.txt
├── ptemplate_strategy.txt
└── ptemplate_subq_detail.txt
├── query.py
├── query_dir
├── q_1
│   ├── result
│   │   ├── critical_thinking.json
│   │   ├── memory.json
│   │   ├── plan.json
│   │   ├── plan_result.json
│   │   ├── reflection.json
│   │   ├── reply.json
│   │   ├── result.txt
│   │   └── updated_plan.json
│   └── strategy_history.json
├── q_2
│   ├── result
│   │   ├── critical_thinking.json
│   │   ├── memory.json
│   │   ├── next_plan_result.json
│   │   ├── plan.json
│   │   ├── plan_result.json
│   │   ├── prev_plan_result.json
│   │   ├── reflection.json
│   │   ├── reply.json
│   │   ├── result.txt
│   │   └── updated_plan.json
│   └── strategy_history.json
└── query.txt
├── question.py
├── reflection.py
├── tactical_plannig.py
├── test
└── .gitkeep
└── tools
├── cleanup_applaud_data.bash
├── create_doclist.bash
├── do_applaud.bash
├── do_query.bash
├── evaluate_answers.bash
├── evaluate_plans.bash
├── evaluate_reflection.bash
├── evaluate_reflection.py
├── evaluate_reflections.bash
├── get_terms.bash
├── 
prompt_templates ├── ptemplate_applaud.txt ├── ptemplate_evaluate_answers.txt ├── ptemplate_evaluate_plans.txt └── ptemplate_evaluate_reflections.txt └── query.bash /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | test/*.json 12 | test/result/ 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # poetry 100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 101 | # This is especially recommended for binary packages to ensure reproducibility, and is more 102 | # commonly ignored for libraries. 103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 104 | #poetry.lock 105 | 106 | # pdm 107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 108 | #pdm.lock 109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 110 | # in version control. 111 | # https://pdm.fming.dev/#use-with-ide 112 | .pdm.toml 113 | 114 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Takashi Mori 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /applaud_data/documents/dbs/chap01: -------------------------------------------------------------------------------- 1 | #概要 2 | これまで2回にわたって [athrill(アスリル)](https://qiita.com/kanetugu2018/items/1f2ef93c9e1fa7a29f97) の紹介記事を書いてきました.athrill は PC上で手軽に組込み系プログラムをデバッグできるようにするための CPU エミュレータです. 3 | 4 | ここで一息,なぜ athrill を作る必要があったのか,そのいきさつを書いておきます. 5 | ※組込み系の開発で実機レス開発環境がないと同じようなトラブルに見舞われると思われますので, 6 | ※ここに書かれている内容は無意味ではないと信じております. 7 | 8 | #きっかけ 9 | 数年前の暑い夏,とあるプロジェクトで[TOPPERS OS](https://www.toppers.jp/) を V850 に移植するお仕事をいただきました(先行試作開発).ただ,移植に必要な開発環境の調達がなかなかできず,最悪,デバッガなしでの開発になる可能性が高まっていました. 10 | 11 | このまま開発環境が調達できないとどうなるか,その当時脳内シミュレーションした結果を以下にまとめておきます. 12 | 13 | * 外部入出力(デジタル入出力等)だけで OS のデバッグはきっと無理! 14 | * 多重割り込み系のテストも多分無理 15 | * 実機は1台しかないので,万が一壊したら開発が止まる… 16 | * OS 上で動作するデバイスドライバやミドルの結合検査を複数人で実施できない(空き稼働のリスク) 17 | 18 | さらに,その当時,実機動作確認できる場所が屋外しかなかったため,真夏日に外でテストし続けると倒れそう... 19 | 20 | #必死で調べました 21 | そんな不安があったため,V850 の開発環境を必死で調べました. 
22 | 23 | * 前提 24 | * V850 CPU命令エミュレーション可能 25 | * 周辺デバイスのエミュレーションは必須(割り込みコントローラ含む) 26 | * 開発環境にかけることができる予算は数十万円以内! 27 | * 既製品 28 | * 開発スケールさせようと思うと,予算にはまらない. 29 | * オープンソース 30 | * 規模が大きすぎて無理(ソース理解している間に開発終わりそう…) 31 | * QEMU/SkyEye 等ありましたが,V850 は未サポート 32 | 33 | #最低,何があれば良いか? 34 | OS 移植とは言っても,TOPPERS OS の場合は,すべてアセンブラで記述する必要はありません.C言語で書かれたところは x86 でビルド&デバッグは可能です.なので,最低,以下をデバッグできる環境があれば良いことに気づきました. 35 | 36 | * スタートアップルーチン 37 | * 割り込み/例外処理 38 | * タスクディスパッチ処理 39 | 40 | これらを PC 上で動作確認するための何かを自作すれば良いのだと思えました. 41 | 42 | #はじめの一歩はスタートアップルーチン 43 | これらの中で,一番取り組みやすいのは,なんと言ってもスタートアップルーチンです(下図). 44 | 45 | ``` 46 | .section .text 47 | .align 4 48 | .global __start 49 | __start: 50 | di /* 割り込み禁止 */ 51 | 52 | /* 53 | * 各種ポインタの初期化(SP/TP/EP/GP/CTBP) 54 | */ 55 | GET_CID r4 56 | shl 2, r4 57 | Lea _ostkpt_table, r3 58 | add r3, r4 59 |      : 60 | ``` 61 | 必要な命令数もそれほど多くないですし,命令の内容もそんなに複雑ではない. 62 | なので,それらのアセンブラ命令をデコードして,疑似的な CPU レジスタに値入れて,動作確認するだけでも全然違うだろうと思えました. 63 | 64 | 要するに,簡単なパーサーを作るようなものだと思えてきました.実機投入前にそれを使用して動作確認できれば,実機トラブルは激減するに違いない.こういうストーリーがぼくの中で出来上がりました.過去の偉人たちも [自作CPU](https://www.amazon.co.jp/CPU%E3%81%AE%E5%89%B5%E3%82%8A%E3%81%8B%E3%81%9F-%E6%B8%A1%E6%B3%A2-%E9%83%81/dp/4839909865/ref=pd_lpo_sbs_14_img_0/355-6444584-5963124?_encoding=UTF8&psc=1&refRID=J54H94PS157B1B66H4YN) や [自作エミュレータ](https://www.amazon.co.jp/%E8%87%AA%E4%BD%9C%E3%82%A8%E3%83%9F%E3%83%A5%E3%83%AC%E3%83%BC%E3%82%BF%E3%81%A7%E5%AD%A6%E3%81%B6x86%E3%82%A2%E3%83%BC%E3%82%AD%E3%83%86%E3%82%AF%E3%83%81%E3%83%A3-%E3%82%B3%E3%83%B3%E3%83%94%E3%83%A5%E3%83%BC%E3%82%BF%E3%81%8C%E5%8B%95%E3%81%8F%E4%BB%95%E7%B5%84%E3%81%BF%E3%82%92%E5%BE%B9%E5%BA%95%E7%90%86%E8%A7%A3-%E5%86%85%E7%94%B0%E5%85%AC%E5%A4%AA/dp/4839954747) を作ったわけだし,『いっちょやってみるか!』というモチベーションに切り替わっていったわけです. 65 | 66 | そして,運良く,V850 はマニュアルがしっかりしており,機械語のデコード仕様が明確になっていました.あとは,その仕様をソフトウェアで実現するだけというわけです. 67 | 68 | #athrill(アスリル) 誕生 69 | あれやこれやと命令を追加していくうちに,やり方がわかってきて,割り込みコントローラや周辺デバイス(CAN, A/D変換器等など)必要なものは自分で作れるようになっていました(★). 70 | (★)自分で作るからこそ,必要な機能作成やチューニングができる点が良いですね. 71 | 72 | その当時は,自作エミュレータでのデバッグはやはり大変でしたが,日々わくわくしながら仕事してました.そんな中,自作エミュレータ上で移植した OS が PC 上で動かすことができたときは本当に嬉しかったです(下図は,その当時の OS 起動メッセージです). 73 | 74 | ``` 75 | TOPPERS/ASP3 Kernel Release 3.2.0 for V850-ESFK3 76 | Copyright (C) 2000-2003 by Embedded and Real-Time Systems Laboratory 77 | Toyohashi Univ. of Technology, JAPAN 78 | Copyright (C) 2004-2017 by Embedded and Real-Time Systems Laboratory 79 | Graduate School of Information Science, Nagoya Univ., JAPAN 80 | ``` 81 | 82 | さらに,その OS を実機上に焼き込み,エミュレータと同じように起動⇒ CAN データの送受ができたときの感激はずっと忘れないと思います(とても暑い日でした). 83 | 84 | 当時は,単に「CPU エミュレータ」っていう名前で呼んでいましたが,[第7回TOPPERS活用アイデア・アプリケーション開発コンテスト](https://www.toppers.jp/contest.html) への応募という目標ができて正式な名前をつけることにしました. 85 | 86 | 「わくわく」を英訳し,「athrill(アスリル)」で即決です.![athrill.png](https://qiita-image-store.s3.amazonaws.com/0/244147/310461c9-5fba-3f82-c535-75d704638526.png) 87 | -------------------------------------------------------------------------------- /applaud_data/documents/dbs/chap02: -------------------------------------------------------------------------------- 1 | #概要 2 | この記事では,ARM版マイコンシミュレータをソフトウェア上で自作しよう!という試みをつらつらと書いています. 3 | 4 | #動機と目標 5 | そもそもなぜARM版のマイコンシミュレータを自作するんですか?という問いがあるかと思います. 6 | 7 | 有名どころでは,[QEMU](https://ja.wikipedia.org/wiki/QEMU) というオープンソースがあるのに,なんでわざわざ作るの?と聞かれそうです.少なくともこれまでの自分はそう思っていました. 8 | 9 | ##動機 10 | 数年前,マイコンシミュレータ([athrill](https://qiita.com/kanetugu2018/items/1f2ef93c9e1fa7a29f97))を自作しました. 11 | とてもマイナーで,きっと誰もしらないと思います. 
12 | 13 | これまでqiita記事とか発表させてもらえる場では積極的に宣伝してきましたが 14 | やっぱりマイナーなんだよね〜って職場の同僚につぶやいたところ, 15 | 「やっぱり対応しているCPUがV850だからなんでは?」 16 | という結論に至りました. 17 | ※きっと,一般の方はv850なんて知らないですよね?車載系では昔よく使われていたんですけどね.. 18 | 19 | ##そうはいっても 20 | 自作したマイコンシミュレータ athrill は,様々なCPUアーキに対応できるように設計していました.さらに,LinuxとWindows/docker上で実行できるようにしていました. 21 | 22 | [TOPPERS](https://www.toppers.jp/index.html) の RTOS も色々と動かせるようにしましたし,さらには Unity と連携して [ETロボコンシミュレータ](https://qiita.com/kanetugu2018/items/0e521f4779cd680dab18)として利用できるようにもしました. 23 | 24 | 「でも,CPUがv850なんすよね〜.IoT機器はARMだっていうのに,v850ではもうダメですよ(心の声).」 25 | 26 | QEMU使えば良いじゃん!っていう人もいらっしゃるかもしれませんが,やっぱり自作にこだわってます. 27 | 28 | ##目標 29 | そんなわけで,athrill を ARM 対応しようって決めたわけですが,いくつか大きな目標を考えました. 30 | 31 | 1. 開発環境を完全クロスプラットフォーム化する(Mac/Linux/Windows)! 32 | * ARM版のathrillからは DSL で命令記述して開発を効率化する! 33 | * TOPPERS の RTOS をこれまでどおり動かせるようにする! 34 | * ETロボコンのターゲットCPUはARMなので,現状のETロボコンシミュレータをARM化する! 35 | * MMU対応して,Linuxさえもサクサクっと動かせるようにする! 36 | 37 | こんな大きな目標ぶちあげて大丈夫なの?と思われるかもしれませんが,地道にコツコツやっていればいつか実現できるはずですよ〜.くらいの気持ちで始めることにしました. 38 | 39 | #進捗状況 40 | 2020/02/02の進捗状況としては,「3」まで終わりました. 41 | 今,4 やってます.案外やればできるもんです. 42 | 43 | ARM版のathrillは以下で公開しました.ARMv7-Aです. 44 | 45 | https://github.com/tmori/athrill-target 46 | 47 | 対応している命令は ARM命令だけで thumb 命令はまだです.さらに,ARMは約400個の命令ありますが,まだ80個くらいしか実装してないです.それでも,RTOSが動くための最低限の命令セットの実装はしています. 48 | 49 | 2020/03/01の進捗状況としては,「4」がほぼ終わりました. 50 | もちろん ETロボコンシミュレータ動いてます.実装した命令数はちょうど100個です. 51 | 52 | https://qiita.com/kanetugu2018/items/0e521f4779cd680dab18 53 | 54 | でも,サンプルサンプルプログラムが動いただけなもので,まだ何が起きるか未知数です. 55 | もし,デコードエラーになった場合は,以下に issue を挙げていただければ対応します. 56 | 57 | https://github.com/tmori/athrill-target 58 | 59 | この際,調査用に頂きたい情報は以下になります. 60 | 1. athrillが出力したエラーメッセージ 61 |  例.CPU(pc=<アドレス>) Exception!! 62 | 2. エラー発生した箇所のアセンブラ命令コード(objdumpの結果) 63 |  例.arm-none-eabi-objdump -D asp | less 64 | 65 | objdumpの出力書式は以下の通りです. 66 | <アドレス>: <機械語(16進数) <アセンブラ命令> <オペランド> 67 | 68 | 実行すると,こんな感じで出力されますから,この内容をコピペ頂ければと. 69 | 70 | ``` 71 | 1800504c : 72 | 1800504c: e92d4800 push {fp, lr} 73 | 18005050: ed2d8b02 vpush {d8} 74 | 18005054: e28db00c add fp, sp, #12 75 | 18005058: e24dd010 sub sp, sp, #16 76 | 1800505c: e50b0018 str r0, [fp, #-24] ; 0xffffffe8 77 | ``` 78 | 79 | 80 | #クロスプラットフォーム化について 81 | athrillは元々,クロスプラットフォームを意識して設計・実装していました. 82 | POSIX系のAPIを積極的に利用して,移植しやすくしています. 83 | 84 | さらに,今回からコンパイラを gcc だけでなく,clang でもビルドできるようにしましたので,Mac との親和性よくなりました. 85 | 86 | gcc のビルドディレクトリ(Linux/WSL用): 87 |  https://github.com/tmori/athrill-target/tree/master/ARMv7-A/build_linux 88 | 89 | clang のビルドディレクトリ(Mac用): 90 |  https://github.com/tmori/athrill-target/tree/master/ARMv7-A/build_mac 91 | 92 | ちなみに,clangだと,[サニタイザ](https://clang.llvm.org/docs/AddressSanitizer.html)色々あるので,今後の athrillのバグとりに期待してたります. 93 | 94 | #DSLによる命令セットの記述について 95 | 職場のちょー優秀な同僚が,ARM命令セットをDSLで記述すると機械語をデコードするプログラムを自動生成してくれるツールを作ってくれました. 96 | 97 | 現在,athrillでの試作開発中なので,まだ一般公開されていませんが,ある程度実績できたら一般公開されるそうです. 98 | 99 | このツールのおかげで,私の命令セット実装コストは大幅に削減されました. 100 | 早ければ1命令,約10分で実装できます. 101 | 102 | DSL は,yaml でこんな感じで記述します(レジスタベースのADD命令です). 103 | 104 | ``` 105 | - name: arm_add_reg_a1 106 | format:xxxx:cond|00|0|0100|x:S|xxxx:Rn|xxxx:Rd|xxxxx:imm5|xx:type|0|xxxx:Rm 107 | unmatch_condition: > 108 | (cond == 0b1111) 109 | or ( (Rd == 0b1111) and (S == 1) ) 110 | or (Rn == 0b1101) 111 | ``` 112 | こんな感じで記述しておくと,自動的に機械語命令のパーサーができてしまうんですから,とても開発は楽チンです. 113 | 114 | 現時点の定義内容は以下で公開しています. 115 | 116 | https://github.com/tmori/athrill-target/blob/master/ARMv7-A/mcdecoder/instruction/arm.yaml 117 | 118 | 119 | #athrillのインストール方法 120 | ARM版athrillのインストール手順は以下の通りです. 
121 | 122 | * athrill のチェックアウト 123 | * athrill-target のチェックアウト 124 | * コンパイラのインストール 125 | * ビルド&インストール 126 | 127 | ##athrill のチェックアウト 128 | athrill は,設計上,CPUアーキに依存しない共通コードとCPU依存するコードを分離しています. 129 | 130 | 共通コードのチェックアウトは,以下の通りです. 131 | 132 | ``` 133 | $ git clone https://github.com/tmori/athrill.git 134 | ``` 135 | 136 | ##athrill-target のチェックアウト 137 | CPUアーキに依存するコードは,athrill-target側で管理しています. 138 | 今回のARM対応版は,ここにあります. 139 | 140 | ``` 141 | $ git clone https://github.com/tmori/athrill-target.git 142 | ``` 143 | 144 | なお,athrill と athrill-targetを以下のフォルダ構成にしてください. 145 | 146 | ``` 147 | . 148 | ├── athrill 149 | └── athrill-target 150 | ``` 151 | 152 | #コンパイラのインストール 153 | Linux の方は,gcc をインストールください. 154 | Windows の方は,WSL上で gcc をインストールください. 155 | Mac の方は,clang をインストールください. 156 | 157 | ##ビルド&インストール 158 | ビルド方法ですが,端末上でathrill-target/ARMv7-A に移動してください. 159 | 160 | Linux/Windowsの方は,さらに build_linux に移動してください. 161 | Macの方は,build_mac に移動してください. 162 | 163 | 移動終わったら,以下コマンド実行するだけです. 164 | 165 | ``` 166 | make clean;make 167 | ``` 168 | 169 | ビルド成功すると,athrill側の bin/linux 配下に athrill2 というバイナリが配置されるはずです. 170 | 171 | ``` 172 | $ ls -l ../../../athrill/bin/linux/athrill2 173 | -rwxr-xr-x 1 tmori staff 628260 2 2 16:40 ../../../athrill/bin/linux/athrill2 174 | ``` 175 | 176 | そして,このパスを .bashrcに登録してもらえれば,インストール終了です. 177 | 以下,設定例です. 178 | 179 | ``` 180 | export PATH=/athrill/bin/linux:${PATH} 181 | ``` 182 | 183 | #ARM開発環境について 184 | ここまでくれば,athrillは普通に使えるようになりますが,肝心のARM開発環境を準備しておく必要があります. 185 | 186 | 最低限必要なのはARM用のクロスコンパイラだけです. 187 | 188 | お使いの環境上で,arm-none-eabi-gcc をインストールしてください. 189 | Linux/WSL/Mac どこでも,ARMのgccコンパイラは容易にインストールできます. 190 | ※v850だとこれができないんだな〜(汗 191 | 192 | #リアルタイムOSを動かしてみる 193 | 今回,GR-PEACH用のTOPPERS RTOS(ASP) をARM対応版athrillで動作できるようにしました. 194 | 195 | サンプルコードは,以下で公開しています. 196 | 197 | https://github.com/tmori/athrill-sample/tree/master/os/asp_arm 198 | 199 | RTOSの実行デモは,以下でつぶやいてみました. 200 | 201 | https://twitter.com/i/status/1218447669929988096 202 | 203 | タスク起動確認までできています. 204 | 205 | #今後について 206 | 207 | ##2020/02/02時点 208 | RTOSはなんとなく動くようになったので,次は,ETロボコンシミュレータ用に浮動小数点演算命令の実装を粛々とやっています. 209 | 210 | とりあえず,四則演算はできるようになったので,EV3RTをARM用にビルドして,Unityと結合してみようと思っています. 211 | 212 | これができれば,ETロボコンシミュレータのARM対応ができるようになりますから,v850とはおさらばです.ETロボコン競技者の開発準備はとても楽になることでしょう.こうご期待ください. 213 | 214 | ##2020/03/01時点 215 | 運良くETロボコンシミュレータが動き出したので,お次はLinuxか?と思いましたが,ハードル高すぎなので,もう少しETロボコンシミュレータと遊ぼうと決めました. 216 | 217 | ETロボコンシミュレータ使っていただいている方々から沢山のコメントいただいているので,もっと機能拡張しないとなという使命感がふつふつと湧いてきました. 218 | 219 | やっぱり,あれですかね.コメント頂いている,制御用プログラムの変数の時系列情報を出力する機能かな? 220 | -------------------------------------------------------------------------------- /applaud_data/documents/document.list: -------------------------------------------------------------------------------- 1 | chap01 2 | chap02 -------------------------------------------------------------------------------- /applaud_data/documents_data/data_model/chap01.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "chap01", 3 | "contents": [ 4 | { 5 | "Purpose": "Understand the individuality of kanetugu2018", 6 | "Perspectives": "To understand kanetugu2018's contribution to the project and his unique characteristics by analyzing conversational style, use of language, and personal anecdotes", 7 | "Answer": "kanetugu2018's individuality is evident throughout the text. The use of personal anecdotes lends a sense of authenticity. 
We learn that kanetugu2018 was motivated by need and challenge when he took on a project to create a CPU emulator called 'athrill'. When facing a potentially debugging nightmare for a project involving TOPPERS OS and V850, kanetugu2018 demonstrated resourcefulness and determination by putting efforts into creating a working emulator, saving on cost and time in the process.\n\nHis conversational style is engaging, peppered with expressions of emotion, creating a story-like narrative. The use of humor and casual language (\"わくわく\" meaning 'exciting') reflects a down-to-earth and approachable personality, while professionally, his passion for problem-solving and innovation is clear. \n\nkanetugu2018's contribution to the project was significant, leading to the successful creation of 'athrill', along with the knowledge that a similar approach could be replicated should a similar situation arise. This adds meaningful value to the project, potentially saving cost and development time. This anecdote illustrates kanetugu2018's ability to adapt, innovate, and solve problems effectively under pressure, highlighting his individuality.\n\nPoint: 90", 8 | "Point": 90.0 9 | }, 10 | { 11 | "Purpose": "To understand conversation style and find personal traits", 12 | "Perspectives": "Focus on kanetugu2018's unique phrases and reactions", 13 | "Answer": "kanetugu2018's writing style is very much conversational and it seems that he is aware of how to engage his readers by using relatable content and experiences. His writing reflects how passionate he was about the project. There are several unique phrases and reactions that were noted.\n\n1. \"#きっかけ\" and \"#必死で調べました\" - He utilizes hashtags within the content to emphasize certain points or sections.\n\n2. His numerous uses of personal pronouns, such as \"僕\" and \"私\", instead of professional language, give his writing a very colloquial feel.\n\n3. The use of asterisks, quotation marks, special characters to convey emotion and emphasis, such as \"※組込み系の開発で実機レス開発環境がないと同じようなトラブルに見舞われると思われますので\" and \"『いっちょやってみるか!』.\n\n4. Usage of English phrases in the middle, like \"CPU emulator\" and \"athrill\".\n\n5. The use of onomatopoeic expressions like \"わくわく\" to describe his excitement.\n\nFurthermore, he writes about his thought process and details that he had to consider during the project, openly sharing his struggles, which adds a genuine touch to his content. It seems like he likes to tell a narrative when explaining technical concepts or processes.\n\nPoint: 93", 14 | "Point": 93.0 15 | } 16 | ] 17 | } -------------------------------------------------------------------------------- /applaud_data/documents_data/data_model/chap02.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "chap02", 3 | "contents": [ 4 | { 5 | "Purpose": "Find unique and unnoticed traits of Kanetugu2018", 6 | "Perspectives": "Scrutinizing narrative recounts, usage of specific phrases and emotional expressions", 7 | "Answer": "Kanetugu2018's article reveals his commitment to making significant contributions to specific technical communities, evidenced by the development of an ARM-based microcontroller simulator. He critically reflects on his past work, acknowledging its flaws and working on them for improvements, such as moving from the virtually unknown V850 to the commonly used ARM. His 'unnoticed' trait would be his tireless work on these technical improvements, despite them being used by a very niche audience. 
\n\nHe meticulously recounts his journey, using specific phrases such as 'athrill は,CPUアーキに依存しない共通コードとCPU依存するコードを分離しています.' His expressions show a strong emotional engagement with his work and a sense of duty towards his niche community, seen in phrases like, '地道にコツコツやっていればいつか実現できるはずですよ'.\n\nPoint: 85", 8 | "Point": 85.0 9 | } 10 | ] 11 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/critical_thinking.json: -------------------------------------------------------------------------------- 1 | { 2 | "Knowledges": [ 3 | { 4 | "Term": "kanetugu2018_personality", 5 | "Reason": "To write appreciating words for kanetugu2018, it's required to understand his personality traits by analysing his conversation logs." 6 | }, 7 | { 8 | "Term": "conversation_log_analysis", 9 | "Reason": "The source of information for understanding kanetugu2018's personality is his conversation logs, therefore, the skill to analyze these logs is prerequisite." 10 | }, 11 | { 12 | "Term": "appreciating_words", 13 | "Reason": "The objective of this task is to find the proper appreciating words for kanetugu2018 that can emotionally touch him." 14 | }, 15 | { 16 | "Term": "unnoticed_traits", 17 | "Reason": "The appreciation is supposed to be based on unnoticed traits of kanetugu2018, so understanding what kind of traits are generally overlooked is required." 18 | }, 19 | { 20 | "Term": "commont_knowledge", 21 | "Reason": "As the appreciation should neither be about common knowledge nor something that kanetugu2018 is already aware of, understanding these aspects is important to avoid them." 22 | } 23 | ] 24 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/memory.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ID": 1, 4 | "TargetDocID": "chap01", 5 | "Question": "Please obtain information based on the following purposes and perspectives.\nLet's think step by step.\n\npurpose: To understand conversation style and find personal traits\nperspectives: Focus on kanetugu2018's unique phrases and reactions\n", 6 | "Reply": "kanetugu2018's writing style is very much conversational and it seems that he is aware of how to engage his readers by using relatable content and experiences. His writing reflects how passionate he was about the project. There are several unique phrases and reactions that were noted.\n\n1. \"#きっかけ\" and \"#必死で調べました\" - He utilizes hashtags within the content to emphasize certain points or sections.\n\n2. His numerous uses of personal pronouns, such as \"僕\" and \"私\", instead of professional language, give his writing a very colloquial feel.\n\n3. The use of asterisks, quotation marks, special characters to convey emotion and emphasis, such as \"※組込み系の開発で実機レス開発環境がないと同じようなトラブルに見舞われると思われますので\" and \"『いっちょやってみるか!』.\n\n4. Usage of English phrases in the middle, like \"CPU emulator\" and \"athrill\".\n\n5. The use of onomatopoeic expressions like \"わくわく\" to describe his excitement.\n\nFurthermore, he writes about his thought process and details that he had to consider during the project, openly sharing his struggles, which adds a genuine touch to his content. 
It seems like he likes to tell a narrative when explaining technical concepts or processes.\n\nPoint: 93", 7 | "Point": 93.0 8 | }, 9 | { 10 | "ID": 2, 11 | "TargetDocID": "chap02", 12 | "Question": "Please obtain information based on the following purposes and perspectives.\nLet's think step by step.\n\npurpose: To find unnoticed traits and effective appreciating words\nperspectives: Identify overlooked traits and related conversations\n", 13 | "Reply": "The author of this text created Arm version microcontroller simulator by self. The microcontroller simulator is designed to work on different CPU architectures and it can be executed on Linux and Windows /docker. The microcontroller simulator he developed is called \"Athrill\". The simulator is designed to run on different CPUs and can be executed on both Linux and Windows/Docker. The author has a motivation to popularize the simulator. In addition, he has settled on several major goals such as developing an environment that could run on Mac/Linux/Windows and efficiently develop by instruction description in DSL from ARM version of athrill and so on. He has implemented about 80 instructions out of around 400 ARM instructions. \n\nPoint: 90\nThis text is focused on the process of the creation and development of ARM version microcontroller simulator which can help overlook unnoticed traits.", 14 | "Point": -1.0 15 | } 16 | ] -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": "Investigate conversation logs in the documents; Understand unnoticed traits, appreciating_words, and avoid common knowledge", 3 | "Plan": [ 4 | { 5 | "PlanID": 1, 6 | "DocumentID": "chap01", 7 | "Purpose": "To understand conversation style and find personal traits", 8 | "Perspectives": "Focus on kanetugu2018's unique phrases and reactions", 9 | "ResultID": "", 10 | "Status": "None" 11 | }, 12 | { 13 | "PlanID": 2, 14 | "DocumentID": "chap02", 15 | "Purpose": "To find unnoticed traits and effective appreciating words", 16 | "Perspectives": "Identify overlooked traits and related conversations", 17 | "ResultID": "", 18 | "Status": "None" 19 | } 20 | ] 21 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/plan_result.json: -------------------------------------------------------------------------------- 1 | { 2 | "DocumentIDs": { 3 | "chap01": [ 4 | "kanetugu2018's writing style is very much conversational and it seems that he is aware of how to engage his readers by using relatable content and experiences. His writing reflects how passionate he was about the project. There are several unique phrases and reactions that were noted.\n\n1. \"#きっかけ\" and \"#必死で調べました\" - He utilizes hashtags within the content to emphasize certain points or sections.\n\n2. His numerous uses of personal pronouns, such as \"僕\" and \"私\", instead of professional language, give his writing a very colloquial feel.\n\n3. The use of asterisks, quotation marks, special characters to convey emotion and emphasis, such as \"※組込み系の開発で実機レス開発環境がないと同じようなトラブルに見舞われると思われますので\" and \"『いっちょやってみるか!』.\n\n4. Usage of English phrases in the middle, like \"CPU emulator\" and \"athrill\".\n\n5. 
The use of onomatopoeic expressions like \"わくわく\" to describe his excitement.\n\nFurthermore, he writes about his thought process and details that he had to consider during the project, openly sharing his struggles, which adds a genuine touch to his content. It seems like he likes to tell a narrative when explaining technical concepts or processes.\n\nPoint: 93" 5 | ] 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/plan_result_org.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": "Investigate conversation logs in the documents; Understand unnoticed traits, appreciating_words, and avoid common knowledge", 3 | "Plan": [ 4 | { 5 | "DocumentID": "chap01", 6 | "Purpose": "To understand conversation style and find personal traits", 7 | "Perspectives": "Focus on kanetugu2018's unique phrases and reactions", 8 | "ResultID": { 9 | "Reply": "kanetugu2018's writing style is very much conversational and it seems that he is aware of how to engage his readers by using relatable content and experiences. His writing reflects how passionate he was about the project. There are several unique phrases and reactions that were noted.\n\n1. \"#きっかけ\" and \"#必死で調べました\" - He utilizes hashtags within the content to emphasize certain points or sections.\n\n2. His numerous uses of personal pronouns, such as \"僕\" and \"私\", instead of professional language, give his writing a very colloquial feel.\n\n3. The use of asterisks, quotation marks, special characters to convey emotion and emphasis, such as \"※組込み系の開発で実機レス開発環境がないと同じようなトラブルに見舞われると思われますので\" and \"『いっちょやってみるか!』.\n\n4. Usage of English phrases in the middle, like \"CPU emulator\" and \"athrill\".\n\n5. The use of onomatopoeic expressions like \"わくわく\" to describe his excitement.\n\nFurthermore, he writes about his thought process and details that he had to consider during the project, openly sharing his struggles, which adds a genuine touch to his content. It seems like he likes to tell a narrative when explaining technical concepts or processes.\n\nPoint: 93", 10 | "Point": 93.0 11 | } 12 | }, 13 | { 14 | "DocumentID": "chap02", 15 | "Purpose": "To find unnoticed traits and effective appreciating words", 16 | "Perspectives": "Identify overlooked traits and related conversations", 17 | "ResultID": { 18 | "Reply": "The author of this text created Arm version microcontroller simulator by self. The microcontroller simulator is designed to work on different CPU architectures and it can be executed on Linux and Windows /docker. The microcontroller simulator he developed is called \"Athrill\". The simulator is designed to run on different CPUs and can be executed on both Linux and Windows/Docker. The author has a motivation to popularize the simulator. In addition, he has settled on several major goals such as developing an environment that could run on Mac/Linux/Windows and efficiently develop by instruction description in DSL from ARM version of athrill and so on. He has implemented about 80 instructions out of around 400 ARM instructions. 
\n\nPoint: 90\nThis text is focused on the process of the creation and development of ARM version microcontroller simulator which can help overlook unnoticed traits.", 19 | "Point": -1.0 20 | } 21 | } 22 | ] 23 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/reflection.json: -------------------------------------------------------------------------------- 1 | { 2 | "Knowledges": { 3 | "kanetugu2018_personality": [ 4 | { 5 | "KnownInfo": "kanetugu2018's writing style is conversational and engaging. He uses unique phrases and reactions, emphasizing some points with hashtags. He often uses personal pronouns instead of professional phrases, adding a colloquial touch to his writings. He also uses specific special characters and English phrases to convey strong emotions. Another key aspect of his personality is his openness about his struggles and his ability to recount narratives.", 6 | "DocumentIDs": [ 7 | "chap01" 8 | ], 9 | "Point": 87 10 | } 11 | ], 12 | "conversation_log_analysis": [ 13 | { 14 | "KnownInfo": "Analysing conversation logs involves looking for unique phrases and reactions, tracking the usage of specific types of words (like personal pronouns, special characters and foreign phrases), and identifying open and genuine communication.", 15 | "DocumentIDs": [ 16 | "chap01" 17 | ], 18 | "Point": 90 19 | } 20 | ] 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/reflection_org.json: -------------------------------------------------------------------------------- 1 | { 2 | "Knowledges": [ 3 | { 4 | "Term": "kanetugu2018_personality", 5 | "Reason": "To write appreciating words for kanetugu2018, it's required to understand his personality traits by analysing his conversation logs.", 6 | "KnownInfos": [ 7 | { 8 | "KnownInfo": "kanetugu2018's writing style is conversational and engaging. He uses unique phrases and reactions, emphasizing some points with hashtags. He often uses personal pronouns instead of professional phrases, adding a colloquial touch to his writings. He also uses specific special characters and English phrases to convey strong emotions. Another key aspect of his personality is his openness about his struggles and his ability to recount narratives.", 9 | "Point": 87, 10 | "DocumentIDs": ["chap01"] 11 | } 12 | ], 13 | "UnknownInfo": ["Specific interactions from his conversation logs that highlight these traits would be useful to find the best way to appreciate kanetugu2018."], 14 | "Relations": [ 15 | { 16 | "Term": "conversation_log_analysis", 17 | "RelationReason": "This is the process through which kanetugu2018's personality traits can be understood." 
18 | } 19 | ] 20 | }, 21 | { 22 | "Term": "conversation_log_analysis", 23 | "Reason": "The source of information for understanding kanetugu2018's personality is his conversation logs, therefore, the skill to analyze these logs is prerequisite.", 24 | "KnownInfos": [ 25 | { 26 | "KnownInfo": "Analysing conversation logs involves looking for unique phrases and reactions, tracking the usage of specific types of words (like personal pronouns, special characters and foreign phrases), and identifying open and genuine communication.", 27 | "Point": 90, 28 | "DocumentIDs": ["chap01"] 29 | } 30 | ], 31 | "UnknownInfo": ["More information on the specific techniques or guidelines that can aid the analysis of conversation logs can be beneficial."], 32 | "Relations": [ 33 | { 34 | "Term": "kanetugu2018_personality", 35 | "RelationReason": "This analysis is used for understanding kanetugu2018's personality." 36 | } 37 | ] 38 | }, 39 | { 40 | "Term": "appreciating_words", 41 | "Reason": "The objective of this task is to find the proper appreciating words for kanetugu2018 that can emotionally touch him.", 42 | "KnownInfos": [], 43 | "UnknownInfo": ["Specific examples of powerful and touching appreciating words, consistent with the traits observed in kanetugu2018's personality.", "An understanding of what type of appreciation kanetugu2018 would most value based on his personality."], 44 | "Relations": [ 45 | { 46 | "Term": "unnoticed_traits", 47 | "RelationReason": "The appreciation should be based on unnoticed traits." 48 | } 49 | ] 50 | }, 51 | { 52 | "Term": "unnoticed_traits", 53 | "Reason": "The appreciation is supposed to be based on unnoticed traits of kanetugu2018, so understanding what kind of traits are generally overlooked is required.", 54 | "KnownInfos": [], 55 | "UnknownInfo": ["Specific unnoticed traits of kanetugu2018 that could be highlighted through appreciation."], 56 | "Relations": [ 57 | { 58 | "Term": "appreciating_words", 59 | "RelationReason": "These unnoticed traits will be the basis of the appreciating words." 
60 | } 61 | ] 62 | }, 63 | { 64 | "Term": "commont_knowledge", 65 | "Reason": "As the appreciation should neither be about common knowledge nor something that kanetugu2018 is already aware of, understanding these aspects is important to avoid them.", 66 | "KnownInfos": [], 67 | "UnknownInfo": ["More specific information about what constitutes common knowledge in the context of kanetugu2018's personality traits, and what he is already aware about himself."], 68 | "Relations": [] 69 | } 70 | ] 71 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/reply.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": "Investigate conversation logs in the documents; Understand unnoticed traits, appreciating_words, and avoid common knowledge", 3 | "Plan": [ 4 | { 5 | "DocumentID": "chap01", 6 | "Purpose": "To understand conversation style and find personal traits", 7 | "Perspectives": "Focus on kanetugu2018's unique phrases and reactions" 8 | }, 9 | { 10 | "DocumentID": "chap02", 11 | "Purpose": "To find unnoticed traits and effective appreciating words", 12 | "Perspectives": "Identify overlooked traits and related conversations" 13 | } 14 | ] 15 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/result.txt: -------------------------------------------------------------------------------- 1 | Answer: 「会話のログから、kanetugu2018 さんの個性を見てみると、多くの方が気付かないような素敵な特徴が見つけられます。#きっかけや #必死で調べました というハッシュタグの活用、話し言葉で親しみやすい文章を作るために「僕」や「私」を多用するスタイル、更には、特殊文字や英語表現を使いながら強い感情を表現するセンス。そして何より、ここには彼の開放的で真摯なコミュニケーションスタイルが感じられます。こういった点からも、彼がプロジェクトに情熱をもって取り組んでいることが伺えます。アストリスクや引用符等の記号を使った表現、そして「わくわく」という感情を具現化したオノマトペなど、kanetugu2018さんの個性が存分に活かされたコミュニケーションが見てとれます。(参照: chap01)」 2 | -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/result/updated_plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": "Investigate conversation logs in the documents; Understand unnoticed traits, appreciating_words, and avoid common knowledge", 3 | "Plan": [ 4 | { 5 | "PlanID": 1, 6 | "DocumentID": "chap01", 7 | "Purpose": "To understand conversation style and find personal traits", 8 | "Perspectives": "Focus on kanetugu2018's unique phrases and reactions", 9 | "ResultID": 1, 10 | "Status": "Done" 11 | }, 12 | { 13 | "PlanID": 2, 14 | "DocumentID": "chap02", 15 | "Purpose": "To find unnoticed traits and effective appreciating words", 16 | "Perspectives": "Identify overlooked traits and related conversations", 17 | "ResultID": 2, 18 | "Status": "Done" 19 | } 20 | ] 21 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_1/strategy_history.json: -------------------------------------------------------------------------------- 1 | { 2 | "Strategies": [ 3 | "Investigate conversation logs in the documents; Understand unnoticed traits, appreciating_words, and avoid common knowledge" 4 | ] 5 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/critical_thinking.json: -------------------------------------------------------------------------------- 1 | { 2 | "Knowledges": [ 3 | { 4 | "Term": "kanetugu2018_personality", 5 | "Reason": "To write appreciating words for kanetugu2018, it's required to understand his personality traits by 
analysing his conversation logs." 6 | }, 7 | { 8 | "Term": "conversation_log_analysis", 9 | "Reason": "The source of information for understanding kanetugu2018's personality is his conversation logs, therefore, the skill to analyze these logs is prerequisite." 10 | }, 11 | { 12 | "Term": "appreciating_words", 13 | "Reason": "The objective of this task is to find the proper appreciating words for kanetugu2018 that can emotionally touch him." 14 | }, 15 | { 16 | "Term": "unnoticed_traits", 17 | "Reason": "The appreciation is supposed to be based on unnoticed traits of kanetugu2018, so understanding what kind of traits are generally overlooked is required." 18 | }, 19 | { 20 | "Term": "commont_knowledge", 21 | "Reason": "As the appreciation should neither be about common knowledge nor something that kanetugu2018 is already aware of, understanding these aspects is important to avoid them." 22 | } 23 | ] 24 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/memory.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ID": 1, 4 | "TargetDocID": "chap01", 5 | "Question": "Please obtain information based on the following purposes and perspectives.\nLet's think step by step.\n\npurpose: Understand the individuality of kanetugu2018\nperspectives: To understand kanetugu2018's contribution to the project and his unique characteristics by analyzing conversational style, use of language, and personal anecdotes\n", 6 | "Reply": "kanetugu2018's individuality is evident throughout the text. The use of personal anecdotes lends a sense of authenticity. We learn that kanetugu2018 was motivated by need and challenge when he took on a project to create a CPU emulator called 'athrill'. When facing a potentially debugging nightmare for a project involving TOPPERS OS and V850, kanetugu2018 demonstrated resourcefulness and determination by putting efforts into creating a working emulator, saving on cost and time in the process.\n\nHis conversational style is engaging, peppered with expressions of emotion, creating a story-like narrative. The use of humor and casual language (\"わくわく\" meaning 'exciting') reflects a down-to-earth and approachable personality, while professionally, his passion for problem-solving and innovation is clear. \n\nkanetugu2018's contribution to the project was significant, leading to the successful creation of 'athrill', along with the knowledge that a similar approach could be replicated should a similar situation arise. This adds meaningful value to the project, potentially saving cost and development time. This anecdote illustrates kanetugu2018's ability to adapt, innovate, and solve problems effectively under pressure, highlighting his individuality.\n\nPoint: 90", 7 | "Point": 90.0 8 | }, 9 | { 10 | "ID": 2, 11 | "TargetDocID": "chap02", 12 | "Question": "Please obtain information based on the following purposes and perspectives.\nLet's think step by step.\n\npurpose: Find unique and unnoticed traits of Kanetugu2018\nperspectives: Scrutinizing narrative recounts, usage of specific phrases and emotional expressions\n", 13 | "Reply": "Kanetugu2018's article reveals his commitment to making significant contributions to specific technical communities, evidenced by the development of an ARM-based microcontroller simulator. 
He critically reflects on his past work, acknowledging its flaws and working on them for improvements, such as moving from the virtually unknown V850 to the commonly used ARM. His 'unnoticed' trait would be his tireless work on these technical improvements, despite them being used by a very niche audience. \n\nHe meticulously recounts his journey, using specific phrases such as 'athrill は,CPUアーキに依存しない共通コードとCPU依存するコードを分離しています.' His expressions show a strong emotional engagement with his work and a sense of duty towards his niche community, seen in phrases like, '地道にコツコツやっていればいつか実現できるはずですよ'.\n\nPoint: 85", 14 | "Point": 85.0 15 | } 16 | ] -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": "Investigate the profiles of development team members; Specifically focusing on unique characteristics and achievements.", 3 | "Plan": [ 4 | { 5 | "PlanID": 1, 6 | "DocumentID": "chap01", 7 | "Purpose": "Understand the individuality of kanetugu2018", 8 | "Perspectives": "To understand kanetugu2018's contribution to the project and his unique characteristics by analyzing conversational style, use of language, and personal anecdotes", 9 | "ResultID": "", 10 | "Status": "None" 11 | }, 12 | { 13 | "PlanID": 2, 14 | "DocumentID": "chap02", 15 | "Purpose": "Find unique and unnoticed traits of Kanetugu2018", 16 | "Perspectives": "Scrutinizing narrative recounts, usage of specific phrases and emotional expressions", 17 | "ResultID": "", 18 | "Status": "None" 19 | } 20 | ] 21 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/plan_result.json: -------------------------------------------------------------------------------- 1 | { 2 | "DocumentIDs": { 3 | "chap01": [ 4 | "kanetugu2018's individuality is evident throughout the text. The use of personal anecdotes lends a sense of authenticity. We learn that kanetugu2018 was motivated by need and challenge when he took on a project to create a CPU emulator called 'athrill'. When facing a potentially debugging nightmare for a project involving TOPPERS OS and V850, kanetugu2018 demonstrated resourcefulness and determination by putting efforts into creating a working emulator, saving on cost and time in the process.\n\nHis conversational style is engaging, peppered with expressions of emotion, creating a story-like narrative. The use of humor and casual language (\"わくわく\" meaning 'exciting') reflects a down-to-earth and approachable personality, while professionally, his passion for problem-solving and innovation is clear. \n\nkanetugu2018's contribution to the project was significant, leading to the successful creation of 'athrill', along with the knowledge that a similar approach could be replicated should a similar situation arise. This adds meaningful value to the project, potentially saving cost and development time. This anecdote illustrates kanetugu2018's ability to adapt, innovate, and solve problems effectively under pressure, highlighting his individuality.\n\nPoint: 90", 5 | "kanetugu2018's writing style is very much conversational and it seems that he is aware of how to engage his readers by using relatable content and experiences. His writing reflects how passionate he was about the project. There are several unique phrases and reactions that were noted.\n\n1. 
\"#きっかけ\" and \"#必死で調べました\" - He utilizes hashtags within the content to emphasize certain points or sections.\n\n2. His numerous uses of personal pronouns, such as \"僕\" and \"私\", instead of professional language, give his writing a very colloquial feel.\n\n3. The use of asterisks, quotation marks, special characters to convey emotion and emphasis, such as \"※組込み系の開発で実機レス開発環境がないと同じようなトラブルに見舞われると思われますので\" and \"『いっちょやってみるか!』.\n\n4. Usage of English phrases in the middle, like \"CPU emulator\" and \"athrill\".\n\n5. The use of onomatopoeic expressions like \"わくわく\" to describe his excitement.\n\nFurthermore, he writes about his thought process and details that he had to consider during the project, openly sharing his struggles, which adds a genuine touch to his content. It seems like he likes to tell a narrative when explaining technical concepts or processes.\n\nPoint: 93" 6 | ], 7 | "chap02": [ 8 | "Kanetugu2018's article reveals his commitment to making significant contributions to specific technical communities, evidenced by the development of an ARM-based microcontroller simulator. He critically reflects on his past work, acknowledging its flaws and working on them for improvements, such as moving from the virtually unknown V850 to the commonly used ARM. His 'unnoticed' trait would be his tireless work on these technical improvements, despite them being used by a very niche audience. \n\nHe meticulously recounts his journey, using specific phrases such as 'athrill は,CPUアーキに依存しない共通コードとCPU依存するコードを分離しています.' His expressions show a strong emotional engagement with his work and a sense of duty towards his niche community, seen in phrases like, '地道にコツコツやっていればいつか実現できるはずですよ'.\n\nPoint: 85" 9 | ] 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/plan_result_org.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": "Investigate the profiles of development team members; Specifically focusing on unique characteristics and achievements.", 3 | "Plan": [ 4 | { 5 | "DocumentID": "chap01", 6 | "Purpose": "Understand the individuality of kanetugu2018", 7 | "Perspectives": "To understand kanetugu2018's contribution to the project and his unique characteristics by analyzing conversational style, use of language, and personal anecdotes", 8 | "ResultID": { 9 | "Reply": "kanetugu2018's individuality is evident throughout the text. The use of personal anecdotes lends a sense of authenticity. We learn that kanetugu2018 was motivated by need and challenge when he took on a project to create a CPU emulator called 'athrill'. When facing a potentially debugging nightmare for a project involving TOPPERS OS and V850, kanetugu2018 demonstrated resourcefulness and determination by putting efforts into creating a working emulator, saving on cost and time in the process.\n\nHis conversational style is engaging, peppered with expressions of emotion, creating a story-like narrative. The use of humor and casual language (\"わくわく\" meaning 'exciting') reflects a down-to-earth and approachable personality, while professionally, his passion for problem-solving and innovation is clear. \n\nkanetugu2018's contribution to the project was significant, leading to the successful creation of 'athrill', along with the knowledge that a similar approach could be replicated should a similar situation arise. 
This adds meaningful value to the project, potentially saving cost and development time. This anecdote illustrates kanetugu2018's ability to adapt, innovate, and solve problems effectively under pressure, highlighting his individuality.\n\nPoint: 90", 10 | "Point": 90.0 11 | } 12 | }, 13 | { 14 | "DocumentID": "chap02", 15 | "Purpose": "Find unique and unnoticed traits of Kanetugu2018", 16 | "Perspectives": "Scrutinizing narrative recounts, usage of specific phrases and emotional expressions", 17 | "ResultID": { 18 | "Reply": "Kanetugu2018's article reveals his commitment to making significant contributions to specific technical communities, evidenced by the development of an ARM-based microcontroller simulator. He critically reflects on his past work, acknowledging its flaws and working on them for improvements, such as moving from the virtually unknown V850 to the commonly used ARM. His 'unnoticed' trait would be his tireless work on these technical improvements, despite them being used by a very niche audience. \n\nHe meticulously recounts his journey, using specific phrases such as 'athrill は,CPUアーキに依存しない共通コードとCPU依存するコードを分離しています.' His expressions show a strong emotional engagement with his work and a sense of duty towards his niche community, seen in phrases like, '地道にコツコツやっていればいつか実現できるはずですよ'.\n\nPoint: 85", 19 | "Point": 85.0 20 | } 21 | } 22 | ] 23 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/reflection.json: -------------------------------------------------------------------------------- 1 | { 2 | "Knowledges": { 3 | "Members": [ 4 | { 5 | "KnownInfo": "kanetugu2018's writing style is conversational and engaging. He uses unique phrases and reactions, emphasizing some points with hashtags. He often uses personal pronouns instead of professional phrases, adding a colloquial touch to his writings. He also uses specific special characters and English phrases to convey strong emotions. Another key aspect of his personality is his openness about his struggles and his ability to recount narratives.", 6 | "DocumentIDs": [ 7 | "chap01" 8 | ], 9 | "Point": 87 10 | }, 11 | { 12 | "KnownInfo": "kanetugu2018 was motivated by need and challenge when he took on a project to create a CPU emulator called 'athrill'. This demonstrates his resourcefulness, determination and problem-solving capabilities.", 13 | "DocumentIDs": [ 14 | "chap01" 15 | ], 16 | "Point": 90 17 | } 18 | ], 19 | "kanetugu2018_personality": [ 20 | { 21 | "KnownInfo": "kanetugu2018's writing style is conversational and engaging. He uses unique phrases and reactions, emphasizing some points with hashtags. He often uses personal pronouns instead of professional phrases, adding a colloquial touch to his writings. He also uses specific special characters and English phrases to convey strong emotions. 
Another key aspect of his personality is his openness about his struggles and his ability to recount narratives.", 22 | "DocumentIDs": [ 23 | "chap01" 24 | ], 25 | "Point": 87 26 | } 27 | ], 28 | "conversation_log_analysis": [ 29 | { 30 | "KnownInfo": "Analysing conversation logs involves looking for unique phrases and reactions, tracking the usage of specific types of words (like personal pronouns, special characters and foreign phrases), and identifying open and genuine communication.", 31 | "DocumentIDs": [ 32 | "chap01" 33 | ], 34 | "Point": 90 35 | } 36 | ], 37 | "Analysis_Skills": [ 38 | { 39 | "KnownInfo": "Analysing conversation logs involves looking for unique phrases and reactions, tracking the usage of specific types of words (like personal pronouns, special characters and foreign phrases), and identifying open and genuine communication.", 40 | "DocumentIDs": [ 41 | "chap01" 42 | ], 43 | "Point": 90 44 | } 45 | ] 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/reflection_org.json: -------------------------------------------------------------------------------- 1 | { 2 | "Knowledges": [ 3 | { 4 | "Term": "Members", 5 | "Reason": "The member 'kanetugu2018' is the subject of the adulation and praise. His personality, writing style, and actions are key for finding the unseen and unnoticed aspects.", 6 | "KnownInfos": [ 7 | { 8 | "KnownInfo": "kanetugu2018's writing style is conversational and engaging. He uses unique phrases and reactions, emphasizing some points with hashtags. He often uses personal pronouns instead of professional phrases, adding a colloquial touch to his writings. He also uses specific special characters and English phrases to convey strong emotions. Another key aspect of his personality is his openness about his struggles and his ability to recount narratives.", 9 | "Point": 87, 10 | "DocumentIDs": [ "chap01" ] 11 | }, 12 | { 13 | "KnownInfo": "kanetugu2018 was motivated by need and challenge when he took on a project to create a CPU emulator called 'athrill'. This demonstrates his resourcefulness, determination and problem-solving capabilities.", 14 | "Point": 90, 15 | "DocumentIDs": [ "chap01" ] 16 | } 17 | ], 18 | "UnknownInfo": [ "Not mentioned explicitly, but kanetugu2018 might have other unseen traits that reflect through his work or writings. Further exploration of his works and conversations would be required." ], 19 | "Relations": [] 20 | }, 21 | { 22 | "Term": "Analysis_Skills", 23 | "Reason": "The section requires an analysis of kanetugu2018's conversation logs or writings. Thus, skills to deduce personality traits and unique characteristics from the mentioned sources are needed.", 24 | "KnownInfos": [ 25 | { 26 | "KnownInfo": "Analysing conversation logs involves looking for unique phrases and reactions, tracking the usage of specific types of words (like personal pronouns, special characters and foreign phrases), and identifying open and genuine communication.", 27 | "Point": 90, 28 | "DocumentIDs": [ "chap01" ] 29 | } 30 | ], 31 | "UnknownInfo": [ "More specific techniques or methods to analyse kanetugu2018's conversations might be required to derive nuanced characteristics" ], 32 | "Relations": [ 33 | { 34 | "Term": "Members", 35 | "RelationReason": "The analysis skills are being applied to the writings and work of the member kanetugu2018." 
36 | } 37 | ] 38 | } 39 | ] 40 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/reply.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": "Investigate the profiles of development team members; Specifically focusing on unique characteristics and achievements.", 3 | "Plan": [ 4 | { 5 | "DocumentID": "chap01", 6 | "Purpose": "Understand the individuality of kanetugu2018", 7 | "Perspectives": "To understand kanetugu2018's contribution to the project and his unique characteristics by analyzing conversational style, use of language, and personal anecdotes" 8 | }, 9 | { 10 | "DocumentID": "chap02", 11 | "Purpose": "Find unique and unnoticed traits of Kanetugu2018", 12 | "Perspectives": "Scrutinizing narrative recounts, usage of specific phrases and emotional expressions" 13 | } 14 | ] 15 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/result.txt: -------------------------------------------------------------------------------- 1 | Answer: 「会話のログから、kanetugu2018 さんの個性を見てみると、多くの方が気付かないような素敵な特徴が見つけられます。その1つが、極めてリレータブルなコンテンツや経験を使用して読者を引きつける能力です。特に、ハッシュタグやアスタリスク、引用符といった特殊文字を使用して感情や強調を表現するその独特な書き方は、彼の個性を象徴するものと言えるでしょう(文書ID:chap01)。また、その挑戦心溢れる性格もまた見逃せない特徴で、プロジェクトの必要性や課題を解決するために、誰もが思いつかないような'athrill'というCPUエミュレータを作成するという挑戦を行っています(文書ID:chap01)。自身の苦戦や思考過程を率直に述べるその姿勢からは、kanetugu2018さんの人間味あふれる、一歩引いてシチュエーションを見つめ、それをストーリーに変えるナレーション力を感じます。そして何よりも、kanetugu2018さんの特徴的でありながらも多くの方に気付かれていない点として、技術的な改善やコミュニティへの貢献へ捧げるその極めて高いコミットメントを指摘したいです(文書ID:chap02)。「athrill は,CPUアーキに依存しない共通コードとCPU依存するコードを分離しています」、「地道にコツコツやっていればいつか実現できるはずですよ」といった表現から見えるその真摯な姿勢は、貴重な特性と言えるでしょう。kanetugu2018さん、あなたの深い熱意、挑戦心、そしてコミュニケーション能力、そして誰もが見えていない領域を見つめるあなたの視点は、我々すべてに大きなインスピレーションを与えてくれます。この貴重な特性を存分に発揮し、これからも多くの挑戦を続けてください。」 2 | -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/result/updated_plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": "Investigate the profiles of development team members; Specifically focusing on unique characteristics and achievements.", 3 | "Plan": [ 4 | { 5 | "PlanID": 1, 6 | "DocumentID": "chap01", 7 | "Purpose": "Understand the individuality of kanetugu2018", 8 | "Perspectives": "To understand kanetugu2018's contribution to the project and his unique characteristics by analyzing conversational style, use of language, and personal anecdotes", 9 | "ResultID": 1, 10 | "Status": "Done" 11 | }, 12 | { 13 | "PlanID": 2, 14 | "DocumentID": "chap02", 15 | "Purpose": "Find unique and unnoticed traits of Kanetugu2018", 16 | "Perspectives": "Scrutinizing narrative recounts, usage of specific phrases and emotional expressions", 17 | "ResultID": 2, 18 | "Status": "Done" 19 | } 20 | ] 21 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/q_2/strategy_history.json: -------------------------------------------------------------------------------- 1 | { 2 | "Strategies": [ 3 | "Investigate conversation logs in the documents; Understand unnoticed traits, appreciating_words, and avoid common knowledge", 4 | "Investigate the profiles of development team members; Specifically focusing on unique characteristics and achievements." 
5 | ] 6 | } -------------------------------------------------------------------------------- /applaud_data/query_dir/query.txt: -------------------------------------------------------------------------------- 1 | この会話ログから、 さんの良い個性について、 2 | 具体的な行動や会話内容を引用して、 さんが感動するような 3 | 誉め言葉を検討してください。 4 | 5 | その際、以下の点を考慮してください。 6 | 7 | ・なにもなしで、素晴らしいとか言われても感動しません。 8 | ・誰でも知っていること、その人が知っていることを褒められてもうれしくないです。 9 | ・誰も気づいていない、その人も気づいていない、そこを褒めることが大切です。 10 | 11 | また、出だしは、以下の文言で始めてください。 12 | 13 | 「会話のログから、 さんの個性を見てみると、多くの方が気付かないような素敵な特徴が見つけられます。」 14 | -------------------------------------------------------------------------------- /applaud_data/reflections_data/data_model/Analysis_Skills.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Analysis_Skills", 3 | "contents": { 4 | "Term": "Analysis_Skills", 5 | "KnownInfos": [ 6 | { 7 | "KnownInfo": "Analysing conversation logs involves looking for unique phrases and reactions, tracking the usage of specific types of words (like personal pronouns, special characters and foreign phrases), and identifying open and genuine communication.", 8 | "DocumentIDs": [ 9 | "chap01" 10 | ], 11 | "Point": 90 12 | } 13 | ], 14 | "UnknownInfo": [ 15 | "More specific techniques or methods to analyse kanetugu2018's conversations might be required to derive nuanced characteristics" 16 | ], 17 | "Relations": [ 18 | { 19 | "Term": "Members", 20 | "RelationReason": "The analysis skills are being applied to the writings and work of the member kanetugu2018." 21 | } 22 | ] 23 | } 24 | } -------------------------------------------------------------------------------- /applaud_data/reflections_data/data_model/Members.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Members", 3 | "contents": { 4 | "Term": "Members", 5 | "KnownInfos": [ 6 | { 7 | "KnownInfo": "kanetugu2018's writing style is conversational and engaging. He uses unique phrases and reactions, emphasizing some points with hashtags. He often uses personal pronouns instead of professional phrases, adding a colloquial touch to his writings. He also uses specific special characters and English phrases to convey strong emotions. Another key aspect of his personality is his openness about his struggles and his ability to recount narratives.", 8 | "DocumentIDs": [ 9 | "chap01" 10 | ], 11 | "Point": 87 12 | }, 13 | { 14 | "KnownInfo": "kanetugu2018 was motivated by need and challenge when he took on a project to create a CPU emulator called 'athrill'. This demonstrates his resourcefulness, determination and problem-solving capabilities.", 15 | "DocumentIDs": [ 16 | "chap01" 17 | ], 18 | "Point": 90 19 | } 20 | ], 21 | "UnknownInfo": [ 22 | "Not mentioned explicitly, but kanetugu2018 might have other unseen traits that reflect through his work or writings. Further exploration of his works and conversations would be required." 
23 | ], 24 | "Relations": [] 25 | } 26 | } -------------------------------------------------------------------------------- /applaud_data/reflections_data/data_model/appreciating_words.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "appreciating_words", 3 | "contents": { 4 | "Term": "appreciating_words", 5 | "KnownInfos": [], 6 | "UnknownInfo": [ 7 | "Specific examples of powerful and touching appreciating words, consistent with the traits observed in kanetugu2018's personality.", 8 | "An understanding of what type of appreciation kanetugu2018 would most value based on his personality." 9 | ], 10 | "Relations": [ 11 | { 12 | "Term": "unnoticed_traits", 13 | "RelationReason": "The appreciation should be based on unnoticed traits." 14 | } 15 | ] 16 | } 17 | } -------------------------------------------------------------------------------- /applaud_data/reflections_data/data_model/commont_knowledge.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "commont_knowledge", 3 | "contents": { 4 | "Term": "commont_knowledge", 5 | "KnownInfos": [], 6 | "UnknownInfo": [ 7 | "More specific information about what constitutes common knowledge in the context of kanetugu2018's personality traits, and what he is already aware about himself." 8 | ], 9 | "Relations": [] 10 | } 11 | } -------------------------------------------------------------------------------- /applaud_data/reflections_data/data_model/conversation_log_analysis.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "conversation_log_analysis", 3 | "contents": { 4 | "Term": "conversation_log_analysis", 5 | "KnownInfos": [ 6 | { 7 | "KnownInfo": "Analysing conversation logs involves looking for unique phrases and reactions, tracking the usage of specific types of words (like personal pronouns, special characters and foreign phrases), and identifying open and genuine communication.", 8 | "DocumentIDs": [ 9 | "chap01" 10 | ], 11 | "Point": 90 12 | } 13 | ], 14 | "UnknownInfo": [ 15 | "More information on the specific techniques or guidelines that can aid the analysis of conversation logs can be beneficial." 16 | ], 17 | "Relations": [ 18 | { 19 | "Term": "kanetugu2018_personality", 20 | "RelationReason": "This analysis is used for understanding kanetugu2018's personality." 21 | } 22 | ] 23 | } 24 | } -------------------------------------------------------------------------------- /applaud_data/reflections_data/data_model/kanetugu2018_personality.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kanetugu2018_personality", 3 | "contents": { 4 | "Term": "kanetugu2018_personality", 5 | "KnownInfos": [ 6 | { 7 | "KnownInfo": "kanetugu2018's writing style is conversational and engaging. He uses unique phrases and reactions, emphasizing some points with hashtags. He often uses personal pronouns instead of professional phrases, adding a colloquial touch to his writings. He also uses specific special characters and English phrases to convey strong emotions. Another key aspect of his personality is his openness about his struggles and his ability to recount narratives.", 8 | "DocumentIDs": [ 9 | "chap01" 10 | ], 11 | "Point": 87 12 | } 13 | ], 14 | "UnknownInfo": [ 15 | "Specific interactions from his conversation logs that highlight these traits would be useful to find the best way to appreciate kanetugu2018." 
16 | ], 17 | "Relations": [ 18 | { 19 | "Term": "conversation_log_analysis", 20 | "RelationReason": "This is the process through which kanetugu2018's personality traits can be understood." 21 | } 22 | ] 23 | } 24 | } -------------------------------------------------------------------------------- /applaud_data/reflections_data/data_model/kanetugu2018_unnoticed_characteristics.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kanetugu2018_unnoticed_characteristics", 3 | "contents": { 4 | "Term": "kanetugu2018_unnoticed_characteristics", 5 | "KnownInfos": [], 6 | "UnknownInfo": [ 7 | "Specific instances in the conversation logs that display unique characteristics or actions that have been overlooked. What unique word choices or phrases does he use? Are there hidden messages or meanings in his use of certain characters or English phrases? Does he display any unique reactions or emotions that have been unnoticed?" 8 | ], 9 | "Relations": [ 10 | { 11 | "Term": "kanetugu2018_personality", 12 | "RelationReason": "Understanding kanetugu2018's unnoticed characteristics can provide deeper insights into his personality." 13 | }, 14 | { 15 | "Term": "conversation_log_analysis", 16 | "RelationReason": "Conversation log analysis can reveal kanetugu2018's unnoticed characteristics." 17 | } 18 | ] 19 | } 20 | } -------------------------------------------------------------------------------- /applaud_data/reflections_data/data_model/unnoticed_traits.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Unnoticed_Traits", 3 | "contents": { 4 | "Term": "Unnoticed_Traits", 5 | "KnownInfos": [], 6 | "UnknownInfo": [ 7 | "The subtle characteristics and distinct elements that manifest through kanetugu2018's conversation or works, which haven't been openly noticed or acknowledged. Exploration of his logs and analyses using methods that capture unobservable traits would be required." 8 | ], 9 | "Relations": [ 10 | { 11 | "Term": "Analysis_Skills", 12 | "RelationReason": "The unobserved traits of kanetugu2018 can be derived by using advanced analysis skills to study his conversation logs and works." 13 | }, 14 | { 15 | "Term": "Members", 16 | "RelationReason": "The unnoticed traits are of the member kanetugu2018, and thus are relationally dependent on his known and unknown qualities." 17 | }, 18 | { 19 | "Term": "appreciating_words", 20 | "RelationReason": "These unnoticed traits will be the basis of the appreciating words." 21 | } 22 | ] 23 | } 24 | } -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_critical_thinking.txt: -------------------------------------------------------------------------------- 1 | For this MainQuestion:"{MainQuestion}": 2 | 3 | Please extract the knowledge, concepts, and related members that are prerequisites for this MainQuestion, and also provide reasons for the extracted information. 4 | 5 | The result should be outputted in the following JSON format. 6 | Term items must follow the Unix filename syntax. 7 | 8 | Let's think step by step. 
9 | {{ 10 | "Knowledges": [ 11 | {{ 12 | "Term": "", 13 | "Reason": "" 14 | }}, 15 | : 16 | : 17 | ] 18 | }} 19 | 20 | -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_evaluate.txt: -------------------------------------------------------------------------------- 1 | Main Question: {MainQuestion} 2 | Mission: {Mission} 3 | 4 | The extracted information necessary to answer the Main Question and Mission is as follows: 5 | 6 | Input: 7 | PastStrategies: {PastStrategies} 8 | PlanExecutedResults: 9 | {PlanExecutedResults} 10 | Reflection: 11 | {Reflection} 12 | 13 | Based on the information provided, please answer the Main Question in Japanese and reference 14 | the source document by citing its ID in the following format. 15 | Let's think step by step. 16 | 17 | Result: 18 | Answer: 19 | However, if there is insufficient information to provide an answer, please set Answer as "Unknown". 20 | -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_mission.txt: -------------------------------------------------------------------------------- 1 | Please understand the contribution points of the development members to the project based on the given information. 2 | -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_query.txt: -------------------------------------------------------------------------------- 1 | Question: {sub_question} 2 | Also, assess the appropriateness of the answer to the question by assigning a score between 0 and 100 in the following format (higher values indicate a more appropriate answer). 3 | Point: 4 | 5 | Let's think step by step. 6 | -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_query_plan.txt: -------------------------------------------------------------------------------- 1 | Please generate JSON data in the following format. 2 | This data will be used to extract the information necessary to answer the main question from the mission, strategy, and known knowledge. 3 | 4 | Please keep each line within 100 characters. 5 | 6 | {{ 7 | "DetailedStrategy": "" 8 | "Plan": [ 9 | {{ 10 | "DocumentID": "" 11 | "Purpose": "" 12 | "Perspectives": "" 13 | }}, 14 | : 15 | : 16 | ] 17 | }} 18 | 19 | 以下は、計画を作成する際に注意すべきポイントです。 20 | 21 | ・複数の目的や視点で1つのドキュメントに対して調査を行います。 22 | ・特定の目的や視点で複数のドキュメントを相互参照して調査を行います。 23 | ・1つのドキュメントから回答が得られない場合は、同じ目的や視点で別のドキュメントで調査を行います。 24 | ・これらのポイントを考慮しながら計画を作成してください。 25 | 26 | 27 | The input information is as follows. 28 | Input: 29 | Main Question: 開発チームメンバのプロフィールです。"{MainQuestion}" 30 | Mission: {Mission} 31 | Strategy: {Strategy} 32 | Document List: {DocumentList} 33 | PastStrategies: {PastStrategies} 34 | AcquiredKnowledges: {AcquiredKnowledges} 35 | 36 | Let's think step by step. 37 | -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_reflection.txt: -------------------------------------------------------------------------------- 1 | MainQuestion:"{MainQuestion}": 2 | 3 | For this MainQuestion, please extract the prerequisite knowledge and concepts, and related members, etc.
4 | 5 | The previous knowledge of the member required to applaud has been listed as follows: 6 | {KnowledgesNeeds} 7 | 8 | And the background information is as follows: 9 | BackgroundKnowledges: {BackgroundKnowledges} 10 | 11 | To gather information related to the above knowledges, inquiries were made to existing documents, and the results are as follows: 12 | {PlanResult} 13 | 14 | items must follow the Unix filename syntax. 15 | Show the importance of KnownInfo by assigning a score between 0 and 100 to Point (higher values indicate a more appropriate answer). 16 | Fill in the UnknownInfo with the information that is not yet fully understood. 17 | Enumerate the relevant in DocumentIDs. 18 | Enumerate any relationships with other knowledge or members under Relations, along with the reasons for the relationships. 19 | 20 | Let's think step by step. 21 | 22 | {{ 23 | "Knowledges": [ 24 | {{ 25 | "Term": "", 26 | "Reason": "", 27 | "KnownInfos": [ 28 | {{ 29 | "KnownInfo": "", 30 | "Point": "(previous one)", 31 | "DocumentIDs": [ "(previous one)", ... ] 32 | }}, {{ 33 | "KnownInfo": "(acquired one)", 34 | "Point": "(acquired one)", 35 | "DocumentIDs": [ "(acquired one)", ... ] 36 | }}, 37 | : 38 | : 39 | ], 40 | "UnknownInfo": [ "", ... ], 41 | "Relations": [ 42 | {{ 43 | "Term": "", 44 | "RelationReason": "", 45 | }}, 46 | : 47 | ] 48 | }}, 49 | : 50 | : 51 | ] 52 | }} 53 | 54 | -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_reflection_addterms.txt: -------------------------------------------------------------------------------- 1 | "{MainQuestion}": 2 | 3 | To applaud the member, the previous knowledge required from the member is listed as follows: 4 | {KnowledgesNeeds} 5 | 6 | Using the and derived from the obtained information, please add a new . 7 | Let's think step by step. 8 | 9 | {{ 10 | "Knowledges": [ 11 | {{ 12 | "Term": "", 13 | "Reason": "", 14 | "KnownInfos": [], 15 | "UnknownInfo": [ "", ... ], 16 | "Relations": [] 17 | }}, 18 | : 19 | : 20 | ] 21 | }} 22 | 23 | -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_strategy.txt: -------------------------------------------------------------------------------- 1 | Background Information: 2 | The DocumentList enumerates the titles of investigatable documents. 3 | Past Investigation Status: 4 | PastStrategies lists the previously investigated DetailedStrategies. 5 | Investigation Approach: 6 | For the member's praise in response to the MainQuestion, please list the prerequisite knowledge and related members, and consider which documents from the DocumentList should be investigated to understand that knowledge and members. 7 | 8 | Based on that, create a plan by treating DetailedStrategy as the investigation approach, determining investigation perspectives for each relevant document, and forming a Plan. 9 | -------------------------------------------------------------------------------- /applaud_prompt/ptemplate_subq_detail.txt: -------------------------------------------------------------------------------- 1 | Please obtain information based on the following purposes and perspectives. 2 | Let's think step by step. 
3 | 4 | purpose: {purpose} 5 | perspectives: {perspectives} 6 | -------------------------------------------------------------------------------- /check_recover_json.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from question import get_response 5 | from json_utils import parse_json 6 | import json 7 | import traceback 8 | 9 | def check_json_str(json_data: str): 10 | try: 11 | _ = json.loads(json_data) 12 | return (True, "OK") 13 | except json.JSONDecodeError as e: 14 | traceback_str = traceback.format_exc() 15 | error_message = f"ERROR: {str(e)}" 16 | print(traceback_str + error_message) 17 | return (False, error_message) 18 | 19 | def check_json(filepath: str): 20 | try: 21 | with open(filepath, "r") as file: 22 | json_data = json.load(file) 23 | return (True, "OK") 24 | except json.JSONDecodeError as e: 25 | traceback_str = traceback.format_exc() 26 | error_message = f"ERROR: {str(e)}" 27 | print(traceback_str + error_message) 28 | return (False, error_message) 29 | 30 | def recover_json_str(errcode, data: str): 31 | res = get_response(f"{errcode}\nPlease fix this json data:\n {data}") 32 | json_data = parse_json(res) 33 | return json_data 34 | 35 | def recover_json(errcode, filepath: str): 36 | with open(filepath, "r") as file: 37 | data = file.read() 38 | json_data = recover_json_str(errcode, data) 39 | result, _ = check_json_str(json_data) 40 | return (result, json_data) 41 | 42 | 43 | 44 | if __name__ == "__main__": 45 | import sys 46 | if len(sys.argv) != 2: 47 | print("Usage: ") 48 | sys.exit(1) 49 | filepath = sys.argv[1] 50 | count = 1 51 | while True: 52 | result, errcode = check_json(filepath) 53 | if result == False: 54 | result, json_data = recover_json(errcode, filepath) 55 | if result == True: 56 | with open(filepath, "w") as file: 57 | file.write(json_data) 58 | print("INFO: RECOVERED JSON DATA") 59 | break 60 | elif count <= 5: 61 | print("ERROR: RCOVERING JSON DATA: RETRY_COUNT=", count) 62 | count += 1 63 | else: 64 | print(json_data) 65 | print("ERROR: can not recover json data...") 66 | sys.exit(1) 67 | else: 68 | break 69 | print("OK") 70 | sys.exit(0) 71 | 72 | -------------------------------------------------------------------------------- /critical_thinking.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from question import get_response 5 | from prompt_template import PromptTemplate 6 | import json 7 | import traceback 8 | 9 | class CriticalThinking: 10 | def __init__(self, main_question: str, prompt_template_path: str, background_knowledge_path: str): 11 | with open(background_knowledge_path, 'r') as file: 12 | self.background_knowledge = file.read() 13 | prompt_template = PromptTemplate(prompt_template_path) 14 | self.query = prompt_template.get_prompt(MainQuestion=main_question, BackgroundKnowledges = self.background_knowledge) 15 | 16 | def create(self): 17 | print(self.query) 18 | try: 19 | self.reply_raw = get_response(self.query) 20 | except Exception as e: 21 | traceback_str = traceback.format_exc() 22 | error_message = f"ERROR: {str(e)}" 23 | print(traceback_str + error_message) 24 | sys.exit(1) 25 | print(self.reply_raw) 26 | 27 | def save_to_raw(self, file_path): 28 | with open(file_path, 'w') as file: 29 | file.write(self.reply_raw) 30 | 31 | def save_to_json(self, file_path): 32 | with open(file_path, 'w') as file: 33 | json.dump(json.loads(self.reply_raw), 
file, indent=4, ensure_ascii=False) 34 | 35 | 36 | if __name__ == "__main__": 37 | import sys 38 | from params import get_param 39 | prompt_template_path = get_param("prompt_templates_path") 40 | 41 | if len(sys.argv) != 3: 42 | print("Usage: ") 43 | sys.exit(1) 44 | main_question = sys.argv[1] 45 | background_knowledge_path = sys.argv[2] 46 | think = CriticalThinking(main_question, prompt_template_path + "/ptemplate_critical_thinking.txt", background_knowledge_path) 47 | think.create() 48 | think.save_to_raw("test/result/critical_thinking.json") 49 | -------------------------------------------------------------------------------- /data_model/data_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | import json 6 | 7 | class DataModel: 8 | def __init__(self, name: str, contents): 9 | self._name = name 10 | self._contents = contents 11 | self._concrete_model = None 12 | 13 | def set_concrete_model(self, concrete_model): 14 | self._concrete_model = concrete_model 15 | 16 | def merge(self, old_model): 17 | self._concrete_model.merge(old_model) 18 | self._contents = self._concrete_model.get_contents() 19 | 20 | def get_name(self) -> str: 21 | return self._name 22 | 23 | def get_contents(self): 24 | return self._contents 25 | 26 | def set_content(self, content): 27 | self._contents = content 28 | 29 | def get_json_data(self): 30 | return { 31 | "name": self._name, 32 | "contents": self._contents 33 | } 34 | def is_empty_content(self): 35 | if self._contents == None: 36 | return True 37 | return self._concrete_model.is_empty_content() 38 | 39 | @staticmethod 40 | def load_json_file(filepath: str): 41 | if os.path.exists(filepath): 42 | #print("filepath=", filepath) 43 | with open(filepath, "r") as file: 44 | data = json.load(file) 45 | model = DataModel(data.get("name"), None) 46 | model._contents = data.get("contents") 47 | return model 48 | else: 49 | return None 50 | -------------------------------------------------------------------------------- /data_model/data_model_accessor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | 6 | from data_model import DataModel 7 | from data_model_storage import DataModelStorage 8 | 9 | class DataModelAccessor: 10 | def __init__(self, dir: str): 11 | self.directory = dir 12 | if not os.path.exists(self.directory): 13 | os.makedirs(self.directory) 14 | self.datamodel_path = os.path.join(self.directory, "data_model") 15 | self.data_model_storage = DataModelStorage(self.datamodel_path) 16 | self._load_file_cache() 17 | 18 | def get_data_model_filepath(self, name: str): 19 | return os.path.join(self.datamodel_path, name + ".json") 20 | 21 | def get_filelist(self): 22 | return self.file_cache 23 | 24 | def _load_file_cache(self): 25 | self.file_cache = [] 26 | for file_name in os.listdir(self.datamodel_path): 27 | if file_name.endswith(".json"): 28 | name = file_name[:-5] # .json 拡張子を除去 29 | self.file_cache.append(name) 30 | 31 | def remove_models(self, model_names: list): 32 | for model_name in model_names: 33 | self.data_model_storage.remove_data_model(model_name) 34 | self._load_file_cache() 35 | 36 | def add_data_model(self, name: str, contents: str): 37 | data_model = DataModel(name, contents) 38 | self.data_model_storage.save_data_model(data_model) 39 | 40 | if name not in self.file_cache: 41 | self.file_cache.append(name) 42 | 43 | def 
add_data_model(self, model: DataModel): 44 | self.data_model_storage.save_data_model(model) 45 | 46 | if model.get_name() not in self.file_cache: 47 | self.file_cache.append(model.get_name()) 48 | 49 | 50 | def get_data_model(self, name: str) -> DataModel: 51 | if name in self.file_cache: 52 | return self.data_model_storage.load_data_model(name) 53 | else: 54 | return None 55 | 56 | def get_json_models(self, filelist: list): 57 | models = [] 58 | for entry in filelist: 59 | models.append(self.get_data_model(entry).get_json_data()) 60 | return models 61 | 62 | def get_data_models(self, filelist: list): 63 | models = [] 64 | for entry in filelist: 65 | models.append(self.get_data_model(entry)) 66 | return models 67 | 68 | if __name__ == "__main__": 69 | import sys 70 | if len(sys.argv) != 2: 71 | print("Usage: ") 72 | sys.exit(1) 73 | dir = sys.argv[1] 74 | accessor = DataModelAccessor(dir) 75 | #accessor.add_data_model("term1", "info1") 76 | #accessor.add_data_model("term1", "info2") 77 | #accessor.add_data_model("term2", "info1") 78 | 79 | model = accessor.get_data_model("term3") 80 | if model is not None: 81 | print(model.get_json_data()) 82 | else: 83 | print("Not Found") 84 | -------------------------------------------------------------------------------- /data_model/data_model_storage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | import os 4 | import json 5 | 6 | from data_model import DataModel 7 | 8 | class DataModelStorage: 9 | def __init__(self, directory: str): 10 | self.directory = directory 11 | 12 | if not os.path.exists(directory): 13 | os.makedirs(directory) 14 | 15 | def save_data_model(self, new_model: DataModel, merge: bool = True): 16 | filename = f"{new_model.get_name()}.json" 17 | filepath = os.path.join(self.directory, filename) 18 | 19 | old_model = DataModel.load_json_file(filepath) 20 | if merge and old_model is not None: 21 | new_model.merge(old_model) 22 | 23 | with open(filepath, "w", encoding="utf-8") as f: 24 | json.dump(new_model.get_json_data(), f, indent=4, ensure_ascii=False) 25 | 26 | def load_data_model(self, name: str) -> DataModel: 27 | filename = f"{name}.json" 28 | filepath = os.path.join(self.directory, filename) 29 | return DataModel.load_json_file(filepath) 30 | 31 | def remove_data_model(self, name: str): 32 | filename = f"{name}.json" 33 | filepath = os.path.join(self.directory, filename) 34 | os.remove(filepath) 35 | 36 | if __name__ == "__main__": 37 | import sys 38 | if len(sys.argv) != 4: 39 | print("Usage: ") 40 | sys.exit(1) 41 | dir = sys.argv[1] 42 | storage = DataModelStorage(dir) 43 | model = DataModel(sys.argv[2], sys.argv[3]) 44 | storage.save_data_model(model) 45 | -------------------------------------------------------------------------------- /data_model/document_contents_similarity_merge.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | import sys 4 | from question import get_response 5 | from json_utils import parse_json 6 | from check_recover_json import check_json_str, recover_json_str 7 | from document_data_model import DocumentDataModel 8 | 9 | def merge_answers_str(data: str): 10 | res = get_response(f"For the given json data, compare each Answer, and if it matches the following conditions, merge them, otherwise output as is.:\n 1. The content of the Answer is the same meaning. 
Let's think step by step.:\n {data}") 11 | json_data = parse_json(res) 12 | return json_data 13 | 14 | def merge_answers_json(filepath: str): 15 | with open(filepath, "r") as file: 16 | data = file.read() 17 | json_data = merge_answers_str(data) 18 | return json_data 19 | 20 | def merge_and_save_answers_json(filepath: str): 21 | model = DocumentDataModel.load_json_file(filepath) 22 | if model.get_conents_num() < 2: 23 | print(f"INFO: SKIP MERGING MODEL: {filepath} info_num={model.get_conents_num()}") 24 | return 25 | print("INFO: MERGING MODEL: ", filepath) 26 | json_data = merge_answers_json(filepath) 27 | result, errcode = check_json_str(json_data) 28 | if result == False: 29 | json_data = recover_json_str(errcode, json_data) 30 | result, _ = check_json_str(json_data) 31 | if (result == False): 32 | print("ERROR: can not recover json_data...") 33 | return False 34 | #print(json_data) 35 | with open(filepath, "w") as file: 36 | file.write(json_data) 37 | return True 38 | 39 | 40 | 41 | if __name__ == "__main__": 42 | import sys 43 | if len(sys.argv) != 2: 44 | print("Usage: ") 45 | sys.exit(1) 46 | filepath = sys.argv[1] 47 | ret = merge_and_save_answers_json(filepath) 48 | if ret == False: 49 | sys.exit(1) 50 | sys.exit(0) 51 | -------------------------------------------------------------------------------- /data_model/document_data_cleaner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import json 6 | import traceback 7 | from data_model_accessor import DataModelAccessor 8 | from document_data_model import DocumentDataModel 9 | from document_contents_similarity_merge import merge_and_save_answers_json 10 | 11 | class DocumentDataCleaner: 12 | def __init__(self, accessor: DataModelAccessor): 13 | self.accessor = accessor 14 | 15 | def clean_empty_data_models(self): 16 | clean_names = [] 17 | for name in self.accessor.get_filelist(): 18 | filepath = self.accessor.get_data_model_filepath(name) 19 | model = DocumentDataModel.load_json_file(filepath) 20 | data_model = model.get_model() 21 | if data_model.is_empty_content() == True: 22 | print(f"INFO: REMOVING EMPTY MODEL({data_model.get_name()})") 23 | clean_names.append(name) 24 | self.accessor.remove_models(clean_names) 25 | 26 | def merge_same_data_models(self): 27 | for name in self.accessor.get_filelist(): 28 | print("INFO: name=", name) 29 | filepath = self.accessor.get_data_model_filepath(name) 30 | ret = merge_and_save_answers_json(filepath) 31 | if ret == False: 32 | print("INFO: skip merge...error") 33 | #sys.exit(1) 34 | 35 | 36 | if __name__ == "__main__": 37 | if len(sys.argv) != 2: 38 | print("Usage: ") 39 | sys.exit(1) 40 | dir = sys.argv[1] 41 | accessor = DataModelAccessor(dir) 42 | 43 | cleaner = DocumentDataCleaner(accessor) 44 | cleaner.clean_empty_data_models() 45 | print("INFO: MERGING REFLECTIONS") 46 | cleaner.merge_same_data_models() 47 | -------------------------------------------------------------------------------- /data_model/document_data_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import json 6 | 7 | from data_model import DataModel 8 | 9 | class DocumentDataModel: 10 | def __init__(self, title: str): 11 | self.title = title 12 | self.results = [] 13 | 14 | def get_title(self): 15 | return self.title 16 | 17 | def is_empty(self): 18 | if len(self.results) == 0: 19 | return True 20 | 
else: 21 | return False 22 | 23 | def merge(self, old_model: DataModel): 24 | old_contents = old_model.get_contents() 25 | if old_contents is None: 26 | return 27 | 28 | exist_contents = [] 29 | for old_data in old_contents: 30 | if all(old_data.get("Answer") != entry.get("Answer") for entry in self.results): 31 | exist_contents.append(old_data) 32 | 33 | self.results += exist_contents 34 | 35 | def add_info(self, purpose: str, perspectives: str, answer: str, point: float): 36 | if not isinstance(point, float) or float(point) < 60.0: 37 | return 38 | data = { 39 | "Purpose": purpose, 40 | "Perspectives": perspectives, 41 | "Answer": answer, 42 | "Point": point 43 | } 44 | self.results.append(data) 45 | 46 | def get_contents(self): 47 | if self.is_empty(): 48 | return None 49 | return self.results 50 | def get_conents_num(self): 51 | if self.is_empty(): 52 | return 0 53 | return len(self.results) 54 | 55 | def is_empty_content(self): 56 | return self.is_empty() 57 | 58 | def get_model(self) -> DataModel: 59 | data_model = DataModel(self.get_title(), self.get_contents()) 60 | data_model.set_concrete_model(self) 61 | return data_model 62 | 63 | 64 | @staticmethod 65 | def create_from_plans(name: str, plans: dict): 66 | model = DocumentDataModel(name) 67 | if plans is not None and plans.get("Plan") is not None: 68 | for plan in plans.get("Plan"): 69 | if plan.get("DocumentID") == name: 70 | #print(plan) 71 | model.add_info(plan.get("Purpose"), 72 | plan.get("Perspectives"), 73 | plan.get("ResultID").get("Reply"), 74 | plan.get("ResultID").get("Point")) 75 | return model 76 | 77 | @staticmethod 78 | def load_plan_json_file(name: str, filepath: str): 79 | with open(filepath, "r") as file: 80 | plan_data = json.load(file) 81 | model = DocumentDataModel.create_from_plans(name, plan_data) 82 | return model 83 | 84 | @staticmethod 85 | def create_from_entry(name: str, entry: dict): 86 | model = DocumentDataModel(name) 87 | if entry is not None: 88 | for result in entry: 89 | model.add_info( result.get("Purpose"), 90 | result.get("Perspectives"), 91 | result.get("Answer"), 92 | result.get("Point")) 93 | return model 94 | 95 | @staticmethod 96 | def load_json_file(filepath: str): 97 | data_model = DataModel.load_json_file(filepath) 98 | if data_model == None: 99 | return None 100 | model = DocumentDataModel.create_from_entry( 101 | data_model.get_name(), 102 | data_model.get_contents()) 103 | return model 104 | 105 | 106 | if __name__ == "__main__": 107 | if len(sys.argv) != 3: 108 | print("Usage: ") 109 | sys.exit(1) 110 | name = sys.argv[1] 111 | filepath = sys.argv[2] 112 | print("name=", name) 113 | print("filepath=", filepath) 114 | model = DocumentDataModel.load_plan_json_file(name, filepath) 115 | 116 | with open("./doc.json", "w", encoding="utf-8") as f: 117 | json.dump(model.get_model().get_json_data(), f, indent=4, ensure_ascii=False) 118 | 119 | -------------------------------------------------------------------------------- /data_model/document_data_persistentor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import json 6 | import traceback 7 | from data_model_accessor import DataModelAccessor 8 | from document_data_model import DocumentDataModel 9 | 10 | class DocumentDataPersistentor: 11 | def __init__(self, accessor: DataModelAccessor): 12 | self.accessor = accessor 13 | 14 | def save_document_data(self): 15 | for model in self.models: 16 | data_model = model.get_model() 17 | 
self.accessor.add_data_model(data_model) 18 | 19 | def load_document_data(self, plan_data_path: str): 20 | try: 21 | with open(plan_data_path, "r") as file: 22 | json_data = json.load(file) 23 | except json.JSONDecodeError as e: 24 | traceback_str = traceback.format_exc() 25 | error_message = f"ERROR: {str(e)}" 26 | print(traceback_str + error_message) 27 | return 28 | if json_data.get("Plan") is None: 29 | return 30 | 31 | document_ids = [] 32 | for entry in json_data.get("Plan"): 33 | if entry.get("DocumentID") not in document_ids: 34 | #print("doc:", entry.get("DocumentID")) 35 | document_ids.append(entry.get("DocumentID")) 36 | 37 | self.models = [] 38 | for entry in document_ids: 39 | model = DocumentDataModel.create_from_plans(entry, json_data) 40 | if model.is_empty() == False: 41 | self.models.append(model) 42 | 43 | if __name__ == "__main__": 44 | if len(sys.argv) != 3: 45 | print("Usage: ") 46 | sys.exit(1) 47 | dir = sys.argv[1] 48 | filepath = sys.argv[2] 49 | accessor = DataModelAccessor(dir) 50 | 51 | persistentor = DocumentDataPersistentor(accessor) 52 | persistentor.load_document_data(filepath) 53 | persistentor.save_document_data() 54 | -------------------------------------------------------------------------------- /data_model/document_similarity_extractor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import json 6 | from data_model_accessor import DataModelAccessor 7 | from document_data_model import DocumentDataModel 8 | from openai_libs import get_score, get_tokenlen 9 | 10 | class DocumentSimilarityExtractor: 11 | def __init__(self, accessor: DataModelAccessor, maxtoken_num: int): 12 | self.maxtoken_num = maxtoken_num 13 | self.accessor = accessor 14 | self.load() 15 | 16 | def load(self): 17 | filelist = self.accessor.get_filelist() 18 | self.models = [] 19 | for entry in filelist: 20 | filepath = self.accessor.get_data_model_filepath(entry) 21 | model = DocumentDataModel.load_json_file(filepath) 22 | self.models.append(model) 23 | 24 | def _calc_scores(self, query: str): 25 | self.scores = [] 26 | for model in self.models: 27 | contents = model.get_contents() 28 | if contents is None: 29 | continue 30 | for entry in model.get_contents(): 31 | #print("info:", entry) 32 | data = json.dumps(entry["Answer"]) 33 | score = get_score(query, data) 34 | tokens = get_tokenlen(data) 35 | self.scores.append({ 36 | "term": model.get_title(), 37 | "info": entry["Answer"], 38 | "tokens": tokens, 39 | "score": score 40 | }) 41 | self.scores.sort(key=lambda x: x["score"], reverse=True) 42 | 43 | def extract(self, query: str): 44 | self._calc_scores(query) 45 | terms = {} 46 | token_sum = 0 47 | for entry in self.scores: 48 | if token_sum + entry["tokens"] > self.maxtoken_num: 49 | break 50 | if terms.get(entry["term"]) is None: 51 | terms[entry["term"]] = [] 52 | terms[entry["term"]].append(entry["info"]) 53 | token_sum += entry["tokens"] 54 | 55 | data = { 56 | "DocumentIDs": terms 57 | } 58 | return data 59 | 60 | if __name__ == "__main__": 61 | if len(sys.argv) != 4: 62 | print("Usage: ") 63 | sys.exit(1) 64 | query = sys.argv[1] 65 | dir = sys.argv[2] 66 | max_tokens = int(sys.argv[3]) 67 | 68 | accessor = DataModelAccessor(dir) 69 | extractor = DocumentSimilarityExtractor(accessor, max_tokens) 70 | data = extractor.extract(query) 71 | data_str = json.dumps(data, indent=4, ensure_ascii=False) 72 | print(data_str) 73 | 
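The three modules above form the document-memory pipeline: DocumentDataPersistentor splits an executed plan result (plan_result.json) into per-document models, DataModelAccessor stores them as JSON under <dir>/data_model/, and DocumentSimilarityExtractor later returns only the stored answers most similar to a new query, packed under a token budget. Below is a minimal usage sketch, not part of the repository: the paths are illustrative examples following the applaud_data layout, the data_model/ directory is assumed to be on PYTHONPATH, and a valid OPENAI_API_KEY is assumed because openai_libs.get_score() calls the OpenAI embeddings API.

    from data_model_accessor import DataModelAccessor
    from document_data_persistentor import DocumentDataPersistentor
    from document_similarity_extractor import DocumentSimilarityExtractor

    # Persist the per-document answers produced by an executed plan.
    accessor = DataModelAccessor("applaud_data/documents_data")   # <dir>/data_model/ is created if missing
    persistentor = DocumentDataPersistentor(accessor)
    persistentor.load_document_data("applaud_data/query_dir/q_1/result/plan_result.json")
    persistentor.save_document_data()                             # one JSON model per DocumentID

    # Later: retrieve only the answers most similar to a new query,
    # keeping the packed context under roughly 1024 tokens.
    extractor = DocumentSimilarityExtractor(accessor, 1024)
    context = extractor.extract("kanetugu2018 の個性")
    print(context["DocumentIDs"])   # {document title: [best-matching answers], ...}

Keeping the token budget inside the extractor lets the same stored answers be reused with different context-window sizes without re-running the plan.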
-------------------------------------------------------------------------------- /data_model/openai_libs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import openai 5 | import tiktoken 6 | from tiktoken.core import Encoding 7 | from openai.embeddings_utils import cosine_similarity 8 | 9 | llm_model = "gpt-4-0613" 10 | embedding_model = "text-embedding-ada-002" 11 | 12 | # Embedding 13 | def get_embedding(text_input: str): 14 | global embedding_model 15 | 16 | # ベクトル変換 17 | response = openai.Embedding.create( 18 | input = text_input.replace("\n", " "), # 入力文章 19 | model = embedding_model, # GPTモデル 20 | ) 21 | 22 | # 出力結果取得 23 | embeddings = response['data'][0]['embedding'] 24 | 25 | return embeddings 26 | 27 | def get_score(text1: str, text2: str): 28 | vec1 = get_embedding(text1) 29 | vec2 = get_embedding(text2) 30 | result = cosine_similarity(vec1, vec2) 31 | return result 32 | 33 | 34 | def get_tokenlen(data: str): 35 | encoding: Encoding = tiktoken.encoding_for_model(llm_model) 36 | tokens = encoding.encode(data) 37 | return len(tokens) 38 | -------------------------------------------------------------------------------- /data_model/reflection_contents_similarity_merge.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | import sys 4 | from question import get_response 5 | from json_utils import parse_json 6 | from check_recover_json import check_json_str, recover_json_str 7 | from reflection_data_model import ReflectionDataModel 8 | 9 | def merge_known_infos_str(data: str): 10 | res = get_response(f"For the given json data, compare each KnownInfo, and if it matches the following conditions, merge them, otherwise output as is.:\n 1. The DocumentIDs are the same, and\n2. The content of the KnownInfo is the same meaning. 
Let's think step by step.:\n {data}") 11 | json_data = parse_json(res) 12 | return json_data 13 | 14 | def merge_known_infos_json(filepath: str): 15 | with open(filepath, "r") as file: 16 | data = file.read() 17 | json_data = merge_known_infos_str(data) 18 | return json_data 19 | 20 | def merge_and_save_known_infos_json(filepath: str): 21 | model = ReflectionDataModel.load_json_file(filepath) 22 | if model.get_known_infos_num() < 2: 23 | print(f"INFO: SKIP MERGING MODEL: {filepath} info_num={model.get_known_infos_num()}") 24 | return 25 | print("INFO: MERGING MODEL: ", filepath) 26 | json_data = merge_known_infos_json(filepath) 27 | result, errcode = check_json_str(json_data) 28 | if result == False: 29 | json_data = recover_json_str(errcode, json_data) 30 | result, _ = check_json_str(json_data) 31 | if (result == False): 32 | print("ERROR: can not recover json_data...") 33 | return False 34 | #print(json_data) 35 | with open(filepath, "w") as file: 36 | file.write(json_data) 37 | return True 38 | 39 | 40 | 41 | if __name__ == "__main__": 42 | import sys 43 | if len(sys.argv) != 2: 44 | print("Usage: ") 45 | sys.exit(1) 46 | filepath = sys.argv[1] 47 | ret = merge_and_save_known_infos_json(filepath) 48 | if ret == False: 49 | sys.exit(1) 50 | sys.exit(0) 51 | -------------------------------------------------------------------------------- /data_model/reflection_data_cleaner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import json 6 | import traceback 7 | from data_model_accessor import DataModelAccessor 8 | from reflection_data_model import ReflectionDataModel 9 | from reflection_contents_similarity_merge import merge_and_save_known_infos_json 10 | 11 | class ReflectionDataCleaner: 12 | def __init__(self, accessor: DataModelAccessor): 13 | self.accessor = accessor 14 | 15 | def clean_empty_data_models(self): 16 | clean_names = [] 17 | for name in self.accessor.get_filelist(): 18 | filepath = self.accessor.get_data_model_filepath(name) 19 | model = ReflectionDataModel.load_json_file(filepath) 20 | data_model = model.get_model() 21 | if data_model.is_empty_content() == True: 22 | print(f"INFO: REMOVING EMPTY MODEL({data_model.get_name()})") 23 | clean_names.append(name) 24 | self.accessor.remove_models(clean_names) 25 | 26 | def merge_same_data_models(self): 27 | for name in self.accessor.get_filelist(): 28 | print("INFO: name=", name) 29 | filepath = self.accessor.get_data_model_filepath(name) 30 | ret = merge_and_save_known_infos_json(filepath) 31 | if ret == False: 32 | print("INFO: skip merge...error") 33 | #sys.exit(1) 34 | 35 | 36 | if __name__ == "__main__": 37 | if len(sys.argv) != 2: 38 | print("Usage: ") 39 | sys.exit(1) 40 | dir = sys.argv[1] 41 | accessor = DataModelAccessor(dir) 42 | 43 | cleaner = ReflectionDataCleaner(accessor) 44 | cleaner.clean_empty_data_models() 45 | print("INFO: MERGING REFLECTIONS") 46 | cleaner.merge_same_data_models() 47 | -------------------------------------------------------------------------------- /data_model/reflection_data_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | import json 6 | 7 | from data_model import DataModel 8 | 9 | class ReflectionDataModel: 10 | def __init__(self, term: str): 11 | self.term = term 12 | self.known_infos = [] 13 | self.relations = [] 14 | self.unknown_infos = [] 15 | 16 | def is_empty(self): 
17 | if len(self.known_infos) == 0 and len(self.unknown_infos) == 0: 18 | return True 19 | else: 20 | return False 21 | 22 | def merge(self, old_model: DataModel): 23 | # merge KnownInfos 24 | old_contents = old_model.get_contents() 25 | if old_contents is None: 26 | return 27 | if old_contents.get("KnownInfos") is not None: 28 | old_known_infos = [] 29 | for old_data in old_contents.get("KnownInfos"): 30 | if all(old_data.get("KnownInfo") != entry.get("KnownInfo") for entry in self.known_infos): 31 | old_known_info = { 32 | "KnownInfo": old_data.get("KnownInfo"), 33 | "DocumentIDs": old_data.get("DocumentIDs"), 34 | "Point": old_data.get("Point") 35 | } 36 | old_known_infos.append(old_known_info) 37 | self.known_infos += old_known_infos 38 | 39 | # merge Relations 40 | if old_contents.get("Relations") is not None: 41 | old_relations = [] 42 | for old_data in old_contents.get("Relations"): 43 | if all(old_data.get("Term") != entry.get("Term") for entry in self.relations): 44 | old_relations.append(old_data) 45 | 46 | self.relations += old_relations 47 | 48 | def add_info(self, known_info: str, document_ids: list, point: float): 49 | #print("known_info:", known_info) 50 | if len(known_info) == 0: 51 | #print("skip1") 52 | return 53 | if document_ids is None or len(document_ids) == 0: 54 | #unreliable info 55 | #print("skip2") 56 | return 57 | if point is not None and float(point) < 60.0: 58 | #print("skip3:", type(point)) 59 | #unreliable info 60 | return 61 | data = { 62 | "KnownInfo": known_info, 63 | "DocumentIDs": document_ids, 64 | "Point": point 65 | } 66 | if all(data.get("KnownInfo") != entry.get("KnownInfo") for entry in self.known_infos): 67 | self.known_infos.append(data) 68 | 69 | def update_unknown_info(self, unknwon_infos: list): 70 | self.unknown_infos = unknwon_infos 71 | 72 | def get_known_infos_num(self): 73 | return len(self.known_infos) 74 | 75 | 76 | def add_relations(self, relations): 77 | self.relations += relations 78 | 79 | def get_term(self) -> str: 80 | return self.term 81 | 82 | def get_contents(self): 83 | if self.is_empty(): 84 | return None 85 | data = { 86 | "Term": self.term, 87 | "KnownInfos": self.known_infos, 88 | "UnknownInfo": self.unknown_infos, 89 | "Relations": self.relations 90 | } 91 | return data 92 | def get_known_infos(self): 93 | if self.known_infos == None: 94 | return [] 95 | return self.known_infos 96 | 97 | def is_empty_content(self): 98 | if len(self.known_infos) == 0: 99 | return True 100 | else: 101 | return False 102 | 103 | def get_model(self) -> DataModel: 104 | data_model = DataModel(self.get_term(), self.get_contents()) 105 | data_model.set_concrete_model(self) 106 | return data_model 107 | 108 | @staticmethod 109 | def create_from_entry(name: str, entry: dict): 110 | model = ReflectionDataModel(name) 111 | if entry is not None and entry.get("KnownInfos") is not None: 112 | for known_info in entry.get("KnownInfos"): 113 | #print("KnownInfo:", str(known_info.get("KnownInfo"))) 114 | #print("DocumentIDs:", str(known_info.get("DocumentIDs"))) 115 | #print("Point:", str(known_info.get("Point"))) 116 | model.add_info(known_info.get("KnownInfo"), known_info.get("DocumentIDs"), known_info.get("Point")) 117 | if entry.get("Relations") is not None: 118 | model.add_relations(entry.get("Relations")) 119 | if entry.get("UnknownInfo") is not None: 120 | model.update_unknown_info(entry.get("UnknownInfo")) 121 | if entry.get("Point") is not None: 122 | model.point = entry.get("Point") 123 | return model 124 | 125 | @staticmethod 126 | def 
load_json_file(filepath: str): 127 | #print("filepath:", filepath) 128 | data_model = DataModel.load_json_file(filepath) 129 | if data_model == None: 130 | #print("data_model == None") 131 | return None 132 | #print("name:", data_model.get_name()) 133 | #print("get_contents:", data_model.get_contents()) 134 | model = ReflectionDataModel.create_from_entry( 135 | data_model.get_name(), 136 | data_model.get_contents()) 137 | return model 138 | -------------------------------------------------------------------------------- /data_model/reflection_data_persistentor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import json 6 | import traceback 7 | from data_model_accessor import DataModelAccessor 8 | from reflection_data_model import ReflectionDataModel 9 | 10 | class ReflectionDataPersistentor: 11 | def __init__(self, accessor: DataModelAccessor): 12 | self.accessor = accessor 13 | 14 | def save_reflection_data(self): 15 | for model in self.models: 16 | data_model = model.get_model() 17 | self.accessor.add_data_model(data_model) 18 | 19 | def load_reflection_data(self, reflection_data_path: str): 20 | try: 21 | #print("filepath=", reflection_data_path) 22 | with open(reflection_data_path, "r") as file: 23 | json_data = json.load(file) 24 | except json.JSONDecodeError as e: 25 | traceback_str = traceback.format_exc() 26 | error_message = f"ERROR: {str(e)}" 27 | print(traceback_str + error_message) 28 | return 29 | #print("json_data:", json.dumps(json_data)) 30 | if json_data.get("Knowledges") is None: 31 | return 32 | 33 | self.models = [] 34 | for entry in json_data.get("Knowledges"): 35 | #print("Term:", entry.get("Term")) 36 | model = ReflectionDataModel.create_from_entry( 37 | entry.get("Term").replace(" ", "_").replace("/", "_"), entry) 38 | self.models.append(model) 39 | 40 | if __name__ == "__main__": 41 | if len(sys.argv) != 3: 42 | print("Usage: ") 43 | sys.exit(1) 44 | dir = sys.argv[1] 45 | filepath = sys.argv[2] 46 | accessor = DataModelAccessor(dir) 47 | 48 | persistentor = ReflectionDataPersistentor(accessor) 49 | persistentor.load_reflection_data(filepath) 50 | persistentor.save_reflection_data() 51 | -------------------------------------------------------------------------------- /data_model/reflection_similarity_extractor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import json 6 | from data_model_accessor import DataModelAccessor 7 | from reflection_data_model import ReflectionDataModel 8 | from openai_libs import get_score, get_tokenlen 9 | 10 | class ReflectionSimilarityExtractor: 11 | def __init__(self, accessor: DataModelAccessor, maxtoken_num: int): 12 | self.maxtoken_num = maxtoken_num 13 | self.accessor = accessor 14 | self.load() 15 | 16 | def load(self): 17 | filelist = self.accessor.get_filelist() 18 | self.models = [] 19 | for entry in filelist: 20 | filepath = self.accessor.get_data_model_filepath(entry) 21 | model = ReflectionDataModel.load_json_file(filepath) 22 | self.models.append(model) 23 | 24 | def _calc_scores(self, query: str): 25 | self.scores = [] 26 | for model in self.models: 27 | #print("entry_name:", model.get_term()) 28 | #print(" known_infos:", model.get_known_infos_num()) 29 | for entry in model.get_known_infos(): 30 | #print("info:", entry) 31 | data = model.get_term() + ":" + json.dumps(entry) 32 | score = get_score(query, 
data) 33 | tokens = get_tokenlen(data) 34 | self.scores.append({ 35 | "term": model.get_term(), 36 | "info": entry, 37 | "tokens": tokens, 38 | "score": score 39 | }) 40 | self.scores.sort(key=lambda x: x["score"], reverse=True) 41 | 42 | def extract(self, query: str): 43 | self._calc_scores(query) 44 | terms = {} 45 | token_sum = 0 46 | for entry in self.scores: 47 | if token_sum + entry["tokens"] > self.maxtoken_num: 48 | break 49 | #print("data:", entry["term"]) 50 | if terms.get(entry["term"]) is None: 51 | terms[entry["term"]] = [] 52 | terms[entry["term"]].append(entry["info"]) 53 | token_sum += entry["tokens"] 54 | 55 | data = { 56 | "Knowledges": terms 57 | } 58 | return data 59 | 60 | if __name__ == "__main__": 61 | if len(sys.argv) != 4: 62 | print("Usage: ") 63 | sys.exit(1) 64 | query = sys.argv[1] 65 | dir = sys.argv[2] 66 | max_tokens = int(sys.argv[3]) 67 | 68 | accessor = DataModelAccessor(dir) 69 | extractor = ReflectionSimilarityExtractor(accessor, max_tokens) 70 | data = extractor.extract(query) 71 | data_str = json.dumps(data, indent=4, ensure_ascii=False) 72 | print(data_str) 73 | -------------------------------------------------------------------------------- /data_model/similarity_extractor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from data_model_accessor import DataModelAccessor 5 | import json 6 | from openai_libs import get_score, get_tokenlen 7 | 8 | class SimilarityExtractor: 9 | def __init__(self, accessor: DataModelAccessor, maxtoken_num: int): 10 | self.maxtoken_num = maxtoken_num 11 | self.accessor = accessor 12 | 13 | def get_filelist(self, query: str): 14 | scores = self._calc_scores(query, self.accessor.get_filelist()) 15 | result = [] 16 | token_sum = 0 17 | for entry in scores: 18 | if token_sum + entry["tokens"] > self.maxtoken_num: 19 | break 20 | result.append(entry["file"]) 21 | token_sum += entry["tokens"] 22 | return result 23 | 24 | def _calc_scores(self, query: str, filelist: list): 25 | scores = [] 26 | for entry in filelist: 27 | #print("file:", entry) 28 | json_data = self.accessor.get_data_model(entry).get_json_data() 29 | json_str = json.dumps(json_data) 30 | score = get_score(query, json_str) 31 | tokens = get_tokenlen(json_str) 32 | scores.append({ 33 | "file": entry, 34 | "tokens": tokens, 35 | "score": score 36 | }) 37 | scores.sort(key=lambda x: x["score"], reverse=True) 38 | return scores 39 | 40 | 41 | def extract(self, head_name: str, filelists: list): 42 | models = self.accessor.get_json_models(filelists) 43 | data = { 44 | head_name: models 45 | } 46 | return data 47 | 48 | 49 | if __name__ == "__main__": 50 | import sys 51 | import json 52 | if len(sys.argv) != 3: 53 | print("Usage: ") 54 | sys.exit(1) 55 | query = sys.argv[1] 56 | dir = sys.argv[2] 57 | 58 | accessor = DataModelAccessor(dir) 59 | extractor = SimilarityExtractor(accessor, 2048) 60 | 61 | filelist = extractor.get_filelist(query) 62 | data = extractor.extract("inputs", filelist) 63 | data_str = json.dumps(data, indent=4, ensure_ascii=False) 64 | print(data_str) 65 | -------------------------------------------------------------------------------- /db_manager.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from document_db import load_db_with_type, similarity_search_with_score 5 | import sys 6 | import pandas as pd 7 | 8 | 9 | def get_qa(db_dir: str, doc_id: str): 10 | return
load_db_with_type(db_dir + "/" + doc_id) 11 | 12 | def get_similarity_search_with_scores(db_dir: str, doc_id: str, terms: str, top_k: int): 13 | return similarity_search_with_score(db_dir + "/" + doc_id, terms, top_k) 14 | 15 | def get_similarity_search_with_average_score(db_dir: str, doc_id: str, terms: str, top_k: int): 16 | doc_scores = similarity_search_with_score(db_dir + "/" + doc_id, terms, top_k) 17 | scores = [score[1] for score in doc_scores] 18 | scores_series = pd.Series(scores, dtype='float64') 19 | average_score = scores_series.mean() 20 | return average_score 21 | 22 | def get_similarity_search_results(doclist: str, db_dir: str, terms: str, top_k: int): 23 | with open(doclist, "r") as file: 24 | lines = file.readlines() 25 | total_list = [line.strip() for line in lines] 26 | 27 | scores = [] 28 | entries = [] 29 | for entry in total_list: 30 | score = get_similarity_search_with_average_score(db_dir, entry, terms, top_k) 31 | scores.append(score) 32 | entries.append(entry) 33 | 34 | df = pd.DataFrame({'title': entries, 'score': scores}) 35 | top = df.sort_values(by='score', ascending=True).head(top_k)['title'].tolist() 36 | return top 37 | 38 | 39 | if __name__ == "__main__": 40 | if len(sys.argv) != 5: 41 | print("USAGE: " + sys.argv[0] + " ") 42 | sys.exit(1) 43 | 44 | db_dir=sys.argv[1] 45 | doclist=sys.argv[2] 46 | terms=sys.argv[3] 47 | num=int(sys.argv[4]) 48 | 49 | docs = get_similarity_search_results(doclist, db_dir, terms, num) 50 | for entry in docs: 51 | print(entry) 52 | -------------------------------------------------------------------------------- /evaluate_applaud.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | from prompt_template import PromptTemplate 6 | from question import get_response 7 | 8 | def do_applaud(name: str, document_path: str, template_path): 9 | file_list = os.listdir(document_path) 10 | log_data = "" 11 | for file in file_list: 12 | with open(document_path + "/" + file) as file: 13 | data = file.read() 14 | log_data += data 15 | prompt = PromptTemplate(template_path) 16 | p = prompt.get_prompt( 17 | Name = name, 18 | log_data = log_data) 19 | print(p) 20 | return get_response(p) 21 | 22 | if __name__ == "__main__": 23 | import sys 24 | if len(sys.argv) != 4: 25 | print("Usage: ") 26 | sys.exit(1) 27 | result = do_applaud(sys.argv[1], sys.argv[2], sys.argv[3]) 28 | print(result) 29 | -------------------------------------------------------------------------------- /evaluate_results.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from prompt_template import PromptTemplate 5 | from question import get_response 6 | 7 | def evaluate_results(query_path: str, result1_path: str, result2_path: str, template_path): 8 | try: 9 | with open(query_path, 'r') as file: 10 | query = file.read() 11 | except FileNotFoundError: 12 | raise FileNotFoundError(f"File '{query_path}' not found.") 13 | try: 14 | with open(result1_path, 'r') as file: 15 | result1 = file.read() 16 | except FileNotFoundError: 17 | raise FileNotFoundError(f"File '{result1_path}' not found.") 18 | try: 19 | with open(result2_path, 'r') as file: 20 | result2 = file.read() 21 | except FileNotFoundError: 22 | raise FileNotFoundError(f"File '{result2_path}' not found.") 23 | 24 | prompt = PromptTemplate(template_path) 25 | p = prompt.get_prompt( 26 | MainQuestion = query, 27 | Result1 = 
result1, 28 | Result2 = result2) 29 | print(p) 30 | return get_response(p) 31 | 32 | if __name__ == "__main__": 33 | import sys 34 | if len(sys.argv) != 5: 35 | print("Usage: ") 36 | sys.exit(1) 37 | result = evaluate_results(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) 38 | print(result) 39 | -------------------------------------------------------------------------------- /evaluator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import pandas as pd 5 | import json 6 | from memory_stream import MemoryStream 7 | from plan import Plan 8 | from prompt_template import PromptTemplate 9 | from question import get_response 10 | from plan import Plan 11 | import copy 12 | import traceback 13 | 14 | class Evaluator: 15 | def __init__(self, main_question, mission_path, plan: Plan, memory_stream: MemoryStream): 16 | self.main_question = main_question 17 | pmission = PromptTemplate(mission_path) 18 | self.mission = pmission.get_prompt() 19 | self.plan = plan 20 | self.memory_stream = memory_stream 21 | 22 | def merge_data(self): 23 | self.merged_data = dict() 24 | self.merged_data["DetailedStrategy"] = self.plan.detailed_strategy 25 | self.merged_data["Plan"] = [] 26 | for entry in self.plan.get_json_data()["Plan"]: 27 | tmp = copy.deepcopy(entry) 28 | new_entry = dict() 29 | new_entry["DocumentID"] = tmp["DocumentID"] 30 | new_entry["Purpose"] = tmp["Purpose"] 31 | new_entry["Perspectives"] = tmp["Perspectives"] 32 | #print(new_entry) 33 | if isinstance(entry["ResultID"], int) or isinstance(entry["ResultID"], float): 34 | if entry["ResultID"] >= 0: 35 | data = self.memory_stream.get_data(entry["ResultID"]) 36 | #print(data) 37 | new_entry["ResultID"] = { "Reply": data["Reply"], "Point": data["Point"] } 38 | else: 39 | new_entry["ResultID"] = { "Reply": "No Reply", "Point": 0.0 } 40 | else: 41 | new_entry["ResultID"] = { "Reply": "No Reply", "Point": 0.0 } 42 | self.merged_data["Plan"].append(new_entry) 43 | #print(merged_data) 44 | with open("./test/result/plan_result.json", "w", encoding="utf-8") as f: 45 | json.dump(self.merged_data, f, indent=4, ensure_ascii=False) 46 | 47 | def evaluate(self, template_path, ref_json_path): 48 | with open(ref_json_path, 'r') as file: 49 | reflection = file.read() 50 | with open("./test/result/plan_result.json", 'r') as file: 51 | PlanExecutedResults = file.read() 52 | 53 | temp = PromptTemplate(template_path) 54 | prompt = temp.get_prompt( 55 | MainQuestion = self.main_question, 56 | Mission = self.mission, 57 | PastStrategies = [], 58 | PlanExecutedResults = PlanExecutedResults, 59 | Reflection = reflection 60 | ) 61 | try: 62 | reply = get_response(prompt) 63 | except Exception as e: 64 | traceback_str = traceback.format_exc() 65 | error_message = f"ERROR: {str(e)}" 66 | print(traceback_str + error_message) 67 | sys.exit(1) 68 | print(reply) 69 | 70 | if __name__ == "__main__": 71 | import sys 72 | from params import get_param 73 | prompt_template_path = get_param("prompt_templates_path") 74 | 75 | if len(sys.argv) != 4 and len(sys.argv) != 5: 76 | print("Usage: []") 77 | sys.exit(1) 78 | main_question = sys.argv[1] 79 | mission_path= prompt_template_path + "/ptemplate_mission.txt" 80 | if len(sys.argv) == 4: 81 | plan_json_path = sys.argv[2] 82 | mem_json_path = sys.argv[3] 83 | plan = Plan() 84 | plan.load_from_json(plan_json_path) 85 | memory_stream = MemoryStream() 86 | memory_stream.load_from_json(mem_json_path) 87 | evaluator = Evaluator(main_question, 
mission_path, plan, memory_stream) 88 | evaluator.merge_data() 89 | elif len(sys.argv) == 5: 90 | ref_json_path = sys.argv[4] 91 | evaluator = Evaluator(main_question, mission_path, None, None) 92 | evaluator.evaluate(prompt_template_path + "/ptemplate_evaluate.txt", ref_json_path) -------------------------------------------------------------------------------- /history_selector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from memory_stream import MemoryStream 5 | 6 | class HistorySelector: 7 | def __init__(self, memory_stream): 8 | self.memory_stream = memory_stream 9 | self.history = [] 10 | 11 | def select(self, threshold): 12 | data = self.memory_stream.get_data() 13 | high_point_data = data[data["Point"] >= threshold] 14 | self.history = high_point_data 15 | return high_point_data 16 | 17 | def get_history(self): 18 | return self.history 19 | 20 | 21 | if __name__ == "__main__": 22 | memory_stream = MemoryStream() 23 | data_num = input("DataNum> ") 24 | count = int(data_num) 25 | i = 0 26 | while i < count: 27 | target_doc_id = input("TargetDocID> ") 28 | question = input("question> ") 29 | reply = input("reply> ") 30 | point = input("point> ") 31 | memory_stream.add_data(target_doc_id, question, reply, int(point)) 32 | print(memory_stream.get_data()) 33 | i += 1 34 | threshold = input("Threshold> ") 35 | history = HistorySelector(memory_stream) 36 | print(history.select(int(threshold))) 37 | else: 38 | pass 39 | -------------------------------------------------------------------------------- /json_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | def fix_quotes(line): 5 | fixed_line = "" 6 | quote_mode = False 7 | 8 | for char in line: 9 | if quote_mode == False: 10 | if char == '"': 11 | quote_mode = True 12 | else: 13 | if char == '"': 14 | quote_mode = False 15 | elif char == ':' or char == '\n': 16 | fixed_line += '"' 17 | quote_mode = False 18 | fixed_line += char 19 | 20 | return fixed_line 21 | 22 | def fix_backslashes(json_string): 23 | # 不正なバックスラッシュを修正する 24 | fixed_string = json_string.replace("\\", "\\\\") 25 | return fixed_string 26 | 27 | def parse_one_entry(line: str, key: str): 28 | line = line.split(key)[1].strip() 29 | string_without_quotes = line.replace('"', '') 30 | entries = string_without_quotes.split(":") 31 | contents = [] 32 | #skip DetailedStrategy 33 | #get contents 34 | for entry in entries: 35 | if line in entry: 36 | continue 37 | else: 38 | contents.append(entry) 39 | new_contents = " ".join(contents) 40 | #recreate line 41 | new_line = '"' + key + '": ' + '"' + new_contents + '",' 42 | print(new_line) 43 | return new_line 44 | 45 | def parse_plan(org_data: str): 46 | lines = org_data.split("\n") 47 | output_lines = [] 48 | start_flag = False 49 | for line in lines: 50 | if start_flag == False: 51 | if "{" in line: 52 | start_flag = True 53 | output_lines.append(fix_quotes(line)) 54 | else: 55 | pass 56 | else: 57 | if "DetailedStrategy" in line: 58 | line = parse_one_entry(line, "DetailedStrategy") 59 | output_lines.append(fix_quotes(fix_backslashes(line))) 60 | 61 | return "\n".join(output_lines) 62 | 63 | 64 | def parse_json(org_data: str): 65 | lines = org_data.split("\n") 66 | output_lines = [] 67 | start_flag = False 68 | nest_count = 0 69 | for line in lines: 70 | if start_flag == False: 71 | if "{" in line: 72 | start_flag = True 73 | nest_count += 1 
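                # First "{" found: this line opens the JSON object (and is run through
                # fix_quotes()). From here on, brace depth is tracked per input line and
                # collection stops once the depth returns to zero, so any text that
                # follows the closing brace is dropped.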
74 | output_lines.append(fix_quotes(line)) 75 | else: 76 | pass 77 | else: 78 | if "{" in line: 79 | nest_count += 1 80 | elif "}" in line: 81 | nest_count -= 1 82 | output_lines.append(line) 83 | if (nest_count == 0): 84 | break 85 | return "\n".join(output_lines) -------------------------------------------------------------------------------- /memory_stream.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import pandas as pd 5 | import json 6 | 7 | class MemoryStream: 8 | def __init__(self): 9 | self.columns = ["ID", "TargetDocID", "Question", "Reply", "Point"] 10 | self.data = pd.DataFrame(columns=self.columns) 11 | self.current_id = 1 12 | 13 | def add_data(self, target_doc_id, question, reply, point): 14 | ret_id = self.current_id 15 | data = [[self.current_id, target_doc_id, question, reply, point]] 16 | new_data = pd.DataFrame(data, columns=self.columns) 17 | self.data = pd.concat([self.data, new_data], ignore_index=True) 18 | self.current_id += 1 19 | return ret_id 20 | 21 | def get_data(self): 22 | return self.data 23 | 24 | def get_reply(self, index = None): 25 | if (index == None): 26 | index = len(self.data) - 1 27 | if index >= 0 and index < len(self.data): 28 | return self.data.loc[index, "Reply"] 29 | else: 30 | return None 31 | 32 | def get_data(self, id: int): 33 | filtered_data = self.data.loc[self.data["ID"] == id] 34 | if filtered_data.empty: 35 | return None 36 | else: 37 | return filtered_data.to_dict(orient="records")[0] 38 | 39 | def get_point(self, index = None): 40 | if (index == None): 41 | index = len(self.data) - 1 42 | if index >= 0 and index < len(self.data): 43 | return self.data.loc[index, "Point"] 44 | else: 45 | return None 46 | 47 | def save_to_json(self, file_path): 48 | json_data = self.data.to_dict(orient="records") 49 | 50 | with open(file_path, "w", encoding="utf-8") as f: 51 | json.dump(json_data, f, indent=4, ensure_ascii=False) 52 | 53 | 54 | def load_from_json(self, file_path): 55 | with open(file_path, 'r') as file: 56 | json_data = json.load(file) 57 | self.data = pd.DataFrame(json_data) 58 | 59 | if __name__ == "__main__": 60 | memory_stream = MemoryStream() 61 | while True: 62 | target_doc_id = input("TargetDocID> ") 63 | question = input("question> ") 64 | reply = input("reply> ") 65 | point = input("point> ") 66 | memory_stream.add_data(target_doc_id, question, reply, int(point)) 67 | print(memory_stream.get_data()) 68 | else: 69 | pass 70 | -------------------------------------------------------------------------------- /noidea.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmori/generative-agents/81cabfd014a7e20c23bacd7778cf6b3b8b7f659b/noidea.txt -------------------------------------------------------------------------------- /param_templates/applaud_params.json: -------------------------------------------------------------------------------- 1 | { 2 | "prompt_templates_path": "./applaud_prompt", 3 | "documents_path": "./applaud_data/documents", 4 | "documents_data_path": "./applaud_data/documents_data", 5 | "reflections_data_path": "./applaud_data/reflections_data" 6 | } -------------------------------------------------------------------------------- /param_templates/document_params.json: -------------------------------------------------------------------------------- 1 | { 2 | "prompt_templates_path": "./prompt_templates", 3 | "documents_path": "../documents", 4 | 
"documents_data_path": "./documents_data", 5 | "reflections_data_path": "./reflections_data" 6 | } -------------------------------------------------------------------------------- /params.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import json 5 | 6 | def get_param(param_name: str): 7 | with open('./params.json', 'r') as file: 8 | param = json.load(file) 9 | return param.get(param_name) 10 | 11 | if __name__ == "__main__": 12 | import sys 13 | if len(sys.argv) != 2: 14 | print("Usage: ") 15 | sys.exit(1) 16 | param_name = sys.argv[1] 17 | 18 | print(get_param(param_name)) 19 | -------------------------------------------------------------------------------- /plan.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import pandas as pd 5 | import json 6 | 7 | class Plan: 8 | def __init__(self): 9 | self.columns = ["PlanID", "DocumentID", "Purpose", "Perspectives", "ResultID", "Status"] 10 | self.data = pd.DataFrame(columns=self.columns) 11 | self.current_id = 1 12 | 13 | def set_strategy(self, detailed_strategy: str): 14 | self.detailed_strategy = detailed_strategy 15 | 16 | def add_data(self, document_id, purpose, perspectives): 17 | data = [[self.current_id, document_id, purpose, perspectives, "", "None"]] 18 | new_data = pd.DataFrame(data, columns=self.columns) 19 | self.data = pd.concat([self.data, new_data], ignore_index=True) 20 | self.current_id += 1 21 | 22 | def update_status_doing(self, plan_id: int): 23 | self.data.loc[self.data["PlanID"] == plan_id, "Status"] = "Doing" 24 | 25 | def update_status_done(self, plan_id: int, memory_id: int): 26 | self.data.loc[self.data["PlanID"] == plan_id, "Status"] = "Done" 27 | self.data.loc[self.data["PlanID"] == plan_id, "ResultID"] = memory_id 28 | 29 | def save_to_json(self, file_path): 30 | json_data = dict() 31 | json_data["DetailedStrategy"] = self.detailed_strategy 32 | json_data["Plan"] = self.data.to_dict(orient="records") 33 | 34 | with open(file_path, "w", encoding="utf-8") as f: 35 | json.dump(json_data, f, indent=4, ensure_ascii=False) 36 | 37 | def get_json_data(self): 38 | self.json_data = dict() 39 | self.json_data["DetailedStrategy"] = self.detailed_strategy 40 | self.json_data["Plan"] = self.data.to_dict(orient="records") 41 | return self.json_data 42 | 43 | def load_from_json(self, file_path): 44 | with open(file_path, 'r') as file: 45 | self.json_data = json.load(file) 46 | self.detailed_strategy = self.json_data["DetailedStrategy"] 47 | self.data = pd.DataFrame(self.json_data['Plan']) 48 | 49 | def get_data(self): 50 | return self.data 51 | 52 | def get_data_by_id(self, plan_id=None): 53 | if plan_id is None: 54 | plan_id = self.current_id - 1 55 | return self.data.loc[self.data["PlanID"] == plan_id] 56 | 57 | 58 | if __name__ == "__main__": 59 | plan = Plan() 60 | i = 0 61 | count = 2 62 | while i < count: 63 | doc_id = input("DocumentID> ") 64 | purpose = input("Purpose> ") 65 | perspectives = input("Perspectives> ") 66 | ids = input("ResultID> ") 67 | status = input("Status> ") 68 | plan.add_data(doc_id, purpose, perspectives, ids, status) 69 | print(plan.get_data_by_id()) 70 | i += 1 71 | plan.save_to_json("test/result/plan.json") 72 | -------------------------------------------------------------------------------- /planner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 
-*- coding: utf-8 -*- 3 | 4 | import pandas as pd 5 | import json 6 | from prompt_template import PromptTemplate 7 | from question import get_response 8 | from plan import Plan 9 | import os 10 | import traceback 11 | import json_utils 12 | from check_recover_json import check_json_str, recover_json_str 13 | 14 | class Planner: 15 | def __init__(self, main_question, mission_path, strategy_path, query_plan_path, strategy_history_path, background_knowledge_path, acquired_knowledge_path): 16 | self.main_question = main_question 17 | self.mission_path = mission_path 18 | self.strategy_path = strategy_path 19 | self.query_plan_path = query_plan_path 20 | with open(background_knowledge_path, 'r') as file: 21 | self.background_knowledges = file.read() 22 | with open(acquired_knowledge_path, 'r') as file: 23 | self.acquired_knowledges = file.read() 24 | self.strategy_history_path = strategy_history_path 25 | if os.path.exists(strategy_history_path): 26 | with open(strategy_history_path, 'r') as file: 27 | self.strategy_history_json = json.load(file) 28 | else: 29 | self.strategy_history_json = {} 30 | self.plan = Plan() 31 | 32 | def generate_query(self, document_list, history): 33 | pmission = PromptTemplate(self.mission_path) 34 | self.mission = pmission.get_prompt() 35 | 36 | pstrategy = PromptTemplate(self.strategy_path) 37 | self.strategy = pstrategy.get_prompt() 38 | 39 | pquery_plan = PromptTemplate(self.query_plan_path) 40 | past_strategies = [] 41 | if "Strategies" in self.strategy_history_json: 42 | past_strategies = self.strategy_history_json["Strategies"] 43 | 44 | self.query_plan = pquery_plan.get_prompt( 45 | MainQuestion = self.main_question, 46 | Mission = self.mission, 47 | Strategy = self.strategy, 48 | DocumentList = document_list, 49 | History = history, 50 | PastStrategies = past_strategies, 51 | BackgroundKnowledges = self.background_knowledges, 52 | AcquiredKnowledges = self.acquired_knowledges 53 | ) 54 | print(self.query_plan) 55 | 56 | 57 | def create_plan(self): 58 | try: 59 | self.reply_raw = get_response(self.query_plan) 60 | except Exception as e: 61 | traceback_str = traceback.format_exc() 62 | error_message = f"ERROR: {str(e)}" 63 | print(traceback_str + error_message) 64 | sys.exit(1) 65 | 66 | #self.reply_raw = json_utils.parse_plan(self.reply_raw) 67 | count = 1 68 | while True: 69 | result, errorcode = check_json_str(self.reply_raw) 70 | if result == False and count <= 5: 71 | print(self.reply_raw) 72 | print("ERROR: RECOVER JSON PROCESS of PLAN RETRY_COUNT=", count) 73 | self.reply_raw = recover_json_str(errorcode, self.reply_raw) 74 | count += 1 75 | else: 76 | if result == True: 77 | print(self.reply_raw) 78 | print("INFO: PLAN JSON DATA IS OK") 79 | else: 80 | print(self.reply_raw) 81 | print("ERROR: SORRY CAN NOT RECOVER JSON DATA..") 82 | break 83 | 84 | try: 85 | self.reply_json = json.loads(self.reply_raw) 86 | except json.decoder.JSONDecodeError as e: 87 | traceback_str = traceback.format_exc() 88 | error_message = f"ERROR: {str(e)}" 89 | print(traceback_str + error_message) 90 | sys.exit(1) 91 | 92 | new_strategy = os.getenv("NEW_STARTEGY") 93 | print("NEW_STRATEGY:" + new_strategy) 94 | if new_strategy is None or len(new_strategy.strip()) == 0: 95 | new_strategy = self.reply_json["DetailedStrategy"] 96 | 97 | self.plan.set_strategy(new_strategy) 98 | if "Strategies" not in self.strategy_history_json: 99 | self.strategy_history_json["Strategies"] = [] 100 | self.strategy_history_json["Strategies"].append(new_strategy) 101 | 102 | for entry in 
self.reply_json["Plan"]: 103 | self.plan.add_data(entry["DocumentID"], entry["Purpose"], entry["Perspectives"]) 104 | 105 | def save_to_json(self, file_path): 106 | with open(file_path, 'w') as file: 107 | json.dump(self.reply_json, file, indent=4, ensure_ascii=False) 108 | 109 | def save_strategy_history(self): 110 | with open(self.strategy_history_path, 'w') as file: 111 | json.dump(self.strategy_history_json, file, indent=4, ensure_ascii=False) 112 | 113 | if __name__ == "__main__": 114 | import sys 115 | from params import get_param 116 | prompt_template_path = get_param("prompt_templates_path") 117 | if len(sys.argv) != 5: 118 | print("Usage: ") 119 | sys.exit(1) 120 | main_question = sys.argv[1] 121 | background_knowledge_path = sys.argv[3] 122 | acquired_knowledge_path = sys.argv[4] 123 | batch_size = 100 124 | with open(sys.argv[2], 'r') as file: 125 | lines = file.readlines() 126 | total_list = [line.strip() for line in lines] 127 | batched_list = [total_list[i:i+batch_size] for i in range(0, len(total_list), batch_size)] 128 | planner = Planner( 129 | main_question = main_question, 130 | mission_path= prompt_template_path + "/ptemplate_mission.txt", 131 | strategy_path= prompt_template_path + "/ptemplate_strategy.txt", 132 | query_plan_path= prompt_template_path + "/ptemplate_query_plan.txt", 133 | strategy_history_path="./test/strategy_history.json", 134 | background_knowledge_path = background_knowledge_path, 135 | acquired_knowledge_path = acquired_knowledge_path 136 | ) 137 | for doc_list in batched_list: 138 | planner.generate_query(doc_list, "") 139 | planner.create_plan() 140 | planner.save_to_json("test/result/reply.json") 141 | planner.plan.save_to_json("test/result/plan.json") 142 | planner.save_strategy_history() 143 | -------------------------------------------------------------------------------- /prompt_template.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | 6 | class PromptTemplate: 7 | def __init__(self, file_path): 8 | try: 9 | with open(file_path, 'r') as file: 10 | self.template = file.read() 11 | except FileNotFoundError: 12 | raise FileNotFoundError(f"File '{file_path}' not found.") 13 | 14 | def get_prompt(self, **kwargs) -> str: 15 | return self.template.format(**kwargs) 16 | 17 | if __name__ == "__main__": 18 | from params import get_param 19 | prompt_template_path = get_param("prompt_templates_path") 20 | 21 | pt = PromptTemplate(prompt_template_path + "/ptemplate_query.txt") 22 | while True: 23 | target_doc_id = input("TargetDocID> ") 24 | question = input("question> ") 25 | reply = input("reply> ") 26 | point = input("point> ") 27 | prompt = pt.get_prompt(TargetDocID=target_doc_id, question=question, reply=reply, point=int(point)) 28 | print("PROMPT:\n" +prompt) 29 | -------------------------------------------------------------------------------- /prompt_templates/ptemplate_critical_thinking.txt: -------------------------------------------------------------------------------- 1 | The question "{MainQuestion}" pertains to user operations, system troubleshooting, 2 | and understanding specifications. 3 | 4 | And the background information is as follows: 5 | BackgroundKnowledges: {BackgroundKnowledges} 6 | 7 | Please list the necessary knowledge/concepts required to answer this question 8 | and provide the reasons for each. 9 | 10 | The result should be outputted in the following JSON format. 11 | Term items must follow the Unix filename syntax. 
12 | Let's think step by step. 13 | {{ 14 | "Knowledges": [ 15 | {{ 16 | "Term": "", 17 | "Reason": "" 18 | }}, 19 | : 20 | : 21 | ] 22 | }} 23 | 24 | -------------------------------------------------------------------------------- /prompt_templates/ptemplate_evaluate.txt: -------------------------------------------------------------------------------- 1 | Main Question: {MainQuestion} 2 | Mission: {Mission} 3 | 4 | The extracted information necessary to answer the Main Question and Mission is as follows: 5 | 6 | Input: 7 | PastStrategies: {PastStrategies} 8 | PlanExecutedResults: 9 | {PlanExecutedResults} 10 | Reflection: 11 | {Reflection} 12 | 13 | Based on the information provided, please answer the Main Question in Japanease and reference 14 | the source document by citing its ID in the following format. 15 | Let's think step by step. 16 | 17 | Result: 18 | Answer: 19 | However, if there is insufficient information to provide an answer, please set Answer as "Unknown". -------------------------------------------------------------------------------- /prompt_templates/ptemplate_mission.txt: -------------------------------------------------------------------------------- 1 | Please create the best possible answer while exploring new knowledge in response to the given question. 2 | -------------------------------------------------------------------------------- /prompt_templates/ptemplate_query.txt: -------------------------------------------------------------------------------- 1 | Question: {sub_question} 2 | Also, please assess the appropriateness of the answer to the question by assigning a score between 0 and 100 in the following format (higher values indicate a more appropriate answer). 3 | Point: 4 | 5 | Let's think step by step. 6 | -------------------------------------------------------------------------------- /prompt_templates/ptemplate_query_plan.txt: -------------------------------------------------------------------------------- 1 | Please generate a JSON data structure in the following format, in JSON format, 2 | to extract the necessary information for the answer from the Mission, 3 | Strategy, and known knowledge/concepts, in response to the Main Question. 4 | Please keep the DetailedStrategy within 100 characters in a single line. 5 | 6 | {{ 7 | "DetailedStrategy": "" 8 | "Plan": [ 9 | {{ 10 | "DocumentID": "" 11 | "Purpose": "" 12 | "Perspectives": "" 13 | }}, 14 | : 15 | : 16 | ] 17 | }} 18 | And please Note these points when creating plans. 19 | - perform an investigation on one document for multiple purposes and perspectives. 20 | - conduct a cross-referenced investigation across multiple documents for a specific purpose and perspective. 21 | - If the answer cannot be obtained from one document, please investigate with the same purpose and perspective in another document. 22 | 23 | The input information is as follows. 24 | Input: 25 | Main Question: The question "{MainQuestion}" pertains to user operations of the system, troubleshooting the system, and understanding specifications. 26 | Mission: {Mission} 27 | Strategy: {Strategy} 28 | Document List: {DocumentList} 29 | History: {History} 30 | PastStrategies: {PastStrategies} 31 | BackgroundKnowledges: {BackgroundKnowledges} 32 | AcquiredKnowledges: {AcquiredKnowledges} 33 | 34 | Let's think step by step. 
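Note: this template is rendered by Planner.generate_query() through PromptTemplate.get_prompt(), a thin wrapper around str.format(); the doubled braces keep the JSON skeleton literal while the single-brace fields are substituted. Below is a minimal, illustrative standalone rendering — the argument values are invented and the template path assumes the repository root; only the PromptTemplate interface and the placeholder names come from this repository.

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Illustrative only: argument values are placeholders, not repository data.
from prompt_template import PromptTemplate

template = PromptTemplate("./prompt_templates/ptemplate_query_plan.txt")
prompt = template.get_prompt(
    MainQuestion="Why does the robot not move in the simulator?",
    Mission="Create the best possible answer while exploring new knowledge.",
    Strategy="List prerequisite knowledge, then pick documents to investigate.",
    DocumentList=["Doc-A", "Doc-B"],
    History="",
    PastStrategies=[],
    BackgroundKnowledges="",
    AcquiredKnowledges="",
)
print(prompt)  # Planner.create_plan() sends the rendered prompt to question.get_response()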
35 | -------------------------------------------------------------------------------- /prompt_templates/ptemplate_reflection.txt: -------------------------------------------------------------------------------- 1 | The question "{MainQuestion}" pertains to user operations, system troubleshooting, 2 | and understanding specifications. 3 | The previous knowledge/concepts required to answer this question have been listed as follows: 4 | {KnowledgesNeeds} 5 | 6 | And the background information is as follows: 7 | BackgroundKnowledges: {BackgroundKnowledges} 8 | 9 | To gather information related to the above concepts/knowledge, inquiries were made to existing documents, 10 | and the results are as follows: 11 | {PlanResult} 12 | 13 | Term items must follow the Unix filename syntax. 14 | Fill in the KnownInfo with the understood information. 15 | Fill in the Point with the importance of the KnownInfo by assigning a score between 0 and 100(higher values indicate a more appropriate answer). 16 | Fill in the UnknownInfo with the information that is not yet fully understood. 17 | Enumerate the relevant under DocumentIDs. 18 | Enumerate any relationships with other concepts/knowledge under Relations, along with the reasons for the relationships. 19 | Let's think step by step. 20 | 21 | {{ 22 | "Knowledges": [ 23 | {{ 24 | "Term": "", 25 | "Reason": "", 26 | "KnownInfos": [ 27 | {{ 28 | "KnownInfo": "", 29 | "Point": "(previous one)", 30 | "DocumentIDs": [ "(previous one)", ... ] 31 | }}, {{ 32 | "KnownInfo": "(acquired one)", 33 | "Point": "(acquired one)", 34 | "DocumentIDs": [ "(acquired one)", ... ] 35 | }}, 36 | : 37 | : 38 | ], 39 | "UnknownInfo": [ "", ... ], 40 | "Relations": [ 41 | {{ 42 | "Term": "", 43 | "RelationReason": "", 44 | }}, 45 | : 46 | ] 47 | }}, 48 | : 49 | : 50 | ] 51 | }} 52 | 53 | -------------------------------------------------------------------------------- /prompt_templates/ptemplate_reflection_addterms.txt: -------------------------------------------------------------------------------- 1 | The question "{MainQuestion}" pertains to user operations, system troubleshooting, 2 | and understanding specifications. 3 | The previous knowledge/concepts required to answer this question have been listed as follows: 4 | {KnowledgesNeeds} 5 | 6 | Please add new s with and derived from the obtained information. 7 | Let's think step by step. 8 | 9 | {{ 10 | "Knowledges": [ 11 | {{ 12 | "Term": "", 13 | "Reason": "", 14 | "KnownInfos": [], 15 | "UnknownInfo": [ "", ... ], 16 | "Relations": [] 17 | }}, 18 | : 19 | : 20 | ] 21 | }} 22 | 23 | -------------------------------------------------------------------------------- /prompt_templates/ptemplate_strategy.txt: -------------------------------------------------------------------------------- 1 | Background Information: 2 | The DocumentList enumerates the titles of investigatable documents. 3 | Past Investigation Status: 4 | PastStrategies lists the previously investigated DetailedStrategies. 5 | Investigation Approach: 6 | To answer the MainQuestion, please list the knowledge and concepts that could be considered as prerequisites, and 7 | consider which documents from the DocumentList should be investigated to understand those knowledge and concepts. 8 | 9 | Based on that, create a plan by treating DetailedStrategy as the investigation approach, 10 | determining investigation perspectives for each relevant document, and forming a Plan. 
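Note: the plan produced from this strategy step is tracked with the Plan and MemoryStream classes shown earlier: a plan row starts with Status "None", is switched to "Doing" while its sub-query runs, and is linked to the stored reply through ResultID once it completes. A minimal sketch of that life-cycle follows — the document ID, texts, score, and the memory.json path are illustrative assumptions; the class APIs and the plan.json path are taken from plan.py, memory_stream.py, and planner.py.

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Illustrative life-cycle of a single plan entry; all concrete values are made up.
from plan import Plan
from memory_stream import MemoryStream

plan = Plan()
plan.set_strategy("Investigate the setup guide first")           # DetailedStrategy
plan.add_data("Doc-A", "Check the Windows setup steps", ["Hakoniwa_Simulator"])

memory = MemoryStream()
plan.update_status_doing(1)                                      # PlanIDs start at 1
result_id = memory.add_data("Doc-A", "sub question", "sub answer", 80)
plan.update_status_done(1, result_id)                            # link the reply via ResultID

plan.save_to_json("test/result/plan.json")                       # same path planner.py writes
memory.save_to_json("test/result/memory.json")                   # output path assumed here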
11 | 12 | -------------------------------------------------------------------------------- /prompt_templates/ptemplate_subq_detail.txt: -------------------------------------------------------------------------------- 1 | Please obtain information based on the following purposes and perspectives. 2 | Let's think step by step. 3 | 4 | purpose: {purpose} 5 | perspectives: {perspectives} 6 | -------------------------------------------------------------------------------- /query.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | import re 4 | import openai 5 | from prompt_template import PromptTemplate 6 | from memory_stream import MemoryStream 7 | 8 | class Query: 9 | def __init__(self, target_doc_id: str, main_question: str, memory_stream: MemoryStream, qa): 10 | self.target_doc_id = target_doc_id 11 | self.main_question = main_question 12 | self.memory_stream = memory_stream 13 | self.qa = qa 14 | 15 | def run(self, prompt_template_path: str, sub_question: str): 16 | prompt_query_template = PromptTemplate(prompt_template_path) 17 | # Generate the prompt query using the template and inputs 18 | query = prompt_query_template.get_prompt(sub_question=sub_question) 19 | try: 20 | reply = self.qa({"question": query}) 21 | except openai.error.InvalidRequestError as e: 22 | print("ERROR: can not query:" + query) 23 | print("ERROR:" + e) 24 | return -1 25 | 26 | print(reply) 27 | #calculate point of reply 28 | if reply.get("answer") == None: 29 | match = False 30 | else: 31 | match = re.search(r"Point: ?([0-9\.]+)$", reply["answer"]) 32 | if match: 33 | point = float(match.group(1)) 34 | # Store the information in the MemoryStream 35 | return self._save(sub_question, reply["answer"], point) 36 | else: 37 | point = -1.0 38 | print("ERROR: can not find point in reply:" + reply["answer"]) 39 | return self._save(sub_question, reply["answer"], point) 40 | 41 | def _save(self, sub_question: str, reply: str, point: int): 42 | # Store the information in the MemoryStream 43 | return self.memory_stream.add_data( 44 | target_doc_id = self.target_doc_id, 45 | question = sub_question, 46 | reply = reply, 47 | point = point) 48 | 49 | 50 | if __name__ == "__main__": 51 | import sys 52 | from db_manager import get_qa 53 | from params import get_param 54 | param_prompt_template_path = get_param("prompt_templates_path") 55 | 56 | db_dir = ".." 57 | doc_id = "DB" 58 | qa = get_qa(db_dir, doc_id) 59 | memory_stream = MemoryStream() 60 | prompt_template_path = param_prompt_template_path + "/ptemplate_query.txt" 61 | query = Query("1", "Athrillとは何ですか?", memory_stream, qa) 62 | while True: 63 | question = input("question> ") 64 | if question == 'exit' or question == 'q' or question == "quit": 65 | print("See you again!") 66 | sys.exit(0) 67 | query.run(prompt_template_path, question) 68 | print("REPLY: " + memory_stream.get_reply()) 69 | print("POINT: " + str(memory_stream.get_point())) 70 | else: 71 | pass -------------------------------------------------------------------------------- /query_dir/q_1/result/critical_thinking.json: -------------------------------------------------------------------------------- 1 | { 2 | "Knowledges": [ 3 | { 4 | "Term": "Hakoniwa_Simulator", 5 | "Reason": "Understanding the specifications of the Hakoniwa simulator, including the correct setup and settings, is important because the robot won't move if there are setup errors or if certain settings are not properly configured." 
6 | }, 7 | { 8 | "Term": "User_Interface", 9 | "Reason": "Knowledge of the user interface, specifically the Unity Interface of the Hakoniwa simulator, is necessary since settings related to the EnableEx Ctrl checkbox might affect the robot's movement." 10 | }, 11 | { 12 | "Term": "Robotics", 13 | "Reason": "Understanding how the movements of robots can be controlled within the Hakoniwa environment is critical to troubleshoot issues related to the robot's inability to move." 14 | }, 15 | { 16 | "Term": "Computer_Networking", 17 | "Reason": "Knowledge of computer networking is important because firewall settings can impact the performance of Unity Editor and in turn, might affect the robot's movement in the simulation." 18 | }, 19 | { 20 | "Term": "Operating_Systems", 21 | "Reason": "Understanding the compatibility between the Hakoniwa simulator and the Windows operating system is important, as it impacts the operation of the simulator." 22 | }, 23 | { 24 | "Term": "Reinforcement_Learning", 25 | "Reason": "Knowledge of reinforcement learning is necessary as it is used to train robots within the Unity environment, which influences robot movements based on the calculated rewards." 26 | }, 27 | { 28 | "Term": "Python_Environment_Setup", 29 | "Reason": "Understanding how to setup the Python environment is crucial for running the Hakoniwa simulator on Windows." 30 | }, 31 | { 32 | "Term": "System_Requirements", 33 | "Reason": "Knowledge of the system requirements for running the Hakoniwa simulator on Windows is crucial to ensure that the user's computer can support the simulation." 34 | } 35 | ] 36 | } -------------------------------------------------------------------------------- /query_dir/q_1/result/plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": " Investigate documents related to simulator setup, Python env setup, reinforcement learning, and problematic steps to find the cause,", 3 | "Plan": [ 4 | { 5 | "PlanID": 1, 6 | "DocumentID": "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility", 7 | "Purpose": "Investigate setup guide for Windows to identify any missed or mishandled steps", 8 | "Perspectives": [ 9 | "Hakoniwa_Simulator", 10 | "Operating_Systems", 11 | "Python_Environment_Setup", 12 | "System_Requirements" 13 | ], 14 | "ResultID": "", 15 | "Status": "None" 16 | }, 17 | { 18 | "PlanID": 2, 19 | "DocumentID": "Python-Unity-SandboxRobot-CameraData-Retrieval", 20 | "Purpose": "Understand how to properly retrieve data from the robot to diagnose its immobility", 21 | "Perspectives": [ 22 | "System_Requirements", 23 | "Robotics" 24 | ], 25 | "ResultID": "", 26 | "Status": "None" 27 | }, 28 | { 29 | "PlanID": 3, 30 | "DocumentID": "Unity-Python-Robotics-ReinforcementLearning-Implementation", 31 | "Purpose": "Investigate reinforcement learning implementation to understand how it affects robot movements", 32 | "Perspectives": [ 33 | "Reinforcement_Learning", 34 | "Robotics" 35 | ], 36 | "ResultID": "", 37 | "Status": "None" 38 | }, 39 | { 40 | "PlanID": 4, 41 | "DocumentID": "MachineLearning-Simulation-Ubuntu-Unity-EnvironmentSetup", 42 | "Purpose": "Compare environment setup with Windows and Ubuntu to determine any potentially problematic steps", 43 | "Perspectives": [ 44 | "Hakoniwa_Simulator", 45 | "Operating_Systems", 46 | "Python_Environment_Setup" 47 | ], 48 | "ResultID": "", 49 | "Status": "None" 50 | }, 51 | { 52 | "PlanID": 5, 53 | "DocumentID": 
"Programming-Unity-Python-DroneControl-HakoniwaEnvironment", 54 | "Purpose": "Understand more about drone control in Hakoniwa to check if the problem might come from the drone's control", 55 | "Perspectives": [ 56 | "Hakoniwa_Simulator", 57 | "Robotics" 58 | ], 59 | "ResultID": "", 60 | "Status": "None" 61 | }, 62 | { 63 | "PlanID": 6, 64 | "DocumentID": "Unity-PythonAPI-RobotControl-hako_env-hako_robomodel_any-hako", 65 | "Purpose": "Investigate robot control in Hakoniwa to see if the problem comes from the interaction between robot and environment", 66 | "Perspectives": [ 67 | "Hakoniwa_Simulator", 68 | "Robotics" 69 | ], 70 | "ResultID": "", 71 | "Status": "None" 72 | }, 73 | { 74 | "PlanID": 7, 75 | "DocumentID": "ReinforcementLearning-RobotSimulation-MacUnityPython-SetupGuide-ExecutionProcedure", 76 | "Purpose": "Cross-check Windows and Mac setup and execution process for potential differences that could cause issues", 77 | "Perspectives": [ 78 | "Operating_Systems", 79 | "Python_Environment_Setup" 80 | ], 81 | "ResultID": "", 82 | "Status": "None" 83 | } 84 | ] 85 | } -------------------------------------------------------------------------------- /query_dir/q_1/result/reflection.json: -------------------------------------------------------------------------------- 1 | { 2 | "knowledges": [ 3 | { 4 | "name": "Hakoniwa_Simulator", 5 | "contents": { 6 | "Term": "Hakoniwa_Simulator", 7 | "KnownInfos": [ 8 | { 9 | "KnownInfo": "The Hakoniwa simulator requires the user to clone the AI branch of 'hakoniwa-base'. The simulation environment is created using Unity, and the Unity Editor needs to be allowed through the Windows Defender Firewall.", 10 | "DocumentIDs": [ 11 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 12 | ] 13 | }, 14 | { 15 | "KnownInfo": "The 'EnableEx Ctrl' option needs to be checked in the inspector view of the robot in the hierarchy view, otherwise, the robot cannot be controlled.", 16 | "DocumentIDs": [ 17 | "Programming-Unity-Python-DroneControl-HakoniwaEnvironment" 18 | ] 19 | }, 20 | { 21 | "KnownInfo": "Hakoniwa is a robot simulator suitable for training machine learning. However, it seems that it cannot function properly if required settings are not met. For instance, the Python interface to control robot needs to be enabled for proper functioning. If this box is unchecked, robot movements may be blocked.", 22 | "DocumentIDs": [ 23 | "Programming-Unity-Python-DroneControl-HakoniwaEnvironment" 24 | ] 25 | }, 26 | { 27 | "KnownInfo": "To set up the Hakoniwa simulator, you first need to clone the AI branch of the hakoniwa-base from GitHub. The cloning should be done in a directory under /mnt/c, not in a directory within WSL2. After cloning, you need to install the Python execution environment.", 28 | "DocumentIDs": [ 29 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 30 | ] 31 | } 32 | ], 33 | "UnknownInfo": [ 34 | "Detail information about the 'EnableEx Ctrl' option and its impact on the robot's behaviour." 35 | ], 36 | "Relations": [ 37 | { 38 | "Term": "Robotics", 39 | "RelationReason": "Understanding the settings and setup of Hakonia simulator is directly related to control of the robotics" 40 | }, 41 | { 42 | "Term": "Operating_Systems", 43 | "RelationReason": "Hakoniwa Simulator operates on a computer's operating system, and thus any compatibility issues with the OS could affect the simulator's performance." 
44 | } 45 | ] 46 | } 47 | }, 48 | { 49 | "name": "Operating_Systems", 50 | "contents": { 51 | "Term": "Operating_Systems", 52 | "KnownInfos": [ 53 | { 54 | "KnownInfo": "The setup guide mentions the use of WSL2, which needs to be installed before starting. Network tools such as route and ifconfig commands need to be installed via terminal. A system reboot may be necessary after setting environment variables.", 55 | "DocumentIDs": [ 56 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 57 | ] 58 | }, 59 | { 60 | "KnownInfo": "Unity Editor should be allowed in the Windows Defender Firewall settings for it to operate properly. Also, Unity is capable of handling conversion between different versions.", 61 | "DocumentIDs": [ 62 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 63 | ] 64 | } 65 | ], 66 | "UnknownInfo": [ 67 | "Specific network settings for WSL2 to successfully simulate Hakoniwa on Windows." 68 | ], 69 | "Relations": [ 70 | { 71 | "Term": "Computer_Networking", 72 | "RelationReason": "Because the network setting in the operating system could affect the operation of the Hakoniwa simulator." 73 | } 74 | ] 75 | } 76 | }, 77 | { 78 | "name": "User_Interface", 79 | "contents": { 80 | "Term": "User_Interface", 81 | "KnownInfos": [ 82 | { 83 | "KnownInfo": "In the Unity Interface of Hakoniwa, certain settings should be enabled for robot movement. For instance, in the Inspector view, checkbox for EnableEx Ctrl should be checked.", 84 | "DocumentIDs": [ 85 | "Programming-Unity-Python-DroneControl-HakoniwaEnvironment" 86 | ] 87 | } 88 | ], 89 | "UnknownInfo": [], 90 | "Relations": [] 91 | } 92 | }, 93 | { 94 | "name": "Unity", 95 | "contents": { 96 | "Term": "Unity", 97 | "KnownInfos": [], 98 | "UnknownInfo": [ 99 | "Specific Unity configuration that corresponds to the Hakoniwa simulator", 100 | "Potential conflicts of Unity with the reinforcement learning process" 101 | ], 102 | "Relations": [ 103 | { 104 | "Term": "Hakoniwa_Simulator", 105 | "RelationReason": "Hakoniwa Simulator uses Unity for running the environment for robotic simulation." 106 | } 107 | ] 108 | } 109 | }, 110 | { 111 | "name": "Computer_Networking", 112 | "contents": { 113 | "Term": "Computer_Networking", 114 | "KnownInfos": [ 115 | { 116 | "KnownInfo": "Unity Editor needs to be allowed in the Windows Defender Firewall settings to operate properly. 
If not, it might be the reason for the issue.", 117 | "DocumentIDs": [ 118 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 119 | ] 120 | } 121 | ], 122 | "UnknownInfo": [], 123 | "Relations": [] 124 | } 125 | } 126 | ] 127 | } 128 | -------------------------------------------------------------------------------- /query_dir/q_1/result/reply.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": " Investigate documents related to simulator setup, Python env setup, reinforcement learning, and problematic steps to find the cause,", 3 | "Plan": [ 4 | { 5 | "DocumentID": "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility", 6 | "Purpose": "Investigate setup guide for Windows to identify any missed or mishandled steps", 7 | "Perspectives": [ 8 | "Hakoniwa_Simulator", 9 | "Operating_Systems", 10 | "Python_Environment_Setup", 11 | "System_Requirements" 12 | ] 13 | }, 14 | { 15 | "DocumentID": "Python-Unity-SandboxRobot-CameraData-Retrieval", 16 | "Purpose": "Understand how to properly retrieve data from the robot to diagnose its immobility", 17 | "Perspectives": [ 18 | "System_Requirements", 19 | "Robotics" 20 | ] 21 | }, 22 | { 23 | "DocumentID": "Unity-Python-Robotics-ReinforcementLearning-Implementation", 24 | "Purpose": "Investigate reinforcement learning implementation to understand how it affects robot movements", 25 | "Perspectives": [ 26 | "Reinforcement_Learning", 27 | "Robotics" 28 | ] 29 | }, 30 | { 31 | "DocumentID": "MachineLearning-Simulation-Ubuntu-Unity-EnvironmentSetup", 32 | "Purpose": "Compare environment setup with Windows and Ubuntu to determine any potentially problematic steps", 33 | "Perspectives": [ 34 | "Hakoniwa_Simulator", 35 | "Operating_Systems", 36 | "Python_Environment_Setup" 37 | ] 38 | }, 39 | { 40 | "DocumentID": "Programming-Unity-Python-DroneControl-HakoniwaEnvironment", 41 | "Purpose": "Understand more about drone control in Hakoniwa to check if the problem might come from the drone's control", 42 | "Perspectives": [ 43 | "Hakoniwa_Simulator", 44 | "Robotics" 45 | ] 46 | }, 47 | { 48 | "DocumentID": "Unity-PythonAPI-RobotControl-hako_env-hako_robomodel_any-hako", 49 | "Purpose": "Investigate robot control in Hakoniwa to see if the problem comes from the interaction between robot and environment", 50 | "Perspectives": [ 51 | "Hakoniwa_Simulator", 52 | "Robotics" 53 | ] 54 | }, 55 | { 56 | "DocumentID": "ReinforcementLearning-RobotSimulation-MacUnityPython-SetupGuide-ExecutionProcedure", 57 | "Purpose": "Cross-check Windows and Mac setup and execution process for potential differences that could cause issues", 58 | "Perspectives": [ 59 | "Operating_Systems", 60 | "Python_Environment_Setup" 61 | ] 62 | } 63 | ] 64 | } -------------------------------------------------------------------------------- /query_dir/q_1/result/result.txt: -------------------------------------------------------------------------------- 1 | Answer: ロボットが動かない原因はいくつか考えられます。まず、Hakoniwaシミュレーターの設定が正しく行われているか確認してください。AIブランチの'hakoniwa-base'を正しくクローンし、それをWSL2ディレクトリではなく、/mnt/cに作成した任意のディレクトリにおいて作業を行っているか確認してください[ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility]。 2 | 3 | 
次に、オペレーティングシステムの相性を確認します。WSL2のインストールと、ネットワークツール(routeとifconfigコマンド)のインストールを行った上で、環境変数の設定後にシステムを再起動したか確認が必要です[ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility]。さらに、UnityエディターがWindowsディフェンダーファイアウォールを通過できるよう設定しているかも見直すべきです[ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility]。 4 | 5 | シミュレーター内でロボットが動けるようにするために、'EnableEx Ctrl'というオプションがインスペクタビューで選択されているか確認することも重要です。この設定が有効になっていないと、Pythonからドローンを操作することができません[Programming-Unity-Python-DroneControl-HakoniwaEnvironment]。 6 | 7 | 加えて、強化学習の実装はロボットの動作に直接影響を与える可能性があるため、これが問題を引き起こしている可能性もあります。ロボットが行動を選択し、その結果を学習するためのQテーブルが正しく設定されているか確認してください[Unity-Python-Robotics-ReinforcementLearning-Implementation]。 8 | 9 | これらのステップのいずれかが適切に行われていない場合、それがロボットの不動の原因となる可能性があります。不明な点が残る場合、問題の診断により詳しい情報が必要となるかもしれません。 10 | -------------------------------------------------------------------------------- /query_dir/q_1/result/updated_plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": " Investigate documents related to simulator setup, Python env setup, reinforcement learning, and problematic steps to find the cause,", 3 | "Plan": [ 4 | { 5 | "PlanID": 1, 6 | "DocumentID": "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility", 7 | "Purpose": "Investigate setup guide for Windows to identify any missed or mishandled steps", 8 | "Perspectives": [ 9 | "Hakoniwa_Simulator", 10 | "Operating_Systems", 11 | "Python_Environment_Setup", 12 | "System_Requirements" 13 | ], 14 | "ResultID": 1, 15 | "Status": "Done" 16 | }, 17 | { 18 | "PlanID": 2, 19 | "DocumentID": "Python-Unity-SandboxRobot-CameraData-Retrieval", 20 | "Purpose": "Understand how to properly retrieve data from the robot to diagnose its immobility", 21 | "Perspectives": [ 22 | "System_Requirements", 23 | "Robotics" 24 | ], 25 | "ResultID": 2, 26 | "Status": "Done" 27 | }, 28 | { 29 | "PlanID": 3, 30 | "DocumentID": "Unity-Python-Robotics-ReinforcementLearning-Implementation", 31 | "Purpose": "Investigate reinforcement learning implementation to understand how it affects robot movements", 32 | "Perspectives": [ 33 | "Reinforcement_Learning", 34 | "Robotics" 35 | ], 36 | "ResultID": 3, 37 | "Status": "Done" 38 | }, 39 | { 40 | "PlanID": 4, 41 | "DocumentID": "MachineLearning-Simulation-Ubuntu-Unity-EnvironmentSetup", 42 | "Purpose": "Compare environment setup with Windows and Ubuntu to determine any potentially problematic steps", 43 | "Perspectives": [ 44 | "Hakoniwa_Simulator", 45 | "Operating_Systems", 46 | "Python_Environment_Setup" 47 | ], 48 | "ResultID": 4, 49 | "Status": "Done" 50 | }, 51 | { 52 | "PlanID": 5, 53 | "DocumentID": "Programming-Unity-Python-DroneControl-HakoniwaEnvironment", 54 | "Purpose": "Understand more about drone control in Hakoniwa to check if the problem might come from the drone's control", 55 | "Perspectives": [ 56 | "Hakoniwa_Simulator", 57 | "Robotics" 58 | ], 59 | "ResultID": 5, 60 | "Status": "Done" 61 | }, 62 | { 63 | "PlanID": 6, 64 | "DocumentID": "Unity-PythonAPI-RobotControl-hako_env-hako_robomodel_any-hako", 65 | "Purpose": "Investigate robot control in Hakoniwa to see if the problem comes from the interaction between robot and environment", 66 | "Perspectives": [ 67 | "Hakoniwa_Simulator", 68 | "Robotics" 69 | ], 70 | "ResultID": 6, 71 | "Status": "Done" 72 | }, 73 | { 74 | "PlanID": 7, 75 | "DocumentID": "ReinforcementLearning-RobotSimulation-MacUnityPython-SetupGuide-ExecutionProcedure", 
76 | "Purpose": "Cross-check Windows and Mac setup and execution process for potential differences that could cause issues", 77 | "Perspectives": [ 78 | "Operating_Systems", 79 | "Python_Environment_Setup" 80 | ], 81 | "ResultID": 7, 82 | "Status": "Done" 83 | } 84 | ] 85 | } -------------------------------------------------------------------------------- /query_dir/q_1/strategy_history.json: -------------------------------------------------------------------------------- 1 | { 2 | "Strategies": [ 3 | " Investigate documents related to simulator setup, Python env setup, reinforcement learning, and problematic steps to find the cause," 4 | ] 5 | } -------------------------------------------------------------------------------- /query_dir/q_2/result/critical_thinking.json: -------------------------------------------------------------------------------- 1 | { 2 | "Knowledges": [ 3 | { 4 | "Term": "Hakoniwa_Simulator", 5 | "Reason": "Understanding the specifications of the Hakoniwa simulator, including the correct setup and settings, is important because the robot won't move if there are setup errors or if certain settings are not properly configured." 6 | }, 7 | { 8 | "Term": "User_Interface", 9 | "Reason": "Knowledge of the user interface, specifically the Unity Interface of the Hakoniwa simulator, is necessary since settings related to the EnableEx Ctrl checkbox might affect the robot's movement." 10 | }, 11 | { 12 | "Term": "Robotics", 13 | "Reason": "Understanding how the movements of robots can be controlled within the Hakoniwa environment is critical to troubleshoot issues related to the robot's inability to move." 14 | }, 15 | { 16 | "Term": "Computer_Networking", 17 | "Reason": "Knowledge of computer networking is important because firewall settings can impact the performance of Unity Editor and in turn, might affect the robot's movement in the simulation." 18 | }, 19 | { 20 | "Term": "Operating_Systems", 21 | "Reason": "Understanding the compatibility between the Hakoniwa simulator and the Windows operating system is important, as it impacts the operation of the simulator." 22 | }, 23 | { 24 | "Term": "Reinforcement_Learning", 25 | "Reason": "Knowledge of reinforcement learning is necessary as it is used to train robots within the Unity environment, which influences robot movements based on the calculated rewards." 26 | }, 27 | { 28 | "Term": "Python_Environment_Setup", 29 | "Reason": "Understanding how to setup the Python environment is crucial for running the Hakoniwa simulator on Windows." 30 | }, 31 | { 32 | "Term": "System_Requirements", 33 | "Reason": "Knowledge of the system requirements for running the Hakoniwa simulator on Windows is crucial to ensure that the user's computer can support the simulation." 
34 | } 35 | ] 36 | } -------------------------------------------------------------------------------- /query_dir/q_2/result/memory.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ID": 1, 4 | "TargetDocID": "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility", 5 | "Question": "Please obtain information based on the following purposes and perspectives.\nLet's think step by step.\n\npurpose: To understand the setup of the Hakoniwa simulator and specific network settings for WSL2 on Windows.\nperspectives: Hakoniwa_Simulator, Operating_Systems\n", 6 | "Reply": "Answer: To set up the Hakoniwa simulator and specific network settings for WSL2 on Windows, you need to follow these steps:\n\n1. Install WSL2 on your Windows system. This is necessary for the operation of the Hakoniwa simulator.\n\n2. Install network tools on your terminal using the command \"sudo apt install net-tools\".\n\n3. Install Unity and Blender on your system. These are necessary for the operation of the Hakoniwa simulator.\n\n4. Ensure that Git for Windows is installed on your system. If it is not, refer to the provided article and install it, setting up the environment variables as instructed. After setting the environment variables, you will need to restart your computer.\n\n5. Clone the AI branch of the hakoniwa-base using the command \"git clone -b ai --recursive https://github.com/toppers/hakoniwa-base.git\". Note that you should not clone into a directory within WSL2, but rather create a directory under /mnt/c and clone into that.\n\n6. Install the Python execution environment.\n\n7. In the WSL2 terminal, navigate to the hakoniwa-base directory and launch the Hakoniwa environment using the command \"bash docker/run.bash runtime\". \n\n8. For the Windows version, the Python program for reinforcement learning is executed on a Docker container. Therefore, create an image following the provided instructions. Note that Docker Desktop for Windows is not used. Instead, Docker Engine installed on the Ubuntu environment on WSL2 is used. \n\n9. Create a Dockerfile using the command \"bash docker/template/make_dockerfile.bash runtime ai\". If the docker command is not available, refer to the provided installation instructions.\n\n10. Start the Docker service using the command \"sudo service docker start\".\n\n11. Install the necessary Unity models following the provided instructions. The command will automatically set the IP address for communication between Unity and Hakoniwa (a virtual IP address for WSL2). Note that if you restart your computer, the IP address may change.\n\nPoint: 100", 7 | "Point": 100.0 8 | }, 9 | { 10 | "ID": 2, 11 | "TargetDocID": "Programming-Unity-Python-DroneControl-HakoniwaEnvironment", 12 | "Question": "Please obtain information based on the following purposes and perspectives.\nLet's think step by step.\n\npurpose: To understand the control settings of the Hakoniwa Simulator, specifically 'EnableEx Ctrl' option.\nperspectives: User_Interface, Hakoniwa_Simulator\n", 13 | "Reply": "Answer: To control the Hakoniwa Simulator, you need to make some settings in the Unity interface. First, you need to click on the green arrow (Y-axis) and move it upwards. Then, in the hierarchy view, click on 'Drone' and bring up the inspector view. In the inspector view, you need to check the 'EnableEx Ctrl' option. If you don't do this, you won't be able to control the drone from Python. 
After these settings, you can move the drone using the arrow keys and the space key for ascending and the A key for descending. If you restart Windows, you need to click on 'Window/Hakoniwa/Generate' at least once.\n\nPoint: 90", 14 | "Point": 90.0 15 | }, 16 | { 17 | "ID": 3, 18 | "TargetDocID": "Unity-Python-Robotics-ReinforcementLearning-Implementation", 19 | "Question": "Please obtain information based on the following purposes and perspectives.\nLet's think step by step.\n\npurpose: To understand how Unity interfaces with reinforcement learning for robotic control.\nperspectives: Unity\n", 20 | "Reply": "Answer: The articles discuss using Unity, Python, and a sandbox environment to train robots through reinforcement learning. The robot model used is the same as the ET Robocon competition robot, which is also similar to a cargo transport robot. The robot in the Unity environment uses two sensors: an ultrasonic sensor to measure the distance to the central cylinder, which is used to calculate rewards, and a color sensor for line tracing. The color sensor takes continuous values from 0 to 100. The learned program can be converted into a development language for real robots (such as C language), ensuring accuracy at the timing level.\n\nPoint: 85", 21 | "Point": 85.0 22 | }, 23 | { 24 | "ID": 4, 25 | "TargetDocID": "MachineLearning-Simulation-Ubuntu-Unity-EnvironmentSetup", 26 | "Question": "Please obtain information based on the following purposes and perspectives.\nLet's think step by step.\n\npurpose: To understand network settings and compatibility issues that might affect the Hakoniwa Simulator.\nperspectives: Operating_Systems, Hakoniwa_Simulator\n", 27 | "Reply": "Answer: I'm sorry, but the provided context does not contain information about network settings or compatibility issues that might affect the Hakoniwa Simulator.\n\nPoint: 0", 28 | "Point": 0.0 29 | } 30 | ] -------------------------------------------------------------------------------- /query_dir/q_2/result/next_plan_result.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": " Investigate simulator setup, robot's control, operating system, and Unity Editor's network settings.,", 3 | "Plan": [ 4 | { 5 | "DocumentID": "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility", 6 | "Purpose": "To understand the setup of the Hakoniwa simulator and specific network settings for WSL2 on Windows.", 7 | "Perspectives": "Hakoniwa_Simulator, Operating_Systems", 8 | "ResultID": { 9 | "Reply": "Answer: To set up the Hakoniwa simulator and specific network settings for WSL2 on Windows, you need to follow these steps:\n\n1. Install WSL2 on your Windows system. This is necessary for the operation of the Hakoniwa simulator.\n\n2. Install network tools on your terminal using the command \"sudo apt install net-tools\".\n\n3. Install Unity and Blender on your system. These are necessary for the operation of the Hakoniwa simulator.\n\n4. Ensure that Git for Windows is installed on your system. If it is not, refer to the provided article and install it, setting up the environment variables as instructed. After setting the environment variables, you will need to restart your computer.\n\n5. Clone the AI branch of the hakoniwa-base using the command \"git clone -b ai --recursive https://github.com/toppers/hakoniwa-base.git\". 
Note that you should not clone into a directory within WSL2, but rather create a directory under /mnt/c and clone into that.\n\n6. Install the Python execution environment.\n\n7. In the WSL2 terminal, navigate to the hakoniwa-base directory and launch the Hakoniwa environment using the command \"bash docker/run.bash runtime\". \n\n8. For the Windows version, the Python program for reinforcement learning is executed on a Docker container. Therefore, create an image following the provided instructions. Note that Docker Desktop for Windows is not used. Instead, Docker Engine installed on the Ubuntu environment on WSL2 is used. \n\n9. Create a Dockerfile using the command \"bash docker/template/make_dockerfile.bash runtime ai\". If the docker command is not available, refer to the provided installation instructions.\n\n10. Start the Docker service using the command \"sudo service docker start\".\n\n11. Install the necessary Unity models following the provided instructions. The command will automatically set the IP address for communication between Unity and Hakoniwa (a virtual IP address for WSL2). Note that if you restart your computer, the IP address may change.\n\nPoint: 100", 10 | "Point": 100.0 11 | } 12 | }, 13 | { 14 | "DocumentID": "Programming-Unity-Python-DroneControl-HakoniwaEnvironment", 15 | "Purpose": "To understand the control settings of the Hakoniwa Simulator, specifically 'EnableEx Ctrl' option.", 16 | "Perspectives": "User_Interface, Hakoniwa_Simulator", 17 | "ResultID": { 18 | "Reply": "Answer: To control the Hakoniwa Simulator, you need to make some settings in the Unity interface. First, you need to click on the green arrow (Y-axis) and move it upwards. Then, in the hierarchy view, click on 'Drone' and bring up the inspector view. In the inspector view, you need to check the 'EnableEx Ctrl' option. If you don't do this, you won't be able to control the drone from Python. After these settings, you can move the drone using the arrow keys and the space key for ascending and the A key for descending. If you restart Windows, you need to click on 'Window/Hakoniwa/Generate' at least once.\n\nPoint: 90", 19 | "Point": 90.0 20 | } 21 | }, 22 | { 23 | "DocumentID": "Unity-Python-Robotics-ReinforcementLearning-Implementation", 24 | "Purpose": "To understand how Unity interfaces with reinforcement learning for robotic control.", 25 | "Perspectives": "Unity", 26 | "ResultID": { 27 | "Reply": "Answer: The articles discuss using Unity, Python, and a sandbox environment to train robots through reinforcement learning. The robot model used is the same as the ET Robocon competition robot, which is also similar to a cargo transport robot. The robot in the Unity environment uses two sensors: an ultrasonic sensor to measure the distance to the central cylinder, which is used to calculate rewards, and a color sensor for line tracing. The color sensor takes continuous values from 0 to 100. 
The learned program can be converted into a development language for real robots (such as C language), ensuring accuracy at the timing level.\n\nPoint: 85", 28 | "Point": 85.0 29 | } 30 | }, 31 | { 32 | "DocumentID": "MachineLearning-Simulation-Ubuntu-Unity-EnvironmentSetup", 33 | "Purpose": "To understand network settings and compatibility issues that might affect the Hakoniwa Simulator.", 34 | "Perspectives": "Operating_Systems, Hakoniwa_Simulator", 35 | "ResultID": { 36 | "Reply": "Answer: I'm sorry, but the provided context does not contain information about network settings or compatibility issues that might affect the Hakoniwa Simulator.\n\nPoint: 0", 37 | "Point": 0.0 38 | } 39 | } 40 | ] 41 | } -------------------------------------------------------------------------------- /query_dir/q_2/result/plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": " Investigate simulator setup, robot's control, operating system, and Unity Editor's network settings.,", 3 | "Plan": [ 4 | { 5 | "PlanID": 1, 6 | "DocumentID": "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility", 7 | "Purpose": "To understand the setup of the Hakoniwa simulator and specific network settings for WSL2 on Windows.", 8 | "Perspectives": "Hakoniwa_Simulator, Operating_Systems", 9 | "ResultID": "", 10 | "Status": "None" 11 | }, 12 | { 13 | "PlanID": 2, 14 | "DocumentID": "Programming-Unity-Python-DroneControl-HakoniwaEnvironment", 15 | "Purpose": "To understand the control settings of the Hakoniwa Simulator, specifically 'EnableEx Ctrl' option.", 16 | "Perspectives": "User_Interface, Hakoniwa_Simulator", 17 | "ResultID": "", 18 | "Status": "None" 19 | }, 20 | { 21 | "PlanID": 3, 22 | "DocumentID": "Unity-Python-Robotics-ReinforcementLearning-Implementation", 23 | "Purpose": "To understand how Unity interfaces with reinforcement learning for robotic control.", 24 | "Perspectives": "Unity", 25 | "ResultID": "", 26 | "Status": "None" 27 | }, 28 | { 29 | "PlanID": 4, 30 | "DocumentID": "MachineLearning-Simulation-Ubuntu-Unity-EnvironmentSetup", 31 | "Purpose": "To understand network settings and compatibility issues that might affect the Hakoniwa Simulator.", 32 | "Perspectives": "Operating_Systems, Hakoniwa_Simulator", 33 | "ResultID": "", 34 | "Status": "None" 35 | } 36 | ] 37 | } -------------------------------------------------------------------------------- /query_dir/q_2/result/reflection.json: -------------------------------------------------------------------------------- 1 | { 2 | "knowledges": [ 3 | { 4 | "name": "Hakoniwa_Simulator", 5 | "contents": { 6 | "Term": "Hakoniwa_Simulator", 7 | "KnownInfos": [ 8 | { 9 | "KnownInfo": "The Hakoniwa simulator requires the user to clone the AI branch of 'hakoniwa-base'. The simulation environment is created using Unity, and the Unity Editor needs to be allowed through the Windows Defender Firewall.", 10 | "DocumentIDs": [ 11 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 12 | ] 13 | }, 14 | { 15 | "KnownInfo": "The 'EnableEx Ctrl' option needs to be checked in the inspector view of the robot in the hierarchy view, otherwise, the robot cannot be controlled.", 16 | "DocumentIDs": [ 17 | "Programming-Unity-Python-DroneControl-HakoniwaEnvironment" 18 | ] 19 | }, 20 | { 21 | "KnownInfo": "Hakoniwa is a robot simulator suitable for training machine learning. 
However, it seems that it cannot function properly if required settings are not met.", 22 | "DocumentIDs": [ 23 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 24 | ] 25 | }, 26 | { 27 | "KnownInfo": "After reviewing various documents, it was found that a Python program is used to control the robot. If the Python interface ('EnableEx Ctrl') is unchecked, robot movements may be blocked.", 28 | "DocumentIDs": [ 29 | "Programming-Unity-Python-DroneControl-HakoniwaEnvironment" 30 | ] 31 | }, 32 | { 33 | "KnownInfo": "Hakoniwa is a robot simulator suitable for training machine learning. However, it seems that it cannot function properly if required settings are not met. For instance, the Python interface to control robot needs to be enabled for proper functioning. If this box is unchecked, robot movements may be blocked.", 34 | "DocumentIDs": [ 35 | "Programming-Unity-Python-DroneControl-HakoniwaEnvironment" 36 | ] 37 | }, 38 | { 39 | "KnownInfo": "To set up the Hakoniwa simulator, you first need to clone the AI branch of the hakoniwa-base from GitHub. The cloning should be done in a directory under /mnt/c, not in a directory within WSL2. After cloning, you need to install the Python execution environment.", 40 | "DocumentIDs": [ 41 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 42 | ] 43 | } 44 | ], 45 | "UnknownInfo": [ 46 | "How Unity Editor's network settings may influence the simulator.", 47 | "How the interaction between the robot and the environment works within Hakoniwa." 48 | ], 49 | "Relations": [ 50 | { 51 | "Term": "Operating_Systems", 52 | "RelationReason": "Setup of the Hakoniwa Simulator involves operations in the Operating Systems, which can affect the robot’s movement." 53 | }, 54 | { 55 | "Term": "Unity", 56 | "RelationReason": "Hakoniwa Simulator uses Unity for running the environment for robotic simulation. Any settings in Unity could potentially affect the robot's movement within the simulator." 57 | }, 58 | { 59 | "Term": "Reinforcement_Learning", 60 | "RelationReason": "Robotics movements within the Hakoniwa Simulator are set up based on reinforcement learning principles. Understanding how it applies in this context may help diagnose the robot's immobility." 61 | }, 62 | { 63 | "Term": "Robotics", 64 | "RelationReason": "Understanding the settings and setup of Hakonia simulator is directly related to control of the robotics" 65 | } 66 | ] 67 | } 68 | }, 69 | { 70 | "name": "Computer_Networking", 71 | "contents": { 72 | "Term": "Computer_Networking", 73 | "KnownInfos": [ 74 | { 75 | "KnownInfo": "Unity Editor needs to be allowed in the Windows Defender Firewall settings to operate properly. If not, it might be the reason for the issue.", 76 | "DocumentIDs": [ 77 | "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" 78 | ] 79 | } 80 | ], 81 | "UnknownInfo": [ 82 | "Further information on specific network settings for Unity and Hakoniwa Simulator." 83 | ], 84 | "Relations": [ 85 | { 86 | "Term": "Hakoniwa_Simulator", 87 | "RelationReason": "The smooth running of Hakoniwa simulator might depend on certain network settings." 88 | }, 89 | { 90 | "Term": "Operating_Systems", 91 | "RelationReason": "The access to the network settings relies on the permissions and freedom provided by the operating system." 
92 | }, 93 | { 94 | "Term": "Unity", 95 | "RelationReason": "The operation of Unity is heavily dependent on the network accessibility, and Unity is used to run the Hakoniwa simulator." 96 | } 97 | ] 98 | } 99 | } 100 | ] 101 | } 102 | -------------------------------------------------------------------------------- /query_dir/q_2/result/reply.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": " Investigate simulator setup, robot's control, operating system, and Unity Editor's network settings.,", 3 | "Plan": [ 4 | { 5 | "DocumentID": "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility", 6 | "Purpose": "To understand the setup of the Hakoniwa simulator and specific network settings for WSL2 on Windows.", 7 | "Perspectives": "Hakoniwa_Simulator, Operating_Systems" 8 | }, 9 | { 10 | "DocumentID": "Programming-Unity-Python-DroneControl-HakoniwaEnvironment", 11 | "Purpose": "To understand the control settings of the Hakoniwa Simulator, specifically 'EnableEx Ctrl' option.", 12 | "Perspectives": "User_Interface, Hakoniwa_Simulator" 13 | }, 14 | { 15 | "DocumentID": "Unity-Python-Robotics-ReinforcementLearning-Implementation", 16 | "Purpose": "To understand how Unity interfaces with reinforcement learning for robotic control.", 17 | "Perspectives": "Unity" 18 | }, 19 | { 20 | "DocumentID": "MachineLearning-Simulation-Ubuntu-Unity-EnvironmentSetup", 21 | "Purpose": "To understand network settings and compatibility issues that might affect the Hakoniwa Simulator.", 22 | "Perspectives": "Operating_Systems, Hakoniwa_Simulator" 23 | } 24 | ] 25 | } -------------------------------------------------------------------------------- /query_dir/q_2/result/result.txt: -------------------------------------------------------------------------------- 1 | Answer: ロボットが動かない問題の原因はいくつかあります。まず、"ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility" ガイドによれば、Windowsで箱庭シミュレータを使用するための設定は以下のようになります。 2 | 3 | 1. 'hakoniwa-base'のAIブランチをクローンします。クローンはWSL2のディレクトリ内ではなく、任意のディレクトリを作成して /mnt/c の下に作成する必要があります。 4 | 5 | 2. Git for Windowsがインストールされ、環境変数が設定されていることを確認します。環境変数を設定した後にはシステムの再起動が必要です。 6 | 7 | 3. 
UnityとBlenderをインストールします。また、Windows Defender FirewallからUnityエディタが許可されていることも重要です。これがブロックされている場合、設定を変更して許可する必要があります。 8 | 9 | それでは、この設定がうまく行われていない場合、Unityが正常に動作しない可能性があり、結果的にロボットが動かなくなる可能性があります。 10 | 11 | 次に、「Programming-Unity-Python-DroneControl-HakoniwaEnvironment」のガイドによれば、ドローン(またはロボット)の制御について、「EnableEx Ctrl」オプションがチェックされていなければ、Pythonからドローンを制御することはできないとのことです。したがって、この設定が適切に行われていない場合、その影響でロボットが動かない可能性があります。 12 | 13 | 最後に、「Unity-Python-Robotics-ReinforcementLearning-Implementation」のガイドは、モーターを使ったロボットの動きと、状態に基づいたアクションの実行に関する強化学習の実装について説明しています。強化学習の設定や実装に問題がある場合も、ロボットの動きに影響が出ます。 14 | 15 | これらの情報に基づき、原因の特定と解決策の探求を行うことが重要です。同様に、Windows向けのセットアップガイドを確認し、指示通りに手順が行われているか、または防火壁やその他のネットワーク設定など、特定の問題を引き起こす可能性がある要素を調査することが有効です。 16 | -------------------------------------------------------------------------------- /query_dir/q_2/result/updated_plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "DetailedStrategy": " Investigate simulator setup, robot's control, operating system, and Unity Editor's network settings.,", 3 | "Plan": [ 4 | { 5 | "PlanID": 1, 6 | "DocumentID": "ReinforcementLearning-RobotSimulation-WindowsUnityPython-SetupGuide-UnityVersionCompatibility", 7 | "Purpose": "To understand the setup of the Hakoniwa simulator and specific network settings for WSL2 on Windows.", 8 | "Perspectives": "Hakoniwa_Simulator, Operating_Systems", 9 | "ResultID": 1, 10 | "Status": "Done" 11 | }, 12 | { 13 | "PlanID": 2, 14 | "DocumentID": "Programming-Unity-Python-DroneControl-HakoniwaEnvironment", 15 | "Purpose": "To understand the control settings of the Hakoniwa Simulator, specifically 'EnableEx Ctrl' option.", 16 | "Perspectives": "User_Interface, Hakoniwa_Simulator", 17 | "ResultID": 2, 18 | "Status": "Done" 19 | }, 20 | { 21 | "PlanID": 3, 22 | "DocumentID": "Unity-Python-Robotics-ReinforcementLearning-Implementation", 23 | "Purpose": "To understand how Unity interfaces with reinforcement learning for robotic control.", 24 | "Perspectives": "Unity", 25 | "ResultID": 3, 26 | "Status": "Done" 27 | }, 28 | { 29 | "PlanID": 4, 30 | "DocumentID": "MachineLearning-Simulation-Ubuntu-Unity-EnvironmentSetup", 31 | "Purpose": "To understand network settings and compatibility issues that might affect the Hakoniwa Simulator.", 32 | "Perspectives": "Operating_Systems, Hakoniwa_Simulator", 33 | "ResultID": 4, 34 | "Status": "Done" 35 | } 36 | ] 37 | } -------------------------------------------------------------------------------- /query_dir/q_2/strategy_history.json: -------------------------------------------------------------------------------- 1 | { 2 | "Strategies": [ 3 | " Investigate documents related to simulator setup, Python env setup, reinforcement learning, and problematic steps to find the cause,", 4 | " Investigate simulator setup, robot's control, operating system, and Unity Editor's network settings.," 5 | ] 6 | } -------------------------------------------------------------------------------- /query_dir/query.txt: -------------------------------------------------------------------------------- 1 | I'm trying to train a robot using reinforcement learning on the Windows version of Hakoniwa (a simulator for robotics), but the robot doesn't move even after starting the simulation. Can you please provide me with the possible causes, such as steps, firewall, settings, or any other considerations? 
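The q_1 and q_2 directories above follow one schema per artifact: plan.json and updated_plan.json hold the per-document investigation plan (PlanID, DocumentID, Purpose, Perspectives, ResultID, Status), memory.json records each sub-query's Reply and its score (Point), reflection.json aggregates KnownInfos, UnknownInfo and Relations per Term, and result.txt carries the final answer. The snippet below is a minimal, illustrative reader for memory.json written against that schema; it is not part of the repository, and the script name and output format are only assumptions.

#!/usr/bin/python
# -*- coding: utf-8 -*-
# summarize_memory.py (hypothetical helper): list the sub-query results recorded
# in a memory.json file, highest-scored document first.
import json
import sys

def summarize_memory(memory_path: str):
    with open(memory_path, "r") as file:
        # memory.json is a list of {ID, TargetDocID, Question, Reply, Point} entries.
        entries = json.load(file)
    for entry in sorted(entries, key=lambda e: e["Point"], reverse=True):
        print(f'{entry["Point"]:6.1f}  {entry["TargetDocID"]}')

if __name__ == "__main__":
    # Example: python3 summarize_memory.py query_dir/q_2/result/memory.json
    summarize_memory(sys.argv[1])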
-------------------------------------------------------------------------------- /question.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from langchain.agents import Tool 5 | from langchain.memory import ConversationBufferMemory 6 | from langchain.chat_models import ChatOpenAI 7 | from langchain.utilities import SerpAPIWrapper 8 | from langchain.agents import initialize_agent 9 | from langchain.agents import AgentType 10 | #from getpass import getpass 11 | import os 12 | import openai 13 | import traceback 14 | 15 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 16 | tools = [] 17 | 18 | # OpenAI APIでモデルを指定して応答を取得する 19 | def get_response(question): 20 | response = openai.ChatCompletion.create( 21 | model="gpt-4-0613", 22 | # model="gpt-4", 23 | # model="gpt-3.5-turbo", 24 | # model="gpt-3.5-turbo-0613", 25 | # model="gpt-3.5-turbo-16k", 26 | messages=[ 27 | {"role": "user", "content": question } 28 | ] 29 | ) 30 | return response["choices"][0]["message"]["content"] 31 | 32 | class TextQa: 33 | def __init__(self, doc_dir: str, doc_id: str): 34 | self.doc_dir = doc_dir 35 | self.doc_id = doc_id 36 | self.filepath = os.path.join(self.doc_dir, self.doc_id) 37 | def get_answer(self, prompt: str): 38 | res = get_response(prompt) 39 | return { 40 | "answer": res 41 | } 42 | def qa(self, question): 43 | with open(self.filepath, "r") as file: 44 | text_data = file.read() 45 | prompt = f"Input Question: {question}\nInput Text Data: {text_data}\n" 46 | return self.get_answer(prompt) 47 | 48 | @staticmethod 49 | def get_qa(doc_dir: str, doc_id: str): 50 | text_qa = TextQa(doc_dir, doc_id) 51 | func_ptr = text_qa.qa 52 | return func_ptr 53 | 54 | if __name__ == "__main__": 55 | import sys 56 | if (len(sys.argv) == 1): 57 | arg = input("> ") 58 | else: 59 | arg = sys.argv[1] 60 | if arg == "q" or arg == "quit": 61 | print("See you again!") 62 | sys.exit(0) 63 | try: 64 | ret = get_response(arg) 65 | except Exception as e: 66 | traceback_str = traceback.format_exc() 67 | error_message = f"ERROR: {str(e)}" 68 | print(traceback_str + error_message) 69 | sys.exit(1) 70 | 71 | print(ret) 72 | -------------------------------------------------------------------------------- /reflection.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from question import get_response 5 | from prompt_template import PromptTemplate 6 | import json 7 | import traceback 8 | import json_utils 9 | 10 | class Reflection: 11 | def __init__(self, main_question: str, knowledge_path: str, plan_result_path: str, prompt_template_path: str, document_list_path: str, background_knowledge_path: str): 12 | prompt_template = PromptTemplate(prompt_template_path) 13 | with open(knowledge_path, 'r') as file: 14 | KnowledgesNeeds = file.read() 15 | with open(plan_result_path, 'r') as file: 16 | PlanResult = file.read() 17 | with open(document_list_path, 'r') as file: 18 | DocumentList = file.read() 19 | with open(background_knowledge_path, 'r') as file: 20 | BackgroundKnowledges = file.read() 21 | self.query = prompt_template.get_prompt( 22 | MainQuestion=main_question, 23 | KnowledgesNeeds=KnowledgesNeeds, 24 | PlanResult=PlanResult, 25 | DocumentList=DocumentList, 26 | BackgroundKnowledges=BackgroundKnowledges 27 | ) 28 | 29 | def create(self): 30 | print(self.query) 31 | try: 32 | self.reply_raw = get_response(self.query) 33 | except Exception as e: 34 | 
traceback_str = traceback.format_exc() 35 | error_message = f"ERROR: {str(e)}" 36 | print(traceback_str + error_message) 37 | sys.exit(1) 38 | print(self.reply_raw) 39 | 40 | def save_to_raw(self, file_path): 41 | with open(file_path, 'w') as file: 42 | file.write(self.reply_raw) 43 | 44 | def save_to_json(self, file_path): 45 | with open(file_path, 'w') as file: 46 | json.dump(json.loads(self.reply_raw), file, indent=4, ensure_ascii=False) 47 | 48 | 49 | if __name__ == "__main__": 50 | import sys 51 | if len(sys.argv) != 6: 52 | print("Usage: ") 53 | sys.exit(1) 54 | main_question = sys.argv[1] 55 | document_list_path = sys.argv[2] 56 | previous_knowledge_path = sys.argv[3] 57 | background_knowledge_path = sys.argv[4] 58 | template_path = sys.argv[5] 59 | think = Reflection( 60 | main_question, 61 | previous_knowledge_path, 62 | "./test/result/plan_result.json", 63 | template_path, 64 | document_list_path, 65 | background_knowledge_path) 66 | think.create() 67 | think.save_to_raw("test/result/reflection.json") 68 | -------------------------------------------------------------------------------- /tactical_plannig.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | from plan import Plan 5 | from prompt_template import PromptTemplate 6 | from db_manager import get_qa 7 | from question import TextQa 8 | import sys 9 | 10 | class TacticalPlanning: 11 | def __init__(self, plan: Plan, db_dir: str): 12 | self.plan = plan 13 | self.db_dir = db_dir 14 | 15 | def generate_question(self, prompt_templates): 16 | prioritized_plan = self._prioritize_plan() 17 | if (len(prioritized_plan) == 0): 18 | return None 19 | 20 | #print(prioritized_plan) 21 | row = prioritized_plan.head(1).iloc[0] 22 | #print(row) 23 | plan_id = row["PlanID"] 24 | self.plan.update_status_doing(plan_id) 25 | 26 | document_id = row["DocumentID"] 27 | purpose = row["Purpose"] 28 | perspectives = row["Perspectives"] 29 | 30 | return (plan_id, document_id, self._generate_document_question(prompt_templates, document_id, purpose, perspectives)) 31 | 32 | def _prioritize_plan(self): 33 | plan_data = self.plan.get_data() 34 | prioritized_plan = plan_data.sort_values(by=["PlanID"], ascending=True) 35 | prioritized_plan = prioritized_plan.loc[prioritized_plan["Status"].isin(["Doing", "None"])] 36 | return prioritized_plan 37 | 38 | def _generate_document_question(self, prompt_template_path, document_id, purpose, perspectives): 39 | prompt_query_template = PromptTemplate(prompt_template_path) 40 | query = prompt_query_template.get_prompt(document_id=document_id, purpose=purpose, perspectives=perspectives) 41 | return query 42 | 43 | if __name__ == "__main__": 44 | if len(sys.argv) != 1 and len(sys.argv) != 2: 45 | print("USAGE: " + sys.argv[0] + " [text]") 46 | sys.exit(1) 47 | query_mode = "db_query" 48 | if len(sys.argv) == 2: 49 | query_mode = "text_query" 50 | 51 | from query import Query 52 | from memory_stream import MemoryStream 53 | from params import get_param 54 | param_prompt_template_path = get_param("prompt_templates_path") 55 | param_documents_path = get_param("documents_path") 56 | 57 | plan = Plan() 58 | plan.load_from_json("./test/result/plan.json") 59 | db_dir = param_documents_path + "/dbs" 60 | tactical_planning = TacticalPlanning(plan, db_dir) 61 | memory_stream = MemoryStream() 62 | 63 | while True: 64 | ret = tactical_planning.generate_question(param_prompt_template_path + "/ptemplate_subq_detail.txt") 65 | if ret == None: 66 | 
print("END") 67 | break 68 | plan_id = ret[0] 69 | doc_id = ret[1] 70 | question = ret[2] 71 | if query_mode == "db_query": 72 | qa = get_qa(db_dir, doc_id) 73 | else: 74 | qa = TextQa.get_qa(db_dir, doc_id) 75 | print("query_mode=", query_mode) 76 | 77 | prompt_template_path = param_prompt_template_path + "/ptemplate_query.txt" 78 | query = Query(doc_id, question, memory_stream, qa) 79 | memory_id = query.run(prompt_template_path, question) 80 | if memory_id < 0: 81 | plan.update_status_done(plan_id, memory_id) 82 | continue 83 | print("REPLY: " + memory_stream.get_reply()) 84 | print("POINT: " + str(memory_stream.get_point())) 85 | memory_stream.save_to_json("test/result/memory.json") 86 | 87 | plan.update_status_done(plan_id, memory_id) 88 | plan.save_to_json("./test/result/updated_plan.json") 89 | 90 | -------------------------------------------------------------------------------- /test/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmori/generative-agents/81cabfd014a7e20c23bacd7778cf6b3b8b7f659b/test/.gitkeep -------------------------------------------------------------------------------- /tools/cleanup_applaud_data.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf applaud_data/documents_data/* 4 | rm -rf applaud_data/reflections_data/* 5 | -------------------------------------------------------------------------------- /tools/create_doclist.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TITLE="" 4 | function do_task() 5 | { 6 | ## CREATE DB 7 | python3 generative-agents/document_db.py new tmp tmp/DB 8 | 9 | ## GET TITLE 10 | TITLE=`python3 generative-agents/document_db.py question "この文書の概要を調査し 、適切なタイトルを英名で階層形式(最大5階層)で表現して:---..。levelには空白文字は入れないで。例 :aaa-bbb-ccc-ddd" tmp/DB | tail -n 1 | sed 's/\//\-/g'` 11 | 12 | ## ADD TITLE ON DOCLIST 13 | echo "$TITLE" >> documents/document.list 14 | 15 | ## MV DB TO TITLE 16 | mv tmp/DB documents/dbs/$TITLE 17 | } 18 | rm -f documents/document.list 19 | rm -f documents/document-title-mapping.list 20 | rm -rf documents/dbs/* 21 | 22 | ls documents/docs > list.txt 23 | 24 | while IFS= read -r filepath || [[ -n "$filepath" ]]; do 25 | if [ -n "$filepath" ]; then 26 | rm -rf tmp 27 | mkdir tmp 28 | cp documents/docs/"$filepath" tmp/ 29 | do_task 30 | echo "${TITLE}: $filepath" >> documents/document-title-mapping.list 31 | fi 32 | done < list.txt 33 | -------------------------------------------------------------------------------- /tools/evaluate_answers.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 " 6 | exit 1 7 | fi 8 | 9 | qa_dir=${1} 10 | 11 | python3 evaluate_results.py \ 12 | ${qa_dir}/query.txt \ 13 | ${qa_dir}/q_1/result/result.txt \ 14 | ${qa_dir}/q_2/result/result.txt \ 15 | ./tools/prompt_templates/ptemplate_evaluate_answers.txt 16 | 17 | -------------------------------------------------------------------------------- /tools/evaluate_plans.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 " 6 | exit 1 7 | fi 8 | 9 | qa_dir=${1} 10 | 11 | python3 evaluate_results.py \ 12 | ${qa_dir}/query.txt \ 13 | ${qa_dir}/q_1/result/updated_plan.json \ 14 | ${qa_dir}/q_2/result/updated_plan.json \ 15 | 
./tools/prompt_templates/ptemplate_evaluate_plans.txt 16 | 17 | -------------------------------------------------------------------------------- /tools/evaluate_reflection.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <query_dir>" 6 | exit 1 7 | fi 8 | 9 | qa_dir=${1} 10 | 11 | python3 tools/evaluate_reflection.py \ 12 | ${qa_dir}/q_1/result/reflection.json 13 | 14 | python3 tools/evaluate_reflection.py \ 15 | ${qa_dir}/q_2/result/reflection.json 16 | -------------------------------------------------------------------------------- /tools/evaluate_reflection.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import json 5 | from deepdiff import DeepDiff 6 | import jsondiff 7 | import pandas as pd 8 | 9 | def get_value(json: dict, term: str, key: str): 10 | for entry in json["Knowledges"]: 11 | if term == entry["Term"]: 12 | return entry[key] 13 | 14 | def get_entry(json: dict, term: str): 15 | for entry in json["Knowledges"]: 16 | if term == entry["Term"]: 17 | return entry 18 | 19 | 20 | def evaluate(json_path: str): 21 | with open(json_path, 'r') as file: 22 | json_data = file.read() 23 | 24 | json_value = json.loads(json_data) 25 | #print(json_value) 26 | json_terms = [] 27 | for entry in json_value["Knowledges"]: 28 | json_terms.append(entry["Term"]) 29 | 30 | print("TermNum: ", len(json_terms)) 31 | knowns = [] 32 | for term in json_terms: 33 | known = get_value(json_value, term, "KnownInfos") 34 | knowns.append(len(known)) 35 | 36 | df = pd.DataFrame(knowns, columns=["KnownInfos"]) 37 | ave_num = df["KnownInfos"].mean() 38 | max_num = df["KnownInfos"].max() 39 | min_num = df["KnownInfos"].min() 40 | 41 | print("AveKnwNum:", ave_num, " MaxKnwNum:", max_num, " MinKnwNum:", min_num) 42 | 43 | 44 | docnums = [] 45 | for term in json_terms: 46 | knowns = get_value(json_value, term, "KnownInfos") 47 | for known in knowns: 48 | term_docids = known["DocumentIDs"] 49 | docnums.append(len(term_docids)) 50 | 51 | df = pd.DataFrame(docnums, columns=["DocNum"]) 52 | ave_num = df["DocNum"].mean() 53 | max_num = df["DocNum"].max() 54 | min_num = df["DocNum"].min() 55 | 56 | print("AveDocNum:", ave_num, " MaxDocNum:", max_num, " MinDocNum:", min_num) 57 | 58 | points = [] 59 | for term in json_terms: 60 | knowns = get_value(json_value, term, "KnownInfos") 61 | for known in knowns: 62 | points.append(float(known["Point"])) 63 | 64 | df = pd.DataFrame(points, columns=["Point"]) 65 | ave_num = df["Point"].mean() 66 | max_num = df["Point"].max() 67 | min_num = df["Point"].min() 68 | 69 | print("AvePoint:", ave_num, " MaxPoint:", max_num, " MinPoint:", min_num) 70 | 71 | known_lens = [] 72 | for term in json_terms: 73 | knowns = get_value(json_value, term, "KnownInfos") 74 | for known in knowns: 75 | known_lens.append(len(known["KnownInfo"])) 76 | 77 | df = pd.DataFrame(known_lens, columns=["KnownInfo"]) 78 | ave_num = df["KnownInfo"].mean() 79 | max_num = df["KnownInfo"].max() 80 | min_num = df["KnownInfo"].min() 81 | 82 | print("AveknwLen:", ave_num, " MaxknwLen:", max_num, " MinknwLen:", min_num) 83 | 84 | 85 | relations = [] 86 | for term in json_terms: 87 | term_relations = get_entry(json_value, term) 88 | if "Relations" in term_relations: 89 | #print(term_relations["Relations"]) 90 | relations.append(len(term_relations["Relations"])) 91 | 92 | df = pd.DataFrame(relations, columns=["RelationNum"]) 93 | ave_num =
df["RelationNum"].mean() 94 | max_num = df["RelationNum"].max() 95 | min_num = df["RelationNum"].min() 96 | print("AveRelNum:", ave_num, " MaxRelNum:", max_num, " MinRelNum:", min_num) 97 | 98 | unknowns = [] 99 | for term in json_terms: 100 | terms = get_entry(json_value, term) 101 | if "UnknownInfo" in terms: 102 | #print(terms["UnknownInfo"]) 103 | unknowns.append(len(terms["UnknownInfo"])) 104 | 105 | df = pd.DataFrame(unknowns, columns=["UnknownInfo"]) 106 | ave_num = df["UnknownInfo"].mean() 107 | max_num = df["UnknownInfo"].max() 108 | min_num = df["UnknownInfo"].min() 109 | print("AveUnkwnNum:", ave_num, " MaxUnkwnNum:", max_num, " MinUnkwnNum:", min_num) 110 | 111 | 112 | if __name__ == "__main__": 113 | import sys 114 | if len(sys.argv) != 2: 115 | print("Usage: evaluate_reflection.py <reflection_json>") 116 | sys.exit(1) 117 | json_path = sys.argv[1] 118 | evaluate(json_path) 119 | -------------------------------------------------------------------------------- /tools/evaluate_reflections.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <query_dir>" 6 | exit 1 7 | fi 8 | 9 | qa_dir=${1} 10 | 11 | python3 evaluate_results.py \ 12 | ${qa_dir}/query.txt \ 13 | ${qa_dir}/q_1/result/reflection.json \ 14 | ${qa_dir}/q_2/result/reflection.json \ 15 | ./tools/prompt_templates/ptemplate_evaluate_reflections.txt 16 | 17 | -------------------------------------------------------------------------------- /tools/get_terms.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <reflection_json>" 6 | exit 1 7 | fi 8 | 9 | reflection_path=${1} 10 | 11 | grep Term ${reflection_path} | awk -F: '{print $2}' | sort | uniq | sed 's/\"//g'| sed ':a;N;$!ba;s/\n//g' 12 | -------------------------------------------------------------------------------- /tools/prompt_templates/ptemplate_applaud.txt: -------------------------------------------------------------------------------- 1 | この会話ログから、{Name} さんの良い個性について、 2 | 具体的な行動や会話内容を引用して、{Name} さんが感動するような 3 | 誉め言葉を検討してください。 4 | 5 | その際、以下の点を考慮してください。 6 | 7 | ・なにもなしで、素晴らしいとか言われても感動しません。 8 | ・誰でも知っていること、その人が知っていることを褒められてもうれしくないです。 9 | ・誰も気づいていない、その人も気づいていない、そこを褒めることが大切です。 10 | 11 | また、出だしは、以下の文言で始めてください。 12 | 13 | 「会話のログから、{Name} さんの個性を見てみると、多くの方が気付かないような素敵な特徴が見つけられます。」 14 | 15 | 会話ログ: 16 | {log_data} -------------------------------------------------------------------------------- /tools/prompt_templates/ptemplate_evaluate_answers.txt: -------------------------------------------------------------------------------- 1 | For the Question {MainQuestion}, there are 2 answers. 2 | 3 | Answer1: {Result1} 4 | Answer2: {Result2} 5 | 6 | Please assess, in Japanese, the appropriateness of each answer 7 | by assigning a score between 0 and 100 on the following scale: 8 | 0-20 points: No information at all is provided about the question. 9 | 21-40 points: Basic information about the question is available, but key details are missing. 10 | 41-60 points: Information about the question is available, but some important details are missing. 11 | 61-80 points: The information about the question is quite detailed, but some details are missing. 12 | 81-100 points: The information about the question is very detailed and includes all necessary information. 13 | 14 | Let's think step by step.
15 | 16 | Answer1's Point: 17 | The Point Reason: 18 | 19 | Answer2's Point: 20 | The Point Reason: 21 | -------------------------------------------------------------------------------- /tools/prompt_templates/ptemplate_evaluate_plans.txt: -------------------------------------------------------------------------------- 1 | In response to the question {MainQuestion}, we have developed two different plans. 2 | 3 | Plan 1: {Result1} 4 | Plan 2: {Result2} 5 | 6 | Please evaluate the appropriateness of these plans in Japanese. 7 | 8 | For each plan, please assess and score the following aspects, and provide a total score: 9 | 10 | Achievement of Objectives (0-20 points): Evaluate to what extent the plan achieves its specific objectives, which are primarily derived from research requirements. The score will be based on the specificity and completeness of objective achievement. 11 | Strategic Consistency (0-20 points): Assess whether the overall plan is based on a clear and consistent strategy. This score is based on the consistency, clarity, and depth of strategic thinking. 12 | Detail (0-20 points): Assess how detailed the plan's strategic explanation is, and how concretely each part of the plan is designed. The score will be based on the level of detail and concreteness. 13 | Anticipated Challenges (0-20 points): Evaluate whether the plan appropriately anticipates challenges and considers solutions for dealing with them. The score will be based on the depth of challenge anticipation and the appropriateness of proposed solutions. 14 | Diversity (0-20 points): Reflect on how many different documents the plan investigates from various perspectives. This score is based on the diversity of documents and perspectives. 15 | 16 | Let's think step by step. 17 | 18 | Plan 1's Total Score: 19 | Rationale for Score: 20 | 21 | Plan 2's Total Score: 22 | Rationale for Score: 23 | -------------------------------------------------------------------------------- /tools/prompt_templates/ptemplate_evaluate_reflections.txt: -------------------------------------------------------------------------------- 1 | For the Question {MainQuestion}, there are 2 reflections. 2 | 3 | Reflection1: {Result1} 4 | Reflection2: {Result2} 5 | 6 | Please assess the appropriateness of the reflections in Japanese. 7 | Please quantify the points for each term in the reflections according to the following perspectives: 8 | 9 | Reason: High scores can be awarded if the reason is specific and clearly explains how the knowledge entry will be helpful in solving the problem. On the other hand, lower scores may be given if the reason is abstract or its importance is unclear. 10 | KnownInfos: Consider whether the known information is detailed enough and whether it is beneficial in accomplishing the task. Detailed and beneficial information scores high, while information that lacks detail or usefulness scores low. 11 | UnknownInfo: Evaluate whether the unknown information covers the elements needed to solve the problem. High scores can be given when the necessary elements are specifically pointed out, while lower scores may be given if the identification is vague or misses important elements. 12 | Relations: Evaluate how much each knowledge entry is related to others. High scores can be given when deep, specific relationships are established, while lower scores may be given if relationships are shallow or unclear. 
13 | 14 | The above perspectives should be scored on the following scale: 15 | 0-20 points: No information at all is provided about the term. 16 | 21-40 points: Basic information about the term is available, but key details are missing. 17 | 41-60 points: Information about the term is available, but some important details are missing. 18 | 61-80 points: The information about the term is quite detailed, but some details are missing. 19 | 81-100 points: The information about the term is very detailed and includes all necessary information. 20 | 21 | Please give each term a score and then calculate the total score for each reflection. 22 | Let's think step by step. 23 | 24 | Reflection1's Each Term Point: 25 | Reflection1's Total Point: 26 | The Reason for the Point: 27 | 28 | Reflection2's Each Term Point: 29 | Reflection2's Total Point: 30 | The Reason for the Point: 31 | -------------------------------------------------------------------------------- /tools/query.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <question>" 6 | exit 1 7 | fi 8 | 9 | question=${1} 10 | echo "${question}" > query_dir/query.txt 11 | bash tools/do_query.bash query_dir noidea.txt 12 | --------------------------------------------------------------------------------
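Taken together, the tools above form a small pipeline: tools/query.bash writes the question into query_dir/query.txt and hands it to tools/do_query.bash, and the evaluate_*.bash scripts then compare the q_1 and q_2 results using the prompt templates under tools/prompt_templates. The sketch below shows one possible way to drive that pipeline for a single question; it is illustrative only, the question text is an example, and it assumes OPENAI_API_KEY is set, the working directory is the repository root, and the document databases under documents/dbs were already built with tools/create_doclist.bash.

#!/usr/bin/python
# -*- coding: utf-8 -*-
# run_pipeline.py (hypothetical driver): run one query and the comparison tools.
import os
import subprocess

QUESTION = "Why does the robot stay still after the simulation starts?"  # example only

if __name__ == "__main__":
    # question.py reads the API key from this environment variable.
    assert os.getenv("OPENAI_API_KEY"), "set OPENAI_API_KEY before running"
    # Ask the question; results are expected under query_dir/q_1 and query_dir/q_2,
    # as in the committed example output.
    subprocess.run(["bash", "tools/query.bash", QUESTION], check=True)
    # Compare the two runs with the evaluation prompt templates.
    for tool in ("evaluate_answers", "evaluate_plans", "evaluate_reflections"):
        subprocess.run(["bash", f"tools/{tool}.bash", "query_dir"], check=True)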