├── .env.example
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── github-actions-github-sponsors.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CNAME
├── Dockerfile
├── LICENSE
├── README-jp.md
├── README.md
├── _config.yml
├── babyagi.py
├── babycoder
│   ├── README.md
│   ├── babycoder.py
│   ├── embeddings.py
│   ├── objective.sample.txt
│   └── playground
│       └── noop.md
├── backup_workspace.sh
├── classic
│   ├── ABOUT.md
│   ├── BabyBeeAGI
│   ├── BabyCatAGI.py
│   ├── README.md
│   ├── babyagi.py
│   └── requirements.txt
├── clean.sh
├── data
│   └── .gitkeep
├── docker-compose.yml
├── docs
│   ├── Architecture-20240718-2.png
│   └── BabyCommandAGI.png
├── env_dump
│   └── .gitkeep
├── executed_task_parser.py
├── extensions
│   ├── __init__.py
│   ├── argparseext.py
│   ├── dotenvext.py
│   ├── human_mode.py
│   ├── pinecone_storage.py
│   ├── ray_tasks.py
│   ├── requirements.txt
│   └── weaviate_storage.py
├── log
│   └── .gitkeep
├── new_store.sh
├── node
│   ├── .env.example
│   ├── README.md
│   ├── babyagi.js
│   ├── package-lock.json
│   └── package.json
├── pwd
│   └── .gitkeep
├── requirements.txt
├── task_parser.py
├── tools
│   ├── README.md
│   ├── __init__.py
│   ├── monitor.py
│   ├── results.py
│   └── results_browser.py
├── workspace
│   └── .gitkeep
└── workspace_backup
    └── .gitkeep
/.env.example:
--------------------------------------------------------------------------------
1 | # cp .env.example .env
2 | # Edit your .env file with your own values
3 | # Don't commit your .env file to git/push to GitHub!
4 | # Don't modify/delete .env.example unless adding extensions to the project
5 | # which require new variables to be added to the .env file
6 |
7 | # API CONFIG
8 | LLM_MODEL="claude-3-5-sonnet-20241022" # (Use the 8K-output beta "max-tokens-3-5-sonnet-2024-07-15", see https://x.com/alexalbert__/status/1812921642143900036); alternatively "openai/o1-preview" via the OpenRouter API, etc.
9 | LLM_VISION_MODEL="claude-3-5-sonnet-20241022" # LLM for use with Vision
10 | TOKEN_COUNT_MODEL="claude-3-5-sonnet-20241022" # alternatively, Use gpt-4o for o1 based on https://github.com/openai/tiktoken/issues/337#issuecomment-2348139508
11 |
12 | OPENROUTER_API_KEY=
13 |
14 | ANTHROPIC_API_KEY=
15 | ANTHROPIC_TEMPERATURE=0.5
16 |
17 | OPENAI_API_KEY=
18 | OPENAI_TEMPERATURE=0.5
19 |
20 | GOOGLE_AI_STUDIO_API_KEY=
21 | GEMINI_TEMPERATURE=
22 |
23 | # RUN CONFIG
24 | OBJECTIVE=Please install the Flutter environment via git, implement a Flutter app to play Reversi with black and white stones, and make the Flutter app you created accessible from outside the container by running 'flutter run -d web-server --web-port 8080 --web-hostname 0.0.0.0'.
25 | INITIAL_TASK=Develop a task list
26 |
27 | # STORE CONFIG
28 | RESULTS_STORE_NAME=baby-agi-default-table
29 | RESULTS_SOTRE_NUMBER=1
30 |
31 |
32 | # [Caution] The following settings are not guaranteed to work.
33 |
34 | # COOPERATIVE MODE CONFIG
35 | INSTANCE_NAME=BabyCommandAGI
36 | COOPERATIVE_MODE=none # local
37 |
38 | LLAMA_MODEL_PATH= # ex. models/llama-13B/ggml-model.bin
39 | #LLAMA_THREADS_NUM=8 # Set the number of threads for llama (optional)
40 |
41 | ## Extensions
42 | # List additional extension .env files to load (except .env.example!)
43 | DOTENV_EXTENSIONS=
44 | # Set to true to enable command line args support
45 | ENABLE_COMMAND_LINE_ARGS=false
46 |
47 |
48 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.py text eol=lf
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [saten-private] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: # Replace with a single Ko-fi username
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | otechie: # Replace with a single Otechie username
12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
14 |
--------------------------------------------------------------------------------
/.github/workflows/github-actions-github-sponsors.yml:
--------------------------------------------------------------------------------
1 | name: Generate Sponsors README
2 | on:
3 | workflow_dispatch:
4 | schedule:
5 | - cron: 30 15 * * 0-6
6 | permissions:
7 | contents: write
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout 🛎️
13 | uses: actions/checkout@v2
14 |
15 | - name: Generate Sponsors 💖
16 | uses: JamesIves/github-sponsors-readme-action@v1
17 | with:
18 | token: ${{ secrets.PAT }}
19 | file: 'README.md'
20 |
21 | - name: Generate Sponsors JP 💖
22 | uses: JamesIves/github-sponsors-readme-action@v1
23 | with:
24 | token: ${{ secrets.PAT }}
25 | file: 'README-jp.md'
26 |
27 | - name: Deploy to GitHub Pages 🚀
28 | uses: JamesIves/github-pages-deploy-action@v4.6.1
29 | with:
30 | branch: main
31 | folder: '.'
32 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | *$py.class
4 |
5 | .env
6 | .env.*
7 | env/
8 | .venv
9 | *venv/
10 |
11 | .vscode/
12 | .idea/
13 |
14 | models
15 | llama/
16 |
17 | babycoder/playground/*
18 | babycoder/playground_data/*
19 | babycoder/objective.txt
20 |
21 | # for BabyCommandAGI
22 | data/*
23 | !data/.gitkeep
24 | log/*
25 | !log/.gitkeep
26 | pwd/*
27 | !pwd/.gitkeep
28 | env_dump/*
29 | !env_dump/.gitkeep
30 | workspace/*
31 | !workspace/.gitkeep
32 | workspace_backup/*
33 | !workspace_backup/.gitkeep
34 |
35 | # for node
36 | chroma/
37 | node_modules/
38 | .DS_Store
39 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.4.0
4 | hooks:
5 | - id: trailing-whitespace
6 | - id: end-of-file-fixer
7 | - id: check-yaml
8 | - id: check-added-large-files
9 | - id: check-merge-conflict
10 | - id: debug-statements
11 | - id: requirements-txt-fixer
12 | files: requirements.txt
13 |
14 | - repo: https://github.com/psf/black
15 | rev: 23.3.0
16 | hooks:
17 | - id: black
18 |
19 | - repo: https://github.com/pycqa/isort
20 | rev: 5.11.5
21 | hooks:
22 | - id: isort
23 |
24 | - repo: https://github.com/pycqa/flake8
25 | rev: 6.0.0
26 | hooks:
27 | - id: flake8
28 | args: ["--max-line-length=140"]
29 |
--------------------------------------------------------------------------------
/CNAME:
--------------------------------------------------------------------------------
1 | babyagi.org
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim
2 |
3 | ENV PIP_NO_CACHE_DIR=true
4 | WORKDIR /tmp
5 | RUN apt-get update && apt-get install build-essential -y
6 |
7 | # Fix for the following error:
8 | # https://github.com/oobabooga/text-generation-webui/issues/1534#issuecomment-1555024722
9 | RUN apt-get install gcc-11 g++-11 -y
10 |
11 | COPY requirements.txt /tmp/requirements.txt
12 | RUN CXX=g++-11 CC=gcc-11 pip install -r requirements.txt
13 |
14 | WORKDIR /workspace
15 | WORKDIR /app
16 | COPY . /app
17 | ENTRYPOINT ["./babyagi.py"]
18 | EXPOSE 8080
19 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Yohei Nakajima
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README-jp.md:
--------------------------------------------------------------------------------
1 | # ⭐️Claude 3.7 Sonnet(Extended Thinking含む)とo3-mini-high(OpenRouter)に対応⭐️
2 |
3 | # 注意事項
4 |
5 | - 意図せず環境を破壊する恐れがあります。基本的にはDockerなどの仮想環境で実行してください。
6 | - 目的を達成できず、ループし続けることがあります。その際にAPIの使用量が多くなることがありますので、責任を持って使用してください
7 | - 基本的にはClaude 3.7 Sonnet以上で検証しているため、Claude 3.7 Sonnet以上の使用を推奨します
8 | (GPT-4 Turboを利用する場合は旧バージョンのBabyCommandAGIのv3を使用することを推奨します)
9 |
10 | # 目的
11 |
12 | BabyCommandAGIはGUIよりも古くからあるコンピューターとの対話インタフェースであるCLIとLLMを組み合わせた時、何が起きるのか検証するためのものです。コンピューターに詳しくない方はご存知無いかもしれませんが、CLIは古くからあるコンピューターとの対話インターフェースです。現在もCLIを通すことで多くのコンピューターの操作ができます(よくあるLinuxサーバーはCLIを主に使用します)。LLMとCLIが会話するのを想像してみてください。何が起きるかワクワクしますよね。是非皆さんも触って新しいユースケースを見つけて頂けると幸いです。
13 |
14 | このシステムを動かすにはClaude 3.7 Sonnet以上のAPIを推奨します。
15 |
16 | このPythonスクリプトのシステムは[BabyAGI](https://github.com/yoheinakajima/babyagi)をベースにしています。但し、[BabyAGI](https://github.com/yoheinakajima/babyagi)の思考部分だった箇所について効率良くコマンドが実行するためにかなり簡略化してしまっています。(後に変えていくかもしれません)
17 |
18 | # ユースケース
19 |
20 | BabyCommandAGIは様々なケースで使用できる可能性があります。是非皆さん使ってユースケースを見つけてみてください。
21 |
22 | 下記にわかっている有用なユースケースを記載しておきます。
23 |
24 | ## 自動プログラミング
25 |
26 | フィードバックするだけでアプリを自動的に作らせる
27 |
28 | ### プログラミング例
29 |
30 | - リバーシ
31 | https://x.com/saten_work/status/1791550524988490053
32 | - スネークゲーム
33 | https://x.com/saten_work/status/1723509089471492563
34 |
35 | ## 自動環境構築
36 |
37 | - コンテナのLinux環境にFlutterをインストールし、Flutterアプリを作成して、Webサーバーを立ち上げ、コンテナ外からアクセスできるようにさせる
38 |
39 | https://twitter.com/saten_work/status/1667126272072491009
40 |
41 | ## その他
42 |
43 | - 天気予報の取得
44 | https://x.com/saten_work/status/1791558481432232355
45 |
46 | # 仕組み
47 |
48 | このスクリプトは、次のような継続したループを実行することで動作します。(リストの後に最小のコードスケッチがあります)
49 |
50 | 1. タスクリストから次のタスクを取り出す。(最初は1つの計画タスクから始まる)
51 | 2. そのタスクがコマンドタスクか計画タスクかを判別する
52 | 3. コマンドタスクの場合:
53 |    1. コマンドを実行
54 |    2. コマンド実行結果のStatus Codeが0(成功)の場合:
55 |       5.へ
56 |    3. それ以外(失敗):
57 |       実行履歴をLLMで分析し、目的に応じて新しいタスクリストを作成
58 | 4. 計画タスクの場合:
59 |    1. 計画内容と実行履歴と目的を元にLLMで計画し、新しいタスクリストを作成する
60 | 5. ユーザーのフィードバックが生じている場合:
61 |    1. フィードバックを意識しつつ目的と実行履歴を元にLLMで計画し、新しいタスクリストを作成する
62 |
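上記のループを最小のPythonスケッチにまとめると概ね次のようになります(```is_command_task```、```execute_command```、```plan_with_llm```は仮の名前であり、実際の実装は```babyagi.py```にあって細部は異なります):

```
# 上記の継続ループの最小スケッチ(仮のヘルパー名を使用。
# 実際の実装はbabyagi.pyにあり、細部は異なります)
while task_list:
    task = task_list.pop(0)                          # 1. 次のタスクを取り出す
    if is_command_task(task):                        # 2. コマンドタスクか計画タスクか判別
        status_code, output = execute_command(task)  # 3-1. コマンドを実行
        if status_code != 0:                         # 3-3. 失敗時はLLMで分析して再計画
            task_list = plan_with_llm(OBJECTIVE, history)
    else:                                            # 4. 計画タスクはLLMで計画
        task_list = plan_with_llm(OBJECTIVE, history, plan=task)
    if user_feedback:                                # 5. フィードバックがあれば再計画
        task_list = plan_with_llm(OBJECTIVE, history, feedback=user_feedback)
```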
63 |
64 | 
65 |
66 | # セットアップ
67 |
68 | 以下の手順を実施してください。
69 |
70 | 1. ```git clone https://github.com/saten-private/BabyCommandAGI.git```
71 | 2. ```cd```でBabyCommandAGIのディレクトリに入ってください
72 | 3. ```cp .env.example .env``` で環境変数を入れるファイルを作ります
73 | 4. ANTHROPIC_API_KEYを設定します。(OpenAIのモデルを使用する場合はOPENAI_API_KEYを設定します)
74 | 5. (オプション)OBJECTIVE変数にタスク管理システムの目的を設定します。
75 |
76 | # 実行(Docker)
77 |
78 | 前提条件として、docker と docker-compose がインストールされている必要があります。Docker desktop は最もシンプルなオプションです https://www.docker.com/products/docker-desktop/
79 |
80 | ## 実行
81 |
82 | ```
83 | docker-compose up -d && docker attach babyagi
84 | ```
85 |
86 | ## 停止
87 |
88 | ```
89 | docker-compose stop
90 | ```
91 |
92 | **注意:Ctrl+Cで終了しても```docker-compose stop```や```./clean.sh```を実行しない限り停止しません。ご注意ください。**
93 |
94 | **注意:目的を達成できず、ループし続けることがあります。AnthropicやOpenAIのAPIの使用料にご注意ください。**
95 |
96 | ```workspace```フォルダにAIの生成物が作成されていきます。
97 |
98 | 失敗した場合は、再度実行すれば途中から再開できます。
99 |
100 | OBJECTIVEを変更すると将来のタスク一覧とOBJECTIVEのフィードバックがクリアされます。
101 |
102 | ## AIにフィードバック
103 |
104 | "f"を入力した際に目的に対してユーザーのフィードバックをAIに与えられます。これでGUIのようなCLIからわからない情報もAIにフィードバックできます。
105 |
106 | ## AIのコマンド実行中に返答
107 |
108 | 通常時はAIが実行しているコマンドに対して"y"や"n"のような返答はできませんが、"a"を入力すると返答できるモードになります。
109 |
110 | (ちなみにシェルコマンドが"y"や"n"のような返答を待ったまま5分以上経過し、LLMが返答した方が適切と考えた場合、その時の状況を判断して自動的にLLMが"y"や"n"のような返答を行います。)
111 |
112 | # 便利コマンド
113 |
114 | - ```./clean.sh```
115 |
116 | ```workspace```、環境(コンテナ)をリセットします。また```./new_store.sh```も実行します
117 | - ```./backup_workspace.sh```
118 |
119 | ```workspace_backup```に現在時刻のフォルダを作成して```workspace```をバックアップします
120 | (環境(コンテナ)やBabyCommandAGIのデータはバックアップされないのでご注意ください)
121 | - ```./new_store.sh```
122 |
123 | BabyCommandAGIのデータ(覚えている情報)を新しくします。新しいデータに切り替わるため、BabyCommandAGIは何も覚えていない状態になります。
124 |
125 | # ログ
126 |
127 | 実行時のログが```log```フォルダ配下に残るようになっています。
128 | OBJECTIVEの識別子とRESULTS_STORE_NAMEによりログファイル名は決まります。
129 |
130 | # 保存データ
131 |
132 | 以下にそれぞれ途中まで実行された状態が保存されます。
133 | - ```data```フォルダ配下に途中まで実行されたタスクは保存されています。
134 | - ```pwd```フォルダ配下には最後のカレントディレクトリ
135 | - ```env_dump```フォルダ配下には最後の環境変数のdump
136 |
137 | # コントリビュート
138 |
139 | BabyCommandAGI はまだ初期段階にあり、その方向性とそこに到達するためのステップを決定しているところです。現在、BabyCommandAGI が目指しているのは、「シンプル」であることです。このシンプルさを維持するために、PR を提出する際には、以下のガイドラインを遵守していただくようお願いいたします:
140 |
141 | - 大規模なリファクタリングではなく、小規模でモジュール化された修正に重点を置く。
142 | - 新機能を導入する際には、対応する特定のユースケースについて詳細な説明を行うこと。
143 |
144 | @saten-private (2023年5月21日)からのメモ:
145 |
146 | 私はオープンソースへの貢献に慣れていません。昼間は他の仕事をしており、PRやissueのチェックも頻繁にできるかはわかりません。但し、このアイディアを大事にしており、みんなの役に立つと良いと考えています。何かあれば気軽におっしゃってください。皆さんから多くのことを学びたいと思っています。
147 | 私は未熟者であり、英語も話せませんし、日本以外の文化をほぼ知らないです。但し、自分のアイディアを大事にしているし、多くの人の役に立つと良いとも考えています。
148 | (きっとこれからもつまらないアイディアをいっぱい出すと思います)
149 |
150 |
151 | ✨ BabyCommandAGIのGitHub Sponsors ✨
152 |
153 |
154 |
155 | このプロジェクトの維持は、すべての下記スポンサーのおかげで可能になっています。このプロジェクトのスポンサーとなり、あなたのアバターのロゴを下に表示したい場合は、ここ をクリックしてください。💖 5$でスポンサーになることができます。
156 |
157 |
158 |
159 |
160 |
161 |
162 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ⭐️Now supports Claude 3.7 Sonnet (including extended thinking) and o3-mini-high (OpenRouter)⭐️
2 |
3 | # Precautions
4 |
5 | - It may unintentionally destroy your environment. As a rule, run it in a virtual environment such as Docker.
6 | - It may fail to achieve its objective and keep looping. API usage may increase in such cases, so please use it responsibly.
7 | - We recommend using Claude 3.7 Sonnet or higher, as the system has mainly been verified against those models.
8 | (If you are using GPT-4 Turbo, we recommend v3, an older version of BabyCommandAGI.)
9 |
10 | # Objective
11 |
12 | BabyCommandAGI is designed to test what happens when you combine an LLM with the CLI, a computer interaction interface that is older than the GUI. Even now, many computer operations can be performed through the CLI (typical Linux servers are administered mainly via the CLI). Imagine an LLM and a CLI having a conversation. It's exciting to think about what could happen. I hope you will all try it out and find new use cases.
13 |
14 | We recommend running this system with the Claude 3.7 Sonnet API or higher.
15 |
16 | This Python script system is based on [BabyAGI](https://github.com/yoheinakajima/babyagi). However, the thinking part of [BabyAGI](https://github.com/yoheinakajima/babyagi) has been greatly simplified so that commands can be executed efficiently. (This may change later.)
17 |
18 | # Use Cases
19 |
20 | BabyCommandAGI has the potential to be used in various cases. Please try using it to find use cases.
21 |
22 | Below are some known useful use cases.
23 |
24 | ## Automatic Programming
25 |
26 | Create an app automatically just by providing feedback
27 |
28 | ### Programming Examples
29 |
30 | - Reversi
31 | https://x.com/saten_work/status/1791550524988490053
32 | - Snake Game
33 | https://x.com/saten_work/status/1723509089471492563
34 |
35 | ## Automatic Environment Setup
36 |
37 | - Install Flutter in a Linux container environment, create a Flutter app, launch a web server, and make it accessible from outside the container
38 |
39 | https://twitter.com/saten_work/status/1667126272072491009
40 |
41 | ## Other
42 |
43 | - Get weather forecast
44 | https://x.com/saten_work/status/1791558481432232355
45 |
46 | # Mechanism
47 |
48 | This script works by executing the following continuous loop (a minimal code sketch follows the list):
49 |
50 | 1. Pull the next task from the task list. (The list starts with a single plan task.)
51 | 2. Determine whether the task is a command task or a plan task.
52 | 3. If it is a command task:
53 |    1. Execute the command.
54 |    2. If the status code of the command execution result is 0 (success):
55 |       go to 5.
56 |    3. Otherwise (failure):
57 |       analyze the execution history with the LLM and create a new task list according to the OBJECTIVE.
58 | 4. If it is a plan task:
59 |    1. Plan with the LLM based on the plan task, the execution history, and the OBJECTIVE, and create a new task list.
60 | 5. If user feedback has been given:
61 |    1. Plan with the LLM based on the OBJECTIVE and the execution history while taking the feedback into account, and create a new task list.
62 |
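Condensed into a minimal Python sketch, the loop above looks roughly like this (```is_command_task```, ```execute_command```, and ```plan_with_llm``` are hypothetical names standing in for the logic in ```babyagi.py```, which differs in detail):

```
# Minimal sketch of the continuous loop above (hypothetical helper names;
# the real implementation in babyagi.py differs in detail)
while task_list:
    task = task_list.pop(0)                          # 1. pull the next task
    if is_command_task(task):                        # 2. command task or plan task?
        status_code, output = execute_command(task)  # 3-1. execute the command
        if status_code != 0:                         # 3-3. on failure, analyze and re-plan
            task_list = plan_with_llm(OBJECTIVE, history)
    else:                                            # 4. plan task: plan a new task list
        task_list = plan_with_llm(OBJECTIVE, history, plan=task)
    if user_feedback:                                # 5. re-plan when feedback arrives
        task_list = plan_with_llm(OBJECTIVE, history, feedback=user_feedback)
```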
63 | 
64 |
65 | # Setup
66 |
67 | Please follow the steps below:
68 |
69 | 1. ```git clone https://github.com/saten-private/BabyCommandAGI.git```
70 | 2. Enter the BabyCommandAGI directory with ```cd```.
71 | 3. Create a file to insert environment variables with ```cp .env.example .env```.
72 | 4. Set ANTHROPIC_API_KEY. (If you use OpenAI models, set OPENAI_API_KEY)
73 | 5. (Optional) Set the objective of the task management system to the OBJECTIVE variable.
74 |
75 | # Execution (Docker)
76 |
77 | As a prerequisite, docker and docker-compose must be installed. Docker Desktop is the simplest option: https://www.docker.com/products/docker-desktop/
78 |
79 | ## Run
80 |
81 | ```
82 | docker-compose up -d && docker attach babyagi
83 | ```
84 |
85 | ## Stop
86 |
87 | ```
88 | docker-compose stop
89 | ```
90 |
91 | **Note: Even if you exit with Ctrl+C, it will not stop unless you run ```docker-compose stop``` or ```./clean.sh```. Please be careful.**
92 |
93 | **Note: The agent might loop indefinitely if it cannot achieve its objective. Please be aware of the cost of Anthropic and OpenAI API usage.**
94 |
95 | The AI's generated items will be created in the ```workspace``` folder.
96 |
97 | If a run fails, you can resume from where it left off by running it again.
98 |
99 | Changing the OBJECTIVE will clear the list of future tasks and OBJECTIVE feedback.
100 |
101 | ## Feedback to AI
102 |
103 | By entering "f", you can give the AI user feedback on the OBJECTIVE. This lets you feed back information the AI cannot see from the CLI, such as what is shown in a GUI.
104 |
105 | ## Answer while AI is executing a command
106 |
107 | Normally, you cannot send an answer such as "y" or "n" to a command the AI is executing, but entering "a" switches to a mode where you can answer.
108 |
109 | (Incidentally, if a shell command has been waiting for an answer like "y" or "n" for more than 5 minutes and the LLM judges that answering is appropriate, the LLM will automatically provide an answer such as "y" or "n" based on its judgment of the situation at that time.)
110 |
111 | # Useful commands
112 |
113 | - ```./clean.sh```
114 |
115 | Resets ```workspace``` and the environment (container). It also runs ```./new_store.sh```.
116 | - ```./backup_workspace.sh```
117 |
118 | Backs up ```workspace``` by creating a folder named with the current time in ```workspace_backup```.
119 | (Note that the environment (container) and BabyCommandAGI's data are not backed up.)
120 | - ```./new_store.sh```
121 |
122 | Creates a fresh BabyCommandAGI data store (its remembered information). Because it switches to the new data, BabyCommandAGI will not remember anything.
123 |
124 | # Logs
125 |
126 | The logs during execution are saved under the ```log``` folder.
127 | The log file name is determined by the OBJECTIVE identifier and the RESULTS_STORE_NAME.
128 |
129 | # Saved Data
130 |
131 | The state of a partially executed run is saved in the following places:
132 | - Tasks executed so far are saved under the ```data``` folder.
133 | - The last current directory is saved under the ```pwd``` folder.
134 | - A dump of the last environment variables is saved under the ```env_dump``` folder.
135 |
136 | # Contributing
137 |
138 | BabyCommandAGI is still in the early stages, determining its direction and the steps to get there. Currently, BabyCommandAGI is aiming for simplicity. To maintain this simplicity, when submitting PRs, we kindly ask you to follow the guidelines below:
139 |
140 | - Focus on small, modularized fixes rather than large-scale refactoring.
141 | - When introducing new features, provide a detailed explanation of the corresponding specific use cases.
142 |
143 | Note from @saten-private (May 21, 2023):
144 |
145 | I am not used to contributing to open source. I work another job during the day and I don't know if I can check PRs and issues frequently. However, I cherish this idea and hope it will be useful for everyone. Please feel free to let me know if there's anything. I am looking forward to learning a lot from you all.
146 | I am a novice, I cannot speak English, and I barely know cultures outside of Japan. However, I cherish my ideas, and I hope they will be of use to many people.
147 | (I'm sure I will continue to come up with many boring ideas in the future)
148 |
149 |
150 | ✨ BabyCommandAGI's GitHub Sponsors ✨
151 |
152 |
153 | The maintenance of this project is made possible thanks to all of the following sponsors. If you'd like to become a sponsor and have your avatar logo displayed below, please click here. 💖 You can become a sponsor for $5.
154 |
155 |
156 |
157 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | title: BabyAGI
2 | email:
3 | description: >-
4 | BabyAGI is an AI-powered task management system that uses OpenAI and Pinecone APIs to create, prioritize, and execute tasks.
5 | baseurl: ""
6 | url: "https://babyagi.org"
7 | logo: docs/babyagi.png
8 | twitter_username: babyagi_
9 | github_username: yoheinakajima
10 | show_downloads: false
11 | remote_theme: pages-themes/minimal@v0.2.0
12 | include: [docs]
13 | plugins:
14 | - jekyll-remote-theme
15 |
--------------------------------------------------------------------------------
/babycoder/README.md:
--------------------------------------------------------------------------------
1 | # Babycoder: Recipe for using BabyAGI to write code
2 |
3 | Babycoder is a work-in-progress AI system that can write code for small programs given a simple objective. As part of the BabyAGI system, Babycoder's goal is to lay the foundation for creating increasingly powerful AI agents capable of managing larger and more complex projects.
4 |
5 | ## Objective
6 |
7 | The primary objective of Babycoder is to provide a recipe for developing AI agent systems capable of writing and editing code. By starting with a simple system and iterating on it, Babycoder aims to improve over time and eventually handle more extensive projects.
8 |
9 | ## How It Works
10 |
11 |
12 |
13 |
14 |
15 | Babycoder's task management system consists of several AI agents working together to create, prioritize, and execute tasks based on a predefined objective and the current state of the project being worked on. The process consists of the following steps (a condensed code sketch follows the list):
16 |
17 | 1. **Task Definition**: Four task agents define tasks in a JSON list, which includes all tasks to be executed by the system.
18 |
19 | 2. **(Optional) Human feedback**: If enabled, this step allows the user to provide feedback for each task before it is executed. The feedback is processed by an agent responsible for applying it to improve the task.
20 |
21 | 3. **Agent Assignment**: For each task, two agents collaborate to determine the agent responsible for executing the task. The possible executor agents are:
22 | - `command_executor_agent`
23 | - `code_writer_agent`
24 | - `code_refactor_agent`
25 |
26 | 4. **File Management**: The `files_management_agent` scans files in the project directory to determine which files or folders will be used by the executor agents to accomplish their tasks.
27 |
28 | 5. **Task Execution**: The executor agents perform their assigned tasks using the following capabilities:
29 | - The `command_executor_agent` runs OS commands, such as installing dependencies or creating files and folders.
30 | - The `code_writer_agent` writes new code or updates existing code, using embeddings of the current codebase to retrieve relevant code sections and ensure compatibility with other parts of the codebase.
31 | - The `code_refactor_agent` edits existing code according to the specified task, with the help of a `code_relevance_agent` that analyzes code chunks and identifies the most relevant section for editing.
32 |
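Condensed, these steps correspond to the main per-task loop of ```babycoder.py``` (a simplified sketch; printing, embeddings, human feedback, and error handling are omitted):

```
# Simplified sketch of the per-task loop in babycoder.py (details omitted)
for task in task_json["tasks"]:
    description = task["description"]

    # Step 3: recommend, then choose, the executor agent
    recommendation = task_assigner_recommendation_agent(OBJECTIVE, description)
    assignment = task_assigner_agent(OBJECTIVE, description, recommendation)
    chosen_agent = json.loads(assignment)["agent"]

    if chosen_agent == "command_executor_agent":
        # Step 5: generate an OS command as JSON and run it inside ./playground
        command_json = command_executor_agent(description, task["file_path"])
        execute_command_json(command_json)
    else:
        # Step 4: let the file management agent pick the target file,
        # then write or refactor code in it (step 5)
        file_json = file_management_agent(OBJECTIVE, description,
                                          execute_command_string("ls"), task["file_path"])
        file_path = json.loads(file_json)["file_path"]
        # ...code_writer_agent or code_refactor_agent produces the code for file_path
```
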
33 | The code is written to a folder called `playground` in Babycoder's root directory. A folder named `playground_data` is used to save embeddings of the code being written.
34 |
35 | ## How to use
36 |
37 | - Configure BabyAGI by following the instructions in the main README file.
38 | - Navigate to the babycoder directory: `cd babycoder`
39 | - Make a copy of the objective.sample.txt file (`cp objective.sample.txt objective.txt`) and update it to contain the objective of the project you want to create.
40 | - Finally, from the `./babycoder` directory, run: `python babycoder.py` and watch it write code for you!
41 |
--------------------------------------------------------------------------------
/babycoder/babycoder.py:
--------------------------------------------------------------------------------
1 | import os
2 | import openai
3 | import time
4 | import sys
5 | from typing import List, Dict, Union
6 | from dotenv import load_dotenv
7 | import json
8 | import subprocess
9 | import platform
10 |
11 | from embeddings import Embeddings
12 |
13 | # Set Variables
14 | load_dotenv()
15 | current_directory = os.getcwd()
16 | os_version = platform.release()
17 |
18 | openai_calls_retried = 0
19 | max_openai_calls_retries = 3
20 |
21 | # Set API Keys
22 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
23 | assert OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env"
24 | openai.api_key = OPENAI_API_KEY
25 |
26 | OPENAI_API_MODEL = os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")
27 | assert OPENAI_API_MODEL, "OPENAI_API_MODEL environment variable is missing from .env"
28 |
29 | if "gpt-4" in OPENAI_API_MODEL.lower():
30 | print(
31 | f"\033[91m\033[1m"
32 | + "\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****"
33 | + "\033[0m\033[0m"
34 | )
35 | OBJECTIVE = ""  # default so the assert below fails with a clear message instead of a NameError
36 | if len(sys.argv) > 1:
37 | OBJECTIVE = sys.argv[1]
38 | elif os.path.exists(os.path.join(current_directory, "objective.txt")):
39 | with open(os.path.join(current_directory, "objective.txt")) as f:
40 | OBJECTIVE = f.read()
41 |
42 | assert OBJECTIVE, "OBJECTIVE missing"
43 |
44 | ## Start of Helper/Utility functions ##
45 |
46 | def print_colored_text(text, color):
47 | color_mapping = {
48 | 'blue': '\033[34m',
49 | 'red': '\033[31m',
50 | 'yellow': '\033[33m',
51 | 'green': '\033[32m',
52 | }
53 | color_code = color_mapping.get(color.lower(), '')
54 | reset_code = '\033[0m'
55 | print(color_code + text + reset_code)
56 |
57 | def print_char_by_char(text, delay=0.00001, chars_at_once=3):
58 | for i in range(0, len(text), chars_at_once):
59 | chunk = text[i:i + chars_at_once]
60 | print(chunk, end='', flush=True)
61 | time.sleep(delay)
62 | print()
63 |
64 | def openai_call(
65 | prompt: str,
66 | model: str = OPENAI_API_MODEL,
67 | temperature: float = 0.5,
68 | max_tokens: int = 100,
69 | ):
70 | global openai_calls_retried
71 | if not model.startswith("gpt-"):
72 | # Use completion API
73 | response = openai.Completion.create(
74 | engine=model,
75 | prompt=prompt,
76 | temperature=temperature,
77 | max_tokens=max_tokens,
78 | top_p=1,
79 | frequency_penalty=0,
80 | presence_penalty=0
81 | )
82 | return response.choices[0].text.strip()
83 | else:
84 | # Use chat completion API
85 | messages=[{"role": "user", "content": prompt}]
86 | try:
87 | response = openai.ChatCompletion.create(
88 | model=model,
89 | messages=messages,
90 | temperature=temperature,
91 | max_tokens=max_tokens,
92 | n=1,
93 | stop=None,
94 | )
95 | openai_calls_retried = 0
96 | return response.choices[0].message.content.strip()
97 | except Exception as e:
98 | # try again
99 | if openai_calls_retried < max_openai_calls_retries:
100 | openai_calls_retried += 1
101 | print(f"Error calling OpenAI. Retrying {openai_calls_retried} of {max_openai_calls_retries}...")
102 | return openai_call(prompt, model, temperature, max_tokens)
103 |
104 | def execute_command_json(json_string):
105 | try:
106 | command_data = json.loads(json_string)
107 | full_command = command_data.get('command')
108 |
109 | process = subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True, cwd='playground')
110 | stdout, stderr = process.communicate(timeout=60)
111 |
112 | return_code = process.returncode
113 |
114 | if return_code == 0:
115 | return stdout
116 | else:
117 | return stderr
118 |
119 | except json.JSONDecodeError as e:
120 | return f"Error: Unable to decode JSON string: {str(e)}"
121 | except subprocess.TimeoutExpired:
122 | process.terminate()
123 | return "Error: Timeout reached (60 seconds)"
124 | except Exception as e:
125 | return f"Error: {str(e)}"
126 |
127 | def execute_command_string(command_string):
128 | try:
129 | result = subprocess.run(command_string, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True, cwd='playground')
130 | output = result.stdout or result.stderr or "No output"
131 | return output
132 |
133 | except Exception as e:
134 | return f"Error: {str(e)}"
135 |
136 | def save_code_to_file(code: str, file_path: str):
137 | full_path = os.path.join(current_directory, "playground", file_path)
138 | try:
139 | mode = 'a' if os.path.exists(full_path) else 'w'
140 | with open(full_path, mode, encoding='utf-8') as f:
141 | f.write(code + '\n\n')
142 |     except OSError:
143 |         pass  # best-effort save: silently ignore file write errors
144 |
145 | def refactor_code(modified_code: List[Dict[str, Union[int, str]]], file_path: str):
146 | full_path = os.path.join(current_directory, "playground", file_path)
147 |
148 | with open(full_path, "r", encoding="utf-8") as f:
149 | lines = f.readlines()
150 |
151 |     for modification in sorted(modified_code, key=lambda m: m["start_line"], reverse=True):  # apply bottom-up so earlier edits don't shift later line numbers
152 | start_line = modification["start_line"]
153 | end_line = modification["end_line"]
154 | modified_chunk = modification["modified_code"].splitlines()
155 |
156 | # Remove original lines within the range
157 | del lines[start_line - 1:end_line]
158 |
159 | # Insert the new modified_chunk lines
160 | for i, line in enumerate(modified_chunk):
161 | lines.insert(start_line - 1 + i, line + "\n")
162 |
163 | with open(full_path, "w", encoding="utf-8") as f:
164 | f.writelines(lines)
165 |
166 | def split_code_into_chunks(file_path: str, chunk_size: int = 50) -> List[Dict[str, Union[int, str]]]:
167 | full_path = os.path.join(current_directory, "playground", file_path)
168 |
169 | with open(full_path, "r", encoding="utf-8") as f:
170 | lines = f.readlines()
171 |
172 | chunks = []
173 | for i in range(0, len(lines), chunk_size):
174 | start_line = i + 1
175 | end_line = min(i + chunk_size, len(lines))
176 | chunk = {"start_line": start_line, "end_line": end_line, "code": "".join(lines[i:end_line])}
177 | chunks.append(chunk)
178 | return chunks
179 |
180 | ## End of Helper/Utility functions ##
181 |
182 | ## TASKS AGENTS ##
183 |
184 | def code_tasks_initializer_agent(objective: str):
185 | prompt = f"""You are an AGI agent responsible for creating a detailed JSON checklist of tasks that will guide other AGI agents to complete a given programming objective. Your task is to analyze the provided objective and generate a well-structured checklist with a clear starting point and end point, as well as tasks broken down to be very specific, clear, and executable by other agents without the context of other tasks.
186 |
187 | The current agents work as follows:
188 | - code_writer_agent: Writes code snippets or functions and saves them to the appropriate files. This agent can also append code to existing files if required.
189 | - code_refactor_agent: Responsible for modifying and refactoring existing code to meet the requirements of the task.
190 | - command_executor_agent: Executes terminal commands for tasks such as creating directories, installing dependencies, etc.
191 |
192 | Keep in mind that the agents cannot open files in text editors, and tasks should be designed to work within these agent capabilities.
193 |
194 | Here is the programming objective you need to create a checklist for: {objective}.
195 |
196 | To generate the checklist, follow these steps:
197 |
198 | 1. Analyze the objective to identify the high-level requirements and goals of the project. This will help you understand the scope and create a comprehensive checklist.
199 |
200 | 2. Break down the objective into smaller, highly specific tasks that can be worked on independently by other agents. Ensure that the tasks are designed to be executed by the available agents (code_writer_agent, code_refactor and command_executor_agent) without requiring opening files in text editors.
201 |
202 | 3. Assign a unique ID to each task for easy tracking and organization. This will help the agents to identify and refer to specific tasks in the checklist.
203 |
204 | 4. Organize the tasks in a logical order, with a clear starting point and end point. The starting point should represent the initial setup or groundwork necessary for the project, while the end point should signify the completion of the objective and any finalization steps.
205 |
206 | 5. Provide the current context for each task, which should be sufficient for the agents to understand and execute the task without referring to other tasks in the checklist. This will help agents avoid task duplication.
207 |
208 | 6. Pay close attention to the objective and make sure the tasks implement all necessary pieces needed to make the program work.
209 |
210 | 7. Compile the tasks into a well-structured JSON format, ensuring that it is easy to read and parse by other AGI agents. The JSON should include fields such as task ID, description and file_path.
211 |
212 | IMPORTANT: BE VERY CAREFUL WITH IMPORTS AND MANAGING MULTIPLE FILES. REMEMBER EACH AGENT WILL ONLY SEE A SINGLE TASK. ASK YOURSELF WHAT INFORMATION YOU NEED TO INCLUDE IN THE CONTEXT OF EACH TASK TO MAKE SURE THE AGENT CAN EXECUTE THE TASK WITHOUT SEEING THE OTHER TASKS OR WHAT WAS ACCOMPLISHED IN OTHER TASKS.
213 |
214 | Pay attention to the way files are passed in the tasks, always use full paths. For example 'project/main.py'.
215 |
216 | Make sure tasks are not duplicated.
217 |
218 | Do not take long and complex routes, minimize tasks and steps as much as possible.
219 |
220 | Here is a sample JSON output for a checklist:
221 |
222 | {{
223 | "tasks": [
224 | {{
225 | "id": 1,
226 | "description": "Run a command to create the project directory named 'project'",
227 | "file_path": "./project",
228 | }},
229 | {{
230 | "id": 2,
231 | "description": "Run a command to Install the following dependencies: 'numpy', 'pandas', 'scikit-learn', 'matplotlib'",
232 | "file_path": "null",
233 | }},
234 | {{
235 | "id": 3,
236 | "description": "Write code to create a function named 'parser' that takes an input named 'input' of type str, [perform a specific task on it], and returns a specific output",
237 | "file_path": "./project/main.py",
238 | }},
239 | ...
240 | {{
241 | "id": N,
242 | "description": "...",
243 | }}
244 | ],
245 | }}
246 |
247 | The tasks will be executed by either of the three agents: command_executor, code_writer or code_refactor. They can't interact with programs. They can either run terminal commands or write code snippets. Their output is controlled by other functions to run the commands or save their output to code files. Make sure the tasks are compatible with the current agents. ALL tasks MUST start either with the following phrases: 'Run a command to...', 'Write code to...', 'Edit existing code to...' depending on the agent that will execute the task. RETURN JSON ONLY:"""
248 |
249 | return openai_call(prompt, temperature=0.8, max_tokens=2000)
250 |
251 | def code_tasks_refactor_agent(objective: str, task_list_json):
252 | prompt = f"""You are an AGI tasks_refactor_agent responsible for adapting a task list generated by another agent to ensure the tasks are compatible with the current AGI agents. Your goal is to analyze the task list and make necessary modifications so that the tasks can be executed by the agents listed below
253 |
254 | YOU SHOULD OUTPUT THE MODIFIED TASK LIST IN THE SAME JSON FORMAT AS THE INITIAL TASK LIST. DO NOT CHANGE THE FORMAT OF THE JSON OUTPUT. DO NOT WRITE ANYTHING OTHER THAN THE MODIFIED TASK LIST IN THE JSON FORMAT.
255 |
256 | The current agents work as follows:
257 | - code_writer_agent: Writes code snippets or functions and saves them to the appropriate files. This agent can also append code to existing files if required.
258 | - code_refactor_agent: Responsible for editing current existing code/files.
259 | - command_executor_agent: Executes terminal commands for tasks such as creating directories, installing dependencies, etc.
260 |
261 | Here is the overall objective you need to refactor the tasks for: {objective}.
262 | Here is the JSON task list you need to refactor for compatibility with the current agents: {task_list_json}.
263 |
264 | To refactor the task list, follow these steps:
265 | 1. Modify the task descriptions to make them compatible with the current agents, ensuring that the tasks are self-contained, clear, and executable by the agents without additional context. You don't need to mention the agents in the task descriptions, but the tasks should be compatible with the current agents.
266 | 2. If necessary, add new tasks or remove irrelevant tasks to make the task list more suitable for the current agents.
267 | 3. Keep the JSON structure of the task list intact, maintaining the "id", "description" and "file_path" fields for each task.
268 | 4. Pay close attention to the objective and make sure the tasks implement all necessary pieces needed to make the program work.
269 |
270 | Always specify file paths to files. Make sure tasks are not duplicated. Never write code to create files. If needed, use commands to create files and folders.
271 | Return the updated JSON task list with the following format:
272 |
273 | {{
274 | "tasks": [
275 | {{
276 | "id": 1,
277 | "description": "Run a commmand to create a folder named 'project' in the current directory",
278 | "file_path": "./project",
279 | }},
280 | {{
281 | "id": 2,
282 | "description": "Write code to print 'Hello World!' with Python",
283 | "file_path": "./project/main.py",
284 | }},
285 | {{
286 | "id": 3,
287 | "description": "Write code to create a function named 'parser' that takes an input named 'input' of type str, [perform a specific task on it], and returns a specific output",
288 | "file_path": "./project/main.py",
289 |         }},
290 |         {{
291 |             "id": 4,
292 | "description": "Run a command calling the script in ./project/main.py",
293 | "file_path": "./project/main.py",
294 | }}
295 | ...
296 | ],
297 | }}
298 |
299 | IMPORTANT: All tasks should start with one of the following phrases: 'Run a command to...', 'Write code to...', 'Edit existing code to...' depending on the agent that will execute the task:
300 |
301 | ALWAYS ENSURE ALL TASKS HAVE RELEVANT CONTEXT ABOUT THE CODE TO BE WRITTEN, INCLUDE DETAILS ON HOW TO CALL FUNCTIONS, CLASSES, IMPORTS, ETC. AGENTS HAVE NO VIEW OF OTHER TASKS, SO THEY NEED TO BE SELF-CONTAINED. RETURN THE JSON:"""
302 |
303 | return openai_call(prompt, temperature=0, max_tokens=2000)
304 |
305 | def code_tasks_details_agent(objective: str, task_list_json):
306 | prompt = f"""You are an AGI agent responsible for improving a list of tasks in JSON format and adding ALL the necessary details to each task. These tasks will be executed individually by agents that have no idea about other tasks or what code exists in the codebase. It is FUNDAMENTAL that each task has enough details so that an individual isolated agent can execute. The metadata of the task is the only information the agents will have.
307 |
308 | Each task should contain the details necessary to execute it. For example, if it creates a function, it needs to contain the details about the arguments to be used in that function and this needs to be consistent across all tasks.
309 |
310 | Look at all tasks at once, and update the task description adding details to it for each task so that it can be executed by an agent without seeing the other tasks and to ensure consistency across all tasks. DETAILS ARE CRUCIAL. For example, if one task creates a class, it should have all the details about the class, including the arguments to be used in the constructor. If another task creates a function that uses the class, it should have the details about the class and the arguments to be used in the constructor.
311 |
312 | RETURN JSON OUTPUTS ONLY.
313 |
314 | Here is the overall objective you need to refactor the tasks for: {objective}.
315 | Here is the task list you need to improve: {task_list_json}
316 |
317 | RETURN THE SAME TASK LIST but with the description improved to contain the details you are adding for each task in the list. DO NOT MAKE OTHER MODIFICATIONS TO THE LIST. Your input should go in the 'description' field of each task.
318 |
319 | RETURN JSON ONLY:"""
320 | return openai_call(prompt, temperature=0.7, max_tokens=2000)
321 |
322 | def code_tasks_context_agent(objective: str, task_list_json):
323 | prompt = f"""You are an AGI agent responsible for improving a list of tasks in JSON format and adding ALL the necessary context to it. These tasks will be executed individually by agents that have no idea about other tasks or what code exists in the codebase. It is FUNDAMENTAL that each task has enough context so that an individual isolated agent can execute. The metadata of the task is the only information the agents will have.
324 |
325 | Look at all tasks at once, and add the necessary context to each task so that it can be executed by an agent without seeing the other tasks. Remember, one agent can only see one task and has no idea about what happened in other tasks. CONTEXT IS CRUCIAL. For example, if one task creates a folder and another task creates a file in that folder, the second task should contain the name of the folder that already exists and the information that it already exists.
326 |
327 | This is even more important for tasks that require importing functions, classes, etc. If a task needs to call a function or initialize a Class, it needs to have the detailed arguments, etc.
328 |
329 | Note that you should identify when imports need to happen and specify this in the context. Also, you should identify when functions/classes/etc already exist and specify this very clearly because the agents sometimes duplicate things not knowing.
330 |
331 | Always use imports with the file name. For example, 'from my_script import MyScript'.
332 |
333 | RETURN JSON OUTPUTS ONLY.
334 |
335 | Here is the overall objective you need to refactor the tasks for: {objective}.
336 | Here is the task list you need to improve: {task_list_json}
337 |
338 | RETURN THE SAME TASK LIST but with a new field called 'isolated_context' for each task in the list. This field should be a string with the context you are adding. DO NOT MAKE OTHER MODIFICATIONS TO THE LIST.
339 |
340 | RETURN JSON ONLY:"""
341 | return openai_call(prompt, temperature=0.7, max_tokens=2000)
342 |
343 | def task_assigner_recommendation_agent(objective: str, task: str):
344 | prompt = f"""You are an AGI agent responsible for providing recommendations on which agent should be used to handle a specific task. Analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and suggest the most appropriate agent to work on the task.
345 |
346 | The overall objective is: {objective}
347 | The current task is: {task}
348 |
349 | The available agents are:
350 | 1. code_writer_agent: Responsible for writing code based on the task description.
351 | 2. code_refactor_agent: Responsible for editing existing code.
352 | 3. command_executor_agent: Responsible for executing commands and handling file operations, such as creating, moving, or deleting files.
353 |
354 | When analyzing the task, consider the following tips:
355 | - Pay attention to keywords in the task description that indicate the type of action required, such as "write", "edit", "run", "create", "move", or "delete".
356 | - Keep the overall objective in mind, as it can help you understand the context of the task and guide your choice of agent.
357 | - If the task involves writing new code or adding new functionality, consider using the code_writer_agent.
358 | - If the task involves modifying or optimizing existing code, consider using the code_refactor_agent.
359 | - If the task involves file operations, command execution, or running a script, consider using the command_executor_agent.
360 |
361 | Based on the task and overall objective, suggest the most appropriate agent to work on the task."""
362 | return openai_call(prompt, temperature=0.5, max_tokens=2000)
363 |
364 | def task_assigner_agent(objective: str, task: str, recommendation: str):
365 | prompt = f"""You are an AGI agent responsible for choosing the best agent to work on a given task. Your goal is to analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and choose the best agent to work on the task.
366 |
367 | The overall objective is: {objective}
368 | The current task is: {task}
369 |
370 | Use this recommendation to guide you: {recommendation}
371 |
372 | The available agents are:
373 | 1. code_writer_agent: Responsible for writing code based on the task description.
374 | 2. code_refactor_agent: Responsible for editing existing code.
375 | 3. command_executor_agent: Responsible for executing commands and handling file operations, such as creating, moving, or deleting files.
376 |
377 | Please consider the task description and the overall objective when choosing the most appropriate agent. Keep in mind that creating a file and writing code are different tasks. If the task involves creating a file, like "calculator.py" but does not mention writing any code inside it, the command_executor_agent should be used for this purpose. The code_writer_agent should only be used when the task requires writing or adding code to a file. The code_refactor_agent should only be used when the task requires modifying existing code.
378 |
379 | TLDR: To create files, use command_executor_agent, to write text/code to files, use code_writer_agent, to modify existing code, use code_refactor_agent.
380 |
381 | Choose the most appropriate agent to work on the task and return a JSON output with the following format: {{"agent": "agent_name"}}. ONLY return JSON output:"""
382 | return openai_call(prompt, temperature=0, max_tokens=2000)
383 |
384 | def command_executor_agent(task: str, file_path: str):
385 | prompt = f"""You are an AGI agent responsible for executing a given command on the {os_version} OS. Your goal is to analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and execute the command on the {os_version} OS.
386 |
387 | The current task is: {task}
388 | File or folder name referenced in the task (relative file path): {file_path}
389 |
390 | Based on the task, write the appropriate command to execute on the {os_version} OS. Make sure the command is relevant to the task and objective. For example, if the task is to create a new folder, the command should be 'mkdir new_folder_name'. Return the command as a JSON output with the following format: {{"command": "command_to_execute"}}. ONLY return JSON output:"""
391 | return openai_call(prompt, temperature=0, max_tokens=2000)
392 |
393 | def code_writer_agent(task: str, isolated_context: str, context_code_chunks):
394 | prompt = f"""You are an AGI agent responsible for writing code to accomplish a given task. Your goal is to analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and write the necessary code to complete the task.
395 |
396 | The current task is: {task}
397 |
398 | To help you make the code useful in this codebase, use this context as reference of the other pieces of the codebase that are relevant to your task. PAY ATTENTION TO THIS: {isolated_context}
399 |
400 | The following code chunks were found to be relevant to the task. You can use them as reference to write the code if they are useful. PAY CLOSE ATTENTION TO THIS:
401 | {context_code_chunks}
402 |
403 | Note: Always use 'encoding='utf-8'' when opening files with open().
404 |
405 | Based on the task and objective, write the appropriate code to achieve the task. Make sure the code is relevant to the task and objective, and follows best practices. Return the code as a plain text output and NOTHING ELSE. Use indentation and line breaks in the code. Make sure to only write the code and nothing else, as your output will be saved directly to the file by another agent. IMPORTANT: If the task is asking you to write code to write files, this is a mistake! Interpret it and either do nothing or return the plain code, not code that writes a file, not code that writes code, etc."""
406 | return openai_call(prompt, temperature=0, max_tokens=2000)
407 |
408 | def code_refactor_agent(task_description: str, existing_code_snippet: str, context_chunks, isolated_context: str):
409 |
410 |     prompt = f"""You are an AGI agent responsible for refactoring code to accomplish a given task. Your goal is to analyze the provided major objective of the project and the task description, and refactor the code accordingly.
411 |
412 | The current task description is: {task_description}
413 | To help you make the code useful in this codebase, use this context as reference of the other pieces of the codebase that are relevant to your task: {isolated_context}
414 |
415 | Here are some context chunks that might be relevant to the task:
416 | {context_chunks}
417 |
418 | Existing code you should refactor:
419 | {existing_code_snippet}
420 |
421 | Based on the task description and objective, refactor the existing code to achieve the task. Make sure the refactored code is relevant to the task and objective, follows best practices, etc.
422 |
423 | Return a plain text code snippet with your refactored code. IMPORTANT: JUST RETURN CODE, YOUR OUTPUT WILL BE ADDED DIRECTLY TO THE FILE BY OTHER AGENT. BE MINDFUL OF THIS:"""
424 |
425 | return openai_call(prompt, temperature=0, max_tokens=2000)
426 |
427 | def file_management_agent(objective: str, task: str, current_directory_files: str, file_path: str):
428 | prompt = f"""You are an AGI agent responsible for managing files in a software project. Your goal is to analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and determine the appropriate file path and name for the generated code.
429 |
430 | The overall objective is: {objective}
431 | The current task is: {task}
432 | Specified file path (relative path from the current dir): {file_path}
433 |
434 | Make the file path adapted for the current directory files. The current directory files are: {current_directory_files}. Assume this file_path will be interpreted from the root path of the directory.
435 |
436 | Do not use '.' or './' in the file path.
437 |
438 | BE VERY SPECIFIC WITH THE FILES, AVOID FILE DUPLICATION, AVOID SPECIFYING THE SAME FILE NAME UNDER DIFFERENT FOLDERS, ETC.
439 |
440 | Based on the task, determine the file path and name for the generated code. Return the file path and name as a JSON output with the following format: {{"file_path": "file_path_and_name"}}. ONLY return JSON output:"""
441 | return openai_call(prompt, temperature=0, max_tokens=2000)
442 |
443 | def code_relevance_agent(objective: str, task_description: str, code_chunk: str):
444 | prompt = f"""You are an AGI agent responsible for evaluating the relevance of a code chunk in relation to a given task. Your goal is to analyze the provided major objective of the project, the task description, and the code chunk, and assign a relevance score from 0 to 10, where 0 is completely irrelevant and 10 is highly relevant.
445 |
446 | The overall objective is: {objective}
447 | The current task description is: {task_description}
448 | The code chunk is as follows (line numbers included):
449 | {code_chunk}
450 |
451 | Based on the task description, objective, and code chunk, assign a relevance score between 0 and 10 (inclusive) for the code chunk. DO NOT OUTPUT ANYTHING OTHER THAN THE RELEVANCE SCORE AS A NUMBER."""
452 |
453 | relevance_score = openai_call(prompt, temperature=0.5, max_tokens=50)
454 |
455 | return json.dumps({"relevance_score": relevance_score.strip()})
456 |
457 | def task_human_input_agent(task: str, human_feedback: str):
458 | prompt = f"""You are an AGI agent responsible for getting human input to improve the quality of tasks in a software project. Your goal is to analyze the provided task and adapt it based on the human's suggestions. The tasks should start with either 'Run a command to...', 'Write code to...', or 'Edit existing code to...' depending on the agent that will execute the task.
459 |
460 | For context, this task will be executed by other AGI agents with the following characteristics:
461 | - code_writer_agent: Writes code snippets or functions and saves them to the appropriate files. This agent can also append code to existing files if required.
462 | - code_refactor_agent: Responsible for modifying and refactoring existing code to meet the requirements of the task.
463 | - command_executor_agent: Executes terminal commands for tasks such as creating directories, installing dependencies, etc.
464 |
465 | The current task is:
466 | {task}
467 |
468 | The human feedback is:
469 | {human_feedback}
470 |
471 | If the human feedback is empty, return the task as is. If the human feedback is saying to ignore the task, return the following string:
472 |
473 | Note that your output will replace the existing task, so make sure that your output is a valid task that starts with one of the required phrases ('Run a command to...', 'Write code to...', 'Edit existing code to...').
474 |
475 | Please adjust the task based on the human feedback while ensuring it starts with one of the required phrases ('Run a command to...', 'Write code to...', 'Edit existing code to...'). Return the improved task as a plain text output and nothing else. Write only the new task."""
476 |
477 | return openai_call(prompt, temperature=0.3, max_tokens=200)
478 |
479 | ## END OF AGENTS ##
480 |
481 | print_colored_text(f"****Objective****", color='green')
482 | print_char_by_char(OBJECTIVE, 0.00001, 10)
483 |
484 | # Create the tasks
485 | print_colored_text("*****Working on tasks*****", "red")
486 | print_colored_text(" - Creating initial tasks", "yellow")
487 | task_agent_output = code_tasks_initializer_agent(OBJECTIVE)
488 | print_colored_text(" - Reviewing and refactoring tasks to fit agents", "yellow")
489 | task_agent_output = code_tasks_refactor_agent(OBJECTIVE, task_agent_output)
490 | print_colored_text(" - Adding relevant technical details to the tasks", "yellow")
491 | task_agent_output = code_tasks_details_agent(OBJECTIVE, task_agent_output)
492 | print_colored_text(" - Adding necessary context to the tasks", "yellow")
493 | task_agent_output = code_tasks_context_agent(OBJECTIVE, task_agent_output)
494 | print()
495 |
496 | print_colored_text("*****TASKS*****", "green")
497 | print_char_by_char(task_agent_output, 0.00000001, 10)
498 |
499 | # Task list
500 | task_json = json.loads(task_agent_output)
501 |
502 | embeddings = Embeddings(current_directory)
503 |
504 | for task in task_json["tasks"]:
505 | task_description = task["description"]
506 | task_isolated_context = task["isolated_context"]
507 |
508 | print_colored_text("*****TASK*****", "yellow")
509 | print_char_by_char(task_description)
510 | print_colored_text("*****TASK CONTEXT*****", "yellow")
511 | print_char_by_char(task_isolated_context)
512 |
513 | # HUMAN FEEDBACK
514 | # Uncomment below to enable human feedback before each task. This can be used to improve the quality of the tasks,
515 | # skip tasks, etc. I believe it may be very relevant in future versions that may have more complex tasks and could
516 | # allow a ton of automation when working on large projects.
517 | #
518 | # Get user input as a feedback to the task_description
519 | # print_colored_text("*****TASK FEEDBACK*****", "yellow")
520 | # user_input = input("\n>:")
521 | # task_description = task_human_input_agent(task_description, user_input)
522 | # if task_description == "":
523 | # continue
524 | # print_colored_text("*****IMPROVED TASK*****", "green")
525 | # print_char_by_char(task_description)
526 |
527 | # Assign the task to an agent
528 | task_assigner_recommendation = task_assigner_recommendation_agent(OBJECTIVE, task_description)
529 | task_agent_output = task_assigner_agent(OBJECTIVE, task_description, task_assigner_recommendation)
530 |
531 | print_colored_text("*****ASSIGN*****", "yellow")
532 | print_char_by_char(task_agent_output)
533 |
534 | chosen_agent = json.loads(task_agent_output)["agent"]
535 |
536 | if chosen_agent == "command_executor_agent":
537 | command_executor_output = command_executor_agent(task_description, task["file_path"])
538 | print_colored_text("*****COMMAND*****", "green")
539 | print_char_by_char(command_executor_output)
540 |
541 | command_execution_output = execute_command_json(command_executor_output)
542 | else:
543 | # CODE AGENTS
544 | if chosen_agent == "code_writer_agent":
545 | # Compute embeddings for the codebase
546 | # This will recompute embeddings for all files in the 'playground' directory
547 | print_colored_text("*****RETRIEVING RELEVANT CODE CONTEXT*****", "yellow")
548 | embeddings.compute_repository_embeddings()
549 | relevant_chunks = embeddings.get_relevant_code_chunks(task_description, task_isolated_context)
550 |
551 | current_directory_files = execute_command_string("ls")
552 | file_management_output = file_management_agent(OBJECTIVE, task_description, current_directory_files, task["file_path"])
553 | print_colored_text("*****FILE MANAGEMENT*****", "yellow")
554 | print_char_by_char(file_management_output)
555 | file_path = json.loads(file_management_output)["file_path"]
556 |
557 | code_writer_output = code_writer_agent(task_description, task_isolated_context, relevant_chunks)
558 |
559 | print_colored_text("*****CODE*****", "green")
560 | print_char_by_char(code_writer_output)
561 |
562 | # Save the generated code to the file the agent selected
563 | save_code_to_file(code_writer_output, file_path)
564 |
565 | elif chosen_agent == "code_refactor_agent":
566 | # The code refactor agent works with multiple agents:
567 |         # For each task, the file_management_agent is used to select the file to edit. Then, the
568 |         # code_relevance_agent is used to select the relevant code chunks from that file with the
569 | # goal of finding the code chunk that is most relevant to the task description. This is
570 | # the code chunk that will be edited. Finally, the code_refactor_agent is used to edit
571 | # the code chunk.
572 |
573 | current_directory_files = execute_command_string("ls")
574 | file_management_output = file_management_agent(OBJECTIVE, task_description, current_directory_files, task["file_path"])
575 | file_path = json.loads(file_management_output)["file_path"]
576 |
577 | print_colored_text("*****FILE MANAGEMENT*****", "yellow")
578 | print_char_by_char(file_management_output)
579 |
580 | # Split the code into chunks and get the relevance scores for each chunk
581 | code_chunks = split_code_into_chunks(file_path, 80)
582 | print_colored_text("*****ANALYZING EXISTING CODE*****", "yellow")
583 | relevance_scores = []
584 | for chunk in code_chunks:
585 | score = code_relevance_agent(OBJECTIVE, task_description, chunk["code"])
586 | relevance_scores.append(score)
587 |
588 | # Select the most relevant chunk
589 |             selected_chunk = sorted(zip(relevance_scores, code_chunks), key=lambda x: int(json.loads(x[0])["relevance_score"]), reverse=True)[0][1]
590 |
591 | # Refactor the code
592 | modified_code_output = code_refactor_agent(task_description, selected_chunk, context_chunks=[selected_chunk], isolated_context=task_isolated_context)
593 |
594 | # Extract the start_line and end_line of the selected chunk. This will be used to replace the code in the original file
595 | start_line = selected_chunk["start_line"]
596 | end_line = selected_chunk["end_line"]
597 |
598 | # Count the number of lines in the modified_code_output
599 | modified_code_lines = modified_code_output.count("\n") + 1
600 | # Create a dictionary with the necessary information for the refactor_code function
601 | modified_code_info = {
602 | "start_line": start_line,
603 | "end_line": start_line + modified_code_lines - 1,
604 | "modified_code": modified_code_output
605 | }
606 | print_colored_text("*****REFACTORED CODE*****", "green")
607 | print_char_by_char(modified_code_output)
608 |
609 | # Save the refactored code to the file
610 | refactor_code([modified_code_info], file_path)
611 |
--------------------------------------------------------------------------------
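Note on the chunk-selection sort above: `code_relevance_agent` returns its score as a JSON string, so the sort key has to coerce it to a number; comparing the raw strings lexicographically would rank "9" above "10". A hedged sketch of a more defensive coercion than an inline `int(json.loads(...))` (the helper name and fallback value are illustrative, not part of the repository):

```python
import json
import re

def parse_relevance_score(agent_output: str, fallback: int = 0) -> int:
    """Coerce an LLM relevance reply such as '{"relevance_score": " 7 "}' to an int.

    Returns `fallback` when no digits can be recovered, and clamps the result
    to the 0-10 range the prompt requests.
    """
    try:
        raw = json.loads(agent_output)["relevance_score"]
    except (json.JSONDecodeError, KeyError, TypeError):
        raw = agent_output  # Fall back to scanning the raw reply for digits
    match = re.search(r"\d+", str(raw))
    if match is None:
        return fallback
    return max(0, min(10, int(match.group())))

assert parse_relevance_score('{"relevance_score": "7"}') == 7
assert parse_relevance_score("Relevance: 12") == 10
assert parse_relevance_score("no idea") == 0
```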
/babycoder/embeddings.py:
--------------------------------------------------------------------------------
1 | import os
2 | import csv
3 | import shutil
4 | import openai
5 | import pandas as pd
6 | import numpy as np
7 | from transformers import GPT2TokenizerFast
8 | from dotenv import load_dotenv
9 | import time
10 |
11 | # Heavily derived from OpenAi's cookbook example
12 |
13 | load_dotenv()
14 |
15 | # the dir is the ./playground directory
16 | REPOSITORY_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "playground")
17 |
18 | class Embeddings:
19 | def __init__(self, workspace_path: str):
20 | self.workspace_path = workspace_path
21 | openai.api_key = os.getenv("OPENAI_API_KEY", "")
22 |
23 |         self.DOC_EMBEDDINGS_MODEL = "text-embedding-ada-002"
24 |         self.QUERY_EMBEDDINGS_MODEL = "text-embedding-ada-002"
25 |
26 | self.SEPARATOR = "\n* "
27 |
28 | self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
29 | self.separator_len = len(self.tokenizer.tokenize(self.SEPARATOR))
30 |
31 | def compute_repository_embeddings(self):
32 | try:
33 | playground_data_path = os.path.join(self.workspace_path, 'playground_data')
34 |
35 | # Delete the contents of the playground_data directory but not the directory itself
36 | # This is to ensure that we don't have any old data lying around
37 | for filename in os.listdir(playground_data_path):
38 | file_path = os.path.join(playground_data_path, filename)
39 |
40 | try:
41 | if os.path.isfile(file_path) or os.path.islink(file_path):
42 | os.unlink(file_path)
43 | elif os.path.isdir(file_path):
44 | shutil.rmtree(file_path)
45 | except Exception as e:
46 | print(f"Failed to delete {file_path}. Reason: {str(e)}")
47 | except Exception as e:
48 | print(f"Error: {str(e)}")
49 |
50 | # extract and save info to csv
51 | info = self.extract_info(REPOSITORY_PATH)
52 | self.save_info_to_csv(info)
53 |
54 |         df = pd.read_csv(os.path.join(self.workspace_path, 'playground_data', 'repository_info.csv'))
55 | df = df.set_index(["filePath", "lineCoverage"])
56 | self.df = df
57 | context_embeddings = self.compute_doc_embeddings(df)
58 |         self.save_doc_embeddings_to_csv(context_embeddings, df, os.path.join(self.workspace_path, 'playground_data', 'doc_embeddings.csv'))
59 |
60 | try:
61 |             self.document_embeddings = self.load_embeddings(os.path.join(self.workspace_path, 'playground_data', 'doc_embeddings.csv'))
62 |         except Exception:
63 | pass
64 |
65 | # Extract information from files in the repository in chunks
66 | # Return a list of [filePath, lineCoverage, chunkContent]
67 | def extract_info(self, REPOSITORY_PATH):
68 | # Initialize an empty list to store the information
69 | info = []
70 |
71 | LINES_PER_CHUNK = 60
72 |
73 | # Iterate through the files in the repository
74 | for root, dirs, files in os.walk(REPOSITORY_PATH):
75 | for file in files:
76 | file_path = os.path.join(root, file)
77 |
78 | # Read the contents of the file
79 | with open(file_path, "r", encoding="utf-8") as f:
80 | try:
81 | contents = f.read()
82 |                 except Exception:
83 | continue
84 |
85 | # Split the contents into lines
86 | lines = contents.split("\n")
87 | # Ignore empty lines
88 | lines = [line for line in lines if line.strip()]
89 | # Split the lines into chunks of LINES_PER_CHUNK lines
90 | chunks = [
91 | lines[i:i+LINES_PER_CHUNK]
92 | for i in range(0, len(lines), LINES_PER_CHUNK)
93 | ]
94 | # Iterate through the chunks
95 | for i, chunk in enumerate(chunks):
96 | # Join the lines in the chunk back into a single string
97 | chunk = "\n".join(chunk)
98 | # Get the first and last line numbers
99 | first_line = i * LINES_PER_CHUNK + 1
100 | last_line = first_line + len(chunk.split("\n")) - 1
101 | line_coverage = (first_line, last_line)
102 | # Add the file path, line coverage, and content to the list
103 | info.append((os.path.join(root, file), line_coverage, chunk))
104 |
105 | # Return the list of information
106 | return info
107 |
108 | def save_info_to_csv(self, info):
109 | # Open a CSV file for writing
110 | os.makedirs(os.path.join(self.workspace_path, "playground_data"), exist_ok=True)
111 |         with open(os.path.join(self.workspace_path, 'playground_data', 'repository_info.csv'), "w", newline="") as csvfile:
112 | # Create a CSV writer
113 | writer = csv.writer(csvfile)
114 | # Write the header row
115 | writer.writerow(["filePath", "lineCoverage", "content"])
116 | # Iterate through the info
117 | for file_path, line_coverage, content in info:
118 | # Write a row for each chunk of data
119 | writer.writerow([file_path, line_coverage, content])
120 |
121 | def get_relevant_code_chunks(self, task_description: str, task_context: str):
122 | query = task_description + "\n" + task_context
123 | most_relevant_document_sections = self.order_document_sections_by_query_similarity(query, self.document_embeddings)
124 | selected_chunks = []
125 | for _, section_index in most_relevant_document_sections:
126 | try:
127 | document_section = self.df.loc[section_index]
128 | selected_chunks.append(self.SEPARATOR + document_section['content'].replace("\n", " "))
129 | if len(selected_chunks) >= 2:
130 | break
131 |             except Exception:
132 | pass
133 |
134 | return selected_chunks
135 |
136 | def get_embedding(self, text: str, model: str) -> list[float]:
137 | result = openai.Embedding.create(
138 | model=model,
139 | input=text
140 | )
141 | return result["data"][0]["embedding"]
142 |
143 | def get_doc_embedding(self, text: str) -> list[float]:
144 | return self.get_embedding(text, self.DOC_EMBEDDINGS_MODEL)
145 |
146 | def get_query_embedding(self, text: str) -> list[float]:
147 | return self.get_embedding(text, self.QUERY_EMBEDDINGS_MODEL)
148 |
149 | def compute_doc_embeddings(self, df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
150 | """
151 | Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
152 |
153 | Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
154 | """
155 | embeddings = {}
156 | for idx, r in df.iterrows():
157 | # Wait one second before making the next call to the OpenAI Embeddings API
158 | # print("Waiting one second before embedding next row\n")
159 | time.sleep(1)
160 | embeddings[idx] = self.get_doc_embedding(r.content.replace("\n", " "))
161 | return embeddings
162 |
163 | def save_doc_embeddings_to_csv(self, doc_embeddings: dict, df: pd.DataFrame, csv_filepath: str):
164 | # Get the dimensionality of the embedding vectors from the first element in the doc_embeddings dictionary
165 | if len(doc_embeddings) == 0:
166 | return
167 |
168 | EMBEDDING_DIM = len(list(doc_embeddings.values())[0])
169 |
170 | # Create a new dataframe with the filePath, lineCoverage, and embedding vector columns
171 | embeddings_df = pd.DataFrame(columns=["filePath", "lineCoverage"] + [f"{i}" for i in range(EMBEDDING_DIM)])
172 |
173 | # Iterate over the rows in the original dataframe
174 | for idx, _ in df.iterrows():
175 | # Get the embedding vector for the current row
176 | embedding = doc_embeddings[idx]
177 | # Create a new row in the embeddings dataframe with the filePath, lineCoverage, and embedding vector values
178 | row = [idx[0], idx[1]] + embedding
179 | embeddings_df.loc[len(embeddings_df)] = row
180 |
181 | # Save the embeddings dataframe to a CSV file
182 | embeddings_df.to_csv(csv_filepath, index=False)
183 |
184 | def vector_similarity(self, x: list[float], y: list[float]) -> float:
185 | return np.dot(np.array(x), np.array(y))
186 |
187 |     def order_document_sections_by_query_similarity(self, query: str, contexts: dict[tuple[str, str], np.ndarray]) -> list[tuple[float, tuple[str, str]]]:
188 | """
189 | Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
190 | to find the most relevant sections.
191 |
192 | Return the list of document sections, sorted by relevance in descending order.
193 | """
194 | query_embedding = self.get_query_embedding(query)
195 |
196 | document_similarities = sorted([
197 | (self.vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
198 | ], reverse=True)
199 |
200 | return document_similarities
201 |
202 | def load_embeddings(self, fname: str) -> dict[tuple[str, str], list[float]]:
203 | df = pd.read_csv(fname, header=0)
204 | max_dim = max([int(c) for c in df.columns if c != "filePath" and c != "lineCoverage"])
205 | return {
206 | (r.filePath, r.lineCoverage): [r[str(i)] for i in range(max_dim + 1)] for _, r in df.iterrows()
207 | }
--------------------------------------------------------------------------------
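A minimal usage sketch for the `Embeddings` class above, assuming `OPENAI_API_KEY` is set and `babycoder/playground` contains source files to index (the query strings below are illustrative):

```python
import os

from embeddings import Embeddings  # run from the babycoder directory

# The workspace is where the playground_data CSVs get written.
workspace = os.path.dirname(os.path.realpath(__file__))

emb = Embeddings(workspace)
emb.compute_repository_embeddings()  # chunk files, embed each chunk, persist CSVs

# Returns at most two "* "-separated chunks, ranked by dot-product similarity.
chunks = emb.get_relevant_code_chunks(
    task_description="Edit existing code to add a Kelvin conversion method",
    task_context="temperature_converter.py defines the TemperatureConverter class",
)
print("\n---\n".join(chunks))
```

Since ada-002 embeddings are normalized to unit length, the plain dot product in `vector_similarity` behaves as cosine similarity.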
/babycoder/objective.sample.txt:
--------------------------------------------------------------------------------
1 | Create a Python program that consists of a single class named 'TemperatureConverter' in a file named 'temperature_converter.py'. The class should have the following methods:
2 |
3 | - celsius_to_fahrenheit(self, celsius: float) -> float: Converts Celsius temperature to Fahrenheit.
4 | - fahrenheit_to_celsius(self, fahrenheit: float) -> float: Converts Fahrenheit temperature to Celsius.
5 |
6 | Create a separate 'main.py' file that imports the 'TemperatureConverter' class, takes user input for the temperature value and the unit, converts the temperature to the other unit, and then prints the result.
--------------------------------------------------------------------------------
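The sample objective above amounts to two affine conversions; a hedged sketch of the class it describes (what the agents actually generate will differ):

```python
# temperature_converter.py - illustrative solution to the sample objective

class TemperatureConverter:
    def celsius_to_fahrenheit(self, celsius: float) -> float:
        # F = C * 9/5 + 32
        return celsius * 9.0 / 5.0 + 32.0

    def fahrenheit_to_celsius(self, fahrenheit: float) -> float:
        # C = (F - 32) * 5/9
        return (fahrenheit - 32.0) * 5.0 / 9.0


if __name__ == "__main__":
    converter = TemperatureConverter()
    assert converter.celsius_to_fahrenheit(100.0) == 212.0
    assert converter.fahrenheit_to_celsius(32.0) == 0.0
```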
/babycoder/playground/noop.md:
--------------------------------------------------------------------------------
1 | # noop
--------------------------------------------------------------------------------
/backup_workspace.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ex
3 |
4 | # Get the current timestamp to use in the backup folder name
5 | TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
6 |
7 | # Create the backup destination directory
8 | BACKUP_DIR="./workspace_backup/$TIMESTAMP"
9 | mkdir -p "$BACKUP_DIR"
10 |
11 | # Copy every file except .gitkeep
12 | # Determine the system type
13 | if [ "$(uname)" = "Darwin" ] || [ "$(uname -s)" = "FreeBSD" ]; then
14 |     # BSD-based system (e.g., macOS or FreeBSD)
15 | echo "BSD-based system detected. Using rsync for backup."
16 | rsync -av --exclude='.gitkeep' ./workspace/ "$BACKUP_DIR/"
17 | else
18 |     # GNU/Linux-based system
19 | echo "GNU/Linux system detected. Using find and cp for backup."
20 | find ./workspace -type f ! -name ".gitkeep" -exec cp --parents \{\} "$BACKUP_DIR" \;
21 | fi
22 |
23 | echo "Backup completed successfully to $BACKUP_DIR"
24 |
--------------------------------------------------------------------------------
/classic/ABOUT.md:
--------------------------------------------------------------------------------
1 | # BabyAgi Classic
2 |
3 | This folder contains the classic version of BabyAGI as a single script. You can use this as a starting point for your own projects built on the original BabyAGI reasoning engine, if the mainline version is too complex for your needs.
--------------------------------------------------------------------------------
/classic/BabyBeeAGI:
--------------------------------------------------------------------------------
1 | ###### This is a modified version of OG BabyAGI, called BabyBeeAGI (future modifications will follow the pattern "BabyAGI"). This version requires GPT-4; it's very slow and often errors out.######
2 | ######IMPORTANT NOTE: I'm sharing this as a framework to build on top of (with lots of errors for improvement), to facilitate discussion around how to improve these. This is NOT for people who are looking for a complete solution that's ready to use. ######
3 |
4 | import openai
5 | import pinecone
6 | import time
7 | import requests
8 | from bs4 import BeautifulSoup
9 | from collections import deque
10 | from typing import Dict, List
11 | import re
12 | import ast
13 | import json
14 | from serpapi import GoogleSearch
15 |
16 | ### SET THESE 4 VARIABLES ##############################
17 |
18 | # Add your API keys here
19 | OPENAI_API_KEY = ""
20 | SERPAPI_API_KEY = "" #If you include SERPAPI KEY, this will enable web-search. If you don't, it will automatically remove web-search capability.
21 |
22 | # Set variables
23 | OBJECTIVE = "You are an AI. Make the world a better place."
24 | YOUR_FIRST_TASK = "Develop a task list."
25 |
26 | ### UP TO HERE ##############################
27 |
28 | # Configure OpenAI and SerpAPI client
29 | openai.api_key = OPENAI_API_KEY
30 | if SERPAPI_API_KEY:
31 | serpapi_client = GoogleSearch({"api_key": SERPAPI_API_KEY})
32 | websearch_var = "[web-search] "
33 | else:
34 | websearch_var = ""
35 |
36 | # Initialize task list
37 | task_list = []
38 |
39 | # Initialize session_summary
40 | session_summary = ""
41 |
42 | ### Task list functions ##############################
43 | def add_task(task: Dict):
44 | task_list.append(task)
45 |
46 | def get_task_by_id(task_id: int):
47 | for task in task_list:
48 | if task["id"] == task_id:
49 | return task
50 | return None
51 |
52 | def get_completed_tasks():
53 | return [task for task in task_list if task["status"] == "complete"]
54 |
55 | ### Tool functions ##############################
56 | def text_completion_tool(prompt: str):
57 | response = openai.Completion.create(
58 | engine="text-davinci-003",
59 | prompt=prompt,
60 | temperature=0.5,
61 | max_tokens=1500,
62 | top_p=1,
63 | frequency_penalty=0,
64 | presence_penalty=0
65 | )
66 | return response.choices[0].text.strip()
67 |
68 | def web_search_tool(query: str):
69 | search_params = {
70 | "engine": "google",
71 | "q": query,
72 | "api_key": SERPAPI_API_KEY,
73 | "num":3
74 | }
75 | search_results = GoogleSearch(search_params)
76 | results = search_results.get_dict()
77 |
78 | return str(results["organic_results"])
79 |
80 | def web_scrape_tool(url: str):
81 | response = requests.get(url)
82 | print(response)
83 | soup = BeautifulSoup(response.content, "html.parser")
84 | result = soup.get_text(strip=True)+"URLs: "
85 | for link in soup.findAll('a', attrs={'href': re.compile("^https://")}):
86 | result+= link.get('href')+", "
87 | return result
88 |
89 | ### Agent functions ##############################
90 | def execute_task(task, task_list, OBJECTIVE):
91 | global task_id_counter
92 | # Check if dependent_task_id is complete
93 | if task["dependent_task_id"]:
94 | dependent_task = get_task_by_id(task["dependent_task_id"])
95 | if not dependent_task or dependent_task["status"] != "complete":
96 | return
97 |
98 | # Execute task
99 |
100 | print("\033[92m\033[1m"+"\n*****NEXT TASK*****\n"+"\033[0m\033[0m")
101 | print(str(task['id'])+": "+str(task['task'])+" ["+str(task['tool']+"]"))
102 | task_prompt = f"Complete your assigned task based on the objective: {OBJECTIVE}. Your task: {task['task']}"
103 | if task["dependent_task_id"]:
104 | dependent_task_result = dependent_task["result"]
105 | task_prompt += f"\nThe previous task ({dependent_task['id']}. {dependent_task['task']}) result: {dependent_task_result}"
106 |
107 | task_prompt += "\nResponse:"
108 | ##print("###task_prompt: "+task_prompt)
109 | if task["tool"] == "text-completion":
110 | result = text_completion_tool(task_prompt)
111 | elif task["tool"] == "web-search":
112 | result = web_search_tool(task_prompt)
113 | elif task["tool"] == "web-scrape":
114 | result = web_scrape_tool(str(task['task']))
115 | else:
116 | result = "Unknown tool"
117 |
118 |
119 | print("\033[93m\033[1m"+"\n*****TASK RESULT*****\n"+"\033[0m\033[0m")
120 | print_result = result[0:2000]
121 | if result != result[0:2000]:
122 | print(print_result+"...")
123 | else:
124 | print(result)
125 | # Update task status and result
126 | task["status"] = "complete"
127 | task["result"] = result
128 | task["result_summary"] = summarizer_agent(result)
129 |
130 | # Update session_summary
131 | session_summary = overview_agent(task["id"])
132 |
133 | # Increment task_id_counter
134 | task_id_counter += 1
135 |
136 | # Update task_manager_agent of tasks
137 | task_manager_agent(
138 | OBJECTIVE,
139 | result,
140 | task["task"],
141 | [t["task"] for t in task_list if t["status"] == "incomplete"],
142 | task["id"]
143 | )
144 |
145 |
146 | def task_manager_agent(objective: str, result: str, task_description: str, incomplete_tasks: List[str], current_task_id : int) -> List[Dict]:
147 | global task_list
148 | original_task_list = task_list.copy()
149 | minified_task_list = [{k: v for k, v in task.items() if k != "result"} for task in task_list]
150 | result = result[0:4000] #come up with better solution later.
151 |
152 | prompt = (
153 | f"You are a task management AI tasked with cleaning the formatting of and reprioritizing the following tasks: {minified_task_list}. "
154 | f"Consider the ultimate objective of your team: {OBJECTIVE}. "
155 | f"Do not remove any tasks. Return the result as a JSON-formatted list of dictionaries.\n"
156 | f"Create new tasks based on the result of last task if necessary for the objective. Limit tasks types to those that can be completed with the available tools listed below. Task description should be detailed."
157 | f"The maximum task list length is 7. Do not add an 8th task."
158 | f"The last completed task has the following result: {result}. "
159 | f"Current tool option is [text-completion] {websearch_var} and [web-scrape] only."# web-search is added automatically if SERPAPI exists
160 | f"For tasks using [web-scrape], provide only the URL to scrape as the task description. Do not provide placeholder URLs, but use ones provided by a search step or the initial objective."
161 |         #f"If the objective is research related, use at least one [web-search] with the query as the task description, and after, add up to three URLs from the search result as a task with [web-scrape], then use [text-completion] to write a comprehensive summary of each site that has been scraped.'"
162 |         f"For tasks using [web-search], provide the search query, and only the search query to use (eg. not 'research waterproof shoes', but 'waterproof shoes')"
163 | f"dependent_task_id should always be null or a number."
164 | f"Do not reorder completed tasks. Only reorder and dedupe incomplete tasks.\n"
165 | f"Make sure all task IDs are in chronological order.\n"
166 | f"Do not provide example URLs for [web-scrape].\n"
167 |         f"Do not include the result from the last task in the JSON, that will be added after.\n"
168 | f"The last step is always to provide a final summary report of all tasks.\n"
169 | f"An example of the desired output format is: "
170 | "[{\"id\": 1, \"task\": \"https://untapped.vc\", \"tool\": \"web-scrape\", \"dependent_task_id\": null, \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}, {\"id\": 2, \"task\": \"Analyze the contents of...\", \"tool\": \"text-completion\", \"dependent_task_id\": 1, \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}, {\"id\": 3, \"task\": \"Untapped Capital\", \"tool\": \"web-search\", \"dependent_task_id\": null, \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}]."
171 | )
172 | print("\033[90m\033[3m" + "\nRunning task manager agent...\n" + "\033[0m")
173 | response = openai.ChatCompletion.create(
174 | model="gpt-4",
175 | messages=[
176 | {
177 | "role": "system",
178 | "content": "You are a task manager AI."
179 | },
180 | {
181 | "role": "user",
182 | "content": prompt
183 | }
184 | ],
185 | temperature=0.2,
186 | max_tokens=1500,
187 | top_p=1,
188 | frequency_penalty=0,
189 | presence_penalty=0
190 | )
191 |
192 | # Extract the content of the assistant's response and parse it as JSON
193 | result = response["choices"][0]["message"]["content"]
194 | print("\033[90m\033[3m" + "\nDone!\n" + "\033[0m")
195 | try:
196 | task_list = json.loads(result)
197 | except Exception as error:
198 | print(error)
199 | # Add the 'result' field back in
200 | for updated_task, original_task in zip(task_list, original_task_list):
201 | if "result" in original_task:
202 | updated_task["result"] = original_task["result"]
203 | task_list[current_task_id]["result"]=result
204 | #print(task_list)
205 | return task_list
206 |
207 |
208 |
209 | def summarizer_agent(text: str) -> str:
210 | text = text[0:4000]
211 | prompt = f"Please summarize the following text:\n{text}\nSummary:"
212 | response = openai.Completion.create(
213 | engine="text-davinci-003",
214 | prompt=prompt,
215 | temperature=0.5,
216 | max_tokens=100,
217 | top_p=1,
218 | frequency_penalty=0,
219 | presence_penalty=0
220 | )
221 | return response.choices[0].text.strip()
222 |
223 |
224 | def overview_agent(last_task_id: int) -> str:
225 | global session_summary
226 |
227 | completed_tasks = get_completed_tasks()
228 | completed_tasks_text = "\n".join(
229 | [f"{task['id']}. {task['task']} - {task['result_summary']}" for task in completed_tasks]
230 | )
231 |
232 | prompt = f"Here is the current session summary:\n{session_summary}\nThe last completed task is task {last_task_id}. Please update the session summary with the information of the last task:\n{completed_tasks_text}\nUpdated session summary, which should describe all tasks in chronological order:"
233 | response = openai.Completion.create(
234 | engine="text-davinci-003",
235 | prompt=prompt,
236 | temperature=0.5,
237 | max_tokens=200,
238 | top_p=1,
239 | frequency_penalty=0,
240 | presence_penalty=0
241 | )
242 | session_summary = response.choices[0].text.strip()
243 | return session_summary
244 |
245 |
246 | ### Main Loop ##############################
247 |
248 | # Add the first task
249 | first_task = {
250 | "id": 1,
251 | "task": YOUR_FIRST_TASK,
252 | "tool": "text-completion",
253 | "dependent_task_id": None,
254 | "status": "incomplete",
255 | "result": "",
256 | "result_summary": ""
257 | }
258 | add_task(first_task)
259 |
260 | task_id_counter = 0
261 | #Print OBJECTIVE
262 | print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
263 | print(OBJECTIVE)
264 |
265 | # Continue the loop while there are incomplete tasks
266 | while any(task["status"] == "incomplete" for task in task_list):
267 |
268 | # Filter out incomplete tasks
269 | incomplete_tasks = [task for task in task_list if task["status"] == "incomplete"]
270 |
271 | if incomplete_tasks:
272 | # Sort tasks by ID
273 | incomplete_tasks.sort(key=lambda x: x["id"])
274 |
275 | # Pull the first task
276 | task = incomplete_tasks[0]
277 |
278 | # Execute task & call task manager from function
279 | execute_task(task, task_list, OBJECTIVE)
280 |
281 | # Print task list and session summary
282 | print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m")
283 | for t in task_list:
284 | dependent_task = ""
285 | if t['dependent_task_id'] is not None:
286 | dependent_task = f"\033[31m\033[0m"
287 | status_color = "\033[32m" if t['status'] == "complete" else "\033[31m"
288 | print(f"\033[1m{t['id']}\033[0m: {t['task']} {status_color}[{t['status']}]\033[0m \033[93m[{t['tool']}] {dependent_task}\033[0m")
289 | print("\033[93m\033[1m" + "\n*****SESSION SUMMARY*****\n" + "\033[0m\033[0m")
290 | print(session_summary)
291 |
292 | time.sleep(1) # Sleep before checking the task list again
293 |
294 | ### Objective complete ##############################
295 |
296 | # Print the full task list if there are no incomplete tasks
297 | if all(task["status"] != "incomplete" for task in task_list):
298 | print("\033[92m\033[1m" + "\n*****ALL TASKS COMPLETED*****\n" + "\033[0m\033[0m")
299 | for task in task_list:
300 | print(f"ID: {task['id']}, Task: {task['task']}, Result: {task['result']}")
301 |
--------------------------------------------------------------------------------
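`task_manager_agent` above passes the model reply straight to `json.loads` and, on a parse failure, only prints the error before continuing to index into the stale `task_list`. A hedged sketch of a more defensive parse (the fence-stripping heuristic is an assumption about typical LLM output, not repository behavior):

```python
import json

def safe_parse_task_list(raw: str, previous: list) -> list:
    """Parse an LLM-produced JSON task list, keeping `previous` on failure."""
    text = raw.strip()
    # Models often wrap JSON in markdown fences; strip them if present.
    if text.startswith("```"):
        text = text.strip("`")
        if text.startswith("json"):
            text = text[len("json"):]
    try:
        parsed = json.loads(text)
    except json.JSONDecodeError as error:
        print(f"Task list parse failed, keeping previous list: {error}")
        return previous
    return parsed if isinstance(parsed, list) else previous

previous = [{"id": 1, "task": "Develop a task list.", "status": "incomplete"}]
assert safe_parse_task_list("not json", previous) == previous
assert safe_parse_task_list('[{"id": 2}]', previous) == [{"id": 2}]
```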
/classic/BabyCatAGI.py:
--------------------------------------------------------------------------------
1 | ###### This is a modified version of OG BabyAGI, called BabyCatAGI (future modifications will follow the pattern "BabyAGI"). This version requires GPT-4; it's very slow and often errors out.######
2 | ######IMPORTANT NOTE: I'm sharing this as a framework to build on top of (with lots of errors for improvement), to facilitate discussion around how to improve these. This is NOT for people who are looking for a complete solution that's ready to use. ######
3 |
4 | import openai
5 | import time
6 | import requests
7 | from bs4 import BeautifulSoup
8 | from collections import deque
9 | from typing import Dict, List
10 | import re
11 | import ast
12 | import json
13 | from serpapi import GoogleSearch
14 |
15 | ### SET THESE 4 VARIABLES ##############################
16 |
17 | # Add your API keys here
18 | OPENAI_API_KEY = ""
19 | SERPAPI_API_KEY = "" #If you include SERPAPI KEY, this will enable web-search. If you don't, it will automatically remove web-search capability.
20 |
21 | # Set variables
22 | OBJECTIVE = "Research experts at scaling NextJS and their Twitter accounts."
23 | YOUR_FIRST_TASK = "Develop a task list." #you can provide additional instructions here regarding the task list.
24 |
25 | ### UP TO HERE ##############################
26 |
27 | # Configure OpenAI and SerpAPI client
28 | openai.api_key = OPENAI_API_KEY
29 | if SERPAPI_API_KEY:
30 | serpapi_client = GoogleSearch({"api_key": SERPAPI_API_KEY})
31 | websearch_var = "[web-search] "
32 | else:
33 | websearch_var = ""
34 |
35 | # Initialize task list
36 | task_list = []
37 |
38 | # Initialize session_summary
39 | session_summary = ""
40 |
41 | ### Task list functions ##############################
42 | def add_task(task: Dict):
43 | task_list.append(task)
44 |
45 | def get_task_by_id(task_id: int):
46 | for task in task_list:
47 | if task["id"] == task_id:
48 | return task
49 | return None
50 |
51 | def get_completed_tasks():
52 | return [task for task in task_list if task["status"] == "complete"]
53 |
54 |
55 | # Print task list and session summary
56 | def print_tasklist():
57 | print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m")
58 | for t in task_list:
59 | dependent_task = ""
60 | if t['dependent_task_ids']:
61 | dependent_task = f"\033[31m\033[0m"
62 | status_color = "\033[32m" if t['status'] == "complete" else "\033[31m"
63 | print(f"\033[1m{t['id']}\033[0m: {t['task']} {status_color}[{t['status']}]\033[0m \033[93m[{t['tool']}] {dependent_task}\033[0m")
64 |
65 | ### Tool functions ##############################
66 | def text_completion_tool(prompt: str):
67 | messages = [
68 | {"role": "user", "content": prompt}
69 | ]
70 |
71 | response = openai.ChatCompletion.create(
72 | model="gpt-3.5-turbo",
73 | messages=messages,
74 | temperature=0.2,
75 | max_tokens=1500,
76 | top_p=1,
77 | frequency_penalty=0,
78 | presence_penalty=0
79 | )
80 |
81 | return response.choices[0].message['content'].strip()
82 |
83 |
84 | def web_search_tool(query: str):
85 | search_params = {
86 | "engine": "google",
87 | "q": query,
88 | "api_key": SERPAPI_API_KEY,
89 | "num":5 #edit this up or down for more results, though higher often results in OpenAI rate limits
90 | }
91 | search_results = GoogleSearch(search_params)
92 | search_results = search_results.get_dict()
93 | try:
94 | search_results = search_results["organic_results"]
95 | except:
96 | search_results = {}
97 | search_results = simplify_search_results(search_results)
98 | print("\033[90m\033[3m" + "Completed search. Now scraping results.\n" + "\033[0m")
99 |     results = ""
100 | # Loop through the search results
101 | for result in search_results:
102 | # Extract the URL from the result
103 | url = result.get('link')
104 | # Call the web_scrape_tool function with the URL
105 | print("\033[90m\033[3m" + "Scraping: "+url+"" + "...\033[0m")
106 | content = web_scrape_tool(url, task)
107 | print("\033[90m\033[3m" +str(content[0:100])[0:100]+"...\n" + "\033[0m")
108 | results += str(content)+". "
109 |
110 |
111 | return results
112 |
113 |
114 | def simplify_search_results(search_results):
115 | simplified_results = []
116 | for result in search_results:
117 | simplified_result = {
118 | "position": result.get("position"),
119 | "title": result.get("title"),
120 | "link": result.get("link"),
121 | "snippet": result.get("snippet")
122 | }
123 | simplified_results.append(simplified_result)
124 | return simplified_results
125 |
126 |
127 | def web_scrape_tool(url: str, task:str):
128 | content = fetch_url_content(url)
129 | if content is None:
130 | return None
131 |
132 | text = extract_text(content)
133 |     print("\033[90m\033[3m" + "Scrape completed. Length: " + str(len(text)) + ". Now extracting relevant info..." + "\033[0m")
134 | info = extract_relevant_info(OBJECTIVE, text[0:5000], task)
135 | links = extract_links(content)
136 |
137 | #result = f"{info} URLs: {', '.join(links)}"
138 | result = info
139 |
140 | return result
141 |
142 | headers = {
143 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36"
144 | }
145 |
146 | def fetch_url_content(url: str):
147 | try:
148 | response = requests.get(url, headers=headers, timeout=10)
149 | response.raise_for_status()
150 | return response.content
151 | except requests.exceptions.RequestException as e:
152 | print(f"Error while fetching the URL: {e}")
153 | return ""
154 |
155 | def extract_links(content: str):
156 | soup = BeautifulSoup(content, "html.parser")
157 | links = [link.get('href') for link in soup.findAll('a', attrs={'href': re.compile("^https?://")})]
158 | return links
159 |
160 | def extract_text(content: str):
161 | soup = BeautifulSoup(content, "html.parser")
162 | text = soup.get_text(strip=True)
163 | return text
164 |
165 |
166 |
167 | def extract_relevant_info(objective, large_string, task):
168 | chunk_size = 3000
169 | overlap = 500
170 | notes = ""
171 |
172 | for i in range(0, len(large_string), chunk_size - overlap):
173 | chunk = large_string[i:i + chunk_size]
174 |
175 | messages = [
176 | {"role": "system", "content": f"Objective: {objective}\nCurrent Task:{task}"},
177 | {"role": "user", "content": f"Analyze the following text and extract information relevant to our objective and current task, and only information relevant to our objective and current task. If there is no relevant information do not say that there is no relevant informaiton related to our objective. ### Then, update or start our notes provided here (keep blank if currently blank): {notes}.### Text to analyze: {chunk}.### Updated Notes:"}
178 | ]
179 |
180 | response = openai.ChatCompletion.create(
181 | model="gpt-3.5-turbo",
182 | messages=messages,
183 | max_tokens=800,
184 | n=1,
185 | stop="###",
186 | temperature=0.7,
187 | )
188 |
189 |         notes += response.choices[0].message['content'].strip()+". "
190 |
191 | return notes
192 |
193 | ### Agent functions ##############################
194 |
195 |
196 | def execute_task(task, task_list, OBJECTIVE):
197 | global task_id_counter
198 | # Check if dependent_task_ids is not empty
199 |     if task["dependent_task_ids"]:
200 |         all_dependent_tasks_complete = True
201 |         for dep_id in task["dependent_task_ids"]:
202 |             dependent_task = get_task_by_id(dep_id)
203 |             if not dependent_task or dependent_task["status"] != "complete":
204 |                 all_dependent_tasks_complete = False
205 |                 break
206 |         if not all_dependent_tasks_complete:
207 |             return  # Skip execution until every dependency is complete
207 |
208 | # Execute task
209 | print("\033[92m\033[1m"+"\n*****NEXT TASK*****\n"+"\033[0m\033[0m")
210 | print(str(task['id'])+": "+str(task['task'])+" ["+str(task['tool']+"]"))
211 | task_prompt = f"Complete your assigned task based on the objective and only based on information provided in the dependent task output, if provided. Your objective: {OBJECTIVE}. Your task: {task['task']}"
212 | if task["dependent_task_ids"]:
213 | dependent_tasks_output = ""
214 | for dep_id in task["dependent_task_ids"]:
215 | dependent_task_output = get_task_by_id(dep_id)["output"]
216 | dependent_task_output = dependent_task_output[0:2000]
217 | dependent_tasks_output += f" {dependent_task_output}"
218 | task_prompt += f" Your dependent tasks output: {dependent_tasks_output}\n OUTPUT:"
219 |
220 | # Use tool to complete the task
221 | if task["tool"] == "text-completion":
222 | task_output = text_completion_tool(task_prompt)
223 | elif task["tool"] == "web-search":
224 | task_output = web_search_tool(str(task['task']))
225 | elif task["tool"] == "web-scrape":
226 | task_output = web_scrape_tool(str(task['task']))
227 |     else: task_output = "Unknown tool"  # Guard against a NameError on an unrecognized tool
228 | # Find task index in the task_list
229 | task_index = next((i for i, t in enumerate(task_list) if t["id"] == task["id"]), None)
230 |
231 | # Mark task as complete and save output
232 | task_list[task_index]["status"] = "complete"
233 | task_list[task_index]["output"] = task_output
234 |
235 | # Print task output
236 | print("\033[93m\033[1m"+"\nTask Output:"+"\033[0m\033[0m")
237 | print(task_output)
238 |
239 | # Add task output to session_summary
240 | global session_summary
241 | session_summary += f"\n\nTask {task['id']} - {task['task']}:\n{task_output}"
242 |
243 |
244 |
245 | task_list = []
246 |
247 | def task_creation_agent(objective: str) -> List[Dict]:
248 | global task_list
249 | minified_task_list = [{k: v for k, v in task.items() if k != "result"} for task in task_list]
250 |
251 | prompt = (
252 | f"You are a task creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {OBJECTIVE}. "
253 | f"Create new tasks based on the objective. Limit tasks types to those that can be completed with the available tools listed below. Task description should be detailed."
254 |         f"Current tool option is [text-completion] {websearch_var} and [web-scrape] only." # web-search is added automatically if SERPAPI exists
255 |         f"For tasks using [web-search], provide the search query, and only the search query to use (eg. not 'research waterproof shoes', but 'waterproof shoes')"
256 | f"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from."
257 | f"Make sure all task IDs are in chronological order.\n"
258 | f"The last step is always to provide a final summary report including tasks executed and summary of knowledge acquired.\n"
259 |         f"Do not create any summarizing steps outside of the last step.\n"
260 | f"An example of the desired output format is: "
261 | "[{\"id\": 1, \"task\": \"https://untapped.vc\", \"tool\": \"web-scrape\", \"dependent_task_ids\": [], \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}, {\"id\": 2, \"task\": \"Consider additional insights that can be reasoned from the results of...\", \"tool\": \"text-completion\", \"dependent_task_ids\": [1], \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}, {\"id\": 3, \"task\": \"Untapped Capital\", \"tool\": \"web-search\", \"dependent_task_ids\": [], \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}].\n"
262 | f"JSON TASK LIST="
263 | )
264 |
265 | print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
266 | print("\033[90m\033[3m" + "Analyzing objective...\n" + "\033[0m")
267 | print("\033[90m\033[3m" + "Running task creation agent...\n" + "\033[0m")
268 | response = openai.ChatCompletion.create(
269 | model="gpt-4",
270 | messages=[
271 | {
272 | "role": "system",
273 | "content": "You are a task creation AI."
274 | },
275 | {
276 | "role": "user",
277 | "content": prompt
278 | }
279 | ],
280 | temperature=0,
281 | max_tokens=1500,
282 | top_p=1,
283 | frequency_penalty=0,
284 | presence_penalty=0
285 | )
286 |
287 | # Extract the content of the assistant's response and parse it as JSON
288 | result = response["choices"][0]["message"]["content"]
289 | print("\033[90m\033[3m" + "\nDone!\n" + "\033[0m")
290 | try:
291 | task_list = json.loads(result)
292 | except Exception as error:
293 | print(error)
294 |
295 | return task_list
296 |
297 | ##### START MAIN LOOP########
298 |
299 | #Print OBJECTIVE
300 | print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
301 | print(OBJECTIVE)
302 |
303 | # Initialize task_id_counter
304 | task_id_counter = 1
305 |
306 | # Run the task_creation_agent to create initial tasks
307 | task_list = task_creation_agent(OBJECTIVE)
308 | print_tasklist()
309 |
310 | # Execute tasks in order
311 | while len(task_list) > 0:
312 | for task in task_list:
313 | if task["status"] == "incomplete":
314 | execute_task(task, task_list, OBJECTIVE)
315 | print_tasklist()
316 | break
317 |
318 | # Print session summary
319 | print("\033[96m\033[1m"+"\n*****SESSION SUMMARY*****\n"+"\033[0m\033[0m")
320 | print(session_summary)
321 |
--------------------------------------------------------------------------------
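`extract_relevant_info` above slides a window across the scraped text: `chunk_size=3000` characters per window, with starts advancing by `chunk_size - overlap = 2500`, so consecutive windows share 500 characters of context. A minimal sketch of just that arithmetic:

```python
def window_starts(text_len: int, chunk_size: int = 3000, overlap: int = 500) -> list:
    """Start offsets used by the sliding-window loop in extract_relevant_info."""
    return list(range(0, text_len, chunk_size - overlap))

# A 7000-character page yields windows starting at 0, 2500, and 5000, each
# spanning up to chunk_size characters (the last one is shorter).
assert window_starts(7000) == [0, 2500, 5000]
```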
/classic/README.md:
--------------------------------------------------------------------------------
1 | # babyagi
2 |
3 |
4 | # Objective
5 | This Python script is an example of an AI-powered task management system. The system uses OpenAI and Pinecone APIs to create, prioritize, and execute tasks. The main idea behind this system is that it creates tasks based on the result of previous tasks and a predefined objective. The script then uses OpenAI's natural language processing (NLP) capabilities to create new tasks based on the objective, and Pinecone to store and retrieve task results for context. This is a pared-down version of the original [Task-Driven Autonomous Agent](https://twitter.com/yoheinakajima/status/1640934493489070080?s=20) (Mar 28, 2023).
6 |
7 | This README will cover the following:
8 |
9 | * How the script works
10 | * How to use the script
11 | * Warning about running the script continuously
12 |
13 | # How It Works
14 | The script works by running an infinite loop that does the following steps:
15 |
16 | 1. Pulls the first task from the task list.
17 | 2. Sends the task to the execution agent, which uses OpenAI's API to complete the task based on the context.
18 | 3. Enriches the result and stores it in Pinecone.
19 | 4. Creates new tasks and reprioritizes the task list based on the objective and the result of the previous task.
20 | The execution_agent() function is where the OpenAI API is used. It takes two parameters: the objective and the task. It then sends a prompt to OpenAI's API, which returns the result of the task. The prompt consists of a description of the AI system's task, the objective, and the task itself. The result is then returned as a string.
21 |
22 | The task_creation_agent() function is where OpenAI's API is used to create new tasks based on the objective and the result of the previous task. The function takes four parameters: the objective, the result of the previous task, the task description, and the current task list. It then sends a prompt to OpenAI's API, which returns a list of new tasks as strings. The function then returns the new tasks as a list of dictionaries, where each dictionary contains the name of the task.
23 |
24 | The prioritization_agent() function is where OpenAI's API is used to reprioritize the task list. The function takes one parameter, the ID of the current task. It sends a prompt to OpenAI's API, which returns the reprioritized task list as a numbered list.
25 |
26 | Finally, the script uses Pinecone to store and retrieve task results for context. The script creates a Pinecone index based on the table name specified in YOUR_TABLE_NAME variable. Pinecone is then used to store the results of the task in the index, along with the task name and any additional metadata.
27 |
28 | # How to Use
29 | To use the script, you will need to follow these steps:
30 |
31 | 1. Install the required packages: `pip install -r requirements.txt`
32 | 2. Set your OpenAI and Pinecone API keys in the OPENAI_API_KEY and PINECONE_API_KEY variables.
33 | 3. Set the Pinecone environment in the PINECONE_ENVIRONMENT variable.
34 | 4. Set the name of the table where the task results will be stored in the YOUR_TABLE_NAME variable.
35 | 5. Set the objective of the task management system in the OBJECTIVE variable.
36 | 6. Set the first task of the system in the YOUR_FIRST_TASK variable.
37 | 7. Run the script.
38 |
39 | # Warning
40 | This script is designed to be run continuously as part of a task management system. Running this script continuously can result in high API usage, so please use it responsibly. Additionally, the script requires the OpenAI and Pinecone APIs to be set up correctly, so make sure you have set up the APIs before running the script.
41 |
42 | # Backstory
43 | BabyAGI is a pared-down version of the original [Task-Driven Autonomous Agent](https://twitter.com/yoheinakajima/status/1640934493489070080?s=20) (Mar 28, 2023) shared on Twitter. This version is down to 140 lines: 13 comments, 22 blank, 105 code. The name of the repo came up in the reactions to the original autonomous agent - the author does not mean to imply that this is AGI.
44 |
45 | Made with love by [@yoheinakajima](https://twitter.com/yoheinakajima), who happens to be a VC - so if you use this to build a startup, ping him!
46 |
--------------------------------------------------------------------------------
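As the README notes, prioritization returns a plain numbered list rather than JSON; the script below parses it by splitting each line on its first period. A standalone sketch of that parse, with an illustrative model reply:

```python
from collections import deque

def parse_numbered_tasks(reply: str) -> deque:
    """Turn a numbered list like '2. First task' into task dicts."""
    tasks = deque()
    for line in reply.strip().split("\n"):
        parts = line.strip().split(".", 1)
        if len(parts) == 2:  # Skip lines that are not 'N. task' shaped
            tasks.append({"task_id": parts[0].strip(), "task_name": parts[1].strip()})
    return tasks

reply = "2. Research Pinecone quotas\n3. Draft summary report"
assert list(parse_numbered_tasks(reply)) == [
    {"task_id": "2", "task_name": "Research Pinecone quotas"},
    {"task_id": "3", "task_name": "Draft summary report"},
]
```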
/classic/babyagi.py:
--------------------------------------------------------------------------------
1 | import openai
2 | import pinecone
3 | import time
4 | from collections import deque
5 | from typing import Dict, List
6 |
7 | #Set API Keys
8 | OPENAI_API_KEY = ""
9 | PINECONE_API_KEY = ""
10 | PINECONE_ENVIRONMENT = "us-east1-gcp" #Pinecone Environment (eg. "us-east1-gcp")
11 |
12 | #Set Variables
13 | YOUR_TABLE_NAME = "test-table"
14 | OBJECTIVE = "Solve world hunger."
15 | YOUR_FIRST_TASK = "Develop a task list."
16 |
17 | #Print OBJECTIVE
18 | print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
19 | print(OBJECTIVE)
20 |
21 | # Configure OpenAI and Pinecone
22 | openai.api_key = OPENAI_API_KEY
23 | pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
24 |
25 | # Create Pinecone index
26 | table_name = YOUR_TABLE_NAME
27 | dimension = 1536
28 | metric = "cosine"
29 | pod_type = "p1"
30 | if table_name not in pinecone.list_indexes():
31 | pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
32 |
33 | # Connect to the index
34 | index = pinecone.Index(table_name)
35 |
36 | # Task list
37 | task_list = deque([])
38 |
39 | def add_task(task: Dict):
40 | task_list.append(task)
41 |
42 | def get_ada_embedding(text):
43 | text = text.replace("\n", " ")
44 | return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
45 |
46 | def task_creation_agent(objective: str, result: Dict, task_description: str, task_list: List[str]):
47 |     prompt = f"You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective: {objective}. The last completed task has the result: {result}. This result was based on this task description: {task_description}. These are incomplete tasks: {', '.join(task_list)}. Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array."
48 | response = openai.Completion.create(engine="text-davinci-003",prompt=prompt,temperature=0.5,max_tokens=100,top_p=1,frequency_penalty=0,presence_penalty=0)
49 | new_tasks = response.choices[0].text.strip().split('\n')
50 | return [{"task_name": task_name} for task_name in new_tasks]
51 |
52 | def prioritization_agent(this_task_id:int):
53 | global task_list
54 | task_names = [t["task_name"] for t in task_list]
55 | next_task_id = int(this_task_id)+1
56 |     prompt = f"""You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: {task_names}. Consider the ultimate objective of your team: {OBJECTIVE}. Do not remove any tasks. Return the result as a numbered list, like:
57 | #. First task
58 | #. Second task
59 | Start the task list with number {next_task_id}."""
60 | response = openai.Completion.create(engine="text-davinci-003",prompt=prompt,temperature=0.5,max_tokens=1000,top_p=1,frequency_penalty=0,presence_penalty=0)
61 | new_tasks = response.choices[0].text.strip().split('\n')
62 | task_list = deque()
63 | for task_string in new_tasks:
64 | task_parts = task_string.strip().split(".", 1)
65 | if len(task_parts) == 2:
66 | task_id = task_parts[0].strip()
67 | task_name = task_parts[1].strip()
68 | task_list.append({"task_id": task_id, "task_name": task_name})
69 |
70 | def execution_agent(objective:str,task: str) -> str:
71 | #context = context_agent(index="quickstart", query="my_search_query", n=5)
72 | context=context_agent(index=YOUR_TABLE_NAME, query=objective, n=5)
73 | #print("\n*******RELEVANT CONTEXT******\n")
74 | #print(context)
75 | response = openai.Completion.create(
76 | engine="text-davinci-003",
77 | prompt=f"You are an AI who performs one task based on the following objective: {objective}. Your task: {task}\nResponse:",
78 | temperature=0.7,
79 | max_tokens=2000,
80 | top_p=1,
81 | frequency_penalty=0,
82 | presence_penalty=0
83 | )
84 | return response.choices[0].text.strip()
85 |
86 | def context_agent(query: str, index: str, n: int):
87 | query_embedding = get_ada_embedding(query)
88 | index = pinecone.Index(index_name=index)
89 | results = index.query(query_embedding, top_k=n,
90 | include_metadata=True)
91 | #print("***** RESULTS *****")
92 | #print(results)
93 | sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
94 | return [(str(item.metadata['task'])) for item in sorted_results]
95 |
96 | # Add the first task
97 | first_task = {
98 | "task_id": 1,
99 | "task_name": YOUR_FIRST_TASK
100 | }
101 |
102 | add_task(first_task)
103 | # Main loop
104 | task_id_counter = 1
105 | while True:
106 | if task_list:
107 | # Print the task list
108 | print("\033[95m\033[1m"+"\n*****TASK LIST*****\n"+"\033[0m\033[0m")
109 | for t in task_list:
110 | print(str(t['task_id'])+": "+t['task_name'])
111 |
112 | # Step 1: Pull the first task
113 | task = task_list.popleft()
114 | print("\033[92m\033[1m"+"\n*****NEXT TASK*****\n"+"\033[0m\033[0m")
115 | print(str(task['task_id'])+": "+task['task_name'])
116 |
117 | # Send to execution function to complete the task based on the context
118 | result = execution_agent(OBJECTIVE,task["task_name"])
119 | this_task_id = int(task["task_id"])
120 | print("\033[93m\033[1m"+"\n*****TASK RESULT*****\n"+"\033[0m\033[0m")
121 | print(result)
122 |
123 | # Step 2: Enrich result and store in Pinecone
124 | enriched_result = {'data': result} # This is where you should enrich the result if needed
125 | result_id = f"result_{task['task_id']}"
126 | vector = enriched_result['data'] # extract the actual result from the dictionary
127 | index.upsert([(result_id, get_ada_embedding(vector),{"task":task['task_name'],"result":result})])
128 |
129 | # Step 3: Create new tasks and reprioritize task list
130 | new_tasks = task_creation_agent(OBJECTIVE,enriched_result, task["task_name"], [t["task_name"] for t in task_list])
131 |
132 | for new_task in new_tasks:
133 | task_id_counter += 1
134 | new_task.update({"task_id": task_id_counter})
135 | add_task(new_task)
136 | prioritization_agent(this_task_id)
137 |
138 | time.sleep(1) # Sleep before checking the task list again
139 |
--------------------------------------------------------------------------------
/classic/requirements.txt:
--------------------------------------------------------------------------------
1 | openai==0.27.2
2 | pinecone-client==2.2.1
3 |
--------------------------------------------------------------------------------
/clean.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ex
3 |
4 | find ./workspace -mindepth 1 -maxdepth 1 -not -name '.*' -exec rm -rf {} \;
5 | docker-compose down
6 |
7 | ./new_store.sh
8 |
9 | echo "Clean completed"
10 |
--------------------------------------------------------------------------------
/data/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/data/.gitkeep
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | babyagi:
5 | build: ./
6 | container_name: babyagi
7 | restart: unless-stopped
8 | ports:
9 | - 127.0.0.1:8080:8080
10 | volumes:
11 | - "./:/app"
12 | - "./workspace:/workspace"
13 | stdin_open: true
14 | tty: true
15 | ulimits:
16 | memlock: -1
17 |
--------------------------------------------------------------------------------
/docs/Architecture-20240718-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/docs/Architecture-20240718-2.png
--------------------------------------------------------------------------------
/docs/BabyCommandAGI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/docs/BabyCommandAGI.png
--------------------------------------------------------------------------------
/env_dump/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/env_dump/.gitkeep
--------------------------------------------------------------------------------
/executed_task_parser.py:
--------------------------------------------------------------------------------
1 | from collections import deque
2 |
3 | class ExecutedTaskParser:
4 |
5 | def encode(self, input_data: deque) -> str:
6 | output = ""
7 | for item in input_data:
8 | output += f"{item['type']}: {item['target']}\n"
9 | output += "```\n"
10 | output += f"{item['result']}\n"
11 | output += "```\n"
12 | return output
13 |
--------------------------------------------------------------------------------
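A usage example for `ExecutedTaskParser.encode` above, run from the repository root; the deque entry is illustrative, using the `type`/`target`/`result` keys the method reads:

```python
from collections import deque

from executed_task_parser import ExecutedTaskParser

# One entry per executed step: what ran and what it produced.
executed = deque([
    {"type": "command", "target": "ls workspace", "result": "main.py"},
])

encoded = ExecutedTaskParser().encode(executed)
# encoded == 'command: ls workspace\n```\nmain.py\n```\n'
print(encoded)
```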
/extensions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/extensions/__init__.py
--------------------------------------------------------------------------------
/extensions/argparseext.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import importlib
4 | import argparse
5 |
6 | def can_import(module_name):
7 | try:
8 | importlib.import_module(module_name)
9 | return True
10 | except ImportError:
11 | return False
12 |
13 | # Extract the env filenames in the -e flag only
14 | # Ignore any other arguments
15 | def parse_dotenv_extensions(argv):
16 | env_argv = []
17 | if '-e' in argv:
18 | tmp_argv = argv[argv.index('-e') + 1:]
19 | parsed_args = []
20 | for arg in tmp_argv:
21 | if arg.startswith('-'):
22 | break
23 | parsed_args.append(arg)
24 | env_argv = ['-e'] + parsed_args
25 |
26 | parser = argparse.ArgumentParser()
27 | parser.add_argument('-e', '--env', nargs='+', help='''
28 | filenames for additional env variables to load
29 | ''', default=os.getenv("DOTENV_EXTENSIONS", "").split(' '))
30 |
31 | return parser.parse_args(env_argv).env
32 |
33 | def parse_arguments():
34 | dotenv_extensions = parse_dotenv_extensions(sys.argv)
35 | # Check if we need to load any additional env files
36 | # This allows us to override the default .env file
37 | # and update the default values for any command line arguments
38 | if dotenv_extensions:
39 | from extensions.dotenvext import load_dotenv_extensions
40 |         load_dotenv_extensions(dotenv_extensions)
41 |
42 | # Now parse the full command line arguments
43 | parser = argparse.ArgumentParser(
44 | add_help=False,
45 | )
46 | parser.add_argument('objective', nargs='*', metavar='', help='''
47 | main objective description. Doesn\'t need to be quoted.
48 | if not specified, get objective from environment.
49 | ''', default=[os.getenv("OBJECTIVE", "")])
50 | parser.add_argument('-n', '--name', required=False, help='''
51 | instance name.
52 | if not specified, get the instance name from environment.
53 | ''', default=os.getenv("INSTANCE_NAME", os.getenv("BABY_NAME", "BabyAGI")))
54 | parser.add_argument('-m', '--mode', choices=['n', 'none', 'l', 'local', 'd', 'distributed'], help='''
55 | cooperative mode type
56 | ''', default='none')
57 | group = parser.add_mutually_exclusive_group()
58 | group.add_argument('-t', '--task', metavar='', help='''
59 | initial task description. must be quoted.
60 | if not specified, get initial_task from environment.
61 | ''', default=os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", "")))
62 | group.add_argument('-j', '--join', action='store_true', help='''
63 | join an existing objective.
64 | install cooperative requirements.
65 | ''')
66 | group2 = parser.add_mutually_exclusive_group()
67 | group2.add_argument('-4', '--gpt-4', dest='llm_model', action='store_const', const="gpt-4", help='''
68 | use GPT-4 instead of the default model.
69 | ''')
70 | group2.add_argument('-l', '--llama', dest='llm_model', action='store_const', const="llama", help='''
71 | use LLaMa instead of the default model. Requires llama.cpp.
72 | ''')
73 | # This will parse -e again, which we want, because we need
74 | # to load those in the main file later as well
75 | parser.add_argument('-e', '--env', nargs='+', help='''
76 | filenames for additional env variables to load
77 | ''', default=os.getenv("DOTENV_EXTENSIONS", "").split(' '))
78 | parser.add_argument('-h', '-?', '--help', action='help', help='''
79 | show this help message and exit
80 | ''')
81 |
82 | args = parser.parse_args()
83 |
84 | llm_model = args.llm_model if args.llm_model else os.getenv("LLM_MODEL", os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")).lower()
85 |
86 | dotenv_extensions = args.env
87 |
88 | instance_name = args.name
89 | if not instance_name:
90 | print("\033[91m\033[1m" + "BabyAGI instance name missing\n" + "\033[0m\033[0m")
91 | parser.print_help()
92 | parser.exit()
93 |
94 | module_name = "ray"
95 | cooperative_mode = args.mode
96 | if cooperative_mode in ['l', 'local'] and not can_import(module_name):
97 | print("\033[91m\033[1m"+f"Local cooperative mode requires package {module_name}\nInstall: pip install -r extensions/requirements.txt\n" + "\033[0m\033[0m")
98 | parser.print_help()
99 | parser.exit()
100 | elif cooperative_mode in ['d', 'distributed']:
101 | print("\033[91m\033[1m" + "Distributed cooperative mode is not implemented yet\n" + "\033[0m\033[0m")
102 | parser.print_help()
103 | parser.exit()
104 |
105 | join_existing_objective = args.join
106 | if join_existing_objective and cooperative_mode in ['n', 'none']:
107 | print("\033[91m\033[1m"+f"Joining existing objective requires local or distributed cooperative mode\n" + "\033[0m\033[0m")
108 | parser.print_help()
109 | parser.exit()
110 |
111 | objective = ' '.join(args.objective).strip()
112 | if not objective:
113 | print("\033[91m\033[1m" + "No objective specified or found in environment.\n" + "\033[0m\033[0m")
114 | parser.print_help()
115 | parser.exit()
116 |
117 | initial_task = args.task
118 | if not initial_task and not join_existing_objective:
119 | print("\033[91m\033[1m" + "No initial task specified or found in environment.\n" + "\033[0m\033[0m")
120 | parser.print_help()
121 | parser.exit()
122 |
123 | return objective, initial_task, llm_model, dotenv_extensions, instance_name, cooperative_mode, join_existing_objective
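A quick sketch of how the `-e` pre-parsing above behaves when run from the repository root (the argv values are illustrative):

```python
from extensions.argparseext import parse_dotenv_extensions

# Only the filenames immediately following -e are kept; parsing stops at the next flag.
print(parse_dotenv_extensions(['babyagi.py', '-e', '.env.a', '.env.b', '-m', 'local']))
# -> ['.env.a', '.env.b']
```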
--------------------------------------------------------------------------------
/extensions/dotenvext.py:
--------------------------------------------------------------------------------
1 | from dotenv import load_dotenv
2 |
3 | def load_dotenv_extensions(dotenv_files):
4 | for dotenv_file in dotenv_files:
5 | load_dotenv(dotenv_file)
6 |
--------------------------------------------------------------------------------
/extensions/human_mode.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | def user_input_await(prompt: str) -> str:
4 | print("\033[94m\033[1m" + "\n> COPY FOLLOWING TEXT TO CHATBOT\n" + "\033[0m\033[0m")
5 | print(prompt)
6 | print("\033[91m\033[1m" + "\n AFTER PASTING, PRESS: (ENTER), (CTRL+Z), (ENTER) TO FINISH\n" + "\033[0m\033[0m")
7 | print("\033[96m\033[1m" + "\n> PASTE YOUR RESPONSE:\n" + "\033[0m\033[0m")
8 | input_text = sys.stdin.read()
9 | return input_text.strip()
--------------------------------------------------------------------------------
/extensions/pinecone_storage.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List
2 | import importlib
3 | import openai
4 | import pinecone
5 | import re
6 |
7 | def can_import(module_name):
8 | try:
9 | importlib.import_module(module_name)
10 | return True
11 | except ImportError:
12 | return False
13 |
14 | assert (
15 | can_import("pinecone")
16 | ), "\033[91m\033[1m"+"Pinecone storage requires package pinecone-client.\nInstall: pip install -r extensions/requirements.txt"
17 |
18 | class PineconeResultsStorage:
19 | def __init__(self, openai_api_key: str, pinecone_api_key: str, pinecone_environment: str, llm_model: str, llama_model_path: str, results_store_name: str, objective: str):
20 | openai.api_key = openai_api_key
21 | pinecone.init(api_key=pinecone_api_key, environment=pinecone_environment)
22 |
23 | # Pinecone namespaces are only compatible with ascii characters (used in query and upsert)
24 | self.namespace = re.sub(re.compile('[^\x00-\x7F]+'), '', objective)
25 |
26 | self.llm_model = llm_model
27 | self.llama_model_path = llama_model_path
28 |
29 | results_store_name = results_store_name
30 | dimension = 1536 if not self.llm_model.startswith("llama") else 5120
31 | metric = "cosine"
32 | pod_type = "p1"
33 | if results_store_name not in pinecone.list_indexes():
34 | pinecone.create_index(
35 | results_store_name, dimension=dimension, metric=metric, pod_type=pod_type
36 | )
37 |
38 | self.index = pinecone.Index(results_store_name)
39 | index_stats_response = self.index.describe_index_stats()
40 | assert dimension == index_stats_response['dimension'], "Dimension of the index does not match the dimension of the LLM embedding"
41 |
42 | def add(self, task: Dict, result: str, result_id: int):
43 | vector = self.get_embedding(
44 | result
45 | )
46 | self.index.upsert(
47 | [(result_id, vector, {"task": task["task_name"], "result": result})], namespace=self.namespace
48 | )
49 |
50 | def query(self, query: str, top_results_num: int) -> List[dict]:
51 | query_embedding = self.get_embedding(query)
52 | results = self.index.query(query_embedding, top_k=top_results_num, include_metadata=True, namespace=self.namespace)
53 | sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
54 | return [(str(item.metadata["task"])) for item in sorted_results]
55 |
56 | # Get embedding for the text
57 | def get_embedding(self, text: str) -> list:
58 | text = text.replace("\n", " ")
59 |
60 | if self.llm_model.startswith("llama"):
61 | from llama_cpp import Llama
62 |
63 | llm_embed = Llama(
64 | model_path=self.llama_model_path,
65 | n_ctx=2048, n_threads=4,
66 | embedding=True, use_mlock=True,
67 | )
68 | return llm_embed.embed(text)
69 |
70 | return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
71 |
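A hedged usage sketch of the storage class above (the keys and names are placeholders; this assumes valid OpenAI and Pinecone credentials and the pinned pinecone-client 2.x API used in this file):

```python
from extensions.pinecone_storage import PineconeResultsStorage

store = PineconeResultsStorage(
    openai_api_key="sk-...",             # placeholder
    pinecone_api_key="...",              # placeholder
    pinecone_environment="us-east1-gcp",
    llm_model="gpt-4",
    llama_model_path="",
    results_store_name="baby-agi-default-table",
    objective="Develop a task list",
)
# Pinecone ids are strings in practice, despite the int annotation on add().
store.add({"task_name": "Develop a task list"}, "1. Research\n2. Build", "result_1")
print(store.query("Develop a task list", top_results_num=5))
```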
--------------------------------------------------------------------------------
/extensions/ray_tasks.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import logging
3 | import ray
4 | from collections import deque
5 | from typing import Dict, List
6 |
7 | from pathlib import Path
8 | sys.path.append(str(Path(__file__).resolve().parent.parent))
9 |
10 | try:
11 | ray.init(address="auto", namespace="babyagi", logging_level=logging.FATAL, ignore_reinit_error=True)
12 | except:
13 | ray.init(namespace="babyagi", logging_level=logging.FATAL, ignore_reinit_error=True)
14 |
15 | @ray.remote
16 | class CooperativeTaskListStorageActor:
17 | def __init__(self, task_list: deque):
18 | self.tasks = task_list
19 |
20 | def append(self, task: Dict):
21 | self.tasks.append(task)
22 |
23 | def appendleft(self, task: Dict):
24 | self.tasks.appendleft(task)
25 |
26 | def replace(self, task_list: deque):
27 | self.tasks = task_list
28 |
29 | def reference(self, index: int):
30 | return self.tasks[index]
31 |
32 | def pop(self):
33 | return self.tasks.pop()
34 |
35 | def popleft(self):
36 | return self.tasks.popleft()
37 |
38 | def is_empty(self):
39 | return False if self.tasks else True
40 |
41 | def get_tasks(self):
42 | return self.tasks
43 |
44 | def remove_target_write_dicts(self, path):
45 | """
46 | Remove dictionaries from the list where "target" key matches path and "type" key is "write".
47 |
48 | Args:
49 | - path (str): The target path to match against.
50 |
51 | """
52 | self.tasks = deque([d for d in self.tasks if not (d.get("target") == path and d.get("type") == "write")])
53 |
54 |     def remove_target_command_dicts(self, path, command, result):
55 |         """
56 |         Remove entries whose "type" is "command", whose "target" matches command, whose "path" matches path and whose "content" equals result, when the result exceeds the duplicate-command token threshold.
57 |
58 |         Args:
59 |         - path (str): working directory; command (str): command string; result (str): command output.
60 |
61 |         """
62 | self.tasks = deque([d for d in self.tasks if not (d.get("target") == command and d.get("type") == "command" and "path" in d and d.get("path") == path and d.get("content") == result and self.is_big_command_result(result))])
63 |
64 |     def is_big_command_result(self, string) -> bool:
65 |         import tiktoken  # Lazy import; tiktoken was referenced here but never imported in this file.
66 |         try:
67 |             encoding = tiktoken.encoding_for_model('gpt-4-0314')
68 |         except KeyError:
69 |             encoding = tiktoken.encoding_for_model('gpt2') # Fallback for other models.
70 |
71 |         encoded = encoding.encode(string)
72 |         # MAX_DUPLICATE_COMMAND_RESULT_TOKEN is assumed to be defined by the importing module.
73 |         return MAX_DUPLICATE_COMMAND_RESULT_TOKEN <= len(encoded)
74 |
75 | class CooperativeTaskListStorage:
76 | def __init__(self, name: str, task_list: deque):
77 | self.name = name
78 |
79 | try:
80 | self.actor = ray.get_actor(name=self.name, namespace="babyagi")
81 | except ValueError:
82 |             self.actor = CooperativeTaskListStorageActor.options(name=self.name, namespace="babyagi", lifetime="detached").remote(task_list)
83 |
84 | def append(self, task: Dict):
85 | self.actor.append.remote(task)
86 |
87 | def appendleft(self, task: Dict):
88 | self.actor.appendleft.remote(task)
89 |
90 | def replace(self, task_list: deque):
91 | self.actor.replace.remote(task_list)
92 |
93 | def reference(self, index: int):
94 |         return ray.get(self.actor.reference.remote(index))
95 |
96 | def pop(self):
97 | return ray.get(self.actor.pop.remote())
98 |
99 | def popleft(self):
100 | return ray.get(self.actor.popleft.remote())
101 |
102 | def is_empty(self):
103 | return ray.get(self.actor.is_empty.remote())
104 |
105 | def get_tasks(self):
106 | return ray.get(self.actor.get_tasks.remote())
107 |
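A hedged usage sketch for the cooperative storage above (assumes a reachable Ray cluster, e.g. one started with `ray start --head`; the objective name is illustrative):

```python
from collections import deque
from extensions.ray_tasks import CooperativeTaskListStorage

# Creates (or attaches to) a detached actor named after the objective.
tasks = CooperativeTaskListStorage("my-objective", deque())
tasks.append({"task_name": "Develop a task list"})
print(tasks.is_empty())   # False; actor tasks from one caller run in submission order
print(tasks.get_tasks())
```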
--------------------------------------------------------------------------------
/extensions/requirements.txt:
--------------------------------------------------------------------------------
1 | ray==2.3.1
2 | pinecone-client==2.2.1
3 | llama-cpp-python>=0.1.35
4 | weaviate-client>=3.16.1
--------------------------------------------------------------------------------
/extensions/weaviate_storage.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import logging
3 | import re
4 | from typing import Dict, List
5 |
6 | import openai
7 | import weaviate
8 | from weaviate.embedded import EmbeddedOptions
9 |
10 |
11 | def can_import(module_name):
12 | try:
13 | importlib.import_module(module_name)
14 | return True
15 | except ImportError:
16 | return False
17 |
18 |
19 | assert can_import("weaviate"), (
20 | "\033[91m\033[1m"
21 | + "Weaviate storage requires package weaviate-client.\nInstall: pip install -r extensions/requirements.txt"
22 | )
23 |
24 |
25 | def create_client(
26 | weaviate_url: str, weaviate_api_key: str, weaviate_use_embedded: bool
27 | ):
28 | if weaviate_use_embedded:
29 | client = weaviate.Client(embedded_options=EmbeddedOptions())
30 | else:
31 | auth_config = (
32 | weaviate.auth.AuthApiKey(api_key=weaviate_api_key)
33 | if weaviate_api_key
34 | else None
35 | )
36 | client = weaviate.Client(weaviate_url, auth_client_secret=auth_config)
37 |
38 | return client
39 |
40 |
41 | class WeaviateResultsStorage:
42 | schema = {
43 | "properties": [
44 | {"name": "result_id", "dataType": ["string"]},
45 | {"name": "task", "dataType": ["string"]},
46 | {"name": "result", "dataType": ["text"]},
47 | ]
48 | }
49 |
50 | def __init__(
51 | self,
52 | openai_api_key: str,
53 | weaviate_url: str,
54 | weaviate_api_key: str,
55 | weaviate_use_embedded: bool,
56 | llm_model: str,
57 | llama_model_path: str,
58 | results_store_name: str,
59 | objective: str,
60 | ):
61 | openai.api_key = openai_api_key
62 | self.client = create_client(
63 | weaviate_url, weaviate_api_key, weaviate_use_embedded
64 | )
65 | self.index_name = None
66 | self.create_schema(results_store_name)
67 |
68 | self.llm_model = llm_model
69 | self.llama_model_path = llama_model_path
70 |
71 | def create_schema(self, results_store_name: str):
72 | valid_class_name = re.compile(r"^[A-Z][a-zA-Z0-9_]*$")
73 | if not re.match(valid_class_name, results_store_name):
74 | raise ValueError(
75 | f"Invalid index name: {results_store_name}. "
76 | "Index names must start with a capital letter and "
77 | "contain only alphanumeric characters and underscores."
78 | )
79 |
80 | self.schema["class"] = results_store_name
81 | if self.client.schema.contains(self.schema):
82 | logging.info(
83 | f"Index named {results_store_name} already exists. Reusing it."
84 | )
85 | else:
86 | logging.info(f"Creating index named {results_store_name}")
87 | self.client.schema.create_class(self.schema)
88 |
89 | self.index_name = results_store_name
90 |
91 | def add(self, task: Dict, result: Dict, result_id: int, vector: List):
92 | enriched_result = {"data": result}
93 | vector = self.get_embedding(enriched_result["data"])
94 |
95 | with self.client.batch as batch:
96 | data_object = {
97 | "result_id": result_id,
98 | "task": task["task_name"],
99 | "result": result,
100 | }
101 | batch.add_data_object(
102 | data_object=data_object, class_name=self.index_name, vector=vector
103 | )
104 |
105 | def query(self, query: str, top_results_num: int) -> List[dict]:
106 | query_embedding = self.get_embedding(query)
107 |
108 | results = (
109 | self.client.query.get(self.index_name, ["task"])
110 | .with_hybrid(query=query, alpha=0.5, vector=query_embedding)
111 | .with_limit(top_results_num)
112 | .do()
113 | )
114 |
115 | return self._extract_tasks(results)
116 |
117 | def _extract_tasks(self, data):
118 | task_data = data.get("data", {}).get("Get", {}).get(self.index_name, [])
119 | return [item["task"] for item in task_data]
120 |
121 | # Get embedding for the text
122 | def get_embedding(self, text: str) -> list:
123 | text = text.replace("\n", " ")
124 |
125 | if self.llm_model.startswith("llama"):
126 | from llama_cpp import Llama
127 |
128 | llm_embed = Llama(
129 | model_path=self.llama_model_path,
130 | n_ctx=2048,
131 | n_threads=4,
132 | embedding=True,
133 | use_mlock=True,
134 | )
135 | return llm_embed.embed(text)
136 |
137 | return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
138 | "data"
139 | ][0]["embedding"]
140 |
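A hedged sketch of constructing the Weaviate-backed store above in embedded mode (no server needed; the credentials are placeholders, and the index name must match the capitalized pattern enforced by create_schema):

```python
from extensions.weaviate_storage import WeaviateResultsStorage

store = WeaviateResultsStorage(
    openai_api_key="sk-...",        # placeholder
    weaviate_url="",                # unused when embedded mode is on
    weaviate_api_key="",
    weaviate_use_embedded=True,
    llm_model="gpt-4",
    llama_model_path="",
    results_store_name="BabyAgiResults",  # must start with a capital letter
    objective="Develop a task list",
)
print(store.query("Develop a task list", top_results_num=5))
```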
--------------------------------------------------------------------------------
/log/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/log/.gitkeep
--------------------------------------------------------------------------------
/new_store.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ex
3 |
4 | # Path to the .env file
5 | ENV_FILE=".env"
6 |
7 | # Get the current value of RESULTS_SOTRE_NUMBER
8 | # Determine the system type
9 | if [ "$(uname)" = "Darwin" ] || [ "$(uname -s)" = "FreeBSD" ]; then
10 | CURRENT_VALUE=$(awk -F '=' '/^RESULTS_SOTRE_NUMBER/ {print $2}' "$ENV_FILE")
11 | else
12 | CURRENT_VALUE=$(grep -oP '(?<=RESULTS_SOTRE_NUMBER=).*' "$ENV_FILE")
13 | fi
14 |
15 | # Increment the current value
16 | NEW_VALUE=$((CURRENT_VALUE + 1))
17 |
18 | # Update RESULTS_SOTRE_NUMBER in the .env file
19 | # (BSD sed needs the -i suffix as a separate word; GNU sed needs it attached)
20 | if [ "$(uname)" = "Darwin" ] || [ "$(uname -s)" = "FreeBSD" ]; then
21 |     sed -i ".backup" "s/RESULTS_SOTRE_NUMBER=$CURRENT_VALUE/RESULTS_SOTRE_NUMBER=$NEW_VALUE/" "$ENV_FILE"
22 | else
23 |     sed -i".backup" "s/RESULTS_SOTRE_NUMBER=$CURRENT_VALUE/RESULTS_SOTRE_NUMBER=$NEW_VALUE/" "$ENV_FILE"
24 | fi
25 |
26 | echo "RESULTS_SOTRE_NUMBER has been incremented to $NEW_VALUE"
--------------------------------------------------------------------------------
/node/.env.example:
--------------------------------------------------------------------------------
1 | # cp .env.example .env
2 | # Edit your .env file with your own values
3 | # Don't commit your .env file to git/push to GitHub!
4 | # Don't modify/delete .env.example unless adding extensions to the project
5 | # which require new variable to be added to the .env file
6 |
7 | # API CONFIG
8 | OPENAI_API_KEY=
9 | OPENAI_API_MODEL=gpt-3.5-turbo # alternatively, gpt-4, text-davinci-003, etc
10 |
11 | # Collection CONFIG
12 | TABLE_NAME=test-collection
13 |
14 | # INSTANCE CONFIG
15 | BABY_NAME=BabyAGI
16 |
17 |
--------------------------------------------------------------------------------
/node/README.md:
--------------------------------------------------------------------------------
1 | # node-chroma babyagi
2 |
3 | # Objective
4 |
5 | This Node script is an example of an AI-powered task management system. The system uses OpenAI and Chroma APIs to create, prioritize, and execute tasks. The main idea behind this system is that it creates tasks based on the result of previous tasks and a predefined objective. The script then uses OpenAI's natural language processing (NLP) capabilities to create new tasks based on the objective, and Chroma to store and retrieve task results for context. This is a pared-down version of the original [Task-Driven Autonomous Agent](https://twitter.com/yoheinakajima/status/1640934493489070080?s=20) (Mar 28, 2023).
6 |
7 | This README will cover the following:
8 |
9 | - How the script works
10 |
11 | - How to use the script
12 | - Warning about running the script continuously
13 |
14 | # How It Works
15 |
16 | The script works by running an infinite loop that does the following steps:
17 |
18 | 1. Pulls the first task from the task list.
19 | 2. Sends the task to the execution agent, which uses OpenAI's API to complete the task based on the context.
20 | 3. Enriches the result and stores it in [Chroma](https://docs.trychroma.com).
21 | 4. Creates new tasks and reprioritizes the task list based on the objective and the result of the previous task.
22 | The execution_agent() function is where the OpenAI API is used. It takes two parameters: the objective and the task. It then sends a prompt to OpenAI's API, which returns the result of the task. The prompt consists of a description of the AI system's task, the objective, and the task itself. The result is then returned as a string.
23 |
24 | The task_creation_agent() function is where OpenAI's API is used to create new tasks based on the objective and the result of the previous task. The function takes four parameters: the objective, the result of the previous task, the task description, and the current task list. It then sends a prompt to OpenAI's API, which returns a list of new tasks as strings. The function then returns the new tasks as a list of dictionaries, where each dictionary contains the name of the task.
25 |
26 | The prioritization_agent() function is where OpenAI's API is used to reprioritize the task list. The function takes one parameter, the ID of the current task. It sends a prompt to OpenAI's API, which returns the reprioritized task list as a numbered list.
27 |
28 | Finally, the script uses Chroma to store and retrieve task results for context. The script creates a Chroma collection based on the table name specified in the TABLE_NAME variable. Chroma is then used to store the results of the task in the collection, along with the task name and any additional metadata.
29 |
30 | # How to Use
31 |
32 | To use the script, you will need to follow these steps:
33 |
34 | 1. Install the required packages: `npm install`
35 | 2. Install chroma in this directory (based on the Chroma [docs](https://docs.trychroma.com/getting-started)):
36 | ```
37 | git clone git@github.com:chroma-core/chroma.git
38 | ```
39 | 3. Make sure Docker is running on your machine
40 | 4. Set your OpenAI API key in the OPENAI_API_KEY variable.
41 | 5. Set the model you want to use in the OPENAI_API_MODEL variable.
42 | 6. Set the name of the table where the task results will be stored in the TABLE_NAME variable.
43 | 7. Run the script with 'npm start'. This will handle 'docker compose up' for Chroma.
44 | 8. Provide the objective of the task management system when prompted.
45 | 9. Provide the initial task to complete the objective when prompted.
46 |
47 | # Warning
48 |
49 | This script is designed to be run continuously as part of a task management system. Running this script continuously can result in high API usage, so please use it responsibly. Additionally, the script requires the OpenAI API to be set up correctly and a local Chroma instance to be running, so make sure both are configured before running the script.
50 |
51 | # Backstory
52 |
53 | BabyAGI is a pared-down version of the original [Task-Driven Autonomous Agent](https://twitter.com/yoheinakajima/status/1640934493489070080?s=20) (Mar 28, 2023) shared on Twitter. This version is down to 140 lines: 13 comments, 22 blank, 105 code. The name of the repo came up in the reaction to the original autonomous agent - the author does not mean to imply that this is AGI.
54 |
55 | # TODO
56 |
57 | - [x] Implement BabyAGI in node
58 | - [x] Switch Pinecone to Chroma
59 | - [ ] Add LLaMA support
60 | - [ ] Allow users to modify model params
61 | - [ ] Support command line args
62 | - [ ] Allow agent to request additional input from user ( could be an interesting strategy to mitigate looping )
63 |
64 | Made with love by:
65 |
66 | - [@yoheinakajima](https://twitter.com/yoheinakajima) (0->1), who happens to be a VC - so if you use this to build a startup, ping him!
67 |
68 | Contributions from:
69 |
70 | - [@anton](https://twitter.com/atroyn) (pinecone->chroma), who happens to be a founder at [Chroma](https://www.trychroma.com/)
71 | - [@aidanjrauscher](https://twitter.com/aidanjrauscher) (python->node), who happens to be trying to find a job
72 |
--------------------------------------------------------------------------------
/node/babyagi.js:
--------------------------------------------------------------------------------
1 | import { Configuration, OpenAIApi } from "openai"
2 | import { ChromaClient, OpenAIEmbeddingFunction } from "chromadb"
3 | import prompt from "prompt-sync"
4 | import assert from "assert"
5 | import * as dotenv from "dotenv"
6 | dotenv.config()
7 |
8 | // const client = new ChromaClient("http://localhost:8000")
9 |
10 | // API Keys
11 | const OPENAI_API_KEY = process.env.OPENAI_API_KEY || ""
12 | assert(OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env")
13 |
14 | const OPENAI_API_MODEL = process.env.OPENAI_API_MODEL || "gpt-3.5-turbo"
15 |
16 | // Table config
17 | const TABLE_NAME = process.env.TABLE_NAME || ""
18 | assert(TABLE_NAME, "TABLE_NAME environment variable is missing from .env")
19 |
20 | // Run config
21 | const BABY_NAME = process.env.BABY_NAME || "BabyAGI"
22 |
23 | // Goal config
24 | const p = prompt()
25 | const OBJECTIVE = p("What is BabyAGI's objective? ")
26 | const INITIAL_TASK = p("What is the initial task to complete the objective? ")
27 | assert(OBJECTIVE, "No objective provided.")
28 | assert(INITIAL_TASK, "No initial task provided.")
29 |
30 | console.log('\x1b[95m\x1b[1m\n*****CONFIGURATION*****\n\x1b[0m\x1b[0m')
31 | console.log(`Name: ${BABY_NAME}`)
32 | console.log(`LLM: ${OPENAI_API_MODEL}`)
33 |
34 | if (OPENAI_API_MODEL.toLowerCase().includes("gpt-4")){
35 | console.log("\x1b[91m\x1b[1m\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****\x1b[0m\x1b[0m")
36 | }
37 |
38 | console.log("\x1b[94m\x1b[1m" + "\n*****OBJECTIVE*****\n" + "\x1b[0m\x1b[0m")
39 | console.log(`${OBJECTIVE}`)
40 |
41 | console.log(`\x1b[93m\x1b[1m \nInitial task: \x1b[0m\x1b[0m ${INITIAL_TASK}`)
42 |
43 | // Define OpenAI embedding function using Chroma
44 | const embeddingFunction = new OpenAIEmbeddingFunction(OPENAI_API_KEY)
45 |
46 | // Configure OpenAI
47 | const configuration = new Configuration({
48 | apiKey: OPENAI_API_KEY,
49 | });
50 | const openai = new OpenAIApi(configuration);
51 |
52 | //Task List
53 | var taskList = []
54 |
55 | // Connect to chromadb and create/get collection
56 | const chromaConnect = async ()=>{
57 | const chroma = new ChromaClient("http://localhost:8000")
58 | const metric = "cosine"
59 | const collections = await chroma.listCollections()
60 | const collectionNames = collections.map((c)=>c.name)
61 | if(collectionNames.includes(TABLE_NAME)){
62 | const collection = await chroma.getCollection(TABLE_NAME, embeddingFunction)
63 | return collection
64 | }
65 | else{
66 | const collection = await chroma.createCollection(
67 | TABLE_NAME,
68 | {
69 | "hnsw:space": metric
70 | },
71 | embeddingFunction
72 | )
73 | return collection
74 | }
75 | }
76 |
77 | const add_task = (task)=>{ taskList.push(task) }
78 |
79 | const clear_tasks = ()=>{ taskList = [] }
80 |
81 | const get_ada_embedding = async (text)=>{
82 |     text = text.replace(/\n/g, " ") // replace all newlines, not just the first
83 | const embedding = await embeddingFunction.generate(text)
84 | return embedding
85 | }
86 |
87 | const openai_completion = async (prompt, temperature=0.5, maxTokens=100)=>{
88 | if(OPENAI_API_MODEL.startsWith("gpt-")){
89 | const messages = [{"role": "system", "content": prompt}]
90 | const response = await openai.createChatCompletion({
91 | model: OPENAI_API_MODEL,
92 | messages: messages,
93 | max_tokens: maxTokens,
94 | temperature: temperature,
95 | n: 1,
96 | stop: null
97 | })
98 | return response.data.choices[0].message.content.trim()
99 | }
100 | else {
101 | const response = await openai.createCompletion({
102 | model: OPENAI_API_MODEL,
103 | prompt: prompt,
104 | max_tokens: maxTokens,
105 | temperature: temperature,
106 | top_p: 1,
107 | frequency_penalty: 0,
108 | presence_penalty: 0
109 | })
110 | return response.data.choices[0].text.trim()
111 | }
112 | }
113 |
114 | const task_creation_agent = async (objective, result, task_description, taskList)=>{
115 | const prompt = `
116 |     You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective: ${objective},
117 | The last completed task has the result: ${result}.
118 | This result was based on this task description: ${task_description}.
119 | These are incomplete tasks: ${taskList.map(task=>`${task.taskId}: ${task.taskName}`).join(', ')}.
120 | Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks.
121 | Return the tasks as an array.`
122 | const response = await openai_completion(prompt)
123 | const newTasks = response.trim().includes("\n") ? response.trim().split("\n") : [response.trim()];
124 | return newTasks.map(taskName => ({ taskName: taskName }));
125 | }
126 |
127 |
128 |
129 | const prioritization_agent = async (taskId)=>{
130 | const taskNames = taskList.map((task)=>task.taskName)
131 | const nextTaskId = taskId+1
132 | const prompt = `
133 |     You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: ${taskNames}.
134 |     Consider the ultimate objective of your team: ${OBJECTIVE}. Do not remove any tasks. Return the result as a numbered list, like:
135 | #. First task
136 | #. Second task
137 | Start the task list with number ${nextTaskId}.`
138 | const response = await openai_completion(prompt)
139 | const newTasks = response.trim().includes("\n") ? response.trim().split("\n") : [response.trim()];
140 | clear_tasks()
141 | newTasks.forEach((newTask)=>{
142 | const newTaskParts = newTask.trim().split(/\.(?=\s)/)
143 | if (newTaskParts.length == 2){
144 | const newTaskId = newTaskParts[0].trim()
145 | const newTaskName = newTaskParts[1].trim()
146 | add_task({
147 | taskId: newTaskId,
148 | taskName: newTaskName
149 | })
150 | }
151 | })
152 | }
153 |
154 | const execution_agent = async (objective, task, chromaCollection)=>{
155 |     const context = await context_agent(objective, 5, chromaCollection)
156 | const prompt = `
157 | You are an AI who performs one task based on the following objective: ${objective}.\n
158 | Take into account these previously completed tasks: ${context}.\n
159 | Your task: ${task}\nResponse:`
160 | const response = await openai_completion(prompt, undefined, 2000)
161 | return response
162 | }
163 |
164 | const context_agent = async (query, topResultsNum, chromaCollection)=>{
165 | const count = await chromaCollection.count()
166 | if (count == 0){
167 | return []
168 | }
169 | const results = await chromaCollection.query(
170 | undefined,
171 | Math.min(topResultsNum, count),
172 | undefined,
173 | query,
174 | )
175 | return results.metadatas[0].map(item=>item.task)
176 | }
177 |
178 | function sleep(ms) {
179 | return new Promise(resolve => setTimeout(resolve, ms))
180 | }
181 |
182 | (async()=>{
183 | const initialTask = {
184 | taskId: 1,
185 | taskName: INITIAL_TASK
186 | }
187 | add_task(initialTask)
188 | const chromaCollection = await chromaConnect()
189 | var taskIdCounter = 1
190 | while (true){
191 | if(taskList.length>0){
192 | console.log("\x1b[95m\x1b[1m"+"\n*****TASK LIST*****\n"+"\x1b[0m\x1b[0m")
193 | taskList.forEach(t => {
194 | console.log(" • " + t.taskName)
195 | })
196 |
197 | // Step 1: Pull the first task
198 | const task = taskList.shift()
199 | console.log("\x1b[92m\x1b[1m"+"\n*****NEXT TASK*****\n"+"\x1b[0m\x1b[0m")
200 | console.log(task.taskId + ": " + task.taskName)
201 |
202 | // Send to execution function to complete the task based on the context
203 | const result = await execution_agent(OBJECTIVE, task.taskName, chromaCollection)
204 | const currTaskId = task.taskId
205 | console.log("\x1b[93m\x1b[1m"+"\nTASK RESULT\n"+"\x1b[0m\x1b[0m")
206 | console.log(result)
207 |
208 | // Step 2: Enrich result and store in Chroma
209 | const enrichedResult = { data : result} // this is where you should enrich the result if needed
210 | const resultId = `result_${task.taskId}`
211 | const vector = enrichedResult.data // extract the actual result from the dictionary
212 | const collectionLength = (await chromaCollection.get([resultId])).ids?.length
213 | if(collectionLength>0){
214 | await chromaCollection.update(
215 | resultId,
216 | undefined,
217 | {task: task.taskName, result: result},
218 | vector
219 | )
220 | }
221 | else{
222 | await chromaCollection.add(
223 | resultId,
224 | undefined,
225 | {task: task.taskName, result},
226 | vector
227 | )
228 | }
229 |
230 | // Step 3: Create new tasks and reprioritize task list
231 | const newTasks = await task_creation_agent(OBJECTIVE, enrichedResult, task.taskName, taskList.map(task=>task.taskName))
232 | newTasks.forEach((task)=>{
233 | taskIdCounter += 1
234 | task.taskId = taskIdCounter
235 | add_task(task)
236 | })
237 | await prioritization_agent(currTaskId)
238 | await sleep(3000)
239 | }
240 | }
241 | })()
242 |
243 |
--------------------------------------------------------------------------------
/node/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "babyagi",
3 | "version": "1.0.0",
4 | "lockfileVersion": 3,
5 | "requires": true,
6 | "packages": {
7 | "": {
8 | "name": "babyagi",
9 | "version": "1.0.0",
10 | "license": "ISC",
11 | "dependencies": {
12 | "chromadb": "^1.4.1",
13 | "dotenv": "^16.0.3",
14 | "openai": "^3.2.1",
15 | "prompt-sync": "^4.2.0"
16 | }
17 | },
18 | "node_modules/ansi-regex": {
19 | "version": "4.1.1",
20 | "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz",
21 | "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==",
22 | "engines": {
23 | "node": ">=6"
24 | }
25 | },
26 | "node_modules/asynckit": {
27 | "version": "0.4.0",
28 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
29 | "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
30 | },
31 | "node_modules/axios": {
32 | "version": "0.26.1",
33 | "resolved": "https://registry.npmjs.org/axios/-/axios-0.26.1.tgz",
34 | "integrity": "sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==",
35 | "dependencies": {
36 | "follow-redirects": "^1.14.8"
37 | }
38 | },
39 | "node_modules/chromadb": {
40 | "version": "1.4.1",
41 | "resolved": "https://registry.npmjs.org/chromadb/-/chromadb-1.4.1.tgz",
42 | "integrity": "sha512-vRcig4CJxJXs++cKMt9tHmk9YjQprxzLK9sVYD6iXfqRJBoXeoFzk/RS95Dz1J6/7aSfBwDsyx3AE2ePP9FnYA==",
43 | "dependencies": {
44 | "axios": "^0.26.0"
45 | }
46 | },
47 | "node_modules/combined-stream": {
48 | "version": "1.0.8",
49 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
50 | "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
51 | "dependencies": {
52 | "delayed-stream": "~1.0.0"
53 | },
54 | "engines": {
55 | "node": ">= 0.8"
56 | }
57 | },
58 | "node_modules/delayed-stream": {
59 | "version": "1.0.0",
60 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
61 | "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
62 | "engines": {
63 | "node": ">=0.4.0"
64 | }
65 | },
66 | "node_modules/dotenv": {
67 | "version": "16.0.3",
68 | "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz",
69 | "integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==",
70 | "engines": {
71 | "node": ">=12"
72 | }
73 | },
74 | "node_modules/follow-redirects": {
75 | "version": "1.15.2",
76 | "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz",
77 | "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==",
78 | "funding": [
79 | {
80 | "type": "individual",
81 | "url": "https://github.com/sponsors/RubenVerborgh"
82 | }
83 | ],
84 | "engines": {
85 | "node": ">=4.0"
86 | },
87 | "peerDependenciesMeta": {
88 | "debug": {
89 | "optional": true
90 | }
91 | }
92 | },
93 | "node_modules/form-data": {
94 | "version": "4.0.0",
95 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
96 | "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
97 | "dependencies": {
98 | "asynckit": "^0.4.0",
99 | "combined-stream": "^1.0.8",
100 | "mime-types": "^2.1.12"
101 | },
102 | "engines": {
103 | "node": ">= 6"
104 | }
105 | },
106 | "node_modules/mime-db": {
107 | "version": "1.52.0",
108 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
109 | "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
110 | "engines": {
111 | "node": ">= 0.6"
112 | }
113 | },
114 | "node_modules/mime-types": {
115 | "version": "2.1.35",
116 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
117 | "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
118 | "dependencies": {
119 | "mime-db": "1.52.0"
120 | },
121 | "engines": {
122 | "node": ">= 0.6"
123 | }
124 | },
125 | "node_modules/openai": {
126 | "version": "3.2.1",
127 | "resolved": "https://registry.npmjs.org/openai/-/openai-3.2.1.tgz",
128 | "integrity": "sha512-762C9BNlJPbjjlWZi4WYK9iM2tAVAv0uUp1UmI34vb0CN5T2mjB/qM6RYBmNKMh/dN9fC+bxqPwWJZUTWW052A==",
129 | "dependencies": {
130 | "axios": "^0.26.0",
131 | "form-data": "^4.0.0"
132 | }
133 | },
134 | "node_modules/prompt-sync": {
135 | "version": "4.2.0",
136 | "resolved": "https://registry.npmjs.org/prompt-sync/-/prompt-sync-4.2.0.tgz",
137 | "integrity": "sha512-BuEzzc5zptP5LsgV5MZETjDaKSWfchl5U9Luiu8SKp7iZWD5tZalOxvNcZRwv+d2phNFr8xlbxmFNcRKfJOzJw==",
138 | "dependencies": {
139 | "strip-ansi": "^5.0.0"
140 | }
141 | },
142 | "node_modules/strip-ansi": {
143 | "version": "5.2.0",
144 | "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
145 | "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
146 | "dependencies": {
147 | "ansi-regex": "^4.1.0"
148 | },
149 | "engines": {
150 | "node": ">=6"
151 | }
152 | }
153 | }
154 | }
155 |
--------------------------------------------------------------------------------
/node/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "babyagi",
3 | "version": "1.0.0",
4 | "description": "babyagi",
5 | "main": "babyagi.js",
6 | "type": "module",
7 | "author": {
8 | "name": "Yohei Nakajima",
9 | "url": "https://twitter.com/yoheinakajima"
10 | },
11 | "scripts": {
12 | "babyagi": "node babyagi.js",
13 | "start": "npm run chroma-up && node babyagi.js",
14 | "chroma-up": "docker-compose -f ./chroma/docker-compose.yml up -d --build",
15 | "chroma-down": "docker-compose -f ./chroma/docker-compose.yml down"
16 | },
17 | "keywords": [],
18 | "license": "ISC",
19 | "dependencies": {
20 | "chromadb": "^1.4.1",
21 | "dotenv": "^16.0.3",
22 | "openai": "^3.2.1",
23 | "prompt-sync": "^4.2.0"
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/pwd/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/pwd/.gitkeep
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | argparse==1.4.0
2 | openai==1.65.2
3 | httpx==0.27.2
4 | chromadb==0.3.21
5 | pre-commit>=3.2.0
6 | python-dotenv==1.0.0
7 | tiktoken==0.7.0
8 | google-generativeai==0.5.3
9 | anthropic==0.49.0
--------------------------------------------------------------------------------
/task_parser.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List
2 | from collections import deque
3 |
4 | class TaskParser:
5 |
6 | def decode(self, input_string: str) -> List[dict]:
7 | input_string = input_string.strip()
8 | if input_string.startswith("type:"):
9 | # The "type:" at the beginning should be deleted because it will all be added later.
10 | input_string = input_string[len("type:"):]
11 | data = input_string.split("\ntype:")
12 | else:
13 |             # If the string does not start with "type:", drop everything before the first "\ntype:" delimiter.
14 | data = input_string.split("\ntype:")[1:]
15 |
16 | if len(data) == 0:
17 | raise ValueError("No valid items found")
18 | parsed_data = []
19 |
20 |         for item in data:
21 |             item = "type:" + item.strip()
22 |             task_type, path, content = self._split_data(item)
23 |             parsed_item = {}
24 |             parsed_item["type"] = task_type
25 |             if path is not None:
26 |                 parsed_item["path"] = path
27 |             parsed_item["content"] = content
28 |             parsed_data.append(parsed_item)
29 |
30 | if len(parsed_data) == 0:
31 | raise ValueError("No valid items found")
32 |
33 | return parsed_data
34 |
35 | def _split_data(self, input_data):
36 | lines = input_data.split('\n')
37 | type_line, path_line = None, None
38 | content_lines = []
39 | record_content = False
40 | has_content = False
41 |
42 | for line in lines:
43 | #line = line.strip()
44 | if line.startswith("type:"):
45 | if type_line is not None:
46 | raise ValueError("Multiple type lines found")
47 | type_line = line[5:].strip()
48 | elif line.startswith("path:"):
49 | if path_line is not None:
50 | raise ValueError("Multiple path lines found")
51 | path_line = line[5:].strip()
52 | elif line.startswith("```"):
53 | has_content = True
54 | record_content = not record_content
55 | elif record_content:
56 | content_lines.append(line)
57 |
58 | if type_line is None:
59 | raise ValueError("No type line found")
60 |         if len(content_lines) == 0 and not has_content:
61 | raise ValueError("No content found")
62 |
63 | content = "\n".join(content_lines)
64 |
65 | return type_line, path_line, content
66 |
67 | def encode(self, input_data: deque) -> str:
68 | output = ""
69 | for item in input_data:
70 | output += f"type: {item['type']}\n"
71 | if 'path' in item:
72 | output += f"path: {item['path']}\n"
73 | output += "```\n"
74 | output += f"{item['content']}\n"
75 | output += "```\n"
76 | return output
77 |
78 | def close_open_backticks(self, string: str) -> str:
79 | if string.count('\n```') % 2 != 0:
80 | string += '\n```'
81 | return string
82 |
83 | def test(self):
84 | test_input = """type: command
85 | path: /workspace/
86 | ```bash
87 | sudo apt-get update
88 | sudo apt-get install git
89 | git clone https://github.com/flutter/flutter.git
90 | ```
91 |
92 | type: command
93 | path: /workspace/
94 | ```bash
95 | cd flutter
96 | ./bin/flutter doctor
97 | export PATH="$PATH:`pwd`/bin"
98 | ```
99 |
100 | type: command
101 | path: /workspace/
102 | ```bash
103 | cd /workspace
104 | flutter create my_flutter_app
105 | ```
106 |
107 | type: command
108 | path: /workspace/
109 | ```bash
110 | flutter channel beta
111 | flutter upgrade
112 | flutter config --enable-web
113 | ```
114 |
115 | type: command
116 | path: /workspace/
117 | ```bash
118 | cd my_flutter_app
119 | flutter run -d web-server --web-port=8080
120 | ```
121 |
122 | type: write
123 | path: /app/Dockerfile
124 | ```
125 | FROM ubuntu:latest
126 |
127 | RUN apt-get update && apt-get install -y git
128 |
129 | COPY . /app
130 |
131 | WORKDIR /app
132 |
133 | RUN git clone https://github.com/flutter/flutter.git
134 |
135 | ENV PATH="/workspace/flutter/bin:${PATH}"
136 |
137 | RUN flutter doctor
138 |
139 | RUN cd /workspace && flutter create my_flutter_app
140 |
141 | RUN flutter channel beta && flutter upgrade && flutter config --enable-web
142 |
143 | EXPOSE 8080
144 |
145 | CMD ["flutter", "run", "-d", "web-server", "--web-port=8080"]
146 | ```
147 |
148 | type: plan
149 | ```
150 | Configure the container to expose port 8080 to the host machine and access the app from a browser outside the container.
151 | ```
152 |
153 | type: write
154 | path: /workspace/othello_app/lib/main.dart
155 | ```dart
156 | import 'package:flutter/material.dart';
157 |
158 | void main() {
159 | runApp(MyApp());
160 | }
161 |
162 | class MyApp extends StatelessWidget {
163 | const MyApp({Key? key}) : super(key: key);
164 |
165 | @override
166 | Widget build(BuildContext context) {
167 | return MaterialApp(
168 | title: 'Othello',
169 | theme: ThemeData(
170 | primarySwatch: Colors.blue,
171 | ),
172 | home: MyHomePage(title: 'Othello'),
173 | );
174 | }
175 | }
176 |
177 | class MyHomePage extends StatefulWidget {
178 | MyHomePage({Key? key, required this.title}) : super(key: key);
179 |
180 | final String title;
181 |
182 | @override
183 | _MyHomePageState createState() => _MyHomePageState();
184 | }
185 |
186 | class _MyHomePageState extends State {
187 | @override
188 | Widget build(BuildContext context) {
189 | return Scaffold(
190 | appBar: AppBar(
191 | title: Text(widget.title),
192 | ),
193 | body: Center(
194 | child: Text('Othello Game'),
195 | ),
196 | );
197 | }
198 | }
199 | ```"""
200 | # fail case
201 | # test_input = """Example of tasks output:
202 | #1. Start a web server: `flutter run -d web-server --web-port 8080`
203 | #2. Make the web server accessible from outside the container: Configure the container to expose port 8080 and map it to the host machine."""
204 | parsed_data = self.decode(test_input)
205 | print(parsed_data)
206 | print(self.encode(deque(parsed_data)))
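A minimal round-trip sketch of the format handled above: each item is a `type:` line, an optional `path:` line, and a fenced content block (the sample command is illustrative; note that encode drops any language tag on the fence):

```python
from collections import deque
from task_parser import TaskParser

parser = TaskParser()
sample = "type: command\npath: /workspace/\n```bash\necho hello\n```"
items = parser.decode(sample)
# -> [{'type': 'command', 'path': '/workspace/', 'content': 'echo hello'}]
print(parser.encode(deque(items)))
```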
--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------
1 | The monitor tool requires the curses library. To install it on Ubuntu, run:
2 |
3 | ```sh
4 | sudo apt update && sudo apt install libncurses5-dev libncursesw5-dev -y
5 | ```
6 |
7 | Also, install the keyboard library:
8 |
9 | ```sh
10 | pip3 install keyboard
11 | ```
12 |
--------------------------------------------------------------------------------
/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/tools/__init__.py
--------------------------------------------------------------------------------
/tools/monitor.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import sys
3 | import time
4 | import curses
5 |
6 | from pathlib import Path
7 | sys.path.append(str(Path(__file__).resolve().parent.parent))
8 | from extensions.ray_objectives import CooperativeObjectivesListStorage
9 | from extensions.ray_tasks import CooperativeTaskListStorage
10 |
11 | def print_buffer(stdscr, lines):
12 | stdscr.clear()
13 | y = 0
14 | x = 0
15 | for line in lines:
16 | stdscr.addstr(y, x, line)
17 | y += 1
18 | stdscr.refresh()
19 |
20 | def main(stdscr):
21 | objectives = CooperativeObjectivesListStorage()
22 | while True:
23 | objectives_list = objectives.get_objective_names()
24 | buffer = []
25 | if not objectives_list:
26 | buffer.append("No objectives")
27 | for objective in objectives_list:
28 | buffer.append("-----------------")
29 | buffer.append(f"Objective: {objective}")
30 | buffer.append("-----------------")
31 | tasks = CooperativeTaskListStorage(objective)
32 | tasks_list = tasks.get_task_names()
33 | buffer.append(f"Tasks:")
34 | for t in tasks_list:
35 | buffer.append(f" * {t}")
36 | buffer.append("-----------------")
37 | print_buffer(stdscr, buffer)
38 | time.sleep(30)
39 |
40 | curses.wrapper(main)
41 |
--------------------------------------------------------------------------------
/tools/results.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import argparse
4 | import openai
5 | import pinecone
6 | from dotenv import load_dotenv
7 |
8 | load_dotenv()
9 |
10 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
11 | assert OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env"
12 |
13 | PINECONE_API_KEY = os.getenv("PINECONE_API_KEY", "")
14 | assert PINECONE_API_KEY, "PINECONE_API_KEY environment variable is missing from .env"
15 |
16 | PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT", "us-east1-gcp")
17 | assert PINECONE_ENVIRONMENT, "PINECONE_ENVIRONMENT environment variable is missing from .env"
18 |
19 | # Table config
20 | PINECONE_TABLE_NAME = os.getenv("TABLE_NAME", "")
21 | assert PINECONE_TABLE_NAME, "TABLE_NAME environment variable is missing from .env"
22 |
23 | # Function to query records from the Pinecone index
24 | def query_records(index, query, top_k=1000):
25 | results = index.query(query, top_k=top_k, include_metadata=True)
26 | return [f"{task.metadata['task']}:\n{task.metadata['result']}\n------------------" for task in results.matches]
27 |
28 | # Get embedding for the text
29 | def get_ada_embedding(text):
30 | text = text.replace("\n", " ")
31 | return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
32 |
33 | def main():
34 | # Parse command-line arguments
35 | parser = argparse.ArgumentParser(description="Query Pinecone index using a string.")
36 | parser.add_argument('objective', nargs='*', metavar='', help='''
37 | main objective description. Doesn\'t need to be quoted.
38 | if not specified, get objective from environment.
39 | ''', default=[os.getenv("OBJECTIVE", "")])
40 | args = parser.parse_args()
41 |
42 | # Configure OpenAI
43 | openai.api_key = OPENAI_API_KEY
44 |
45 | # Initialize Pinecone
46 | pinecone.init(api_key=PINECONE_API_KEY)
47 |
48 | # Connect to the objective index
49 | index = pinecone.Index(PINECONE_TABLE_NAME)
50 |
51 | # Query records from the index
52 | query = get_ada_embedding(' '.join(args.objective).strip())
53 | retrieved_tasks = query_records(index, query)
54 | for r in retrieved_tasks:
55 | print(r)
56 |
57 | if __name__ == "__main__":
58 | main()
59 |
--------------------------------------------------------------------------------
/tools/results_browser.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import curses
4 | import argparse
5 | import openai
6 | import pinecone
7 | from dotenv import load_dotenv
8 | import textwrap
9 |
10 | load_dotenv()
11 |
12 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
13 | assert OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env"
14 |
15 | PINECONE_API_KEY = os.getenv("PINECONE_API_KEY", "")
16 | assert PINECONE_API_KEY, "PINECONE_API_KEY environment variable is missing from .env"
17 |
18 | PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT", "us-east1-gcp")
19 | assert PINECONE_ENVIRONMENT, "PINECONE_ENVIRONMENT environment variable is missing from .env"
20 |
21 | # Table config
22 | PINECONE_TABLE_NAME = os.getenv("TABLE_NAME", "")
23 | assert PINECONE_TABLE_NAME, "TABLE_NAME environment variable is missing from .env"
24 |
25 | # Function to query records from the Pinecone index
26 | def query_records(index, query, top_k=1000):
27 | results = index.query(query, top_k=top_k, include_metadata=True)
28 | return [{"name": f"{task.metadata['task']}", "result": f"{task.metadata['result']}"} for task in results.matches]
29 |
30 | # Get embedding for the text
31 | def get_ada_embedding(text):
32 | return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
33 |
34 | def draw_tasks(stdscr, tasks, scroll_pos, selected):
35 | y = 0
36 | h, w = stdscr.getmaxyx()
37 | for idx, task in enumerate(tasks[scroll_pos:], start=scroll_pos):
38 | if y >= h:
39 | break
40 | task_name = f'{task["name"]}'
41 | truncated_str = task_name[:w-1]
42 | if idx == selected:
43 | stdscr.addstr(y, 0, truncated_str, curses.A_REVERSE)
44 | else:
45 | stdscr.addstr(y, 0, truncated_str)
46 | y += 1
47 |
48 | def draw_result(stdscr, task):
49 | task_name = f'Task: {task["name"]}'
50 | task_result = f'Result: {task["result"]}'
51 |
52 | _, w = stdscr.getmaxyx()
53 | task_name_wrapped = textwrap.wrap(task_name, width=w)
54 |
55 | for i, line in enumerate(task_name_wrapped):
56 | stdscr.addstr(i, 0, line)
57 |
58 | y, _ = stdscr.getyx()
59 | stdscr.addstr(y+1, 0, '------------------')
60 | stdscr.addstr(y+2, 0, task_result)
61 |
62 | def draw_summary(stdscr, objective, tasks, start, num):
63 | stdscr.box()
64 | summary_text = f'{len(tasks)} tasks ({start}-{num}) | {objective}'
65 | stdscr.addstr(1, 1, summary_text[:stdscr.getmaxyx()[1] - 2])
66 |
67 | def main(stdscr):
68 | # Configure OpenAI
69 | openai.api_key = OPENAI_API_KEY
70 |
71 | # Initialize Pinecone
72 | pinecone.init(api_key=PINECONE_API_KEY)
73 |
74 | # Connect to the objective index
75 | index = pinecone.Index(PINECONE_TABLE_NAME)
76 |
77 | curses.curs_set(0)
78 | stdscr.timeout(1000)
79 |
80 | h, w = stdscr.getmaxyx()
81 | left_w = w // 2
82 | visible_lines = h - 3
83 |
84 | scroll_pos = 0
85 | selected = 0
86 |
87 | # Parse command-line arguments
88 | parser = argparse.ArgumentParser(description="Query Pinecone index using a string.")
89 | parser.add_argument('objective', nargs='*', metavar='', help='''
90 | main objective description. Doesn\'t need to be quoted.
91 | if not specified, get objective from environment.
92 | ''', default=[os.getenv("OBJECTIVE", "")])
93 | args = parser.parse_args()
94 |
95 | # Query records from the index
96 | objective = ' '.join(args.objective).strip().replace("\n", " ")
97 | retrieved_tasks = query_records(index, get_ada_embedding(objective))
98 |
99 | while True:
100 | stdscr.clear()
101 | draw_tasks(stdscr.subwin(h-3, left_w, 0, 0), retrieved_tasks, scroll_pos, selected)
102 | draw_result(stdscr.subwin(h, w - left_w, 0, left_w), retrieved_tasks[selected])
103 | draw_summary(stdscr.subwin(3, left_w, h - 3, 0), objective, retrieved_tasks, scroll_pos+1, scroll_pos+h-3)
104 |
105 | stdscr.refresh()
106 | key = stdscr.getch()
107 |
108 | if key == ord('q') or key == 27:
109 | break
110 | elif key == curses.KEY_UP and selected > 0:
111 | selected -= 1
112 | if selected < scroll_pos:
113 | scroll_pos -= 1
114 | elif key == curses.KEY_DOWN and selected < len(retrieved_tasks) - 1:
115 | selected += 1
116 | if selected - scroll_pos >= visible_lines:
117 | scroll_pos += 1
118 |
119 | curses.wrapper(main)
--------------------------------------------------------------------------------
/workspace/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/workspace/.gitkeep
--------------------------------------------------------------------------------
/workspace_backup/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saten-private/BabyCommandAGI/e4d78243b8f14611bc570cd8ff103cc6895ae7a7/workspace_backup/.gitkeep
--------------------------------------------------------------------------------