├── .autodoc └── docs │ ├── data │ ├── args.json │ ├── docstore.json │ └── hnswlib.index │ ├── json │ ├── src │ │ ├── cli │ │ │ ├── commands │ │ │ │ ├── init │ │ │ │ │ ├── index.json │ │ │ │ │ └── summary.json │ │ │ │ ├── run │ │ │ │ │ ├── index.json │ │ │ │ │ └── summary.json │ │ │ │ └── summary.json │ │ │ ├── spinner.json │ │ │ ├── summary.json │ │ │ └── utils │ │ │ │ ├── APIRateLimit.json │ │ │ │ ├── LLMUtil.json │ │ │ │ ├── WaitUtil.json │ │ │ │ └── summary.json │ │ ├── index.json │ │ ├── langchain │ │ │ ├── hnswlib.json │ │ │ └── summary.json │ │ ├── summary.json │ │ └── types.json │ └── tsconfig.json │ └── markdown │ ├── babyagi.config.md │ ├── src │ ├── cli │ │ ├── commands │ │ │ ├── init │ │ │ │ ├── index.md │ │ │ │ └── summary.md │ │ │ ├── run │ │ │ │ ├── index.md │ │ │ │ └── summary.md │ │ │ └── summary.md │ │ ├── spinner.md │ │ ├── summary.md │ │ └── utils │ │ │ ├── APIRateLimit.md │ │ │ ├── LLMUtil.md │ │ │ ├── WaitUtil.md │ │ │ └── summary.md │ ├── index.md │ ├── langchain │ │ ├── hnswlib.md │ │ └── summary.md │ ├── summary.md │ └── types.md │ └── tsconfig.md ├── .eslintrc.cjs ├── .gitignore ├── LICENSE ├── README.md ├── assets └── babyagi-config.png ├── autodoc.config.json ├── package-lock.json ├── package.json ├── src ├── cli │ ├── commands │ │ ├── init │ │ │ └── index.ts │ │ └── run │ │ │ └── index.ts │ ├── spinner.ts │ └── utils │ │ ├── APIRateLimit.ts │ │ ├── LLMUtil.ts │ │ └── WaitUtil.ts ├── index.ts ├── langchain │ └── hnswlib.ts └── types.ts └── tsconfig.json /.autodoc/docs/data/args.json: -------------------------------------------------------------------------------- 1 | {"space":"cosine","numDimensions":1536} -------------------------------------------------------------------------------- /.autodoc/docs/data/hnswlib.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/context-labs/babyagi-ts/5300635caee5a923fdbda31926504ac73d772f24/.autodoc/docs/data/hnswlib.index 
-------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/commands/init/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "index.ts", 3 | "filePath": "src/cli/commands/init/index.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/commands/init/index.ts", 5 | "summary": "This code is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file.\n\nThe `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`.\n\nThe `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration.\n\nNext, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (language learning model). The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user's input is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThis code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.", 6 | "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the BabyAGI project. It takes an optional `config` parameter and returns a new configuration object with default values for any missing properties.\n\n2. **How does the `init` function handle existing `babyagi.config.json` files?**\n\n The `init` function checks if a `babyagi.config.json` file already exists in the specified location. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n3. **What are the available LLM models in the `init` function's `questions` array?**\n\n The available LLM models are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user is prompted to select one of these models during the initialization process." 
7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/commands/init/summary.json: -------------------------------------------------------------------------------- 1 | { 2 | "folderName": "init", 3 | "folderPath": ".autodoc/docs/json/src/cli/commands/init", 4 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands/init", 5 | "files": [ 6 | { 7 | "fileName": "index.ts", 8 | "filePath": "src/cli/commands/init/index.ts", 9 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/commands/init/index.ts", 10 | "summary": "This code is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file.\n\nThe `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`.\n\nThe `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration.\n\nNext, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (language learning model). The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user's input is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThis code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.", 11 | "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the BabyAGI project. It takes an optional `config` parameter and returns a new configuration object with default values for any missing properties.\n\n2. **How does the `init` function handle existing `babyagi.config.json` files?**\n\n The `init` function checks if a `babyagi.config.json` file already exists in the specified location. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n3. **What are the available LLM models in the `init` function's `questions` array?**\n\n The available LLM models are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user is prompted to select one of these models during the initialization process." 12 | } 13 | ], 14 | "folders": [], 15 | "summary": "The `init` command in the `index.ts` file is responsible for initializing and configuring a BabyAGI agent. 
It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. This code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.\n\nThe `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`.\n\nThe `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration.\n\nNext, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (language learning model). The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user's input is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThis code plays a crucial role in the babyagi-ts project, as it allows users to set up their agent's configuration interactively. The `init` command ensures that the agent is properly configured before starting, which helps prevent potential issues during runtime. Additionally, the interactive nature of the command makes it easy for users to customize their agent's behavior and underlying language model, enabling them to create agents tailored to their specific needs.", 16 | "questions": "" 17 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/commands/run/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "index.ts", 3 | "filePath": "src/cli/commands/run/index.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/commands/run/index.ts", 5 | "summary": "This code defines a module for the `babyagi-ts` project that manages the execution of tasks by an AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. The main purpose of this code is to create, prioritize, and execute tasks based on the given objective and initial task.\n\nThe `run` function performs the following steps:\n\n1. 
Initialize the vector store, which is used to store the results of completed tasks. If the vector store does not exist, it is created with an initial document.\n\n2. Define the initial task list with the given `initialTask`.\n\n3. Define three agent functions: `taskCreationAgent`, `prioritizationAgent`, and `executionAgent`. These functions are responsible for creating new tasks based on the results of completed tasks, prioritizing the task list, and executing tasks, respectively.\n\n4. Define a `contextAgent` function, which retrieves the top completed tasks related to the given query.\n\n5. Enter an infinite loop that performs the following steps:\n\n a. If there are tasks in the task list, print the task list and proceed with the next steps. Otherwise, wait for 1 second and check again.\n\n b. Pop the first task from the task list and execute it using the `executionAgent` function. Store the result in the vector store.\n\n c. Create new tasks based on the result using the `taskCreationAgent` function and add them to the task list.\n\n d. Prioritize the task list using the `prioritizationAgent` function.\n\nHere's an example of how the `run` function might be used:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system will start with the initial task of summarizing the first paragraph and continue to create, prioritize, and execute tasks based on the given objective.", 6 | "questions": "1. **Question:** What is the purpose of the `taskCreationAgent` function and how does it work?\n **Answer:** The `taskCreationAgent` function is responsible for creating new tasks based on the result of an execution agent. 
It takes the objective, result, task description, and a list of incomplete tasks as input, and returns an array of new tasks that do not overlap with the incomplete tasks.\n\n2. **Question:** How does the `contextAgent` function work and what is its role in the code?\n **Answer:** The `contextAgent` function is responsible for providing context to the execution agent. It takes a query and the number of top results as input, creates an embedding for the query, and performs a similarity search on the vector store. It returns a sorted list of tasks based on their similarity scores.\n\n3. **Question:** What is the purpose of the `vectorStore` and how is it initialized?\n **Answer:** The `vectorStore` is used to store and manage the embeddings of tasks and their results. It is initialized by either loading an existing vector store from the specified path or creating a new one with a sample document, and then saving it to the specified path." 7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/commands/run/summary.json: -------------------------------------------------------------------------------- 1 | { 2 | "folderName": "run", 3 | "folderPath": ".autodoc/docs/json/src/cli/commands/run", 4 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands/run", 5 | "files": [ 6 | { 7 | "fileName": "index.ts", 8 | "filePath": "src/cli/commands/run/index.ts", 9 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/commands/run/index.ts", 10 | "summary": "This code defines a module for the `babyagi-ts` project that manages the execution of tasks by an AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. The main purpose of this code is to create, prioritize, and execute tasks based on the given objective and initial task.\n\nThe `run` function performs the following steps:\n\n1. 
Initialize the vector store, which is used to store the results of completed tasks. If the vector store does not exist, it is created with an initial document.\n\n2. Define the initial task list with the given `initialTask`.\n\n3. Define three agent functions: `taskCreationAgent`, `prioritizationAgent`, and `executionAgent`. These functions are responsible for creating new tasks based on the results of completed tasks, prioritizing the task list, and executing tasks, respectively.\n\n4. Define a `contextAgent` function, which retrieves the top completed tasks related to the given query.\n\n5. Enter an infinite loop that performs the following steps:\n\n a. If there are tasks in the task list, print the task list and proceed with the next steps. Otherwise, wait for 1 second and check again.\n\n b. Pop the first task from the task list and execute it using the `executionAgent` function. Store the result in the vector store.\n\n c. Create new tasks based on the result using the `taskCreationAgent` function and add them to the task list.\n\n d. Prioritize the task list using the `prioritizationAgent` function.\n\nHere's an example of how the `run` function might be used:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system will start with the initial task of summarizing the first paragraph and continue to create, prioritize, and execute tasks based on the given objective.", 11 | "questions": "1. **Question:** What is the purpose of the `taskCreationAgent` function and how does it work?\n **Answer:** The `taskCreationAgent` function is responsible for creating new tasks based on the result of an execution agent. 
It takes the objective, result, task description, and a list of incomplete tasks as input, and returns an array of new tasks that do not overlap with the incomplete tasks.\n\n2. **Question:** How does the `contextAgent` function work and what is its role in the code?\n **Answer:** The `contextAgent` function is responsible for providing context to the execution agent. It takes a query and the number of top results as input, creates an embedding for the query, and performs a similarity search on the vector store. It returns a sorted list of tasks based on their similarity scores.\n\n3. **Question:** What is the purpose of the `vectorStore` and how is it initialized?\n **Answer:** The `vectorStore` is used to store and manage the embeddings of tasks and their results. It is initialized by either loading an existing vector store from the specified path or creating a new one with a sample document, and then saving it to the specified path." 12 | } 13 | ], 14 | "folders": [], 15 | "summary": "The `index.ts` file in the `run` folder is a crucial part of the `babyagi-ts` project, as it manages the execution of tasks by the AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. This function is responsible for creating, prioritizing, and executing tasks based on the given objective and initial task.\n\nThe `run` function follows these steps:\n\n1. Initializes the vector store for storing the results of completed tasks.\n2. Defines the initial task list with the given `initialTask`.\n3. Defines agent functions for task creation, prioritization, and execution.\n4. Defines a `contextAgent` function for retrieving top completed tasks related to a query.\n5. 
Enters an infinite loop that executes tasks, creates new tasks based on results, and prioritizes the task list.\n\nHere's an example of how the `run` function might be used:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system starts with the initial task of summarizing the first paragraph and continues to create, prioritize, and execute tasks based on the given objective.\n\nThe `run` function interacts with other parts of the `babyagi-ts` project by utilizing the agent functions and the vector store. The agent functions are responsible for creating new tasks, prioritizing the task list, and executing tasks. The vector store is used to store the results of completed tasks, which can be used by the `contextAgent` function to retrieve top completed tasks related to a query.\n\nThis module is essential for the overall functioning of the `babyagi-ts` project, as it drives the AI system's task execution process. 
Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system.", 16 | "questions": "" 17 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/commands/summary.json: -------------------------------------------------------------------------------- 1 | { 2 | "folderName": "commands", 3 | "folderPath": ".autodoc/docs/json/src/cli/commands", 4 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands", 5 | "files": [], 6 | "folders": [ 7 | { 8 | "folderName": "init", 9 | "folderPath": ".autodoc/docs/json/src/cli/commands/init", 10 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands/init", 11 | "files": [ 12 | { 13 | "fileName": "index.ts", 14 | "filePath": "src/cli/commands/init/index.ts", 15 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/commands/init/index.ts", 16 | "summary": "This code is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file.\n\nThe `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`.\n\nThe `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. 
If it does, the user is prompted to confirm whether they want to overwrite the existing configuration.\n\nNext, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (language learning model). The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user's input is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThis code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.", 17 | "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the BabyAGI project. It takes an optional `config` parameter and returns a new configuration object with default values for any missing properties.\n\n2. **How does the `init` function handle existing `babyagi.config.json` files?**\n\n The `init` function checks if a `babyagi.config.json` file already exists in the specified location. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n3. 
**What are the available LLM models in the `init` function's `questions` array?**\n\n The available LLM models are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user is prompted to select one of these models during the initialization process." 18 | } 19 | ], 20 | "folders": [], 21 | "summary": "The `init` command in the `index.ts` file is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. This code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.\n\nThe `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`.\n\nThe `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration.\n\nNext, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (language learning model). The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user's input is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThis code plays a crucial role in the babyagi-ts project, as it allows users to set up their agent's configuration interactively. The `init` command ensures that the agent is properly configured before starting, which helps prevent potential issues during runtime. Additionally, the interactive nature of the command makes it easy for users to customize their agent's behavior and underlying language model, enabling them to create agents tailored to their specific needs.", 22 | "questions": "" 23 | }, 24 | { 25 | "folderName": "run", 26 | "folderPath": ".autodoc/docs/json/src/cli/commands/run", 27 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands/run", 28 | "files": [ 29 | { 30 | "fileName": "index.ts", 31 | "filePath": "src/cli/commands/run/index.ts", 32 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/commands/run/index.ts", 33 | "summary": "This code defines a module for the `babyagi-ts` project that manages the execution of tasks by an AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. The main purpose of this code is to create, prioritize, and execute tasks based on the given objective and initial task.\n\nThe `run` function performs the following steps:\n\n1. 
Initialize the vector store, which is used to store the results of completed tasks. If the vector store does not exist, it is created with an initial document.\n\n2. Define the initial task list with the given `initialTask`.\n\n3. Define three agent functions: `taskCreationAgent`, `prioritizationAgent`, and `executionAgent`. These functions are responsible for creating new tasks based on the results of completed tasks, prioritizing the task list, and executing tasks, respectively.\n\n4. Define a `contextAgent` function, which retrieves the top completed tasks related to the given query.\n\n5. Enter an infinite loop that performs the following steps:\n\n a. If there are tasks in the task list, print the task list and proceed with the next steps. Otherwise, wait for 1 second and check again.\n\n b. Pop the first task from the task list and execute it using the `executionAgent` function. Store the result in the vector store.\n\n c. Create new tasks based on the result using the `taskCreationAgent` function and add them to the task list.\n\n d. Prioritize the task list using the `prioritizationAgent` function.\n\nHere's an example of how the `run` function might be used:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system will start with the initial task of summarizing the first paragraph and continue to create, prioritize, and execute tasks based on the given objective.", 34 | "questions": "1. **Question:** What is the purpose of the `taskCreationAgent` function and how does it work?\n **Answer:** The `taskCreationAgent` function is responsible for creating new tasks based on the result of an execution agent. 
It takes the objective, result, task description, and a list of incomplete tasks as input, and returns an array of new tasks that do not overlap with the incomplete tasks.\n\n2. **Question:** How does the `contextAgent` function work and what is its role in the code?\n **Answer:** The `contextAgent` function is responsible for providing context to the execution agent. It takes a query and the number of top results as input, creates an embedding for the query, and performs a similarity search on the vector store. It returns a sorted list of tasks based on their similarity scores.\n\n3. **Question:** What is the purpose of the `vectorStore` and how is it initialized?\n **Answer:** The `vectorStore` is used to store and manage the embeddings of tasks and their results. It is initialized by either loading an existing vector store from the specified path or creating a new one with a sample document, and then saving it to the specified path." 35 | } 36 | ], 37 | "folders": [], 38 | "summary": "The `index.ts` file in the `run` folder is a crucial part of the `babyagi-ts` project, as it manages the execution of tasks by the AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. This function is responsible for creating, prioritizing, and executing tasks based on the given objective and initial task.\n\nThe `run` function follows these steps:\n\n1. Initializes the vector store for storing the results of completed tasks.\n2. Defines the initial task list with the given `initialTask`.\n3. Defines agent functions for task creation, prioritization, and execution.\n4. Defines a `contextAgent` function for retrieving top completed tasks related to a query.\n5. 
Enters an infinite loop that executes tasks, creates new tasks based on results, and prioritizes the task list.\n\nHere's an example of how the `run` function might be used:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system starts with the initial task of summarizing the first paragraph and continues to create, prioritize, and execute tasks based on the given objective.\n\nThe `run` function interacts with other parts of the `babyagi-ts` project by utilizing the agent functions and the vector store. The agent functions are responsible for creating new tasks, prioritizing the task list, and executing tasks. The vector store is used to store the results of completed tasks, which can be used by the `contextAgent` function to retrieve top completed tasks related to a query.\n\nThis module is essential for the overall functioning of the `babyagi-ts` project, as it drives the AI system's task execution process. Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system.", 39 | "questions": "" 40 | } 41 | ], 42 | "summary": "The `.autodoc/docs/json/src/cli/commands` folder contains essential code for the `babyagi-ts` project, specifically for initializing and running BabyAGI agents. It consists of two subfolders: `init` and `run`.\n\nThe `init` subfolder contains the `index.ts` file, which is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. 
This code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThe `run` subfolder contains the `index.ts` file, which manages the execution of tasks by the AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. This function is responsible for creating, prioritizing, and executing tasks based on the given objective and initial task.\n\nExample usage:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system starts with the initial task of summarizing the first paragraph and continues to create, prioritize, and execute tasks based on the given objective.\n\nThe `run` function interacts with other parts of the `babyagi-ts` project by utilizing the agent functions and the vector store. The agent functions are responsible for creating new tasks, prioritizing the task list, and executing tasks. The vector store is used to store the results of completed tasks, which can be used by the `contextAgent` function to retrieve top completed tasks related to a query.\n\nThis folder plays a crucial role in the babyagi-ts project, as it allows users to set up and run their agents with the desired configuration and objectives. 
Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system.", 43 | "questions": "" 44 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/spinner.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "spinner.ts", 3 | "filePath": "src/cli/spinner.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/spinner.ts", 5 | "summary": "This code is responsible for managing a spinner in the `babyagi-ts` project, which is a visual element that indicates a loading or processing state. The spinner is created using the `ora` library, which provides a simple and customizable way to create and manage spinners in the terminal.\n\nThe code starts by importing the `ora` library and creating a singleton spinner instance with the 'dots' style. This ensures that there will only be one spinner active at any given time, preventing multiple spinners from overlapping or interfering with each other.\n\nThere are several exported functions that allow other parts of the project to interact with the spinner:\n\n- `updateSpinnerText(message: string)`: This function updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n- `stopSpinner()`: This function stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n- `spinnerError(message?: string)`: This function stops the spinner and marks it as failed, displaying an error message if provided. 
This is useful for indicating that an operation has failed.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n- `spinnerSuccess(message?: string)`: This function stops the spinner and marks it as successful, displaying a success message if provided. This is useful for indicating that an operation has completed successfully.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n- `spinnerInfo(message: string)`: This function displays an informational message without affecting the spinner's state. This is useful for providing additional context or updates during a long-running operation.\n\n Example usage:\n ```javascript\n spinnerInfo('Processing data...');\n ```\n\nOverall, this code provides a convenient way for the `babyagi-ts` project to manage a spinner, allowing it to display loading states and provide feedback to the user during various operations.", 6 | "questions": "1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the command line interface (CLI) to provide a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **What are the different functions exported in this module and what do they do?**\n\n - `updateSpinnerText`: Updates the spinner's text with the given message. If the spinner is not spinning, it starts the spinner with the given message.\n - `stopSpinner`: Stops the spinner if it is spinning.\n - `spinnerError`: If the spinner is spinning, it stops the spinner and marks it as failed with an optional message.\n - `spinnerSuccess`: If the spinner is spinning, it stops the spinner and marks it as successful with an optional message.\n - `spinnerInfo`: Displays an info message with the spinner.\n\n3. 
**What is the purpose of the `spinner.isSpinning` condition in the functions?**\n\n The `spinner.isSpinning` condition is used to check if the spinner is currently spinning before performing certain actions like updating the text, stopping the spinner, or marking it as failed or successful. This ensures that the spinner's state is managed correctly and prevents any unintended behavior." 7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/summary.json: -------------------------------------------------------------------------------- 1 | { 2 | "folderName": "cli", 3 | "folderPath": ".autodoc/docs/json/src/cli", 4 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli", 5 | "files": [ 6 | { 7 | "fileName": "spinner.ts", 8 | "filePath": "src/cli/spinner.ts", 9 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/spinner.ts", 10 | "summary": "This code is responsible for managing a spinner in the `babyagi-ts` project, which is a visual element that indicates a loading or processing state. The spinner is created using the `ora` library, which provides a simple and customizable way to create and manage spinners in the terminal.\n\nThe code starts by importing the `ora` library and creating a singleton spinner instance with the 'dots' style. This ensures that there will only be one spinner active at any given time, preventing multiple spinners from overlapping or interfering with each other.\n\nThere are several exported functions that allow other parts of the project to interact with the spinner:\n\n- `updateSpinnerText(message: string)`: This function updates the spinner's text with the provided message. 
If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message.\n\n Example usage:\n ```javascript\n updateSpinnerText('Loading data...');\n ```\n\n- `stopSpinner()`: This function stops the spinner if it is currently spinning.\n\n Example usage:\n ```javascript\n stopSpinner();\n ```\n\n- `spinnerError(message?: string)`: This function stops the spinner and marks it as failed, displaying an error message if provided. This is useful for indicating that an operation has failed.\n\n Example usage:\n ```javascript\n spinnerError('Failed to load data');\n ```\n\n- `spinnerSuccess(message?: string)`: This function stops the spinner and marks it as successful, displaying a success message if provided. This is useful for indicating that an operation has completed successfully.\n\n Example usage:\n ```javascript\n spinnerSuccess('Data loaded successfully');\n ```\n\n- `spinnerInfo(message: string)`: This function displays an informational message without affecting the spinner's state. This is useful for providing additional context or updates during a long-running operation.\n\n Example usage:\n ```javascript\n spinnerInfo('Processing data...');\n ```\n\nOverall, this code provides a convenient way for the `babyagi-ts` project to manage a spinner, allowing it to display loading states and provide feedback to the user during various operations.", 11 | "questions": "1. **What is the purpose of the `ora` package in this code?**\n\n The `ora` package is used to create a spinner in the command line interface (CLI) to provide a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style.\n\n2. **What are the different functions exported in this module and what do they do?**\n\n - `updateSpinnerText`: Updates the spinner's text with the given message. 
If the spinner is not spinning, it starts the spinner with the given message.\n - `stopSpinner`: Stops the spinner if it is spinning.\n - `spinnerError`: If the spinner is spinning, it stops the spinner and marks it as failed with an optional message.\n - `spinnerSuccess`: If the spinner is spinning, it stops the spinner and marks it as successful with an optional message.\n - `spinnerInfo`: Displays an info message with the spinner.\n\n3. **What is the purpose of the `spinner.isSpinning` condition in the functions?**\n\n The `spinner.isSpinning` condition is used to check if the spinner is currently spinning before performing certain actions like updating the text, stopping the spinner, or marking it as failed or successful. This ensures that the spinner's state is managed correctly and prevents any unintended behavior." 12 | } 13 | ], 14 | "folders": [ 15 | { 16 | "folderName": "commands", 17 | "folderPath": ".autodoc/docs/json/src/cli/commands", 18 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands", 19 | "files": [], 20 | "folders": [ 21 | { 22 | "folderName": "init", 23 | "folderPath": ".autodoc/docs/json/src/cli/commands/init", 24 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands/init", 25 | "files": [ 26 | { 27 | "fileName": "index.ts", 28 | "filePath": "src/cli/commands/init/index.ts", 29 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/commands/init/index.ts", 30 | "summary": "This code is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file.\n\nThe `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. 
It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`.\n\nThe `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration.\n\nNext, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (large language model). The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user's input is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThis code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.", 31 | "questions": "1. **What is the purpose of the `makeConfigTemplate` function?**\n\n The `makeConfigTemplate` function is used to create a default configuration object for the BabyAGI project. 
It takes an optional `config` parameter and returns a new configuration object with default values for any missing properties.\n\n2. **How does the `init` function handle existing `babyagi.config.json` files?**\n\n The `init` function checks if a `babyagi.config.json` file already exists in the specified location. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits.\n\n3. **What are the available LLM models in the `init` function's `questions` array?**\n\n The available LLM models are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user is prompted to select one of these models during the initialization process." 32 | } 33 | ], 34 | "folders": [], 35 | "summary": "The `init` command in the `index.ts` file is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. This code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.\n\nThe `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`.\n\nThe `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. 
If it does, the user is prompted to confirm whether they want to overwrite the existing configuration.\n\nNext, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (large language model). The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user's input is then used to create a new configuration object using the `makeConfigTemplate` function.\n\nFinally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThis code plays a crucial role in the babyagi-ts project, as it allows users to set up their agent's configuration interactively. The `init` command ensures that the agent is properly configured before starting, which helps prevent potential issues during runtime. 
Additionally, the interactive nature of the command makes it easy for users to customize their agent's behavior and underlying language model, enabling them to create agents tailored to their specific needs.", 36 | "questions": "" 37 | }, 38 | { 39 | "folderName": "run", 40 | "folderPath": ".autodoc/docs/json/src/cli/commands/run", 41 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands/run", 42 | "files": [ 43 | { 44 | "fileName": "index.ts", 45 | "filePath": "src/cli/commands/run/index.ts", 46 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/commands/run/index.ts", 47 | "summary": "This code defines a module for the `babyagi-ts` project that manages the execution of tasks by an AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. The main purpose of this code is to create, prioritize, and execute tasks based on the given objective and initial task.\n\nThe `run` function performs the following steps:\n\n1. Initialize the vector store, which is used to store the results of completed tasks. If the vector store does not exist, it is created with an initial document.\n\n2. Define the initial task list with the given `initialTask`.\n\n3. Define three agent functions: `taskCreationAgent`, `prioritizationAgent`, and `executionAgent`. These functions are responsible for creating new tasks based on the results of completed tasks, prioritizing the task list, and executing tasks, respectively.\n\n4. Define a `contextAgent` function, which retrieves the top completed tasks related to the given query.\n\n5. Enter an infinite loop that performs the following steps:\n\n a. If there are tasks in the task list, print the task list and proceed with the next steps. Otherwise, wait for 1 second and check again.\n\n b. Pop the first task from the task list and execute it using the `executionAgent` function. Store the result in the vector store.\n\n c. 
Create new tasks based on the result using the `taskCreationAgent` function and add them to the task list.\n\n d. Prioritize the task list using the `prioritizationAgent` function.\n\nHere's an example of how the `run` function might be used:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system will start with the initial task of summarizing the first paragraph and continue to create, prioritize, and execute tasks based on the given objective.", 48 | "questions": "1. **Question:** What is the purpose of the `taskCreationAgent` function and how does it work?\n **Answer:** The `taskCreationAgent` function is responsible for creating new tasks based on the result of an execution agent. It takes the objective, result, task description, and a list of incomplete tasks as input, and returns an array of new tasks that do not overlap with the incomplete tasks.\n\n2. **Question:** How does the `contextAgent` function work and what is its role in the code?\n **Answer:** The `contextAgent` function is responsible for providing context to the execution agent. It takes a query and the number of top results as input, creates an embedding for the query, and performs a similarity search on the vector store. It returns a sorted list of tasks based on their similarity scores.\n\n3. **Question:** What is the purpose of the `vectorStore` and how is it initialized?\n **Answer:** The `vectorStore` is used to store and manage the embeddings of tasks and their results. It is initialized by either loading an existing vector store from the specified path or creating a new one with a sample document, and then saving it to the specified path." 
49 | } 50 | ], 51 | "folders": [], 52 | "summary": "The `index.ts` file in the `run` folder is a crucial part of the `babyagi-ts` project, as it manages the execution of tasks by the AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. This function is responsible for creating, prioritizing, and executing tasks based on the given objective and initial task.\n\nThe `run` function follows these steps:\n\n1. Initializes the vector store for storing the results of completed tasks.\n2. Defines the initial task list with the given `initialTask`.\n3. Defines agent functions for task creation, prioritization, and execution.\n4. Defines a `contextAgent` function for retrieving top completed tasks related to a query.\n5. Enters an infinite loop that executes tasks, creates new tasks based on results, and prioritizes the task list.\n\nHere's an example of how the `run` function might be used:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system starts with the initial task of summarizing the first paragraph and continues to create, prioritize, and execute tasks based on the given objective.\n\nThe `run` function interacts with other parts of the `babyagi-ts` project by utilizing the agent functions and the vector store. The agent functions are responsible for creating new tasks, prioritizing the task list, and executing tasks. The vector store is used to store the results of completed tasks, which can be used by the `contextAgent` function to retrieve top completed tasks related to a query.\n\nThis module is essential for the overall functioning of the `babyagi-ts` project, as it drives the AI system's task execution process. 
Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system.", 53 | "questions": "" 54 | } 55 | ], 56 | "summary": "The `.autodoc/docs/json/src/cli/commands` folder contains essential code for the `babyagi-ts` project, specifically for initializing and running BabyAGI agents. It consists of two subfolders: `init` and `run`.\n\nThe `init` subfolder contains the `index.ts` file, which is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. This code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model.\n\nExample usage:\n\n```javascript\nimport { init } from './path/to/this/file';\n\n// Initialize a new BabyAGI agent with default configuration\nawait init();\n\n// Initialize a new BabyAGI agent with custom configuration\nawait init({\n name: 'MyAgent',\n objective: 'Answer questions',\n initialTask: 'Learn about the topic',\n llm: LLMModels.GPT3,\n root: './my-agent',\n});\n```\n\nThe `run` subfolder contains the `index.ts` file, which manages the execution of tasks by the AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. 
This function is responsible for creating, prioritizing, and executing tasks based on the given objective and initial task.\n\nExample usage:\n\n```javascript\nimport babyagi from 'babyagi-ts';\n\nconst config = {\n objective: 'Create a summary of a given text',\n initialTask: 'Summarize the first paragraph',\n llm: 'gpt-3.5-turbo',\n root: './data',\n};\n\nbabyagi.run(config);\n```\n\nIn this example, the AI system starts with the initial task of summarizing the first paragraph and continues to create, prioritize, and execute tasks based on the given objective.\n\nThe `run` function interacts with other parts of the `babyagi-ts` project by utilizing the agent functions and the vector store. The agent functions are responsible for creating new tasks, prioritizing the task list, and executing tasks. The vector store is used to store the results of completed tasks, which can be used by the `contextAgent` function to retrieve top completed tasks related to a query.\n\nThis folder plays a crucial role in the babyagi-ts project, as it allows users to set up and run their agents with the desired configuration and objectives. Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system.", 57 | "questions": "" 58 | }, 59 | { 60 | "folderName": "utils", 61 | "folderPath": ".autodoc/docs/json/src/cli/utils", 62 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/utils", 63 | "files": [ 64 | { 65 | "fileName": "APIRateLimit.ts", 66 | "filePath": "src/cli/utils/APIRateLimit.ts", 67 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/APIRateLimit.ts", 68 | "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. 
This is useful in scenarios where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server.\n\nThe class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. This method takes a function `apiFunction` that returns a Promise, and it returns a new Promise. The purpose of this method is to queue the API calls and execute them in a controlled manner, ensuring that the number of concurrent calls does not exceed the specified limit.\n\nWhen `callApi` is called, it wraps the provided `apiFunction` in a new function `executeCall`, which is then added to the internal queue. If the number of in-progress calls is less than the maximum allowed, the `dequeueAndExecute` method is called to start processing the queued calls.\n\nThe `dequeueAndExecute` method dequeues and executes the API calls as long as there are calls in the queue and the number of in-progress calls is below the limit. When a call is executed, the `inProgress` counter is incremented, and the result of the API call is used to resolve or reject the Promise returned by `callApi`. 
Once the call is completed, the `inProgress` counter is decremented, and the `dequeueAndExecute` method is called again to process any remaining calls in the queue.\n\nHere's an example of how this class can be used:\n\n```typescript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id: number): Promise {\n // Make an API call to fetch data for the given ID\n}\n\nasync function fetchMultipleData(ids: number[]): Promise {\n const results = await Promise.all(ids.map(id => apiRateLimiter.callApi(() => fetchData(id))));\n return results;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls made by the `fetchMultipleData` function, ensuring that no more than 10 calls are made at the same time.", 69 | "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and manages the execution of queued calls based on the maximum number of concurrent calls allowed. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How does the `dequeueAndExecute` method function?**\n\n The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if the number of in-progress calls is below the maximum allowed concurrent calls. If both conditions are met, it dequeues the next call and executes it. This method is called after each API call is completed to ensure that the queue is continuously processed." 
70 | }, 71 | { 72 | "fileName": "LLMUtil.ts", 73 | "filePath": "src/cli/utils/LLMUtil.ts", 74 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/LLMUtil.ts", 75 | "summary": "This code is responsible for interacting with the OpenAI API to generate embeddings and completions using different language models. It imports the necessary classes and types from the `openai` package and the project's `types.js` file. The code checks if the `OPENAI_API_KEY` environment variable is set, and initializes the `openai` instance with the API key.\n\nThe `models` object contains details about three different language models (GPT3, GPT4, and GPT432k), including their names, input and output costs per 1K tokens, and maximum token lengths.\n\nThe `createEmbedding` function takes a string as input and returns a Promise that resolves to an array of numbers representing the embedding. It calls the `openai.createEmbedding` method with the input string and the model name `text-embedding-ada-002`.\n\nThe `CreateCompletionParams` interface defines the parameters for the `createCompletion` function, which generates completions based on a given prompt and other optional parameters. The function constructs a `messages` array with a single system message containing the prompt, and sends a POST request to the OpenAI API's `/v1/chat/completions` endpoint with the necessary headers and parameters. 
It then extracts the completion result from the API response and returns it as a string.\n\nExample usage of these functions in the larger project might involve generating embeddings for text inputs or generating completions for prompts using the specified language models:\n\n```javascript\nconst embedding = await createEmbedding(\"This is a sample text.\");\nconsole.log(embedding);\n\nconst completion = await createCompletion({\n model: LLMModels.GPT3,\n prompt: \"Write a short story about a robot.\",\n temperature: 0.7,\n max_tokens: 100,\n});\nconsole.log(completion);\n```\n\nThese functions can be used to integrate the OpenAI API's capabilities into the babyagi-ts project, enabling it to generate embeddings and completions for various use cases.", 76 | "questions": "1. **Question:** What is the purpose of the `models` object and its properties?\n **Answer:** The `models` object is a record that maps the names of different LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, inputCostPer1KTokens, outputCostPer1KTokens, and maxLength.\n\n2. **Question:** How does the `createEmbedding` function work and what does it return?\n **Answer:** The `createEmbedding` function takes a string value as input and sends a request to the OpenAI API to create an embedding for the given input using the 'text-embedding-ada-002' model. It returns a Promise that resolves to an array of numbers representing the embedding.\n\n3. **Question:** What is the purpose of the `createCompletion` function and what parameters does it accept?\n **Answer:** The `createCompletion` function is used to generate a completion for a given prompt using the OpenAI API. It accepts an object with properties such as model, prompt, temperature, max_tokens, top_p, frequency_penalty, and presence_penalty." 
77 | }, 78 | { 79 | "fileName": "WaitUtil.ts", 80 | "filePath": "src/cli/utils/WaitUtil.ts", 81 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/WaitUtil.ts", 82 | "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger babyagi-ts project. Both functions return Promises, making them suitable for use with `async/await` syntax.\n\n### wait function\n\nThe `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the Promise, and an optional `value` parameter, which defaults to `null`. The purpose of this function is to create a delay in the execution of asynchronous code. This can be useful in scenarios where you need to wait for a specific amount of time before proceeding with the next operation.\n\nExample usage:\n\n```javascript\nasync function example() {\n console.log(\"Starting...\");\n await wait(1000); // Wait for 1 second\n console.log(\"...Finished\");\n}\n```\n\n### forTrue function\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check the result of the `fn` function until it returns `true` or a maximum number of attempts (200 in this case) is reached. The function checks the result of `fn` every 50 milliseconds. If `fn` returns `true`, the Promise resolves with the value `true`. 
If the maximum number of attempts is reached without `fn` returning `true`, the Promise is rejected.\n\nThis function can be useful in scenarios where you need to wait for a specific condition to be met before proceeding with the next operation, such as waiting for an element to be visible on a web page or for a specific value to be present in a data store.\n\nExample usage:\n\n```javascript\nasync function waitForElement() {\n const elementExists = () => document.querySelector(\"#my-element\") !== null;\n try {\n await forTrue(elementExists);\n console.log(\"Element found!\");\n } catch {\n console.log(\"Element not found after waiting\");\n }\n}\n```\n\nIn summary, this file provides two utility functions that help manage asynchronous operations in the babyagi-ts project by introducing delays and waiting for specific conditions to be met.", 83 | "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise resolves.\n\n2. **How does the `forTrue` function work and what is its use case?**\n\n The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It repeatedly checks the result of `fn` every 50 milliseconds, and resolves a promise when `fn` returns `true`. It can be used to wait for a certain condition to become true before proceeding with the execution of the code.\n\n3. **Is there a limit to the number of times the `forTrue` function checks the result of `fn`?**\n\n Yes, the `forTrue` function checks the result of `fn` up to 200 times. If `fn` does not return `true` within these 200 checks, the promise is rejected." 
84 | } 85 | ], 86 | "folders": [], 87 | "summary": "The code in the `src/cli/utils` folder of the babyagi-ts project provides utility functions and classes to manage asynchronous operations, interact with the OpenAI API, and limit the number of concurrent API calls. These utilities can be used throughout the project to ensure efficient and controlled execution of various tasks.\n\n### APIRateLimit.ts\n\nThe `APIRateLimit` class helps manage and limit the number of concurrent API calls made by the application. This is useful when the API has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. Here's an example of how this class can be used:\n\n```typescript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id: number): Promise {\n // Make an API call to fetch data for the given ID\n}\n\nasync function fetchMultipleData(ids: number[]): Promise {\n const results = await Promise.all(ids.map(id => apiRateLimiter.callApi(() => fetchData(id))));\n return results;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls made by the `fetchMultipleData` function, ensuring that no more than 10 calls are made at the same time.\n\n### LLMUtil.ts\n\nThis code interacts with the OpenAI API to generate embeddings and completions using different language models. 
Example usage of these functions might involve generating embeddings for text inputs or generating completions for prompts using the specified language models:\n\n```javascript\nconst embedding = await createEmbedding(\"This is a sample text.\");\nconsole.log(embedding);\n\nconst completion = await createCompletion({\n model: LLMModels.GPT3,\n prompt: \"Write a short story about a robot.\",\n temperature: 0.7,\n max_tokens: 100,\n});\nconsole.log(completion);\n```\n\nThese functions can be used to integrate the OpenAI API's capabilities into the babyagi-ts project, enabling it to generate embeddings and completions for various use cases.\n\n### WaitUtil.ts\n\nThe `wait` and `forTrue` utility functions help manage asynchronous operations in the project by introducing delays and waiting for specific conditions to be met. Example usage of the `wait` function:\n\n```javascript\nasync function example() {\n console.log(\"Starting...\");\n await wait(1000); // Wait for 1 second\n console.log(\"...Finished\");\n}\n```\n\nExample usage of the `forTrue` function:\n\n```javascript\nasync function waitForElement() {\n const elementExists = () => document.querySelector(\"#my-element\") !== null;\n try {\n await forTrue(elementExists);\n console.log(\"Element found!\");\n } catch {\n console.log(\"Element not found after waiting\");\n }\n}\n```\n\nIn summary, the `src/cli/utils` folder provides utility functions and classes that help manage asynchronous operations, interact with the OpenAI API, and limit the number of concurrent API calls in the babyagi-ts project. These utilities can be used throughout the project to ensure efficient and controlled execution of various tasks.", 88 | "questions": "" 89 | } 90 | ], 91 | "summary": "The `src/cli` folder of the babyagi-ts project contains essential code for managing the command-line interface (CLI), providing utilities for interacting with the OpenAI API, and controlling the execution of BabyAGI agents. 
The code in this folder is organized into three main parts: the spinner, commands, and utilities.\n\nThe `spinner.ts` file manages a spinner, a visual element that indicates a loading or processing state in the terminal. It provides several exported functions to interact with the spinner, such as `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo`. These functions allow the project to display loading states and provide feedback to the user during various operations.\n\n```javascript\nimport { updateSpinnerText, stopSpinner } from './path/to/spinner';\n\nupdateSpinnerText('Loading data...');\n// Perform some operation\nstopSpinner();\n```\n\nThe `commands` subfolder contains code for initializing and running BabyAGI agents. The `init` subfolder provides an interactive CLI for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. The `run` subfolder manages the execution of tasks by the AI system, creating, prioritizing, and executing tasks based on the given objective and initial task.\n\n```javascript\nimport { init } from './path/to/init';\nawait init();\n\nimport babyagi from 'babyagi-ts';\nconst config = { /* ... */ };\nbabyagi.run(config);\n```\n\nThe `utils` folder provides utility functions and classes for managing asynchronous operations, interacting with the OpenAI API, and limiting the number of concurrent API calls. The `APIRateLimit` class helps manage and limit the number of concurrent API calls made by the application. The `LLMUtil.ts` file contains functions for generating embeddings and completions using different language models. 
The `WaitUtil.ts` file provides utility functions for managing asynchronous operations, such as `wait` and `forTrue`.\n\n```javascript\nimport { APIRateLimit } from './path/to/APIRateLimit';\nconst apiRateLimiter = new APIRateLimit(10);\n\nimport { createEmbedding, createCompletion } from './path/to/LLMUtil';\nconst embedding = await createEmbedding(\"Sample text\");\nconst completion = await createCompletion({ /* ... */ });\n\nimport { wait, forTrue } from './path/to/WaitUtil';\nawait wait(1000);\nawait forTrue(() => someCondition);\n```\n\nIn summary, the `src/cli` folder plays a crucial role in the babyagi-ts project, providing a convenient way to manage the CLI, interact with the OpenAI API, and control the execution of BabyAGI agents. Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system.", 92 | "questions": "" 93 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/utils/APIRateLimit.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "APIRateLimit.ts", 3 | "filePath": "src/cli/utils/APIRateLimit.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/APIRateLimit.ts", 5 | "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in scenarios where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server.\n\nThe class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. 
This method takes a function `apiFunction` that returns a Promise, and it returns a new Promise. The purpose of this method is to queue the API calls and execute them in a controlled manner, ensuring that the number of concurrent calls does not exceed the specified limit.\n\nWhen `callApi` is called, it wraps the provided `apiFunction` in a new function `executeCall`, which is then added to the internal queue. If the number of in-progress calls is less than the maximum allowed, the `dequeueAndExecute` method is called to start processing the queued calls.\n\nThe `dequeueAndExecute` method dequeues and executes the API calls as long as there are calls in the queue and the number of in-progress calls is below the limit. When a call is executed, the `inProgress` counter is incremented, and the result of the API call is used to resolve or reject the Promise returned by `callApi`. Once the call is completed, the `inProgress` counter is decremented, and the `dequeueAndExecute` method is called again to process any remaining calls in the queue.\n\nHere's an example of how this class can be used:\n\n```typescript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id: number): Promise {\n // Make an API call to fetch data for the given ID\n}\n\nasync function fetchMultipleData(ids: number[]): Promise {\n const results = await Promise.all(ids.map(id => apiRateLimiter.callApi(() => fetchData(id))));\n return results;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls made by the `fetchMultipleData` function, ensuring that no more than 10 calls are made at the same time.", 6 | "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. 
**How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and manages the execution of queued calls based on the maximum number of concurrent calls allowed. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How does the `dequeueAndExecute` method function?**\n\n The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if the number of in-progress calls is below the maximum allowed concurrent calls. If both conditions are met, it dequeues the next call and executes it. This method is called after each API call is completed to ensure that the queue is continuously processed." 7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/utils/LLMUtil.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "LLMUtil.ts", 3 | "filePath": "src/cli/utils/LLMUtil.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/LLMUtil.ts", 5 | "summary": "This code is responsible for interacting with the OpenAI API to generate embeddings and completions using different language models. It imports the necessary classes and types from the `openai` package and the project's `types.js` file. The code checks if the `OPENAI_API_KEY` environment variable is set, and initializes the `openai` instance with the API key.\n\nThe `models` object contains details about three different language models (GPT3, GPT4, and GPT432k), including their names, input and output costs per 1K tokens, and maximum token lengths.\n\nThe `createEmbedding` function takes a string as input and returns a Promise that resolves to an array of numbers representing the embedding. 
It calls the `openai.createEmbedding` method with the input string and the model name `text-embedding-ada-002`.\n\nThe `CreateCompletionParams` interface defines the parameters for the `createCompletion` function, which generates completions based on a given prompt and other optional parameters. The function constructs a `messages` array with a single system message containing the prompt, and sends a POST request to the OpenAI API's `/v1/chat/completions` endpoint with the necessary headers and parameters. It then extracts the completion result from the API response and returns it as a string.\n\nExample usage of these functions in the larger project might involve generating embeddings for text inputs or generating completions for prompts using the specified language models:\n\n```javascript\nconst embedding = await createEmbedding(\"This is a sample text.\");\nconsole.log(embedding);\n\nconst completion = await createCompletion({\n model: LLMModels.GPT3,\n prompt: \"Write a short story about a robot.\",\n temperature: 0.7,\n max_tokens: 100,\n});\nconsole.log(completion);\n```\n\nThese functions can be used to integrate the OpenAI API's capabilities into the babyagi-ts project, enabling it to generate embeddings and completions for various use cases.", 6 | "questions": "1. **Question:** What is the purpose of the `models` object and its properties?\n **Answer:** The `models` object is a record that maps the names of different LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, inputCostPer1KTokens, outputCostPer1KTokens, and maxLength.\n\n2. **Question:** How does the `createEmbedding` function work and what does it return?\n **Answer:** The `createEmbedding` function takes a string value as input and sends a request to the OpenAI API to create an embedding for the given input using the 'text-embedding-ada-002' model. It returns a Promise that resolves to an array of numbers representing the embedding.\n\n3. 
**Question:** What is the purpose of the `createCompletion` function and what parameters does it accept?\n **Answer:** The `createCompletion` function is used to generate a completion for a given prompt using the OpenAI API. It accepts an object with properties such as model, prompt, temperature, max_tokens, top_p, frequency_penalty, and presence_penalty." 7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/utils/WaitUtil.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "WaitUtil.ts", 3 | "filePath": "src/cli/utils/WaitUtil.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/WaitUtil.ts", 5 | "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger babyagi-ts project. Both functions return Promises, making them suitable for use with `async/await` syntax.\n\n### wait function\n\nThe `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the Promise, and an optional `value` parameter, which defaults to `null`. The purpose of this function is to create a delay in the execution of asynchronous code. This can be useful in scenarios where you need to wait for a specific amount of time before proceeding with the next operation.\n\nExample usage:\n\n```javascript\nasync function example() {\n console.log(\"Starting...\");\n await wait(1000); // Wait for 1 second\n console.log(\"...Finished\");\n}\n```\n\n### forTrue function\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check the result of the `fn` function until it returns `true` or a maximum number of attempts (200 in this case) is reached. The function checks the result of `fn` every 50 milliseconds. 
If `fn` returns `true`, the Promise resolves with the value `true`. If the maximum number of attempts is reached without `fn` returning `true`, the Promise is rejected.\n\nThis function can be useful in scenarios where you need to wait for a specific condition to be met before proceeding with the next operation, such as waiting for an element to be visible on a web page or for a specific value to be present in a data store.\n\nExample usage:\n\n```javascript\nasync function waitForElement() {\n const elementExists = () => document.querySelector(\"#my-element\") !== null;\n try {\n await forTrue(elementExists);\n console.log(\"Element found!\");\n } catch {\n console.log(\"Element not found after waiting\");\n }\n}\n```\n\nIn summary, this file provides two utility functions that help manage asynchronous operations in the babyagi-ts project by introducing delays and waiting for specific conditions to be met.", 6 | "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise resolves.\n\n2. **How does the `forTrue` function work and what is its use case?**\n\n The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It repeatedly checks the result of `fn` every 50 milliseconds, and resolves a promise when `fn` returns `true`. It can be used to wait for a certain condition to become true before proceeding with the execution of the code.\n\n3. **Is there a limit to the number of times the `forTrue` function checks the result of `fn`?**\n\n Yes, the `forTrue` function checks the result of `fn` up to 200 times. If `fn` does not return `true` within these 200 checks, the promise is rejected." 
7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/cli/utils/summary.json: -------------------------------------------------------------------------------- 1 | { 2 | "folderName": "utils", 3 | "folderPath": ".autodoc/docs/json/src/cli/utils", 4 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/utils", 5 | "files": [ 6 | { 7 | "fileName": "APIRateLimit.ts", 8 | "filePath": "src/cli/utils/APIRateLimit.ts", 9 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/APIRateLimit.ts", 10 | "summary": "The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in scenarios where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server.\n\nThe class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently.\n\nThe main method of this class is `callApi(apiFunction: () => Promise): Promise`. This method takes a function `apiFunction` that returns a Promise, and it returns a new Promise. The purpose of this method is to queue the API calls and execute them in a controlled manner, ensuring that the number of concurrent calls does not exceed the specified limit.\n\nWhen `callApi` is called, it wraps the provided `apiFunction` in a new function `executeCall`, which is then added to the internal queue. If the number of in-progress calls is less than the maximum allowed, the `dequeueAndExecute` method is called to start processing the queued calls.\n\nThe `dequeueAndExecute` method dequeues and executes the API calls as long as there are calls in the queue and the number of in-progress calls is below the limit. 
When a call is executed, the `inProgress` counter is incremented, and the result of the API call is used to resolve or reject the Promise returned by `callApi`. Once the call is completed, the `inProgress` counter is decremented, and the `dequeueAndExecute` method is called again to process any remaining calls in the queue.\n\nHere's an example of how this class can be used:\n\n```typescript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id: number): Promise {\n // Make an API call to fetch data for the given ID\n}\n\nasync function fetchMultipleData(ids: number[]): Promise {\n const results = await Promise.all(ids.map(id => apiRateLimiter.callApi(() => fetchData(id))));\n return results;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls made by the `fetchMultipleData` function, ensuring that no more than 10 calls are made at the same time.", 11 | "questions": "1. **What is the purpose of the `APIRateLimit` class?**\n\n The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once.\n\n2. **How does the `callApi` method work and what is its return type?**\n\n The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and manages the execution of queued calls based on the maximum number of concurrent calls allowed. The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`.\n\n3. **How does the `dequeueAndExecute` method function?**\n\n The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if the number of in-progress calls is below the maximum allowed concurrent calls. 
If both conditions are met, it dequeues the next call and executes it. This method is called after each API call is completed to ensure that the queue is continuously processed." 12 | }, 13 | { 14 | "fileName": "LLMUtil.ts", 15 | "filePath": "src/cli/utils/LLMUtil.ts", 16 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/LLMUtil.ts", 17 | "summary": "This code is responsible for interacting with the OpenAI API to generate embeddings and completions using different language models. It imports the necessary classes and types from the `openai` package and the project's `types.js` file. The code checks if the `OPENAI_API_KEY` environment variable is set, and initializes the `openai` instance with the API key.\n\nThe `models` object contains details about three different language models (GPT3, GPT4, and GPT432k), including their names, input and output costs per 1K tokens, and maximum token lengths.\n\nThe `createEmbedding` function takes a string as input and returns a Promise that resolves to an array of numbers representing the embedding. It calls the `openai.createEmbedding` method with the input string and the model name `text-embedding-ada-002`.\n\nThe `CreateCompletionParams` interface defines the parameters for the `createCompletion` function, which generates completions based on a given prompt and other optional parameters. The function constructs a `messages` array with a single system message containing the prompt, and sends a POST request to the OpenAI API's `/v1/chat/completions` endpoint with the necessary headers and parameters. 
It then extracts the completion result from the API response and returns it as a string.\n\nExample usage of these functions in the larger project might involve generating embeddings for text inputs or generating completions for prompts using the specified language models:\n\n```javascript\nconst embedding = await createEmbedding(\"This is a sample text.\");\nconsole.log(embedding);\n\nconst completion = await createCompletion({\n model: LLMModels.GPT3,\n prompt: \"Write a short story about a robot.\",\n temperature: 0.7,\n max_tokens: 100,\n});\nconsole.log(completion);\n```\n\nThese functions can be used to integrate the OpenAI API's capabilities into the babyagi-ts project, enabling it to generate embeddings and completions for various use cases.", 18 | "questions": "1. **Question:** What is the purpose of the `models` object and its properties?\n **Answer:** The `models` object is a record that maps the names of different LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, inputCostPer1KTokens, outputCostPer1KTokens, and maxLength.\n\n2. **Question:** How does the `createEmbedding` function work and what does it return?\n **Answer:** The `createEmbedding` function takes a string value as input and sends a request to the OpenAI API to create an embedding for the given input using the 'text-embedding-ada-002' model. It returns a Promise that resolves to an array of numbers representing the embedding.\n\n3. **Question:** What is the purpose of the `createCompletion` function and what parameters does it accept?\n **Answer:** The `createCompletion` function is used to generate a completion for a given prompt using the OpenAI API. It accepts an object with properties such as model, prompt, temperature, max_tokens, top_p, frequency_penalty, and presence_penalty." 
19 | }, 20 | { 21 | "fileName": "WaitUtil.ts", 22 | "filePath": "src/cli/utils/WaitUtil.ts", 23 | "url": "https://github.com/context-labs/babyagi-ts/src/cli/utils/WaitUtil.ts", 24 | "summary": "The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger babyagi-ts project. Both functions return Promises, making them suitable for use with `async/await` syntax.\n\n### wait function\n\nThe `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the Promise, and an optional `value` parameter, which defaults to `null`. The purpose of this function is to create a delay in the execution of asynchronous code. This can be useful in scenarios where you need to wait for a specific amount of time before proceeding with the next operation.\n\nExample usage:\n\n```javascript\nasync function example() {\n console.log(\"Starting...\");\n await wait(1000); // Wait for 1 second\n console.log(\"...Finished\");\n}\n```\n\n### forTrue function\n\nThe `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check the result of the `fn` function until it returns `true` or a maximum number of attempts (200 in this case) is reached. The function checks the result of `fn` every 50 milliseconds. If `fn` returns `true`, the Promise resolves with the value `true`. 
If the maximum number of attempts is reached without `fn` returning `true`, the Promise is rejected.\n\nThis function can be useful in scenarios where you need to wait for a specific condition to be met before proceeding with the next operation, such as waiting for an element to be visible on a web page or for a specific value to be present in a data store.\n\nExample usage:\n\n```javascript\nasync function waitForElement() {\n const elementExists = () => document.querySelector(\"#my-element\") !== null;\n try {\n await forTrue(elementExists);\n console.log(\"Element found!\");\n } catch {\n console.log(\"Element not found after waiting\");\n }\n}\n```\n\nIn summary, this file provides two utility functions that help manage asynchronous operations in the babyagi-ts project by introducing delays and waiting for specific conditions to be met.", 25 | "questions": "1. **What is the purpose of the `wait` function?**\n\n The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise resolves.\n\n2. **How does the `forTrue` function work and what is its use case?**\n\n The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It repeatedly checks the result of `fn` every 50 milliseconds, and resolves a promise when `fn` returns `true`. It can be used to wait for a certain condition to become true before proceeding with the execution of the code.\n\n3. **Is there a limit to the number of times the `forTrue` function checks the result of `fn`?**\n\n Yes, the `forTrue` function checks the result of `fn` up to 200 times. If `fn` does not return `true` within these 200 checks, the promise is rejected." 
26 | } 27 | ], 28 | "folders": [], 29 | "summary": "The code in the `src/cli/utils` folder of the babyagi-ts project provides utility functions and classes to manage asynchronous operations, interact with the OpenAI API, and limit the number of concurrent API calls. These utilities can be used throughout the project to ensure efficient and controlled execution of various tasks.\n\n### APIRateLimit.ts\n\nThe `APIRateLimit` class helps manage and limit the number of concurrent API calls made by the application. This is useful when the API has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. Here's an example of how this class can be used:\n\n```typescript\nconst apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls\n\nasync function fetchData(id: number): Promise {\n // Make an API call to fetch data for the given ID\n}\n\nasync function fetchMultipleData(ids: number[]): Promise {\n const results = await Promise.all(ids.map(id => apiRateLimiter.callApi(() => fetchData(id))));\n return results;\n}\n```\n\nIn this example, the `APIRateLimit` class is used to limit the number of concurrent calls made by the `fetchMultipleData` function, ensuring that no more than 10 calls are made at the same time.\n\n### LLMUtil.ts\n\nThis code interacts with the OpenAI API to generate embeddings and completions using different language models. 
Example usage of these functions might involve generating embeddings for text inputs or generating completions for prompts using the specified language models:\n\n```javascript\nconst embedding = await createEmbedding(\"This is a sample text.\");\nconsole.log(embedding);\n\nconst completion = await createCompletion({\n model: LLMModels.GPT3,\n prompt: \"Write a short story about a robot.\",\n temperature: 0.7,\n max_tokens: 100,\n});\nconsole.log(completion);\n```\n\nThese functions can be used to integrate the OpenAI API's capabilities into the babyagi-ts project, enabling it to generate embeddings and completions for various use cases.\n\n### WaitUtil.ts\n\nThe `wait` and `forTrue` utility functions help manage asynchronous operations in the project by introducing delays and waiting for specific conditions to be met. Example usage of the `wait` function:\n\n```javascript\nasync function example() {\n console.log(\"Starting...\");\n await wait(1000); // Wait for 1 second\n console.log(\"...Finished\");\n}\n```\n\nExample usage of the `forTrue` function:\n\n```javascript\nasync function waitForElement() {\n const elementExists = () => document.querySelector(\"#my-element\") !== null;\n try {\n await forTrue(elementExists);\n console.log(\"Element found!\");\n } catch {\n console.log(\"Element not found after waiting\");\n }\n}\n```\n\nIn summary, the `src/cli/utils` folder provides utility functions and classes that help manage asynchronous operations, interact with the OpenAI API, and limit the number of concurrent API calls in the babyagi-ts project. 
These utilities can be used throughout the project to ensure efficient and controlled execution of various tasks.", 30 | "questions": "" 31 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "index.ts", 3 | "filePath": "src/index.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/index.ts", 5 | "summary": "The code in this file serves as the command-line interface (CLI) tool for the `babyagi-ts` project. It provides two main commands: `init` and `run`, which are used to initialize a project and run a BabyAGI Agent, respectively.\n\nThe CLI tool is built using the `commander` package, which simplifies the process of creating command-line interfaces. The `init` and `run` commands are defined using the `command` method, and their descriptions and actions are specified using the `description` and `action` methods.\n\nThe `init` command initializes a project by creating a `babyagi.config.json` file in the current directory. It first tries to read an existing configuration file, and if it fails, it calls the `init` function without any arguments. The `init` function is imported from `./cli/commands/init/index.js`.\n\nThe `run` command is used to run a BabyAGI Agent. It first tries to read the configuration file, and if it fails, it calls the `init` function to create a new configuration file. Then, it reads the configuration file again and calls the `run` function with the parsed configuration object. The `run` function is imported from `./cli/commands/run/index.js`.\n\nThe code also listens for unhandled promise rejections using the `process.on` method. 
If an unhandled rejection occurs, it logs the error stack, shows an error spinner using the `spinnerError` function, stops the spinner using the `stopSpinner` function, and exits the program with an error code of 1.\n\nHere's an example of how the CLI tool can be used:\n\n```sh\n# Initialize a new project\n$ babyagi-ts init\n\n# Run a BabyAGI Agent\n$ babyagi-ts run\n```\n\nIn summary, this code provides a CLI tool for the `babyagi-ts` project, allowing users to easily initialize projects and run BabyAGI Agents using simple commands.", 6 | "questions": "1. **What is the purpose of the BabyAGI CLI Tool?**\n\n The BabyAGI CLI Tool is a command-line interface for managing and running BabyAGI projects. It provides commands for initializing a project and running a BabyAGI Agent.\n\n2. **How does the `init` command work and what is the purpose of the `babyagi.config.json` file?**\n\n The `init` command initializes a project by creating a `babyagi.config.json` file in the current directory. This file stores the configuration for the BabyAGI project, which is used by the CLI tool to run the BabyAGI Agent.\n\n3. **What happens when an unhandled promise rejection occurs?**\n\n When an unhandled promise rejection occurs, the error stack is logged to the console, an error spinner is shown, the spinner is stopped, and the program exits with an error code of 1. This helps to provide a clear indication of the error and gracefully handle unexpected issues." 7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/langchain/hnswlib.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "hnswlib.ts", 3 | "filePath": "src/langchain/hnswlib.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/langchain/hnswlib.ts", 5 | "summary": "The `HNSWLib` class in this code is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. 
It extends the `SaveableVectorStore` class and is designed to work with the `langchain` library for document embeddings and storage.\n\nThe main purpose of this class is to efficiently store and search for similar documents based on their embeddings. It provides methods to add documents, search for similar documents, and save/load the vector store to/from disk.\n\nThe constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert documents into their vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW algorithm, such as the space type and number of dimensions.\n\nThe `addDocuments` method takes an array of `Document` objects, converts them into their vector representations using the provided `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods allow the HNSW index and associated data to be saved to and loaded from disk, respectively. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nExample usage:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst hnsw = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = embeddings.embedText(\"example query\");\nconst similarDocuments = await hnsw.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn this example, an `HNSWLib` instance is created from an array of texts and their associated metadata. A query vector is then generated for an example query, and the top 5 most similar documents are retrieved from the HNSW index.", 6 | "questions": "1. 
**Question**: What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer**: The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. **Question**: How does the `addDocuments` method work and what is its purpose?\n **Answer**: The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them into vectors using the `embedDocuments` method from the `embeddings` object. It then adds these vectors and their corresponding documents to the HNSW index and the `docstore`.\n\n3. **Question**: How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer**: The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input, and searches for the `k` nearest neighbors in the HNSW index. It returns an array of tuples, where each tuple contains a `Document` object and its corresponding distance score to the query vector." 7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/langchain/summary.json: -------------------------------------------------------------------------------- 1 | { 2 | "folderName": "langchain", 3 | "folderPath": ".autodoc/docs/json/src/langchain", 4 | "url": "https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/langchain", 5 | "files": [ 6 | { 7 | "fileName": "hnswlib.ts", 8 | "filePath": "src/langchain/hnswlib.ts", 9 | "url": "https://github.com/context-labs/babyagi-ts/src/langchain/hnswlib.ts", 10 | "summary": "The `HNSWLib` class in this code is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. 
It extends the `SaveableVectorStore` class and is designed to work with the `langchain` library for document embeddings and storage.\n\nThe main purpose of this class is to efficiently store and search for similar documents based on their embeddings. It provides methods to add documents, search for similar documents, and save/load the vector store to/from disk.\n\nThe constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert documents into their vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW algorithm, such as the space type and number of dimensions.\n\nThe `addDocuments` method takes an array of `Document` objects, converts them into their vector representations using the provided `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n\nThe `save` and `load` methods allow the HNSW index and associated data to be saved to and loaded from disk, respectively. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nExample usage:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst hnsw = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = embeddings.embedText(\"example query\");\nconst similarDocuments = await hnsw.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn this example, an `HNSWLib` instance is created from an array of texts and their associated metadata. A query vector is then generated for an example query, and the top 5 most similar documents are retrieved from the HNSW index.", 11 | "questions": "1. 
**Question**: What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class?\n **Answer**: The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk.\n\n2. **Question**: How does the `addDocuments` method work and what is its purpose?\n **Answer**: The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them into vectors using the `embedDocuments` method from the `embeddings` object. It then adds these vectors and their corresponding documents to the HNSW index and the `docstore`.\n\n3. **Question**: How does the `similaritySearchVectorWithScore` method work and what does it return?\n **Answer**: The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input, and searches for the `k` nearest neighbors in the HNSW index. It returns an array of tuples, where each tuple contains a `Document` object and its corresponding distance score to the query vector." 12 | } 13 | ], 14 | "folders": [], 15 | "summary": "The `hnswlib.ts` file in the `langchain` folder of the `babyagi-ts` project contains the `HNSWLib` class, which is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. This class is designed to work with the `langchain` library for document embeddings and storage, allowing efficient storage and search for similar documents based on their embeddings.\n\nThe `HNSWLib` class extends the `SaveableVectorStore` class and takes an `Embeddings` object and an `HNSWLibArgs` object as arguments in its constructor. 
The `Embeddings` object is responsible for converting documents into their vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW algorithm, such as the space type and number of dimensions.\n\nThe class provides several methods for working with document embeddings:\n\n- `addDocuments`: This method takes an array of `Document` objects, converts them into their vector representations using the provided `Embeddings` object, and adds them to the HNSW index.\n- `similaritySearchVectorWithScore`: This method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores.\n- `save` and `load`: These methods allow the HNSW index and associated data to be saved to and loaded from disk, respectively.\n- `fromTexts` and `fromDocuments`: These static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively.\n\nHere's an example of how the `HNSWLib` class might be used:\n\n```javascript\nconst embeddings = new Embeddings(/* ... */);\nconst hnsw = await HNSWLib.fromTexts(texts, metadatas, embeddings);\n\nconst queryVector = embeddings.embedText(\"example query\");\nconst similarDocuments = await hnsw.similaritySearchVectorWithScore(queryVector, 5);\n```\n\nIn this example, an `HNSWLib` instance is created from an array of texts and their associated metadata. A query vector is then generated for an example query, and the top 5 most similar documents are retrieved from the HNSW index.\n\nOverall, the `hnswlib.ts` file plays a crucial role in the `babyagi-ts` project by providing an efficient way to store and search for similar documents based on their embeddings. 
This functionality is essential for tasks such as document retrieval, clustering, and recommendation systems.", 16 | "questions": "" 17 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/src/types.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "types.ts", 3 | "filePath": "src/types.ts", 4 | "url": "https://github.com/context-labs/babyagi-ts/src/types.ts", 5 | "summary": "The code provided defines the configuration and model details for the `babyagi-ts` project, which is likely an AI-based project utilizing OpenAI's GPT models. The code consists of three main parts: `BabyAGIConfig`, `LLMModels`, and `LLMModelDetails`.\n\n`BabyAGIConfig` is a TypeScript type that represents the configuration for the BabyAGI project. It has the following properties:\n\n- `name`: A string representing the name of the configuration.\n- `objective`: A string describing the objective of the project.\n- `initialTask`: A string representing the initial task to be performed.\n- `llm`: An instance of the `LLMModels` enum, which specifies the GPT model to be used.\n- `root`: A string representing the root directory of the project.\n\n`LLMModels` is an enumeration that lists the available GPT models for the project. It currently includes three models:\n\n- `GPT3`: GPT-3.5 Turbo, which is a powerful language model from OpenAI.\n- `GPT4`: A placeholder for the future GPT-4 model.\n- `GPT432k`: Another placeholder for a GPT-4 model with 32k tokens.\n\n`LLMModelDetails` is a TypeScript type that represents the details of a specific GPT model. 
It has the following properties:\n\n- `name`: An instance of the `LLMModels` enum, which specifies the GPT model.\n- `inputCostPer1KTokens`: A number representing the cost of processing 1,000 tokens in the input.\n- `outputCostPer1KTokens`: A number representing the cost of generating 1,000 tokens in the output.\n- `maxLength`: A number representing the maximum length (in tokens) that the model can handle.\n\nIn the larger project, these types and enums would be used to configure and manage the AI models and tasks. For example, a user might create a `BabyAGIConfig` object to specify the GPT model and initial task for their project:\n\n```typescript\nconst config: BabyAGIConfig = {\n name: 'My AI Project',\n objective: 'Generate text',\n initialTask: 'Text generation',\n llm: LLMModels.GPT3,\n root: './my-ai-project',\n};\n```\n\nThis configuration object could then be used to initialize and manage the AI models and tasks within the `babyagi-ts` project.", 6 | "questions": "1. **What is the purpose of the `BabyAGIConfig` type?**\n\n The `BabyAGIConfig` type is an object type that defines the configuration for the BabyAGI project, including properties like the name, objective, initial task, LLM model, and root directory.\n\n2. **What are the available LLMModels and what do they represent?**\n\n The `LLMModels` enum lists the available language models for the project, which include GPT3 (gpt-3.5-turbo), GPT4 (gpt-4), and GPT432k (gpt-4-32k). These represent different versions or configurations of the language models used in the project.\n\n3. **What information does the `LLMModelDetails` type provide?**\n\n The `LLMModelDetails` type provides information about a specific LLM model, including its name (as an LLMModels enum value), input cost per 1K tokens, output cost per 1K tokens, and maximum length (maxLength) of the model." 
7 | } -------------------------------------------------------------------------------- /.autodoc/docs/json/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileName": "tsconfig.json", 3 | "filePath": "tsconfig.json", 4 | "url": "https://github.com/context-labs/babyagi-ts/tsconfig.json", 5 | "summary": "This code is a configuration file for the TypeScript compiler in the babyagi-ts project. It specifies various options that control how the TypeScript compiler processes the source code and generates the output JavaScript files. The purpose of this configuration is to ensure that the project is built consistently and with the desired settings across different environments and development stages.\n\nThe `compilerOptions` object contains several key-value pairs that define the compiler settings:\n\n- `rootDir`: Specifies the root directory of the project's source files, which is \"src\" in this case.\n- `outDir`: Defines the output directory for the compiled JavaScript files, set to \"dist\".\n- `strict`: Enables strict type checking, ensuring that the code adheres to best practices and catches potential errors early.\n- `target`: Sets the target ECMAScript version for the output JavaScript code, which is \"es2020\" in this case.\n- `module`: Configures the module system used in the generated JavaScript code, set to \"ES2020\".\n- `sourceMap`: Enables the generation of source maps, which help in debugging the compiled code by mapping it back to the original TypeScript source.\n- `esModuleInterop`: Allows for better compatibility between CommonJS and ES modules by creating a namespace object for all imports.\n- `moduleResolution`: Specifies the module resolution strategy, set to \"node\" to mimic Node.js' module resolution algorithm.\n- `allowSyntheticDefaultImports`: Permits default imports from modules with no default export, which can be useful when working with certain third-party libraries.\n- `declaration`: 
Generates corresponding `.d.ts` files for the compiled JavaScript files, which can be useful for distributing the project as a library.\n- `skipLibCheck`: Skips type checking of declaration files, which can speed up the compilation process.\n\nOverall, this configuration file ensures that the TypeScript compiler processes the babyagi-ts project's source code with the desired settings, resulting in a consistent and optimized output.", 6 | "questions": "1. **What is the purpose of the `rootDir` and `outDir` options in the `compilerOptions`?**\n\n The `rootDir` option specifies the root folder of the source files, while the `outDir` option specifies the output directory for the compiled files.\n\n2. **What does the `strict` option do in the `compilerOptions`?**\n\n The `strict` option enables a wide range of type checking behavior that results in stronger guarantees of program correctness.\n\n3. **What is the purpose of the `sourceMap` option in the `compilerOptions`?**\n\n The `sourceMap` option generates corresponding source map files for the compiled JavaScript files, which can be useful for debugging and understanding the relationship between the TypeScript source code and the generated JavaScript code." 7 | } -------------------------------------------------------------------------------- /.autodoc/docs/markdown/babyagi.config.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/babyagi.config.json) 2 | 3 | The `babyagi-ts` project contains a configuration file that defines the settings for an AdPilot application. The purpose of this application is to create an AdSense marketing campaign for the website EatProperGood.com, with the goal of maximizing the Return on Ad Spend (ROAS). The application interacts with the AdSense API to achieve this objective. 
4 | 5 | The configuration file contains the following key-value pairs: 6 | 7 | - `"name": "AdPilot"`: This specifies the name of the application as "AdPilot". 8 | - `"objective": "Create an adsense marketing campaign for EatProperGood.com that interacts with the AdSense API to create campaigns that maximize ROAS"`: This describes the main goal of the application, which is to create an effective AdSense marketing campaign for EatProperGood.com. 9 | - `"initialTask": "Learn more about EatProperGood.com, what they sell and who their customers are"`: This indicates the first step in the process, which is to gather information about the website, its products, and its target audience. 10 | - `"llm": "gpt-3.5-turbo"`: This specifies the language model to be used for the project, which is GPT-3.5 Turbo. This advanced language model can help in generating high-quality content for the marketing campaign. 11 | - `"root": "./"`: This defines the root directory for the project, which is the current directory. 12 | 13 | In the larger project, this configuration file serves as a starting point for the AdPilot application. The application will use the provided information to interact with the AdSense API, create marketing campaigns, and optimize them for maximum ROAS. For example, the application may use the GPT-3.5 Turbo language model to generate ad copy and target specific customer segments based on the information gathered about EatProperGood.com. 14 | 15 | Overall, this code snippet is a crucial part of the `babyagi-ts` project, as it sets the foundation for the AdPilot application and its interaction with the AdSense API to create effective marketing campaigns for EatProperGood.com. 16 | ## Questions: 17 | 1. **Question:** What is the purpose of the `llm` property in this configuration file? 18 | **Answer:** The `llm` property specifies the language model to be used for this project, which in this case is "gpt-3.5-turbo". 19 | 20 | 2. 
**Question:** What does the `initialTask` property represent? 21 | **Answer:** The `initialTask` property represents the first step or task to be performed in the project, which is to learn more about EatProperGood.com, their products, and their target customers. 22 | 23 | 3. **Question:** What is the significance of the `root` property in this configuration file? 24 | **Answer:** The `root` property specifies the root directory for the project, which in this case is the current directory represented by "./". -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/commands/init/index.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/cli/commands/init/index.ts) 2 | 3 | This code is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. 4 | 5 | The `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`. 6 | 7 | The `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. 8 | 9 | Next, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (large language model). 
The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user's input is then used to create a new configuration object using the `makeConfigTemplate` function. 10 | 11 | Finally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent. 12 | 13 | Example usage: 14 | 15 | ```javascript 16 | import { init } from './path/to/this/file'; 17 | 18 | // Initialize a new BabyAGI agent with default configuration 19 | await init(); 20 | 21 | // Initialize a new BabyAGI agent with custom configuration 22 | await init({ 23 | name: 'MyAgent', 24 | objective: 'Answer questions', 25 | initialTask: 'Learn about the topic', 26 | llm: LLMModels.GPT3, 27 | root: './my-agent', 28 | }); 29 | ``` 30 | 31 | This code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model. 32 | ## Questions: 33 | 1. **What is the purpose of the `makeConfigTemplate` function?** 34 | 35 | The `makeConfigTemplate` function is used to create a default configuration object for the BabyAGI project. It takes an optional `config` parameter and returns a new configuration object with default values for any missing properties. 36 | 37 | 2. **How does the `init` function handle existing `babyagi.config.json` files?** 38 | 39 | The `init` function checks if a `babyagi.config.json` file already exists in the specified location. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. If the user chooses not to overwrite, the process exits. 40 | 41 | 3. **What are the available LLM models in the `init` function's `questions` array?** 42 | 43 | The available LLM models are GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). 
The user is prompted to select one of these models during the initialization process. -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/commands/init/summary.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands/init) 2 | 3 | The `init` command in the `index.ts` file is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. This code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model. 4 | 5 | The `makeConfigTemplate` function creates a default configuration object with optional values provided by the user. It takes an optional `config` parameter and returns a `BabyAGIConfig` object with default values for `name`, `objective`, `initialTask`, `llm`, and `root`. 6 | 7 | The `init` function is the main entry point for initializing the agent. It takes an optional `config` parameter, which defaults to the result of `makeConfigTemplate()`. The function first checks if a `babyagi.config.json` file already exists in the specified `root` directory. If it does, the user is prompted to confirm whether they want to overwrite the existing configuration. 8 | 9 | Next, the user is prompted to provide values for the agent's `name`, `objective`, `initialTask`, and `llm` (large language model). The `llm` prompt provides a list of available models, including GPT-3.5 Turbo, GPT-4 8K (Early Access), and GPT-4 32K (Early Access). The user's input is then used to create a new configuration object using the `makeConfigTemplate` function. 
10 | 11 | Finally, the new configuration is written to the `babyagi.config.json` file in the specified `root` directory, and a success message is displayed, instructing the user to run `babyagi start` to start the agent. 12 | 13 | Example usage: 14 | 15 | ```javascript 16 | import { init } from './path/to/this/file'; 17 | 18 | // Initialize a new BabyAGI agent with default configuration 19 | await init(); 20 | 21 | // Initialize a new BabyAGI agent with custom configuration 22 | await init({ 23 | name: 'MyAgent', 24 | objective: 'Answer questions', 25 | initialTask: 'Learn about the topic', 26 | llm: LLMModels.GPT3, 27 | root: './my-agent', 28 | }); 29 | ``` 30 | 31 | This code plays a crucial role in the babyagi-ts project, as it allows users to set up their agent's configuration interactively. The `init` command ensures that the agent is properly configured before starting, which helps prevent potential issues during runtime. Additionally, the interactive nature of the command makes it easy for users to customize their agent's behavior and underlying language model, enabling them to create agents tailored to their specific needs. 32 | -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/commands/run/index.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/cli/commands/run/index.ts) 2 | 3 | This code defines a module for the `babyagi-ts` project that manages the execution of tasks by an AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. The main purpose of this code is to create, prioritize, and execute tasks based on the given objective and initial task. 4 | 5 | The `run` function performs the following steps: 6 | 7 | 1. Initialize the vector store, which is used to store the results of completed tasks. 
If the vector store does not exist, it is created with an initial document. 8 | 9 | 2. Define the initial task list with the given `initialTask`. 10 | 11 | 3. Define three agent functions: `taskCreationAgent`, `prioritizationAgent`, and `executionAgent`. These functions are responsible for creating new tasks based on the results of completed tasks, prioritizing the task list, and executing tasks, respectively. 12 | 13 | 4. Define a `contextAgent` function, which retrieves the top completed tasks related to the given query. 14 | 15 | 5. Enter an infinite loop that performs the following steps: 16 | 17 | a. If there are tasks in the task list, print the task list and proceed with the next steps. Otherwise, wait for 1 second and check again. 18 | 19 | b. Pop the first task from the task list and execute it using the `executionAgent` function. Store the result in the vector store. 20 | 21 | c. Create new tasks based on the result using the `taskCreationAgent` function and add them to the task list. 22 | 23 | d. Prioritize the task list using the `prioritizationAgent` function. 24 | 25 | Here's an example of how the `run` function might be used: 26 | 27 | ```javascript 28 | import babyagi from 'babyagi-ts'; 29 | 30 | const config = { 31 | objective: 'Create a summary of a given text', 32 | initialTask: 'Summarize the first paragraph', 33 | llm: 'gpt-3.5-turbo', 34 | root: './data', 35 | }; 36 | 37 | babyagi.run(config); 38 | ``` 39 | 40 | In this example, the AI system will start with the initial task of summarizing the first paragraph and continue to create, prioritize, and execute tasks based on the given objective. 41 | ## Questions: 42 | 1. **Question:** What is the purpose of the `taskCreationAgent` function and how does it work? 43 | **Answer:** The `taskCreationAgent` function is responsible for creating new tasks based on the result of an execution agent. 
It takes the objective, result, task description, and a list of incomplete tasks as input, and returns an array of new tasks that do not overlap with the incomplete tasks. 44 | 45 | 2. **Question:** How does the `contextAgent` function work and what is its role in the code? 46 | **Answer:** The `contextAgent` function is responsible for providing context to the execution agent. It takes a query and the number of top results as input, creates an embedding for the query, and performs a similarity search on the vector store. It returns a sorted list of tasks based on their similarity scores. 47 | 48 | 3. **Question:** What is the purpose of the `vectorStore` and how is it initialized? 49 | **Answer:** The `vectorStore` is used to store and manage the embeddings of tasks and their results. It is initialized by either loading an existing vector store from the specified path or creating a new one with a sample document, and then saving it to the specified path. -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/commands/run/summary.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands/run) 2 | 3 | The `index.ts` file in the `run` folder is a crucial part of the `babyagi-ts` project, as it manages the execution of tasks by the AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. This function is responsible for creating, prioritizing, and executing tasks based on the given objective and initial task. 4 | 5 | The `run` function follows these steps: 6 | 7 | 1. Initializes the vector store for storing the results of completed tasks. 8 | 2. Defines the initial task list with the given `initialTask`. 9 | 3. Defines agent functions for task creation, prioritization, and execution. 10 | 4. 
Defines a `contextAgent` function for retrieving top completed tasks related to a query. 11 | 5. Enters an infinite loop that executes tasks, creates new tasks based on results, and prioritizes the task list. 12 | 13 | Here's an example of how the `run` function might be used: 14 | 15 | ```javascript 16 | import babyagi from 'babyagi-ts'; 17 | 18 | const config = { 19 | objective: 'Create a summary of a given text', 20 | initialTask: 'Summarize the first paragraph', 21 | llm: 'gpt-3.5-turbo', 22 | root: './data', 23 | }; 24 | 25 | babyagi.run(config); 26 | ``` 27 | 28 | In this example, the AI system starts with the initial task of summarizing the first paragraph and continues to create, prioritize, and execute tasks based on the given objective. 29 | 30 | The `run` function interacts with other parts of the `babyagi-ts` project by utilizing the agent functions and the vector store. The agent functions are responsible for creating new tasks, prioritizing the task list, and executing tasks. The vector store is used to store the results of completed tasks, which can be used by the `contextAgent` function to retrieve top completed tasks related to a query. 31 | 32 | This module is essential for the overall functioning of the `babyagi-ts` project, as it drives the AI system's task execution process. Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system. 33 | -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/commands/summary.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/commands) 2 | 3 | The `.autodoc/docs/json/src/cli/commands` folder contains essential code for the `babyagi-ts` project, specifically for initializing and running BabyAGI agents. 
It consists of two subfolders: `init` and `run`. 4 | 5 | The `init` subfolder contains the `index.ts` file, which is responsible for initializing and configuring a BabyAGI agent. It provides an interactive command-line interface for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. This code is essential for setting up a BabyAGI agent with the desired configuration, allowing users to easily customize their agent's behavior and underlying language model. 6 | 7 | Example usage: 8 | 9 | ```javascript 10 | import { init } from './path/to/this/file'; 11 | 12 | // Initialize a new BabyAGI agent with default configuration 13 | await init(); 14 | 15 | // Initialize a new BabyAGI agent with custom configuration 16 | await init({ 17 | name: 'MyAgent', 18 | objective: 'Answer questions', 19 | initialTask: 'Learn about the topic', 20 | llm: LLMModels.GPT3, 21 | root: './my-agent', 22 | }); 23 | ``` 24 | 25 | The `run` subfolder contains the `index.ts` file, which manages the execution of tasks by the AI system. The module exports a single function, `run`, which takes a `BabyAGIConfig` object as input. This function is responsible for creating, prioritizing, and executing tasks based on the given objective and initial task. 26 | 27 | Example usage: 28 | 29 | ```javascript 30 | import babyagi from 'babyagi-ts'; 31 | 32 | const config = { 33 | objective: 'Create a summary of a given text', 34 | initialTask: 'Summarize the first paragraph', 35 | llm: 'gpt-3.5-turbo', 36 | root: './data', 37 | }; 38 | 39 | babyagi.run(config); 40 | ``` 41 | 42 | In this example, the AI system starts with the initial task of summarizing the first paragraph and continues to create, prioritize, and execute tasks based on the given objective. 43 | 44 | The `run` function interacts with other parts of the `babyagi-ts` project by utilizing the agent functions and the vector store. 
The agent functions are responsible for creating new tasks, prioritizing the task list, and executing tasks. The vector store is used to store the results of completed tasks, which can be used by the `contextAgent` function to retrieve top completed tasks related to a query. 45 | 46 | This folder plays a crucial role in the babyagi-ts project, as it allows users to set up and run their agents with the desired configuration and objectives. Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system. 47 | -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/spinner.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/cli/spinner.ts) 2 | 3 | This code is responsible for managing a spinner in the `babyagi-ts` project, which is a visual element that indicates a loading or processing state. The spinner is created using the `ora` library, which provides a simple and customizable way to create and manage spinners in the terminal. 4 | 5 | The code starts by importing the `ora` library and creating a singleton spinner instance with the 'dots' style. This ensures that there will only be one spinner active at any given time, preventing multiple spinners from overlapping or interfering with each other. 6 | 7 | There are several exported functions that allow other parts of the project to interact with the spinner: 8 | 9 | - `updateSpinnerText(message: string)`: This function updates the spinner's text with the provided message. If the spinner is already spinning, it simply updates the text; otherwise, it starts the spinner with the new message. 
10 | 11 | Example usage: 12 | ```javascript 13 | updateSpinnerText('Loading data...'); 14 | ``` 15 | 16 | - `stopSpinner()`: This function stops the spinner if it is currently spinning. 17 | 18 | Example usage: 19 | ```javascript 20 | stopSpinner(); 21 | ``` 22 | 23 | - `spinnerError(message?: string)`: This function stops the spinner and marks it as failed, displaying an error message if provided. This is useful for indicating that an operation has failed. 24 | 25 | Example usage: 26 | ```javascript 27 | spinnerError('Failed to load data'); 28 | ``` 29 | 30 | - `spinnerSuccess(message?: string)`: This function stops the spinner and marks it as successful, displaying a success message if provided. This is useful for indicating that an operation has completed successfully. 31 | 32 | Example usage: 33 | ```javascript 34 | spinnerSuccess('Data loaded successfully'); 35 | ``` 36 | 37 | - `spinnerInfo(message: string)`: This function displays an informational message without affecting the spinner's state. This is useful for providing additional context or updates during a long-running operation. 38 | 39 | Example usage: 40 | ```javascript 41 | spinnerInfo('Processing data...'); 42 | ``` 43 | 44 | Overall, this code provides a convenient way for the `babyagi-ts` project to manage a spinner, allowing it to display loading states and provide feedback to the user during various operations. 45 | ## Questions: 46 | 1. **What is the purpose of the `ora` package in this code?** 47 | 48 | The `ora` package is used to create a spinner in the command line interface (CLI) to provide a visual indication of a running process. In this code, it is used to create a singleton spinner with the 'dots' style. 49 | 50 | 2. **What are the different functions exported in this module and what do they do?** 51 | 52 | - `updateSpinnerText`: Updates the spinner's text with the given message. If the spinner is not spinning, it starts the spinner with the given message. 
53 | - `stopSpinner`: Stops the spinner if it is spinning. 54 | - `spinnerError`: If the spinner is spinning, it stops the spinner and marks it as failed with an optional message. 55 | - `spinnerSuccess`: If the spinner is spinning, it stops the spinner and marks it as successful with an optional message. 56 | - `spinnerInfo`: Displays an info message with the spinner. 57 | 58 | 3. **What is the purpose of the `spinner.isSpinning` condition in the functions?** 59 | 60 | The `spinner.isSpinning` condition is used to check if the spinner is currently spinning before performing certain actions like updating the text, stopping the spinner, or marking it as failed or successful. This ensures that the spinner's state is managed correctly and prevents any unintended behavior. -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/summary.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli) 2 | 3 | The `src/cli` folder of the babyagi-ts project contains essential code for managing the command-line interface (CLI), providing utilities for interacting with the OpenAI API, and controlling the execution of BabyAGI agents. The code in this folder is organized into three main parts: the spinner, commands, and utilities. 4 | 5 | The `spinner.ts` file manages a spinner, a visual element that indicates a loading or processing state in the terminal. It provides several exported functions to interact with the spinner, such as `updateSpinnerText`, `stopSpinner`, `spinnerError`, `spinnerSuccess`, and `spinnerInfo`. These functions allow the project to display loading states and provide feedback to the user during various operations. 
6 | 7 | ```javascript 8 | import { updateSpinnerText, stopSpinner } from './path/to/spinner'; 9 | 10 | updateSpinnerText('Loading data...'); 11 | // Perform some operation 12 | stopSpinner(); 13 | ``` 14 | 15 | The `commands` subfolder contains code for initializing and running BabyAGI agents. The `init` subfolder provides an interactive CLI for users to set up their agent's configuration, which is then saved to a `babyagi.config.json` file. The `run` subfolder manages the execution of tasks by the AI system, creating, prioritizing, and executing tasks based on the given objective and initial task. 16 | 17 | ```javascript 18 | import { init } from './path/to/init'; 19 | await init(); 20 | 21 | import babyagi from 'babyagi-ts'; 22 | const config = { /* ... */ }; 23 | babyagi.run(config); 24 | ``` 25 | 26 | The `utils` folder provides utility functions and classes for managing asynchronous operations, interacting with the OpenAI API, and limiting the number of concurrent API calls. The `APIRateLimit` class helps manage and limit the number of concurrent API calls made by the application. The `LLMUtil.ts` file contains functions for generating embeddings and completions using different language models. The `WaitUtil.ts` file provides utility functions for managing asynchronous operations, such as `wait` and `forTrue`. 27 | 28 | ```javascript 29 | import { APIRateLimit } from './path/to/APIRateLimit'; 30 | const apiRateLimiter = new APIRateLimit(10); 31 | 32 | import { createEmbedding, createCompletion } from './path/to/LLMUtil'; 33 | const embedding = await createEmbedding("Sample text"); 34 | const completion = await createCompletion({ /* ... 
*/ }); 35 | 36 | import { wait, forTrue } from './path/to/WaitUtil'; 37 | await wait(1000); 38 | await forTrue(() => someCondition); 39 | ``` 40 | 41 | In summary, the `src/cli` folder plays a crucial role in the babyagi-ts project, providing a convenient way to manage the CLI, interact with the OpenAI API, and control the execution of BabyAGI agents. Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system. 42 | -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/utils/APIRateLimit.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/cli/utils/APIRateLimit.ts) 2 | 3 | The `APIRateLimit` class in this code snippet is designed to manage and limit the number of concurrent API calls made by the application. This is useful in scenarios where the API being called has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. 4 | 5 | The class has a constructor that takes an optional `maxConcurrentCalls` parameter, which defaults to 50. This parameter determines the maximum number of API calls that can be made concurrently. 6 | 7 | The main method of this class is `callApi<T>(apiFunction: () => Promise<T>): Promise<T>`. This method takes a function `apiFunction` that returns a Promise, and it returns a new Promise. The purpose of this method is to queue the API calls and execute them in a controlled manner, ensuring that the number of concurrent calls does not exceed the specified limit. 8 | 9 | When `callApi` is called, it wraps the provided `apiFunction` in a new function `executeCall`, which is then added to the internal queue.
If the number of in-progress calls is less than the maximum allowed, the `dequeueAndExecute` method is called to start processing the queued calls. 10 | 11 | The `dequeueAndExecute` method dequeues and executes the API calls as long as there are calls in the queue and the number of in-progress calls is below the limit. When a call is executed, the `inProgress` counter is incremented, and the result of the API call is used to resolve or reject the Promise returned by `callApi`. Once the call is completed, the `inProgress` counter is decremented, and the `dequeueAndExecute` method is called again to process any remaining calls in the queue. 12 | 13 | Here's an example of how this class can be used: 14 | 15 | ```typescript 16 | const apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls 17 | 18 | async function fetchData(id: number): Promise<Data> { 19 | // Make an API call to fetch data for the given ID 20 | } 21 | 22 | async function fetchMultipleData(ids: number[]): Promise<Data[]> { 23 | const results = await Promise.all(ids.map(id => apiRateLimiter.callApi(() => fetchData(id)))); 24 | return results; 25 | } 26 | ``` 27 | 28 | In this example, the `APIRateLimit` class is used to limit the number of concurrent calls made by the `fetchMultipleData` function, ensuring that no more than 10 calls are made at the same time. 29 | ## Questions: 30 | 1. **What is the purpose of the `APIRateLimit` class?** 31 | 32 | The `APIRateLimit` class is designed to manage and limit the number of concurrent API calls to a specified maximum, preventing the application from overwhelming the API with too many requests at once. 33 | 34 | 2. **How does the `callApi` method work and what is its return type?** 35 | 36 | The `callApi` method takes an `apiFunction` as an argument, which is a function that returns a Promise. It adds the API call to a queue and manages the execution of queued calls based on the maximum number of concurrent calls allowed.
The method returns a Promise of type `T`, where `T` is the expected return type of the `apiFunction`. 37 | 38 | 3. **How does the `dequeueAndExecute` method function?** 39 | 40 | The `dequeueAndExecute` method is responsible for executing the queued API calls. It checks if there are any calls in the queue and if the number of in-progress calls is below the maximum allowed concurrent calls. If both conditions are met, it dequeues the next call and executes it. This method is called after each API call is completed to ensure that the queue is continuously processed. -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/utils/LLMUtil.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/cli/utils/LLMUtil.ts) 2 | 3 | This code is responsible for interacting with the OpenAI API to generate embeddings and completions using different language models. It imports the necessary classes and types from the `openai` package and the project's `types.js` file. The code checks if the `OPENAI_API_KEY` environment variable is set, and initializes the `openai` instance with the API key. 4 | 5 | The `models` object contains details about three different language models (GPT3, GPT4, and GPT432k), including their names, input and output costs per 1K tokens, and maximum token lengths. 6 | 7 | The `createEmbedding` function takes a string as input and returns a Promise that resolves to an array of numbers representing the embedding. It calls the `openai.createEmbedding` method with the input string and the model name `text-embedding-ada-002`. 8 | 9 | The `CreateCompletionParams` interface defines the parameters for the `createCompletion` function, which generates completions based on a given prompt and other optional parameters. 
The function constructs a `messages` array with a single system message containing the prompt, and sends a POST request to the OpenAI API's `/v1/chat/completions` endpoint with the necessary headers and parameters. It then extracts the completion result from the API response and returns it as a string. 10 | 11 | Example usage of these functions in the larger project might involve generating embeddings for text inputs or generating completions for prompts using the specified language models: 12 | 13 | ```javascript 14 | const embedding = await createEmbedding("This is a sample text."); 15 | console.log(embedding); 16 | 17 | const completion = await createCompletion({ 18 | model: LLMModels.GPT3, 19 | prompt: "Write a short story about a robot.", 20 | temperature: 0.7, 21 | max_tokens: 100, 22 | }); 23 | console.log(completion); 24 | ``` 25 | 26 | These functions can be used to integrate the OpenAI API's capabilities into the babyagi-ts project, enabling it to generate embeddings and completions for various use cases. 27 | ## Questions: 28 | 1. **Question:** What is the purpose of the `models` object and its properties? 29 | **Answer:** The `models` object is a record that maps the names of different LLMModels (GPT3, GPT4, and GPT432k) to their respective details, such as name, inputCostPer1KTokens, outputCostPer1KTokens, and maxLength. 30 | 31 | 2. **Question:** How does the `createEmbedding` function work and what does it return? 32 | **Answer:** The `createEmbedding` function takes a string value as input and sends a request to the OpenAI API to create an embedding for the given input using the 'text-embedding-ada-002' model. It returns a Promise that resolves to an array of numbers representing the embedding. 33 | 34 | 3. **Question:** What is the purpose of the `createCompletion` function and what parameters does it accept? 35 | **Answer:** The `createCompletion` function is used to generate a completion for a given prompt using the OpenAI API. 
It accepts an object with properties such as model, prompt, temperature, max_tokens, top_p, frequency_penalty, and presence_penalty. -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/utils/WaitUtil.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/cli/utils/WaitUtil.ts) 2 | 3 | The code in this file provides two utility functions, `wait` and `forTrue`, which are designed to help manage asynchronous operations in the larger babyagi-ts project. Both functions return Promises, making them suitable for use with `async/await` syntax. 4 | 5 | ### wait function 6 | 7 | The `wait` function takes two arguments: `timeoutMs`, which is the number of milliseconds to wait before resolving the Promise, and an optional `value` parameter, which defaults to `null`. The purpose of this function is to create a delay in the execution of asynchronous code. This can be useful in scenarios where you need to wait for a specific amount of time before proceeding with the next operation. 8 | 9 | Example usage: 10 | 11 | ```javascript 12 | async function example() { 13 | console.log("Starting..."); 14 | await wait(1000); // Wait for 1 second 15 | console.log("...Finished"); 16 | } 17 | ``` 18 | 19 | ### forTrue function 20 | 21 | The `forTrue` function takes a single argument, `fn`, which is a function that returns a boolean value. The purpose of this function is to repeatedly check the result of the `fn` function until it returns `true` or a maximum number of attempts (200 in this case) is reached. The function checks the result of `fn` every 50 milliseconds. If `fn` returns `true`, the Promise resolves with the value `true`. If the maximum number of attempts is reached without `fn` returning `true`, the Promise is rejected. 
22 | 23 | This function can be useful in scenarios where you need to wait for a specific condition to be met before proceeding with the next operation, such as waiting for an element to be visible on a web page or for a specific value to be present in a data store. 24 | 25 | Example usage: 26 | 27 | ```javascript 28 | async function waitForElement() { 29 | const elementExists = () => document.querySelector("#my-element") !== null; 30 | try { 31 | await forTrue(elementExists); 32 | console.log("Element found!"); 33 | } catch { 34 | console.log("Element not found after waiting"); 35 | } 36 | } 37 | ``` 38 | 39 | In summary, this file provides two utility functions that help manage asynchronous operations in the babyagi-ts project by introducing delays and waiting for specific conditions to be met. 40 | ## Questions: 41 | 1. **What is the purpose of the `wait` function?** 42 | 43 | The `wait` function is an asynchronous utility function that resolves a promise after a specified timeout in milliseconds, optionally returning a value when the promise resolves. 44 | 45 | 2. **How does the `forTrue` function work and what is its use case?** 46 | 47 | The `forTrue` function takes a function `fn` as an argument, which should return a boolean value. It repeatedly checks the result of `fn` every 50 milliseconds, and resolves a promise when `fn` returns `true`. It can be used to wait for a certain condition to become true before proceeding with the execution of the code. 48 | 49 | 3. **Is there a limit to the number of times the `forTrue` function checks the result of `fn`?** 50 | 51 | Yes, the `forTrue` function checks the result of `fn` up to 200 times. If `fn` does not return `true` within these 200 checks, the promise is rejected. 
-------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/cli/utils/summary.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/cli/utils) 2 | 3 | The code in the `src/cli/utils` folder of the babyagi-ts project provides utility functions and classes to manage asynchronous operations, interact with the OpenAI API, and limit the number of concurrent API calls. These utilities can be used throughout the project to ensure efficient and controlled execution of various tasks. 4 | 5 | ### APIRateLimit.ts 6 | 7 | The `APIRateLimit` class helps manage and limit the number of concurrent API calls made by the application. This is useful when the API has a rate limit or when the application needs to control the number of simultaneous requests to avoid overloading the server. Here's an example of how this class can be used: 8 | 9 | ```typescript 10 | const apiRateLimiter = new APIRateLimit(10); // Limit to 10 concurrent calls 11 | 12 | async function fetchData(id: number): Promise<Data> { 13 | // Make an API call to fetch data for the given ID 14 | } 15 | 16 | async function fetchMultipleData(ids: number[]): Promise<Data[]> { 17 | const results = await Promise.all(ids.map(id => apiRateLimiter.callApi(() => fetchData(id)))); 18 | return results; 19 | } 20 | ``` 21 | 22 | In this example, the `APIRateLimit` class is used to limit the number of concurrent calls made by the `fetchMultipleData` function, ensuring that no more than 10 calls are made at the same time. 23 | 24 | ### LLMUtil.ts 25 | 26 | This code interacts with the OpenAI API to generate embeddings and completions using different language models.
Example usage of these functions might involve generating embeddings for text inputs or generating completions for prompts using the specified language models: 27 | 28 | ```javascript 29 | const embedding = await createEmbedding("This is a sample text."); 30 | console.log(embedding); 31 | 32 | const completion = await createCompletion({ 33 | model: LLMModels.GPT3, 34 | prompt: "Write a short story about a robot.", 35 | temperature: 0.7, 36 | max_tokens: 100, 37 | }); 38 | console.log(completion); 39 | ``` 40 | 41 | These functions can be used to integrate the OpenAI API's capabilities into the babyagi-ts project, enabling it to generate embeddings and completions for various use cases. 42 | 43 | ### WaitUtil.ts 44 | 45 | The `wait` and `forTrue` utility functions help manage asynchronous operations in the project by introducing delays and waiting for specific conditions to be met. Example usage of the `wait` function: 46 | 47 | ```javascript 48 | async function example() { 49 | console.log("Starting..."); 50 | await wait(1000); // Wait for 1 second 51 | console.log("...Finished"); 52 | } 53 | ``` 54 | 55 | Example usage of the `forTrue` function: 56 | 57 | ```javascript 58 | async function waitForElement() { 59 | const elementExists = () => document.querySelector("#my-element") !== null; 60 | try { 61 | await forTrue(elementExists); 62 | console.log("Element found!"); 63 | } catch { 64 | console.log("Element not found after waiting"); 65 | } 66 | } 67 | ``` 68 | 69 | In summary, the `src/cli/utils` folder provides utility functions and classes that help manage asynchronous operations, interact with the OpenAI API, and limit the number of concurrent API calls in the babyagi-ts project. These utilities can be used throughout the project to ensure efficient and controlled execution of various tasks. 
70 | -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/index.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/index.ts) 2 | 3 | The code in this file serves as the command-line interface (CLI) tool for the `babyagi-ts` project. It provides two main commands: `init` and `run`, which are used to initialize a project and run a BabyAGI Agent, respectively. 4 | 5 | The CLI tool is built using the `commander` package, which simplifies the process of creating command-line interfaces. The `init` and `run` commands are defined using the `command` method, and their descriptions and actions are specified using the `description` and `action` methods. 6 | 7 | The `init` command initializes a project by creating a `babyagi.config.json` file in the current directory. It first tries to read an existing configuration file, and if it fails, it calls the `init` function without any arguments. The `init` function is imported from `./cli/commands/init/index.js`. 8 | 9 | The `run` command is used to run a BabyAGI Agent. It first tries to read the configuration file, and if it fails, it calls the `init` function to create a new configuration file. Then, it reads the configuration file again and calls the `run` function with the parsed configuration object. The `run` function is imported from `./cli/commands/run/index.js`. 10 | 11 | The code also listens for unhandled promise rejections using the `process.on` method. If an unhandled rejection occurs, it logs the error stack, shows an error spinner using the `spinnerError` function, stops the spinner using the `stopSpinner` function, and exits the program with an error code of 1. 
12 | 13 | Here's an example of how the CLI tool can be used: 14 | 15 | ```sh 16 | # Initialize a new project 17 | $ babyagi-ts init 18 | 19 | # Run a BabyAGI Agent 20 | $ babyagi-ts run 21 | ``` 22 | 23 | In summary, this code provides a CLI tool for the `babyagi-ts` project, allowing users to easily initialize projects and run BabyAGI Agents using simple commands. 24 | ## Questions: 25 | 1. **What is the purpose of the BabyAGI CLI Tool?** 26 | 27 | The BabyAGI CLI Tool is a command-line interface for managing and running BabyAGI projects. It provides commands for initializing a project and running a BabyAGI Agent. 28 | 29 | 2. **How does the `init` command work and what is the purpose of the `babyagi.config.json` file?** 30 | 31 | The `init` command initializes a project by creating a `babyagi.config.json` file in the current directory. This file stores the configuration for the BabyAGI project, which is used by the CLI tool to run the BabyAGI Agent. 32 | 33 | 3. **What happens when an unhandled promise rejection occurs?** 34 | 35 | When an unhandled promise rejection occurs, the error stack is logged to the console, an error spinner is shown, the spinner is stopped, and the program exits with an error code of 1. This helps to provide a clear indication of the error and gracefully handle unexpected issues. -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/langchain/hnswlib.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/langchain/hnswlib.ts) 2 | 3 | The `HNSWLib` class in this code is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. It extends the `SaveableVectorStore` class and is designed to work with the `langchain` library for document embeddings and storage. 
4 | 5 | The main purpose of this class is to efficiently store and search for similar documents based on their embeddings. It provides methods to add documents, search for similar documents, and save/load the vector store to/from disk. 6 | 7 | The constructor takes an `Embeddings` object and an `HNSWLibArgs` object as arguments. The `Embeddings` object is used to convert documents into their vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW algorithm, such as the space type and number of dimensions. 8 | 9 | The `addDocuments` method takes an array of `Document` objects, converts them into their vector representations using the provided `Embeddings` object, and adds them to the HNSW index. The `similaritySearchVectorWithScore` method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores. 10 | 11 | The `save` and `load` methods allow the HNSW index and associated data to be saved to and loaded from disk, respectively. The `fromTexts` and `fromDocuments` static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively. 12 | 13 | Example usage: 14 | 15 | ```javascript 16 | const embeddings = new Embeddings(/* ... */); 17 | const hnsw = await HNSWLib.fromTexts(texts, metadatas, embeddings); 18 | 19 | const queryVector = embeddings.embedText("example query"); 20 | const similarDocuments = await hnsw.similaritySearchVectorWithScore(queryVector, 5); 21 | ``` 22 | 23 | In this example, an `HNSWLib` instance is created from an array of texts and their associated metadata. A query vector is then generated for an example query, and the top 5 most similar documents are retrieved from the HNSW index. 24 | ## Questions: 25 | 1. **Question**: What is the purpose of the `HNSWLib` class and how does it relate to the `SaveableVectorStore` class? 
26 | **Answer**: The `HNSWLib` class is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm from the `hnswlib-node` library. It extends the `SaveableVectorStore` class, which provides a base class for vector stores that can be saved and loaded from disk. 27 | 28 | 2. **Question**: How does the `addDocuments` method work and what is its purpose? 29 | **Answer**: The `addDocuments` method takes an array of `Document` objects, extracts their `pageContent`, and embeds them into vectors using the `embedDocuments` method from the `embeddings` object. It then adds these vectors and their corresponding documents to the HNSW index and the `docstore`. 30 | 31 | 3. **Question**: How does the `similaritySearchVectorWithScore` method work and what does it return? 32 | **Answer**: The `similaritySearchVectorWithScore` method takes a query vector and a number `k` as input, and searches for the `k` nearest neighbors in the HNSW index. It returns an array of tuples, where each tuple contains a `Document` object and its corresponding distance score to the query vector. -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/langchain/summary.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src/langchain) 2 | 3 | The `hnswlib.ts` file in the `langchain` folder of the `babyagi-ts` project contains the `HNSWLib` class, which is an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. This class is designed to work with the `langchain` library for document embeddings and storage, allowing efficient storage and search for similar documents based on their embeddings. 4 | 5 | The `HNSWLib` class extends the `SaveableVectorStore` class and takes an `Embeddings` object and an `HNSWLibArgs` object as arguments in its constructor. 
The `Embeddings` object is responsible for converting documents into their vector representations, while the `HNSWLibArgs` object contains configuration options for the HNSW algorithm, such as the space type and number of dimensions. 6 | 7 | The class provides several methods for working with document embeddings: 8 | 9 | - `addDocuments`: This method takes an array of `Document` objects, converts them into their vector representations using the provided `Embeddings` object, and adds them to the HNSW index. 10 | - `similaritySearchVectorWithScore`: This method takes a query vector and a number `k`, and returns the top `k` most similar documents along with their similarity scores. 11 | - `save` and `load`: These methods allow the HNSW index and associated data to be saved to and loaded from disk, respectively. 12 | - `fromTexts` and `fromDocuments`: These static methods provide convenient ways to create an `HNSWLib` instance from an array of texts or documents, respectively. 13 | 14 | Here's an example of how the `HNSWLib` class might be used: 15 | 16 | ```javascript 17 | const embeddings = new Embeddings(/* ... */); 18 | const hnsw = await HNSWLib.fromTexts(texts, metadatas, embeddings); 19 | 20 | const queryVector = embeddings.embedText("example query"); 21 | const similarDocuments = await hnsw.similaritySearchVectorWithScore(queryVector, 5); 22 | ``` 23 | 24 | In this example, an `HNSWLib` instance is created from an array of texts and their associated metadata. A query vector is then generated for an example query, and the top 5 most similar documents are retrieved from the HNSW index. 25 | 26 | Overall, the `hnswlib.ts` file plays a crucial role in the `babyagi-ts` project by providing an efficient way to store and search for similar documents based on their embeddings. This functionality is essential for tasks such as document retrieval, clustering, and recommendation systems. 
27 | -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/summary.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/.autodoc/docs/json/src) 2 | 3 | The `src` folder of the `babyagi-ts` project contains essential code for managing the command-line interface (CLI), providing utilities for interacting with the OpenAI API, and controlling the execution of BabyAGI agents. The code is organized into files and subfolders, each with a specific purpose. 4 | 5 | `index.ts` serves as the CLI tool for the project, providing two main commands: `init` and `run`. The `init` command initializes a project by creating a `babyagi.config.json` file, while the `run` command executes a BabyAGI Agent. The CLI tool is built using the `commander` package, which simplifies the process of creating command-line interfaces. 6 | 7 | ```sh 8 | # Initialize a new project 9 | $ babyagi-ts init 10 | 11 | # Run a BabyAGI Agent 12 | $ babyagi-ts run 13 | ``` 14 | 15 | `types.ts` defines the configuration and model details for the project, likely an AI-based project utilizing OpenAI's GPT models. It consists of three main parts: `BabyAGIConfig`, `LLMModels`, and `LLMModelDetails`. These types and enums are used to configure and manage the AI models and tasks. 16 | 17 | ```typescript 18 | const config: BabyAGIConfig = { 19 | name: 'My AI Project', 20 | objective: 'Generate text', 21 | initialTask: 'Text generation', 22 | llm: LLMModels.GPT3, 23 | root: './my-ai-project', 24 | }; 25 | ``` 26 | 27 | The `cli` folder contains code for initializing and running BabyAGI agents, managing a spinner for loading states, and providing utility functions for interacting with the OpenAI API and managing asynchronous operations. 
28 | 29 | ```javascript 30 | import { updateSpinnerText, stopSpinner } from './path/to/spinner'; 31 | updateSpinnerText('Loading data...'); 32 | // Perform some operation 33 | stopSpinner(); 34 | 35 | import { init } from './path/to/init'; 36 | await init(); 37 | 38 | import babyagi from 'babyagi-ts'; 39 | const config = { /* ... */ }; 40 | babyagi.run(config); 41 | ``` 42 | 43 | The `langchain` folder contains the `HNSWLib` class, an implementation of a vector store using the Hierarchical Navigable Small World (HNSW) algorithm. This class works with the `langchain` library for document embeddings and storage, allowing efficient storage and search for similar documents based on their embeddings. 44 | 45 | ```javascript 46 | const embeddings = new Embeddings(/* ... */); 47 | const hnsw = await HNSWLib.fromTexts(texts, metadatas, embeddings); 48 | 49 | const queryVector = embeddings.embedText("example query"); 50 | const similarDocuments = await hnsw.similaritySearchVectorWithScore(queryVector, 5); 51 | ``` 52 | 53 | In summary, the `src` folder plays a crucial role in the `babyagi-ts` project, providing a convenient way to manage the CLI, interact with the OpenAI API, and control the execution of BabyAGI agents. Developers working with this code should be familiar with the agent functions and the vector store to understand how tasks are created, prioritized, and executed within the system. 54 | -------------------------------------------------------------------------------- /.autodoc/docs/markdown/src/types.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/src/types.ts) 2 | 3 | The code provided defines the configuration and model details for the `babyagi-ts` project, which is likely an AI-based project utilizing OpenAI's GPT models. The code consists of three main parts: `BabyAGIConfig`, `LLMModels`, and `LLMModelDetails`. 
4 | 5 | `BabyAGIConfig` is a TypeScript type that represents the configuration for the BabyAGI project. It has the following properties: 6 | 7 | - `name`: A string representing the name of the configuration. 8 | - `objective`: A string describing the objective of the project. 9 | - `initialTask`: A string representing the initial task to be performed. 10 | - `llm`: An instance of the `LLMModels` enum, which specifies the GPT model to be used. 11 | - `root`: A string representing the root directory of the project. 12 | 13 | `LLMModels` is an enumeration that lists the available GPT models for the project. It currently includes three models: 14 | 15 | - `GPT3`: GPT-3.5 Turbo, which is a powerful language model from OpenAI. 16 | - `GPT4`: A placeholder for the future GPT-4 model. 17 | - `GPT432k`: Another placeholder for a GPT-4 model with 32k tokens. 18 | 19 | `LLMModelDetails` is a TypeScript type that represents the details of a specific GPT model. It has the following properties: 20 | 21 | - `name`: An instance of the `LLMModels` enum, which specifies the GPT model. 22 | - `inputCostPer1KTokens`: A number representing the cost of processing 1,000 tokens in the input. 23 | - `outputCostPer1KTokens`: A number representing the cost of generating 1,000 tokens in the output. 24 | - `maxLength`: A number representing the maximum length (in tokens) that the model can handle. 25 | 26 | In the larger project, these types and enums would be used to configure and manage the AI models and tasks. For example, a user might create a `BabyAGIConfig` object to specify the GPT model and initial task for their project: 27 | 28 | ```typescript 29 | const config: BabyAGIConfig = { 30 | name: 'My AI Project', 31 | objective: 'Generate text', 32 | initialTask: 'Text generation', 33 | llm: LLMModels.GPT3, 34 | root: './my-ai-project', 35 | }; 36 | ``` 37 | 38 | This configuration object could then be used to initialize and manage the AI models and tasks within the `babyagi-ts` project. 
39 | ## Questions: 40 | 1. **What is the purpose of the `BabyAGIConfig` type?** 41 | 42 | The `BabyAGIConfig` type is an object type that defines the configuration for the BabyAGI project, including properties like the name, objective, initial task, LLM model, and root directory. 43 | 44 | 2. **What are the available LLMModels and what do they represent?** 45 | 46 | The `LLMModels` enum lists the available language models for the project, which include GPT3 (gpt-3.5-turbo), GPT4 (gpt-4), and GPT432k (gpt-4-32k). These represent different versions or configurations of the language models used in the project. 47 | 48 | 3. **What information does the `LLMModelDetails` type provide?** 49 | 50 | The `LLMModelDetails` type provides information about a specific LLM model, including its name (as an LLMModels enum value), input cost per 1K tokens, output cost per 1K tokens, and maximum length (maxLength) of the model. -------------------------------------------------------------------------------- /.autodoc/docs/markdown/tsconfig.md: -------------------------------------------------------------------------------- 1 | [View code on GitHub](https://github.com/context-labs/babyagi-ts/tsconfig.json) 2 | 3 | This code is a configuration file for the TypeScript compiler in the babyagi-ts project. It specifies various options that control how the TypeScript compiler processes the source code and generates the output JavaScript files. The purpose of this configuration is to ensure that the project is built consistently and with the desired settings across different environments and development stages. 4 | 5 | The `compilerOptions` object contains several key-value pairs that define the compiler settings: 6 | 7 | - `rootDir`: Specifies the root directory of the project's source files, which is "src" in this case. 8 | - `outDir`: Defines the output directory for the compiled JavaScript files, set to "dist". 
9 | - `strict`: Enables strict type checking, ensuring that the code adheres to best practices and catches potential errors early. 10 | - `target`: Sets the target ECMAScript version for the output JavaScript code, which is "es2020" in this case. 11 | - `module`: Configures the module system used in the generated JavaScript code, set to "ES2020". 12 | - `sourceMap`: Enables the generation of source maps, which help in debugging the compiled code by mapping it back to the original TypeScript source. 13 | - `esModuleInterop`: Allows for better compatibility between CommonJS and ES modules by creating a namespace object for all imports. 14 | - `moduleResolution`: Specifies the module resolution strategy, set to "node" to mimic Node.js' module resolution algorithm. 15 | - `allowSyntheticDefaultImports`: Permits default imports from modules with no default export, which can be useful when working with certain third-party libraries. 16 | - `declaration`: Generates corresponding `.d.ts` files for the compiled JavaScript files, which can be useful for distributing the project as a library. 17 | - `skipLibCheck`: Skips type checking of declaration files, which can speed up the compilation process. 18 | 19 | Overall, this configuration file ensures that the TypeScript compiler processes the babyagi-ts project's source code with the desired settings, resulting in a consistent and optimized output. 20 | ## Questions: 21 | 1. **What is the purpose of the `rootDir` and `outDir` options in the `compilerOptions`?** 22 | 23 | The `rootDir` option specifies the root folder of the source files, while the `outDir` option specifies the output directory for the compiled files. 24 | 25 | 2. **What does the `strict` option do in the `compilerOptions`?** 26 | 27 | The `strict` option enables a wide range of type checking behavior that results in stronger guarantees of program correctness. 28 | 29 | 3. 
**What is the purpose of the `sourceMap` option in the `compilerOptions`?** 30 | 31 | The `sourceMap` option generates corresponding source map files for the compiled JavaScript files, which can be useful for debugging and understanding the relationship between the TypeScript source code and the generated JavaScript code. -------------------------------------------------------------------------------- /.eslintrc.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | parser: '@typescript-eslint/parser', // Specifies the ESLint parser 3 | extends: [ 4 | 'plugin:@typescript-eslint/recommended', // Uses the recommended rules from the @typescript-eslint/eslint-plugin 5 | 'plugin:react/recommended', 6 | 'plugin:react-hooks/recommended', 7 | 'plugin:prettier/recommended', 8 | ], 9 | parserOptions: { 10 | ecmaVersion: 2018, // Allows for the parsing of modern ECMAScript features 11 | sourceType: 'module', // Allows for the use of imports 12 | }, 13 | rules: { 14 | // Place to specify ESLint rules. Can be used to overwrite rules specified from the extended configs 15 | '@typescript-eslint/explicit-function-return-type': 'off', 16 | '@typescript-eslint/explicit-module-boundary-types': 'off', 17 | 'react/react-in-jsx-scope': 'off', 18 | 'react/prop-types': 'off', 19 | '@typescript-eslint/no-explicit-any': 'off', 20 | 'prettier/prettier': [ 21 | 'error', 22 | { 23 | endOfLine: 'auto', 24 | }, 25 | ], 26 | }, 27 | // "overrides": [ 28 | // { 29 | // "files": [], 30 | // "rules": { 31 | // "@typescript-eslint/no-unused-vars": "off" 32 | // } 33 | // } 34 | // ], 35 | settings: { 36 | react: { 37 | version: 'detect', 38 | }, 39 | }, 40 | }; 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | 6 | # next.js 7 | /.next/ 8 | /out/ 9 | 10 | # debug 11 | npm-debug.log* 12 | yarn-debug.log* 13 | yarn-error.log* 14 | .pnpm-debug.log* 15 | 16 | # local env files 17 | .env*.local 18 | 19 | # Used env file 20 | .env 21 | 22 | # typescript 23 | *.tsbuildinfo 24 | next-env.d.ts 25 | 26 | /dist* 27 | 28 | .DS_Store 29 | babyagi.config.json -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) Sam Heutmaker 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BabyAGI-ts 2 | 3 | This is an attempt to port [@yoheinakajima](https://twitter.com/yoheinakajima)'s [BabyAGI](https://github.com/yoheinakajima/babyagi) from Python to TypeScript and provide a friendly CLI tool that can be installed as a global NPM module. A few small adjustments have been made: 4 | 5 | 1. The dependency on Pinecone has been removed in favor of using [HNSW](https://www.npmjs.com/package/hnswlib-node). This allows developers to get started more easily without having to make a Pinecone account and pay for an index. 6 | 2. Configuration has been moved from environment variables to an [Inquirer.js](https://www.npmjs.com/package/inquirer) flow. 7 | 8 | All other functionality remains the same. 9 | 10 | ## Getting started 11 | 12 | Export your OpenAI API Key: 13 | ```bash 14 | export OPENAI_API_KEY= 15 | ``` 16 | 17 | Install the `babyagi` NPM module globally: 18 | 19 | ```bash 20 | npm install -g babyagi 21 | ``` 22 | 23 | Create a new directory for your agent to live: 24 | 25 | ```bash 26 | mkdir travel-gpt 27 | ``` 28 | 29 | Move into the directory: 30 | ```bash 31 | cd travel-gpt 32 | ``` 33 | 34 | Create a new agent: 35 | ```bash 36 | babyagi run 37 | ``` 38 | 39 | This will begin the agent creation flow. You should see a screen like this: 40 | 41 | Markdownify 42 | 43 | After you finish the agent configuration your agent will start running. 
44 | 45 | ### Multiple Agents 46 | 47 | If you'd like to create a new agent with a different objective, create a new folder and go through the steps again: 48 | 49 | ```bash 50 | mkdir research-gpt 51 | ``` 52 | 53 | Move into the directory: 54 | ```bash 55 | cd research-gpt 56 | ``` 57 | 58 | Create a new agent: 59 | ```bash 60 | babyagi run 61 | ``` 62 | 63 | # Warning 64 | This script is designed to be run continuously as part of a task management system. Running this script continuously can result in high API usage, so please use it responsibly. Additionally, the script requires the OpenAI API to be set up correctly, so make sure you have set up the API before running the script. 65 | 66 | 67 | # Credit 68 | This project is a port of [@yoheinakajima](https://twitter.com/yoheinakajima)'s [BabyAGI](https://github.com/yoheinakajima/babyagi). All credit goes to Yohei and everyone else who has contributed to the BabyAGI project. 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /assets/babyagi-config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/context-labs/babyagi-ts/5300635caee5a923fdbda31926504ac73d772f24/assets/babyagi-config.png -------------------------------------------------------------------------------- /autodoc.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "babyagi-ts", 3 | "repositoryUrl": "https://github.com/context-labs/babyagi-ts", 4 | "root": ".", 5 | "output": "./.autodoc", 6 | "llms": [ 7 | "gpt-4" 8 | ], 9 | "ignore": [ 10 | ".*", 11 | "*package-lock.json", 12 | "*package.json", 13 | "node_modules", 14 | "*dist*", 15 | "*build*", 16 | "*test*", 17 | "*.svg", 18 | "*.md", 19 | "*.mdx", 20 | "*.toml", 21 | "*autodoc*" 22 | ], 23 | "filePrompt": "Write a detailed technical explanation of what this code does. 
\n Focus on the high-level purpose of the code and how it may be used in the larger project.\n Include code examples where appropriate. Keep your response between 100 and 300 words. \n DO NOT RETURN MORE THAN 300 WORDS.\n Output should be in markdown format.\n Do not just list the methods and classes in this file.", 24 | "folderPrompt": "Write a technical explanation of what the code in this file does\n and how it might fit into the larger project or work with other parts of the project.\n Give examples of how this code might be used. Include code examples where appropriate.\n Be concise. Include any information that may be relevant to a developer who is curious about this code.\n Keep your response under 400 words. Output should be in markdown format.\n Do not just list the files and folders in this folder.", 25 | "chatPrompt": "", 26 | "contentType": "code", 27 | "targetAudience": "smart developer", 28 | "linkHosted": true 29 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "babyagi", 3 | "version": "0.0.3", 4 | "description": "babyagi", 5 | "type": "module", 6 | "main": "./dist/index.js", 7 | "exports": { 8 | ".": "./dist/src/index.js" 9 | }, 10 | "publishConfig": { 11 | "access": "public" 12 | }, 13 | "scripts": { 14 | "build": "tsc", 15 | "dev": "npm run build && npm install -g .", 16 | "clean": "rm -rf dist" 17 | }, 18 | "bin": { 19 | "babyagi": "./dist/index.js" 20 | }, 21 | "prettier": { 22 | "printWidth": 80, 23 | "trailingComma": "all", 24 | "singleQuote": true 25 | }, 26 | "repository": "https://github.com/context-labs/babyagi-ts", 27 | "author": "sam@usecontext.io", 28 | "license": "MIT", 29 | "dependencies": { 30 | "@dqbd/tiktoken": "^1.0.2", 31 | "chalk": "^5.2.0", 32 | "cli-progress": "^3.12.0", 33 | "commander": "^10.0.0", 34 | "esbuild": "^0.17.12", 35 | "hnswlib-node": "^1.4.2", 36 | "inquirer": 
"^9.1.5", 37 | "jsdom": "^21.1.1", 38 | "langchain": "^0.0.39", 39 | "marked": "^4.3.0", 40 | "marked-terminal": "^5.1.1", 41 | "openai": "^3.2.1", 42 | "ora": "^6.2.0", 43 | "shortid": "^2.2.16", 44 | "ts-md5": "^1.3.1" 45 | }, 46 | "devDependencies": { 47 | "@types/commander": "^2.12.2", 48 | "@types/inquirer": "^9.0.3", 49 | "@types/marked": "^4.0.8", 50 | "@types/marked-terminal": "^3.1.3", 51 | "@types/node": "^18.15.5", 52 | "@typescript-eslint/eslint-plugin": "^5.37.0", 53 | "@typescript-eslint/parser": "^5.37.0", 54 | "eslint": "^7.32.0", 55 | "eslint-config-next": "^13.1.4", 56 | "eslint-config-prettier": "^8.5.0", 57 | "eslint-plugin-prettier": "^4.0.0", 58 | "eslint-plugin-react": "^7.25.1", 59 | "eslint-plugin-react-hooks": "^4.2.0", 60 | "prettier": "^2.7.1", 61 | "typescript": "^4.8.3" 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/cli/commands/init/index.ts: -------------------------------------------------------------------------------- 1 | import chalk from 'chalk'; 2 | import inquirer from 'inquirer'; 3 | import fs from 'node:fs'; 4 | import path from 'node:path'; 5 | import { BabyAGIConfig, LLMModels } from '../../../types.js'; 6 | 7 | export const makeConfigTemplate = (config?: BabyAGIConfig): BabyAGIConfig => { 8 | return { 9 | name: config?.name ?? '', 10 | objective: config?.objective ?? '', 11 | initialTask: config?.initialTask ?? '', 12 | llm: config?.llm ?? LLMModels.GPT4, 13 | root: './', 14 | }; 15 | }; 16 | 17 | export const init = async (config: BabyAGIConfig = makeConfigTemplate()) => { 18 | const configPath = path.join(config.root, 'babyagi.config.json'); 19 | 20 | if (fs.existsSync(configPath)) { 21 | const questions = [ 22 | { 23 | type: 'confirm', 24 | name: 'continue', 25 | message: 26 | 'An babyagi.config.json file already exists in this location. The existing configuration will be overwritten. Do you want to continue? 
', 27 | default: false, 28 | }, 29 | ]; 30 | 31 | const answers = await inquirer.prompt(questions); 32 | if (!answers.continue) { 33 | process.exit(0); 34 | } 35 | } 36 | 37 | const questions = [ 38 | { 39 | type: 'input', 40 | name: 'name', 41 | message: chalk.yellow(`Enter the name of your agent:`), 42 | default: config.name, 43 | }, 44 | { 45 | type: 'input', 46 | name: 'objective', 47 | message: chalk.yellow(`Enter your agent's objective:`), 48 | default: config.objective, 49 | }, 50 | { 51 | type: 'input', 52 | name: 'initialTask', 53 | message: chalk.yellow( 54 | `Enter the initial task for the agent to complete the objective:`, 55 | ), 56 | default: config.initialTask, 57 | }, 58 | { 59 | type: 'list', 60 | name: 'llm', 61 | message: chalk.yellow( 62 | `Select which LLM you would like to use (select GPT-3.5 Turbo if you aren't sure):`, 63 | ), 64 | default: 0, 65 | choices: [ 66 | { 67 | name: 'GPT-3.5 Turbo', 68 | value: LLMModels.GPT3, 69 | }, 70 | { 71 | name: 'GPT-4 8K (Early Access)', 72 | value: LLMModels.GPT4, 73 | }, 74 | { 75 | name: 'GPT-4 32K (Early Access)', 76 | value: LLMModels.GPT432k, 77 | }, 78 | ], 79 | }, 80 | ]; 81 | 82 | const { name, objective, initialTask, llm } = await inquirer.prompt( 83 | questions, 84 | ); 85 | 86 | const newConfig = makeConfigTemplate({ 87 | ...config, 88 | name, 89 | objective, 90 | initialTask, 91 | llm, 92 | }); 93 | 94 | fs.writeFileSync( 95 | path.join(newConfig.root, 'babyagi.config.json'), 96 | JSON.stringify(newConfig, null, 2), 97 | 'utf-8', 98 | ); 99 | 100 | console.log( 101 | chalk.green('BabyAGI initialized. 
Run `babyagi start` to start the agent.'), 102 | ); 103 | }; 104 | -------------------------------------------------------------------------------- /src/cli/commands/run/index.ts: -------------------------------------------------------------------------------- 1 | import path from 'node:path'; 2 | import fs from 'node:fs'; 3 | import * as LLMUtil from '../../utils/LLMUtil.js'; 4 | import { BabyAGIConfig } from '../../../types.js'; 5 | import { OpenAIEmbeddings } from 'langchain/embeddings'; 6 | import { HNSWLib } from '../../../langchain/hnswlib.js'; 7 | import chalk from 'chalk'; 8 | 9 | export const run = async ({ 10 | objective, 11 | initialTask, 12 | llm, 13 | root, 14 | }: BabyAGIConfig) => { 15 | const vectorStorePath = path.join(root, 'data'); 16 | const vectorStoreExists = fs.existsSync( 17 | path.join(vectorStorePath, 'args.json'), 18 | ); 19 | const vectorStore = await (async () => { 20 | if (vectorStoreExists) { 21 | return await HNSWLib.load(vectorStorePath, new OpenAIEmbeddings()); 22 | } 23 | 24 | const store = await HNSWLib.fromDocuments( 25 | [{ pageContent: 'text', metadata: { test: true } }], 26 | new OpenAIEmbeddings(), 27 | ); 28 | 29 | await store.save(vectorStorePath); 30 | return store; 31 | })(); 32 | 33 | type Task = { 34 | taskId: number; 35 | taskName: string; 36 | }; 37 | 38 | // Task list 39 | let taskId = 1; 40 | let taskList: Task[] = [ 41 | { taskId: 1, taskName: initialTask }, // Add the first task 42 | ]; 43 | 44 | console.log(chalk.bold(chalk.magenta('\n*****OBJECTIVE*****\n'))); 45 | console.log(objective); 46 | console.log(`${chalk.bold(chalk.magenta('\nInitial Task:'))} ${initialTask}`); 47 | 48 | async function taskCreationAgent( 49 | objective: string, 50 | result: { data: string }, 51 | task_description: string, 52 | taskList: string[], 53 | ): Promise { 54 | const prompt = ` 55 | You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective: ${objective}, 56 | The 
last completed task has the result: ${result}. 57 | This result was based on this task description: ${task_description}. These are incomplete tasks: ${taskList.join( 58 | ', ', 59 | )}. 60 | Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. 61 | Return the tasks as an array.`; 62 | 63 | const response = await LLMUtil.createCompletion({ 64 | model: llm, 65 | prompt, 66 | }); 67 | 68 | const newTasks: string[] = response.includes('\n') 69 | ? response.split('\n') 70 | : [response]; 71 | 72 | return newTasks.map((taskName) => ({ taskId: taskId++, taskName })); 73 | } 74 | 75 | async function prioritizationAgent(thisTaskId: number) { 76 | const taskNames = taskList.map((t) => t.taskName); 77 | const next_taskId = thisTaskId + 1; 78 | const prompt = ` 79 | You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: ${taskNames}. 80 | Consider the ultimate objective of your team:${objective}. 81 | Do not remove any tasks. Return the result as a numbered list, like: 82 | #. First task 83 | #. Second task 84 | Start the task list with number ${next_taskId}.`; 85 | 86 | const response = await LLMUtil.createCompletion({ 87 | model: llm, 88 | prompt, 89 | }); 90 | 91 | const newTasks = response.includes('\n') 92 | ? response.split('\n') 93 | : [response]; 94 | taskList = []; 95 | for (const task_string of newTasks) { 96 | const [id, name] = task_string.trim().split('.', 2); 97 | const taskId = parseInt(id.trim()); 98 | const taskName = name.trim(); 99 | taskList.push({ taskId, taskName }); 100 | } 101 | } 102 | 103 | async function executionAgent( 104 | objective: string, 105 | task: string, 106 | ): Promise { 107 | const context = await contextAgent({ query: objective, topResultsNum: 5 }); 108 | const prompt = ` 109 | You are an AI who performs one task based on the following objective: ${objective} 110 | . 
111 | Take into account these previously completed tasks: ${context} 112 | . 113 | Your task: ${task} 114 | Response: 115 | `; 116 | return LLMUtil.createCompletion({ 117 | prompt, 118 | model: llm, 119 | max_tokens: 2000, 120 | }); 121 | } 122 | 123 | async function contextAgent({ 124 | query, 125 | topResultsNum, 126 | }: { 127 | query: string; 128 | topResultsNum: number; 129 | }) { 130 | const embedding = await LLMUtil.createEmbedding(query); 131 | const results = await vectorStore.similaritySearchVectorWithScore( 132 | embedding, 133 | topResultsNum, 134 | ); 135 | 136 | const sorted = results.sort(([, a], [, b]) => b - a); 137 | return sorted.map(([doc]) => doc.metadata.task); 138 | } 139 | 140 | while (true) { 141 | if (taskList.length > 0) { 142 | // Print the task list 143 | console.log(chalk.bold(chalk.magenta('\n*****TASK LIST*****\n'))); 144 | for (const t of taskList) { 145 | console.log(`${t.taskId}: ${t.taskName}`); 146 | } 147 | 148 | // Step 1: Pull the first task 149 | const task = taskList.shift(); 150 | if (!task) { 151 | throw new Error('Task is undefined'); 152 | } 153 | 154 | console.log(chalk.bold(chalk.greenBright('\n*****NEXT TASK*****\n'))); 155 | console.log(`${task.taskId}: ${task.taskName}`); 156 | 157 | // Send to execution function to complete the task based on the context 158 | const result = await executionAgent(objective, task.taskName); 159 | const thisTaskId = task.taskId; 160 | console.log(chalk.bold(chalk.magenta('\n*****TASK RESULT*****\n'))); 161 | console.log(result); 162 | 163 | // Step 2: Enrich result and store in Vector Store 164 | const enriched_result = { data: result }; 165 | const result_id = `result_${task.taskId}`; 166 | await vectorStore.addDocuments([ 167 | { 168 | pageContent: enriched_result.data, 169 | metadata: { result_id, task: task.taskName, result }, 170 | }, 171 | ]); 172 | await vectorStore.save(vectorStorePath); 173 | 174 | // Step 3: Create new tasks and reprioritize task list 175 | const newTasks = 
await taskCreationAgent( 176 | objective, 177 | enriched_result, 178 | task.taskName, 179 | taskList.map((t) => t.taskName), 180 | ); 181 | 182 | for (const newTask of newTasks) { 183 | taskList.push(newTask); 184 | } 185 | await prioritizationAgent(thisTaskId); 186 | } 187 | 188 | await new Promise((resolve) => setTimeout(resolve, 1000)); // Sleep before checking the task list again 189 | } 190 | }; 191 | 192 | export default { 193 | run, 194 | }; 195 | -------------------------------------------------------------------------------- /src/cli/spinner.ts: -------------------------------------------------------------------------------- 1 | import ora from 'ora'; 2 | 3 | const spinner = ora({ 4 | // make a singleton so we don't ever have 2 spinners 5 | spinner: 'dots', 6 | }); 7 | 8 | export const updateSpinnerText = (message: string) => { 9 | if (spinner.isSpinning) { 10 | spinner.text = message; 11 | return; 12 | } 13 | spinner.start(message); 14 | }; 15 | 16 | export const stopSpinner = () => { 17 | if (spinner.isSpinning) { 18 | spinner.stop(); 19 | } 20 | }; 21 | 22 | export const spinnerError = (message?: string) => { 23 | if (spinner.isSpinning) { 24 | spinner.fail(message); 25 | } 26 | }; 27 | export const spinnerSuccess = (message?: string) => { 28 | if (spinner.isSpinning) { 29 | spinner.succeed(message); 30 | } 31 | }; 32 | export const spinnerInfo = (message: string) => { 33 | spinner.info(message); 34 | }; 35 | -------------------------------------------------------------------------------- /src/cli/utils/APIRateLimit.ts: -------------------------------------------------------------------------------- 1 | export class APIRateLimit { 2 | private queue: (() => void)[] = []; 3 | private inProgress = 0; 4 | 5 | constructor(private maxConcurrentCalls: number = 50) {} 6 | 7 | async callApi(apiFunction: () => Promise): Promise { 8 | return new Promise((resolve, reject) => { 9 | const executeCall = async () => { 10 | this.inProgress++; 11 | try { 12 | const 
result = await apiFunction(); 13 | resolve(result); 14 | } catch (error) { 15 | reject(error); 16 | } finally { 17 | this.inProgress--; 18 | this.dequeueAndExecute(); 19 | } 20 | }; 21 | 22 | this.queue.push(executeCall); 23 | 24 | // Trigger the dequeue and execute operation when there are available slots for concurrent calls 25 | if (this.inProgress < this.maxConcurrentCalls) { 26 | this.dequeueAndExecute(); 27 | } 28 | }); 29 | } 30 | 31 | private dequeueAndExecute() { 32 | while (this.queue.length > 0 && this.inProgress < this.maxConcurrentCalls) { 33 | const nextCall = this.queue.shift(); 34 | if (nextCall) { 35 | nextCall(); 36 | } 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/cli/utils/LLMUtil.ts: -------------------------------------------------------------------------------- 1 | import { 2 | Configuration as OpenAIConfiguration, 3 | OpenAIApi as OpenAIAPI, 4 | } from 'openai'; 5 | import { LLMModelDetails, LLMModels } from '../../types.js'; 6 | 7 | if (!process.env.OPENAI_API_KEY) { 8 | throw new Error('OPENAI_API_KEY environment variable is missing from .env'); 9 | process.exit(1); 10 | } 11 | 12 | const openai = new OpenAIAPI( 13 | new OpenAIConfiguration({ 14 | apiKey: process.env.OPENAI_API_KEY ?? 
'', 15 | }), 16 | ); 17 | 18 | export const models: Record = { 19 | [LLMModels.GPT3]: { 20 | name: LLMModels.GPT3, 21 | inputCostPer1KTokens: 0.002, 22 | outputCostPer1KTokens: 0.002, 23 | maxLength: 3050, 24 | }, 25 | [LLMModels.GPT4]: { 26 | name: LLMModels.GPT4, 27 | inputCostPer1KTokens: 0.03, 28 | outputCostPer1KTokens: 0.06, 29 | maxLength: 8192, 30 | }, 31 | [LLMModels.GPT432k]: { 32 | name: LLMModels.GPT432k, 33 | inputCostPer1KTokens: 0.06, 34 | outputCostPer1KTokens: 0.12, 35 | maxLength: 32768, 36 | }, 37 | }; 38 | 39 | export const createEmbedding = async (value: string): Promise => { 40 | const response = await openai.createEmbedding({ 41 | input: value, 42 | model: 'text-embedding-ada-002', 43 | }); 44 | 45 | const { 46 | data: { data: results }, 47 | } = response; 48 | 49 | return results?.[0]?.embedding ?? []; 50 | }; 51 | 52 | export interface CreateCompletionParams { 53 | model: string; 54 | prompt: string; 55 | temperature?: number; 56 | max_tokens?: number; 57 | top_p?: number; 58 | frequency_penalty?: number; 59 | presence_penalty?: number; 60 | } 61 | 62 | export async function createCompletion({ 63 | prompt, 64 | ...params 65 | }: CreateCompletionParams): Promise { 66 | const messages = [{ role: 'system', content: prompt }]; 67 | 68 | const res = await fetch('https://api.openai.com/v1/chat/completions', { 69 | headers: { 70 | 'Content-Type': 'application/json', 71 | Authorization: `Bearer ${process.env.OPENAI_API_KEY ?? ''}`, 72 | }, 73 | method: 'POST', 74 | body: JSON.stringify({ 75 | top_p: 1, 76 | frequency_penalty: 0.1, 77 | presence_penalty: 0.1, 78 | stream: false, 79 | n: 1, 80 | temperature: 0.0, 81 | messages, 82 | ...params, 83 | }), 84 | }); 85 | 86 | const json: any = await res.json(); 87 | const result = json?.choices?.[0]?.message?.content ?? null; 88 | const final = result ?? 
''; 89 | return final; 90 | } 91 | -------------------------------------------------------------------------------- /src/cli/utils/WaitUtil.ts: -------------------------------------------------------------------------------- 1 | export async function wait(timeoutMs: number, value: any = null): Promise { 2 | return new Promise((resolve) => { 3 | setTimeout(() => resolve(value), timeoutMs); 4 | }); 5 | } 6 | 7 | export async function forTrue(fn: () => boolean) { 8 | const count = 0; 9 | return new Promise((resolve, reject) => { 10 | if (fn()) { 11 | resolve(true); 12 | return; 13 | } 14 | 15 | const interval = setInterval(() => { 16 | if (fn()) { 17 | clearInterval(interval); 18 | resolve(true); 19 | return; 20 | } 21 | if (count >= 200) reject(); 22 | }, 50); 23 | }); 24 | } 25 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import fs from 'node:fs/promises'; 4 | import { Command } from 'commander'; 5 | import { spinnerError, stopSpinner } from './cli/spinner.js'; 6 | import { init } from './cli/commands/init/index.js'; 7 | import { run } from './cli/commands/run/index.js'; 8 | import { BabyAGIConfig } from './types.js'; 9 | 10 | const program = new Command(); 11 | program.description('BabyAGI CLI Tool'); 12 | program.version('0.0.3'); 13 | 14 | program 15 | .command('init') 16 | .description( 17 | 'Initialize project by creating a `babyagi.config.json` file in the current directory.', 18 | ) 19 | .action(async () => { 20 | try { 21 | const config: BabyAGIConfig = JSON.parse( 22 | await fs.readFile('./babyagi.config.json', 'utf8'), 23 | ); 24 | init(config); 25 | } catch (e) { 26 | init(); 27 | } 28 | }); 29 | 30 | program 31 | .command('run') 32 | .description('Run a BabyAGI Agent') 33 | .action(async () => { 34 | let config: BabyAGIConfig; 35 | try { 36 | config = JSON.parse(await 
fs.readFile('./babyagi.config.json', 'utf8')); 37 | } catch (e) { 38 | await init(); 39 | config = JSON.parse(await fs.readFile('./babyagi.config.json', 'utf8')); 40 | } 41 | 42 | run(config); 43 | }); 44 | 45 | /** 46 | * Listen for unhandled promise rejections 47 | */ 48 | process.on('unhandledRejection', function (err: Error) { 49 | console.error(err.stack); 50 | 51 | spinnerError(); // show an error spinner 52 | stopSpinner(); // stop the spinner 53 | program.error('', { exitCode: 1 }); // exit with error code 1 54 | }); 55 | 56 | program.parse(); 57 | -------------------------------------------------------------------------------- /src/langchain/hnswlib.ts: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs/promises'; 2 | import path from 'node:path'; 3 | import HierarchicalNSW from 'hnswlib-node'; 4 | import type { 5 | HierarchicalNSW as HierarchicalNSWT, 6 | SpaceName, 7 | } from 'hnswlib-node'; 8 | import { Document, InMemoryDocstore } from 'langchain/docstore'; 9 | import { Embeddings } from 'langchain/embeddings'; 10 | import { SaveableVectorStore } from 'langchain/vectorstores'; 11 | 12 | export interface HNSWLibBase { 13 | space: SpaceName; 14 | numDimensions?: number; 15 | } 16 | 17 | export interface HNSWLibArgs extends HNSWLibBase { 18 | docstore?: InMemoryDocstore; 19 | index?: HierarchicalNSWT; 20 | } 21 | 22 | export class HNSWLib extends SaveableVectorStore { 23 | _index?: HierarchicalNSWT; 24 | 25 | docstore: InMemoryDocstore; 26 | 27 | args: HNSWLibBase; 28 | 29 | constructor(embeddings: Embeddings, args: HNSWLibArgs) { 30 | super(embeddings, args); 31 | this._index = args.index; 32 | this.args = args; 33 | this.embeddings = embeddings; 34 | this.docstore = args?.docstore ?? 
new InMemoryDocstore(); 35 | } 36 | 37 | async addDocuments(documents: Document[]): Promise { 38 | const texts = documents.map(({ pageContent }) => pageContent); 39 | return this.addVectors( 40 | await this.embeddings.embedDocuments(texts), 41 | documents, 42 | ); 43 | } 44 | 45 | private static async getHierarchicalNSW(args: HNSWLibBase) { 46 | const { HierarchicalNSW } = await HNSWLib.imports(); 47 | if (!args.space) { 48 | throw new Error('hnswlib-node requires a space argument'); 49 | } 50 | if (args.numDimensions === undefined) { 51 | throw new Error('hnswlib-node requires a numDimensions argument'); 52 | } 53 | return new HierarchicalNSW(args.space, args.numDimensions); 54 | } 55 | 56 | private async initIndex(vectors: number[][]) { 57 | if (!this._index) { 58 | if (this.args.numDimensions === undefined) { 59 | this.args.numDimensions = vectors[0].length; 60 | } 61 | this.index = await HNSWLib.getHierarchicalNSW(this.args); 62 | } 63 | if (!this.index.getCurrentCount()) { 64 | this.index.initIndex(vectors.length); 65 | } 66 | } 67 | 68 | public get index(): HierarchicalNSWT { 69 | if (!this._index) { 70 | throw new Error( 71 | 'Vector store not initialised yet. 
Try calling `addTexts` first.', 72 | ); 73 | } 74 | return this._index; 75 | } 76 | 77 | private set index(index: HierarchicalNSWT) { 78 | this._index = index; 79 | } 80 | 81 | async addVectors(vectors: number[][], documents: Document[]) { 82 | if (vectors.length === 0) { 83 | return; 84 | } 85 | await this.initIndex(vectors); 86 | 87 | if (vectors.length !== documents.length) { 88 | throw new Error(`Vectors and metadatas must have the same length`); 89 | } 90 | if (vectors[0].length !== this.args.numDimensions) { 91 | throw new Error( 92 | `Vectors must have the same length as the number of dimensions (${this.args.numDimensions})`, 93 | ); 94 | } 95 | const capacity = this.index.getMaxElements(); 96 | const needed = this.index.getCurrentCount() + vectors.length; 97 | if (needed > capacity) { 98 | this.index.resizeIndex(needed); 99 | } 100 | const docstoreSize = this.docstore.count; 101 | for (let i = 0; i < vectors.length; i += 1) { 102 | this.index.addPoint(vectors[i], docstoreSize + i); 103 | this.docstore.add({ [docstoreSize + i]: documents[i] }); 104 | } 105 | } 106 | 107 | async similaritySearchVectorWithScore(query: number[], k: number) { 108 | if (query.length !== this.args.numDimensions) { 109 | throw new Error( 110 | `Query vector must have the same length as the number of dimensions (${this.args.numDimensions})`, 111 | ); 112 | } 113 | if (k > this.index.getCurrentCount()) { 114 | const total = this.index.getCurrentCount(); 115 | console.warn( 116 | `k (${k}) is greater than the number of elements in the index (${total}), setting k to ${total}`, 117 | ); 118 | // eslint-disable-next-line no-param-reassign 119 | k = total; 120 | } 121 | const result = this.index.searchKnn(query, k); 122 | return result.neighbors.map( 123 | (docIndex, resultIndex) => 124 | [ 125 | this.docstore.search(String(docIndex)), 126 | result.distances[resultIndex], 127 | ] as [Document, number], 128 | ); 129 | } 130 | 131 | async save(directory: string) { 132 | await 
fs.mkdir(directory, { recursive: true }); 133 | await Promise.all([ 134 | this.index.writeIndex(path.join(directory, 'hnswlib.index')), 135 | await fs.writeFile( 136 | path.join(directory, 'args.json'), 137 | JSON.stringify(this.args), 138 | ), 139 | await fs.writeFile( 140 | path.join(directory, 'docstore.json'), 141 | JSON.stringify(Array.from(this.docstore._docs.entries())), 142 | ), 143 | ]); 144 | } 145 | 146 | static async load(directory: string, embeddings: Embeddings) { 147 | const args = JSON.parse( 148 | await fs.readFile(path.join(directory, 'args.json'), 'utf8'), 149 | ); 150 | const index = await HNSWLib.getHierarchicalNSW(args); 151 | const [docstoreFiles] = await Promise.all([ 152 | fs 153 | .readFile(path.join(directory, 'docstore.json'), 'utf8') 154 | .then(JSON.parse), 155 | index.readIndex(path.join(directory, 'hnswlib.index')), 156 | ]); 157 | args.docstore = new InMemoryDocstore(new Map(docstoreFiles)); 158 | 159 | args.index = index; 160 | 161 | return new HNSWLib(embeddings, args); 162 | } 163 | 164 | static async fromTexts( 165 | texts: string[], 166 | metadatas: object[], 167 | embeddings: Embeddings, 168 | dbConfig?: { 169 | docstore?: InMemoryDocstore; 170 | }, 171 | ): Promise { 172 | const docs: Document[] = []; 173 | for (let i = 0; i < texts.length; i += 1) { 174 | const newDoc = new Document({ 175 | pageContent: texts[i], 176 | metadata: metadatas[i], 177 | }); 178 | docs.push(newDoc); 179 | } 180 | return HNSWLib.fromDocuments(docs, embeddings, dbConfig); 181 | } 182 | 183 | static async fromDocuments( 184 | docs: Document[], 185 | embeddings: Embeddings, 186 | dbConfig?: { 187 | docstore?: InMemoryDocstore; 188 | }, 189 | ): Promise { 190 | const args: HNSWLibArgs = { 191 | docstore: dbConfig?.docstore, 192 | space: 'cosine', 193 | }; 194 | const instance = new this(embeddings, args); 195 | await instance.addDocuments(docs); 196 | return instance; 197 | } 198 | 199 | static async imports(): Promise<{ 200 | HierarchicalNSW: typeof 
HierarchicalNSWT; 201 | }> { 202 | return HierarchicalNSW; 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | export type BabyAGIConfig = { 2 | name: string; 3 | objective: string; 4 | initialTask: string; 5 | llm: LLMModels; 6 | root: string; 7 | }; 8 | 9 | export enum LLMModels { 10 | GPT3 = 'gpt-3.5-turbo', 11 | GPT4 = 'gpt-4', 12 | GPT432k = 'gpt-4-32k', 13 | } 14 | 15 | export type LLMModelDetails = { 16 | name: LLMModels; 17 | inputCostPer1KTokens: number; 18 | outputCostPer1KTokens: number; 19 | maxLength: number; 20 | }; 21 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "rootDir": "src", 4 | "outDir": "dist", 5 | "strict": true, 6 | "target": "es2020", 7 | "module": "ES2020", 8 | "sourceMap": true, 9 | "esModuleInterop": true, 10 | "moduleResolution": "node", 11 | "allowSyntheticDefaultImports": true, 12 | "declaration": true, 13 | "skipLibCheck": true, 14 | } 15 | } --------------------------------------------------------------------------------