├── tests ├── __init__.py ├── data │ ├── __init__.py │ └── packs │ │ ├── __init__.py │ │ ├── invalid.py │ │ ├── noop.py │ │ └── summarization_pack.py ├── end_to_end │ ├── __init__.py │ └── test_selection.py ├── test_init_langchain_tool.py ├── test_pack_config.py ├── test_pack_use.py ├── conftest.py ├── test_get_pack_data.py └── test_get_pack.py ├── autopack ├── VERSION ├── filesystem_emulation │ ├── __init__.py │ ├── file_manager.py │ ├── ram_file_manager.py │ ├── filesystem_file_manager.py │ └── workspace_file_manager.py ├── __init__.py ├── __main__.py ├── errors.py ├── search.py ├── pack_response.py ├── langchain_wrapper.py ├── cli.py ├── prompts.py ├── api.py ├── get_pack.py ├── pack_config.py ├── installation.py ├── selection.py ├── pack.py └── utils.py ├── pytest.ini ├── .github ├── dependabot.yml ├── release_message.sh ├── PULL_REQUEST_TEMPLATE.md ├── FUNDING.yml ├── ISSUE_TEMPLATE │ └── bug_report.md ├── rename_project.sh ├── workflows │ ├── release.yml │ ├── rename_project.yml │ └── main.yml └── init.sh ├── .flake8 ├── LICENSE ├── pyproject.toml ├── README.md └── .gitignore /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /autopack/VERSION: -------------------------------------------------------------------------------- 1 | 0.1.0 2 | -------------------------------------------------------------------------------- /tests/data/packs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/end_to_end/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /autopack/filesystem_emulation/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests 3 | -------------------------------------------------------------------------------- /tests/data/packs/invalid.py: -------------------------------------------------------------------------------- 1 | class InvalidPack: 2 | pass 3 | -------------------------------------------------------------------------------- /autopack/__init__.py: -------------------------------------------------------------------------------- 1 | from autopack.pack import Pack 2 | 3 | __all__ = ["Pack"] 4 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" -------------------------------------------------------------------------------- /.github/release_message.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | previous_tag=$(git tag --sort=-creatordate | sed -n 2p) 3 | git shortlog "${previous_tag}.." 
| sed 's/^./ &/' 4 | -------------------------------------------------------------------------------- /autopack/__main__.py: -------------------------------------------------------------------------------- 1 | """Entry point for autopack.""" 2 | 3 | from .cli import main # pragma: no cover 4 | 5 | if __name__ == "__main__": # pragma: no cover 6 | main() 7 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | select = "E303, W293, W291, W292, E305, E231, E302" 4 | exclude = 5 | .tox, 6 | __pycache__, 7 | *.pyc, 8 | .env 9 | venv*/*, 10 | .venv/*, 11 | reports/*, 12 | dist/*, 13 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### Summary :memo: 2 | _Write an overview about it._ 3 | 4 | ### Details 5 | _Describe more what you did on changes._ 6 | 1. (...) 7 | 2. (...) 
8 | 9 | ### Bugfixes :bug: (delete if dind't have any) 10 | - 11 | 12 | ### Checks 13 | - [ ] Closed #798 14 | - [ ] Tested Changes 15 | - [ ] Stakeholder Approval 16 | -------------------------------------------------------------------------------- /autopack/errors.py: -------------------------------------------------------------------------------- 1 | class AutoPackError(Exception): 2 | pass 3 | 4 | 5 | class AutoPackFetchError(AutoPackError): 6 | pass 7 | 8 | 9 | class AutoPackNotFoundError(AutoPackError): 10 | pass 11 | 12 | 13 | class AutoPackNotInstalledError(AutoPackError): 14 | pass 15 | 16 | 17 | class AutoPackLoadError(AutoPackError): 18 | pass 19 | 20 | 21 | class AutoPackInstallationError(AutoPackError): 22 | pass 23 | -------------------------------------------------------------------------------- /autopack/search.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from autopack.api import pack_search 4 | 5 | 6 | def print_search(query: str): 7 | matching_packs = pack_search(query) 8 | for pack in matching_packs: 9 | print("--------") 10 | print(f"Pack ID: {pack.pack_id}") 11 | print(f"Dependencies: {', '.join(pack.dependencies)}") 12 | print(f"Description: {pack.name}") 13 | print(f"Run Args: {json.dumps(pack.run_args)}") 14 | -------------------------------------------------------------------------------- /tests/data/packs/noop.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field 2 | 3 | from autopack import Pack 4 | 5 | 6 | class NoopArgs(BaseModel): 7 | query: str = Field(..., description="The thing to do nothing about") 8 | 9 | 10 | class NoopPack(Pack): 11 | name = "noop_pack" 12 | description = "Does nothing" 13 | categories = ["Nothingness"] 14 | args_schema = NoopArgs 15 | 16 | def _run(self, query: str): 17 | return f"noop: {query}" 18 | 19 | async def _arun(self, query: str): 20 | return self.run(query=query) 21 
| -------------------------------------------------------------------------------- /autopack/pack_response.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | 3 | from dataclasses_json import dataclass_json 4 | 5 | 6 | @dataclass_json 7 | @dataclass 8 | class PackResponse: 9 | """Class to store metadata about a (possibly uninstalled) pack""" 10 | 11 | pack_id: str 12 | package_path: str 13 | class_name: str 14 | repo_url: str 15 | name: str 16 | description: str 17 | dependencies: list[str] = field(default_factory=list) 18 | run_args: dict[str, dict[str, str]] = field(default_factory=list) 19 | categories: list[str] = field(default_factory=list) 20 | -------------------------------------------------------------------------------- /tests/test_init_langchain_tool.py: -------------------------------------------------------------------------------- 1 | from langchain.agents import initialize_agent, AgentType 2 | from langchain.chat_models import ChatOpenAI 3 | 4 | from tests.data.packs.noop import NoopPack 5 | 6 | 7 | def test_init_langchain_tool(): 8 | pack = NoopPack() 9 | tool = pack.init_langchain_tool() 10 | llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k-0613") 11 | 12 | agent = initialize_agent( 13 | [tool], 14 | llm, 15 | agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, 16 | verbose=True, 17 | ) 18 | assert len(agent.tools) == 1 19 | assert agent.tools[0].name == "noop_pack" 20 | -------------------------------------------------------------------------------- /tests/data/packs/summarization_pack.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field 2 | 3 | from autopack import Pack 4 | 5 | 6 | class SummarizationArgs(BaseModel): 7 | text: str = Field(..., description="The text to summarize") 8 | 9 | 10 | class SummarizationPack(Pack): 11 | name = "text_summarization" 12 | description 
= "Summarizes the given text" 13 | categories = ["Text"] 14 | args_schema = SummarizationArgs 15 | 16 | def _run(self, text: str) -> str: 17 | return self.call_llm(f"Summarize the following text:\n{text}") 18 | 19 | async def _arun(self, text: str) -> str: 20 | return await self.acall_llm(f"Summarize the following text:\n{text}") 21 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 13 | -------------------------------------------------------------------------------- /tests/end_to_end/test_selection.py: -------------------------------------------------------------------------------- 1 | from langchain.chat_models import ChatOpenAI 2 | 3 | 4 | def test_select_packs(): 5 | from autopack.get_pack import get_all_pack_info 6 | from autopack.installation import install_pack 7 | from autopack.selection import select_packs 8 | 9 | for pack_data in get_all_pack_info(): 10 | install_pack(pack_data.pack_id) 11 | 12 | llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k-0613") 13 | packs = select_packs( 14 | "Put my current OS version, OS name, and free disk space into a file called my_computer.txt", 15 | llm=llm, 16 | ) 17 | 18 | actual_names = 
[pack.name.split("/")[-1] for pack in packs] 19 | for expected in ["os_name_and_version", "disk_usage"]: 20 | assert expected in actual_names 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug, help wanted 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Version [e.g. 22] 29 | 30 | **Additional context** 31 | Add any other context about the problem here. 
32 | -------------------------------------------------------------------------------- /tests/test_pack_config.py: -------------------------------------------------------------------------------- 1 | from autopack.filesystem_emulation.ram_file_manager import RAMFileManager 2 | from autopack.filesystem_emulation.workspace_file_manager import WorkspaceFileManager 3 | from autopack.pack_config import PackConfig 4 | 5 | 6 | def test_create(): 7 | config = PackConfig.create() 8 | assert isinstance(config, PackConfig) 9 | assert isinstance(config.filesystem_manager, WorkspaceFileManager) 10 | 11 | 12 | def test_global_config(): 13 | config = PackConfig.create(workspace_path="asdf") 14 | PackConfig.set_global_config(config) 15 | assert PackConfig.global_config().workspace_path == config.workspace_path 16 | 17 | 18 | def test_override_filesystem_manager(): 19 | config = PackConfig.create() 20 | config.init_filesystem_manager(RAMFileManager) 21 | assert isinstance(config.filesystem_manager, RAMFileManager) 22 | -------------------------------------------------------------------------------- /autopack/langchain_wrapper.py: -------------------------------------------------------------------------------- 1 | from langchain.tools import BaseTool 2 | from pydantic import Field 3 | 4 | from autopack import Pack 5 | 6 | 7 | class LangchainWrapper(BaseTool): 8 | """Thin wrapper around a Pack to allow it to be a LangChain-compatible Tool""" 9 | 10 | pack: Pack = Field(..., description="The AutoPack Pack") 11 | 12 | def __init__(self, **kwargs): 13 | pack = kwargs.get("pack") 14 | kwargs["name"] = pack.name 15 | kwargs["description"] = pack.description 16 | kwargs["args_schema"] = pack.args_schema 17 | super().__init__(**kwargs) 18 | 19 | def _run(self, *args, **kwargs): 20 | return self.pack.run(*args, **kwargs) 21 | 22 | async def _arun(self, *args, **kwargs): 23 | return self.pack.arun(*args, **kwargs) 24 | 25 | def is_single_input(self) -> bool: 26 | return False 27 | 
-------------------------------------------------------------------------------- /tests/test_pack_use.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from tests.data.packs.noop import NoopPack 4 | from tests.data.packs.summarization_pack import SummarizationPack 5 | 6 | 7 | def test_sync_llm(): 8 | def mock_llm(text_in: str): 9 | return text_in.upper() 10 | 11 | pack = SummarizationPack(llm=mock_llm) 12 | 13 | assert pack.run(text="some text") == "SUMMARIZE THE FOLLOWING TEXT:\nSOME TEXT" 14 | 15 | 16 | @pytest.mark.asyncio 17 | async def test_async_llm(): 18 | async def mock_allm(text_in: str): 19 | return text_in.upper() + "!" 20 | 21 | pack = SummarizationPack(allm=mock_allm) 22 | 23 | assert await pack.arun(text="some text") == "SUMMARIZE THE FOLLOWING TEXT:\nSOME TEXT!" 24 | 25 | 26 | def test_noop(): 27 | assert NoopPack().run(query="some query") == "noop: some query" 28 | 29 | 30 | @pytest.mark.asyncio 31 | async def test_async_noop(): 32 | assert await NoopPack().arun(query="some query") == "noop: some query" 33 | -------------------------------------------------------------------------------- /.github/rename_project.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | while getopts a:n:u:d: flag 3 | do 4 | case "${flag}" in 5 | a) author=${OPTARG};; 6 | n) name=${OPTARG};; 7 | u) urlname=${OPTARG};; 8 | d) description=${OPTARG};; 9 | esac 10 | done 11 | 12 | echo "Author: $author"; 13 | echo "Project Name: $name"; 14 | echo "Project URL name: $urlname"; 15 | echo "Description: $description"; 16 | 17 | echo "Renaming project..." 18 | 19 | original_author="Orbital-Tools" 20 | original_name="orbital_tools" 21 | original_urlname="orbital-tools" 22 | original_description="Awesome orbital_tools created by Orbital-Tools" 23 | # for filename in $(find . 
-name "*.*") 24 | for filename in $(git ls-files) 25 | do 26 | sed -i "s/$original_author/$author/g" $filename 27 | sed -i "s/$original_name/$name/g" $filename 28 | sed -i "s/$original_urlname/$urlname/g" $filename 29 | sed -i "s/$original_description/$description/g" $filename 30 | echo "Renamed $filename" 31 | done 32 | 33 | mv autopack $name 34 | 35 | # This command runs only once on GHA! 36 | rm -rf .github/template.yml 37 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import sys 4 | 5 | import pytest 6 | from dotenv import load_dotenv 7 | 8 | 9 | # each test runs on cwd to its temp dir 10 | @pytest.fixture(autouse=True) 11 | def go_to_tmpdir(request): 12 | # Get the fixture dynamically by its name. 13 | tmpdir = request.getfixturevalue("tmpdir") 14 | # ensure local test created packages can be imported 15 | sys.path.insert(0, str(tmpdir)) 16 | # Chdir only for the duration of the test. 
17 | with tmpdir.as_cwd(): 18 | yield 19 | 20 | 21 | @pytest.fixture(autouse=True, scope="session") 22 | def load_env(): 23 | load_dotenv() 24 | 25 | 26 | @pytest.fixture(autouse=True) 27 | def setup_autopack_dir(go_to_tmpdir): 28 | source_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data", "dot_autopack") 29 | destination_dir = ".autopack" 30 | 31 | if os.path.isdir(destination_dir): 32 | shutil.rmtree(destination_dir) 33 | shutil.copytree(source_dir, destination_dir) 34 | 35 | 36 | @pytest.fixture 37 | def mock_requests_get(mocker): 38 | return mocker.patch("requests.get") 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) [year] [fullname] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /autopack/cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from autopack.installation import install_pack 4 | from autopack.search import print_search 5 | 6 | 7 | def parse_args(): 8 | parser = argparse.ArgumentParser(description="AutoPack CLI tool") 9 | 10 | subparsers = parser.add_subparsers(dest="command") 11 | install_parser = subparsers.add_parser("install", help="Install a pack") 12 | install_parser.add_argument("pack", help="ID of the pack to install") 13 | 14 | search_parser = subparsers.add_parser("search", help="Search for packs") 15 | search_parser.add_argument("query", help="The search query") 16 | 17 | parser.add_argument( 18 | "-f", 19 | "--force", 20 | help="Force automatic dependency installation", 21 | action="store_true", 22 | ) 23 | 24 | return parser.parse_args() 25 | 26 | 27 | def main(): 28 | args = parse_args() 29 | 30 | if args.command == "install": 31 | result = install_pack(args.pack, args.force) 32 | if result: 33 | print("Installation completed") 34 | else: 35 | print("Installation failed") 36 | 37 | if args.command == "search": 38 | print_search(args.query) 39 | 40 | 41 | if __name__ == "__main__": 42 | main() 43 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "autopack-tools" 3 | version = "0.4.6" 4 | repository = "https://github.com/AutoPackAI/autopack" 5 | homepage = "https://autopack.ai" 6 | description = "Package Manager for AI Agent tools" 7 | authors = ["Erik Peterson "] 8 | license = "MIT" 9 | readme = "README.md" 10 | packages = [{ include = "autopack" }] 11 | 12 | [tool.poetry.scripts] 13 | autopack = 'autopack.cli:main' 14 | 15 | [tool.poetry.dependencies] 16 | python = ">=3.8.1,<4.0" 17 | requests = "^2.31.0" 18 | 
dataclasses-json = "^0.5.8" 19 | urllib3 = "^1.26.16" 20 | gitpython = "^3.1.31" 21 | langchain = ">=0.0.215" 22 | types-requests = "^2.31.0.2" 23 | aiofiles = "^23.1.0" 24 | 25 | [tool.poetry.group.dev.dependencies] 26 | black = "^23.3.0" 27 | flake8 = "^6.0.0" 28 | mypy = "^1.4.0" 29 | isort = "^5.12.0" 30 | pytest = "^7.3.2" 31 | pytest-mock = "^3.11.1" 32 | types-requests = "^2.31.0.1" 33 | psycopg2 = "^2.9.6" 34 | autoflake = "^2.2.0" 35 | types-psycopg2 = "^2.9.21.10" 36 | pipreqs = "^0.4.13" 37 | gitpython = "^3.1.31" 38 | pytest-asyncio = "^0.21.1" 39 | openai = "^0.27.8" 40 | 41 | [build-system] 42 | requires = ["poetry-core"] 43 | build-backend = "poetry.core.masonry.api" 44 | 45 | [tool.ruff] 46 | line-length = 120 47 | 48 | [tool.black] 49 | line-length = 120 50 | 51 | [flake8] 52 | max-line-length = 120 -------------------------------------------------------------------------------- /autopack/filesystem_emulation/file_manager.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import TYPE_CHECKING 3 | 4 | if TYPE_CHECKING: 5 | from autopack.pack_config import PackConfig 6 | 7 | 8 | class FileManager(ABC): 9 | # A few Packs will use poetry inside of the workspace, and the AI gets hella confused when these files are present. 
10 | IGNORE_FILES = ["pyproject.toml", "poetry.lock"] 11 | 12 | def __init__(self, config: "PackConfig" = None): 13 | from autopack.pack_config import PackConfig 14 | 15 | self.config = config or PackConfig.global_config() 16 | 17 | @abstractmethod 18 | def read_file(self, file_path: str) -> str: 19 | pass 20 | 21 | @abstractmethod 22 | def aread_file(self, file_path: str) -> str: 23 | pass 24 | 25 | @abstractmethod 26 | def write_file(self, file_path: str, content: str) -> str: 27 | pass 28 | 29 | @abstractmethod 30 | async def awrite_file(self, file_path: str, content: str) -> str: 31 | pass 32 | 33 | @abstractmethod 34 | def delete_file(self, file_path: str) -> str: 35 | pass 36 | 37 | @abstractmethod 38 | def adelete_file(self, file_path: str) -> str: 39 | pass 40 | 41 | @abstractmethod 42 | def list_files(self, dir_path: str) -> str: 43 | pass 44 | 45 | @abstractmethod 46 | def alist_files(self, dir_path: str) -> str: 47 | pass 48 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package 2 | 3 | on: 4 | push: 5 | # Sequence of patterns matched against refs/tags 6 | tags: 7 | - '*' # Push events to matching v*, i.e. 
v1.0, v20.15.10 8 | 9 | # Allows you to run this workflow manually from the Actions tab 10 | workflow_dispatch: 11 | 12 | jobs: 13 | release: 14 | name: Create Release 15 | runs-on: ubuntu-latest 16 | permissions: 17 | contents: write 18 | steps: 19 | - uses: actions/checkout@v3 20 | with: 21 | # by default, it uses a depth of 1 22 | # this fetches all history so that we can read each commit 23 | fetch-depth: 0 24 | - name: Generate Changelog 25 | run: .github/release_message.sh > release_message.md 26 | - name: Release 27 | uses: softprops/action-gh-release@v1 28 | with: 29 | body_path: release_message.md 30 | 31 | deploy: 32 | needs: release 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v3 36 | - name: Set up Python 37 | uses: actions/setup-python@v4 38 | with: 39 | python-version: '3.x' 40 | - name: Install dependencies 41 | run: | 42 | python -m pip install --upgrade pip 43 | pip install setuptools wheel twine 44 | - name: Build and publish 45 | env: 46 | TWINE_USERNAME: __token__ 47 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} 48 | run: | 49 | python setup.py sdist bdist_wheel 50 | twine upload dist/* 51 | -------------------------------------------------------------------------------- /.github/workflows/rename_project.yml: -------------------------------------------------------------------------------- 1 | name: Rename the project from template 2 | 3 | on: [push] 4 | 5 | permissions: write-all 6 | 7 | jobs: 8 | rename-project: 9 | if: ${{ !contains (github.repository, '/python-project-template') }} 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | with: 14 | # by default, it uses a depth of 1 15 | # this fetches all history so that we can read each commit 16 | fetch-depth: 0 17 | ref: ${{ github.head_ref }} 18 | 19 | - run: echo "REPOSITORY_NAME=$(echo '${{ github.repository }}' | awk -F '/' '{print $2}' | tr '-' '_' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV 20 | shell: bash 21 | 22 | - run: echo 
"REPOSITORY_URLNAME=$(echo '${{ github.repository }}' | awk -F '/' '{print $2}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - run: echo "REPOSITORY_OWNER=$(echo '${{ github.repository }}' | awk -F '/' '{print $1}')" >> $GITHUB_ENV 26 | shell: bash 27 | 28 | - name: Is this still a template 29 | id: is_template 30 | run: echo "::set-output name=is_template::$(ls .github/template.yml &> /dev/null && echo true || echo false)" 31 | 32 | - name: Rename the project 33 | if: steps.is_template.outputs.is_template == 'true' 34 | run: | 35 | echo "Renaming the project with -a(author) ${{ env.REPOSITORY_OWNER }} -n(name) ${{ env.REPOSITORY_NAME }} -u(urlname) ${{ env.REPOSITORY_URLNAME }}" 36 | .github/rename_project.sh -a ${{ env.REPOSITORY_OWNER }} -n ${{ env.REPOSITORY_NAME }} -u ${{ env.REPOSITORY_URLNAME }} -d "Awesome ${{ env.REPOSITORY_NAME }} created by ${{ env.REPOSITORY_OWNER }}" 37 | 38 | - uses: stefanzweifel/git-auto-commit-action@v4 39 | with: 40 | commit_message: "✅ Ready to clone and code." 41 | # commit_options: '--amend --no-edit' 42 | push_options: --force 43 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AutoPack 2 | 3 | AutoPack is a Python library and CLI designed to interact with the [AutoPack repository](https://autopack.ai) 4 | repository, a collection of tools for AI. It is designed to be agent-neutral with a simple interface. 5 | 6 | ## Installation 7 | 8 | You can install AutoPack using pip: 9 | 10 | ```bash 11 | pip install autopack-tools 12 | ``` 13 | 14 | ## Usage 15 | 16 | AutoPack provides both a CLI and a Python library for interacting with the AutoPack repository. 
17 | 18 | ### CLI: `autopack` 19 | 20 | - Search for Packs: `autopack search {query}` 21 | - Install Packs: `autopack install {Pack ID}` 22 | 23 | ### Python library: `autopack` 24 | 25 | The `autopack` Python library allows you to work with Packs programmatically. Key functionalities include: 26 | 27 | - Search for Packs: `pack_search(query)` 28 | - Get a Pack: `get_pack(pack_id)` 29 | - Get all installed Packs: `get_all_installed_packs()` 30 | - Install a Pack: `install_pack(pack_id)` 31 | - Select packs using an LLM: `select_packs(task_description, llm)` 32 | 33 | For detailed examples and more information, refer to 34 | the [AutoPack documentation](https://github.com/AutoPackAI/autopack/wiki). 35 | 36 | ## Contributing 37 | 38 | We welcome contributions to the AutoPack ecosystem. Here are some ways you can help: 39 | 40 | - **Create new tools!** Expand the AutoPack repository by developing and submitting your own tools. Share your ideas and 41 | solutions with the AutoPack community. 42 | - **Try it out for yourself**: Test AutoPack in your projects and provide feedback. Share your experiences, report bugs, 43 | and suggest improvements by opening issues on GitHub. 44 | - **Contribute code**: Help improve AutoPack by opening pull requests. You can choose to work on unresolved issues or 45 | implement new features that you believe would enhance the functionality of the library. Please note that the AutoPack 46 | library is intentionally designed to be compact and straightforward. 47 | 48 | We appreciate your contributions and look forward to your involvement in making AutoPack a vibrant and valuable resource 49 | for the autonomous AI community. 50 | 51 | ## License 52 | 53 | AutoPack is released under the [MIT License](https://opensource.org/licenses/MIT). 
-------------------------------------------------------------------------------- /autopack/prompts.py: -------------------------------------------------------------------------------- 1 | TOOL_SELECTION_TEMPLATE = """As the AI Tool Selector your responsibility is to identify functions (tools) that could be useful for an autonomous AI agent to accomplish a given task. 2 | 3 | Analyze the task and available functions, and determine which functions could be useful. Consider functions that can achieve the goal directly or indirectly, in combination with other tools. 4 | 5 | When a task involves coding, prioritize tools that can execute code. Also, keep an eye on functions that, even if not explicitly coding-related, may indirectly aid in achieving the task objectives when combined with other tools. 6 | 7 | Only recommend programming functions if the task explicitly requires programming. 8 | 9 | Task: 10 | Your original task, given by the human, is: 11 | {task} 12 | 13 | Available functions: 14 | You may only recommend functions from the following list: 15 | {functions}. 16 | 17 | Please respond with a comma-separated list of function names, excluding parentheses and arguments. Do not include any other explanatory text. 18 | 19 | By providing more flexibility in the selection and emphasizing the consideration of alternative functions, we can ensure a wider range of function recommendations that align with the given task. 20 | """ 21 | 22 | GET_MORE_TOOLS_TEMPLATE = """As the AI Tool Selector your responsibility is to identify functions (tools) that could be useful for an autonomous AI agent to accomplish a given task. 23 | 24 | Analyze the functions request and determine which functions could be useful. Consider functions that can achieve the goal directly or indirectly, in combination with other tools. 25 | 26 | Only recommend programming functions if the task explicitly requires programming. 
27 | 28 | Functions request: 29 | The Autonomous AI has made this request for more tools: {functions_request} 30 | 31 | Task: 32 | Your original task, given by the human, is: 33 | {task} 34 | 35 | Available functions: 36 | You may only recommend functions from the following list: 37 | {functions}. 38 | 39 | Please respond with a comma-separated list of function names, excluding parentheses and arguments. Do not include any other explanatory text. 40 | 41 | By providing more flexibility in the selection and emphasizing the consideration of alternative functions, we can ensure a wider range of function recommendations that align with the given task. 42 | """ 43 | -------------------------------------------------------------------------------- /.github/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | overwrite_template_dir=0 3 | 4 | while getopts t:o flag 5 | do 6 | case "${flag}" in 7 | t) template=${OPTARG};; 8 | o) overwrite_template_dir=1;; 9 | esac 10 | done 11 | 12 | if [ -z "${template}" ]; then 13 | echo "Available templates: flask" 14 | read -p "Enter template name: " template 15 | fi 16 | 17 | repo_urlname=$(basename -s .git `git config --get remote.origin.url`) 18 | repo_name=$(basename -s .git `git config --get remote.origin.url` | tr '-' '_' | tr '[:upper:]' '[:lower:]') 19 | repo_owner=$(git config --get remote.origin.url | awk -F ':' '{print $2}' | awk -F '/' '{print $1}') 20 | echo "Repo name: ${repo_name}" 21 | echo "Repo owner: ${repo_owner}" 22 | echo "Repo urlname: ${repo_urlname}" 23 | 24 | if [ -f ".github/workflows/rename_project.yml" ]; then 25 | .github/rename_project.sh -a "${repo_owner}" -n "${repo_name}" -u "${repo_urlname}" -d "Awesome ${repo_name} created by ${repo_owner}" 26 | fi 27 | 28 | function download_template { 29 | rm -rf "${template_dir}" 30 | mkdir -p .github/templates 31 | git clone "${template_url}" "${template_dir}" 32 | } 33 | 34 | echo "Using 
template_url="https://github.com/rochacbruno/${template}-project-template"
template_dir=".github/templates/${template}"
if [ -d "${template_dir}" ]; then
    # Template directory already exists
    if [ "${overwrite_template_dir}" -eq 1 ]; then
        # user passed -o flag, delete and re-download
        echo "Overwriting ${template_dir}"
        download_template
    else
        # Ask user if they want to overwrite
        echo "Directory ${template_dir} already exists."
        read -p "Do you want to overwrite it? [y/N] " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            echo "Overwriting ${template_dir}"
            download_template
        else
            # User decided not to overwrite
            echo "Using existing ${template_dir}"
        fi
    fi
else
    # Template directory does not exist, download it
    echo "Downloading ${template_url}"
    download_template
fi

# Fix: removed a stray `}` that trailed this echo and was printed literally.
echo "Applying ${template} template to this project"
# Quote the script path so template names containing spaces don't word-split.
"./.github/templates/${template}/apply.sh" -a "${repo_owner}" -n "${repo_name}" -u "${repo_urlname}" -d "Awesome ${repo_name} created by ${repo_owner}"

# echo "Removing temporary template files"
# rm -rf .github/templates/${template}
review, commit and push the changes" 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | scripts/ 3 | tmp/ 4 | .autopack 5 | 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | pip-wheel-metadata/ 29 | share/python-wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | MANIFEST 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 38 | *.manifest 39 | *.spec 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Unit test / coverage reports 46 | htmlcov/ 47 | .tox/ 48 | .nox/ 49 | .coverage 50 | .coverage.* 51 | .cache 52 | nosetests.xml 53 | coverage.xml 54 | *.cover 55 | *.py,cover 56 | .hypothesis/ 57 | .pytest_cache/ 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | local_settings.py 66 | db.sqlite3 67 | db.sqlite3-journal 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # IPython 86 | profile_default/ 87 | ipython_config.py 88 | 89 | # pyenv 90 | .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 100 | __pypackages__/ 101 | 102 | # Celery stuff 103 | celerybeat-schedule 104 | celerybeat.pid 105 | 106 | # SageMath parsed files 107 | *.sage.py 108 | 109 | # Environments 110 | .env 111 | .venv 112 | env/ 113 | venv/ 114 | ENV/ 115 | env.bak/ 116 | venv.bak/ 117 | 118 | # Spyder project settings 119 | .spyderproject 120 | .spyproject 121 | 122 | # Rope project settings 123 | .ropeproject 124 | 125 | # mkdocs documentation 126 | /site 127 | 128 | # mypy 129 | .mypy_cache/ 130 | .dmypy.json 131 | dmypy.json 132 | 133 | # Pyre type checker 134 | .pyre/ 135 | 136 | # templates 137 | .github/templates/* 138 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: CI 4 | 5 | # Controls when the workflow will run 6 | on: 7 | # Triggers the workflow on push or pull request events but only for the main branch 8 | push: 9 | branches: [ main ] 10 | pull_request: 11 | branches: [ main ] 12 | 13 | # Allows you to run this workflow manually from the Actions tab 14 | workflow_dispatch: 15 | 16 | jobs: 17 | linter: 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python-version: [3.9] 22 | os: [ubuntu-latest] 23 | runs-on: ${{ matrix.os }} 24 | steps: 25 | - uses: actions/checkout@v3 26 | - uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | - name: Install project 30 | run: make install 31 | - name: Run linter 32 | run: make lint 33 | 34 | tests_linux: 35 | needs: linter 36 | strategy: 37 | 
fail-fast: false 38 | matrix: 39 | python-version: [3.9] 40 | os: [ubuntu-latest] 41 | runs-on: ${{ matrix.os }} 42 | steps: 43 | - uses: actions/checkout@v3 44 | - uses: actions/setup-python@v4 45 | with: 46 | python-version: ${{ matrix.python-version }} 47 | - name: Install project 48 | run: make install 49 | - name: Run tests 50 | run: make test 51 | - name: "Upload coverage to Codecov" 52 | uses: codecov/codecov-action@v3 53 | # with: 54 | # fail_ci_if_error: true 55 | 56 | tests_mac: 57 | needs: linter 58 | strategy: 59 | fail-fast: false 60 | matrix: 61 | python-version: [3.9] 62 | os: [macos-latest] 63 | runs-on: ${{ matrix.os }} 64 | steps: 65 | - uses: actions/checkout@v3 66 | - uses: actions/setup-python@v4 67 | with: 68 | python-version: ${{ matrix.python-version }} 69 | - name: Install project 70 | run: make install 71 | - name: Run tests 72 | run: make test 73 | 74 | tests_win: 75 | needs: linter 76 | strategy: 77 | fail-fast: false 78 | matrix: 79 | python-version: [3.9] 80 | os: [windows-latest] 81 | runs-on: ${{ matrix.os }} 82 | steps: 83 | - uses: actions/checkout@v3 84 | - uses: actions/setup-python@v4 85 | with: 86 | python-version: ${{ matrix.python-version }} 87 | - name: Install Pip 88 | run: pip install --user --upgrade pip 89 | - name: Install project 90 | run: pip install -e .[test] 91 | - name: run tests 92 | run: pytest -s -vvvv -l --tb=long tests 93 | -------------------------------------------------------------------------------- /autopack/api.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from json import JSONDecodeError 4 | from urllib.parse import urljoin 5 | 6 | import requests 7 | from marshmallow import ValidationError 8 | 9 | from autopack.errors import AutoPackFetchError 10 | from autopack.pack_config import PackConfig 11 | from autopack.pack_response import PackResponse 12 | from autopack.utils import find_or_create_autopack_dir 13 | 14 | 15 | def 
def get_pack_details(pack_id: str, remote=False) -> PackResponse:
    """Fetch a pack's metadata, either from the local metadata file or the remote API.

    Args:
        pack_id (str): The ID of the pack to look up.
        remote (bool, Optional): If True, query the AutoPack API; otherwise read local metadata.

    Returns:
        PackResponse: The pack's metadata.

    Raises:
        AutoPackFetchError: If the metadata could not be found or parsed.
    """
    if remote:
        return get_pack_details_remotely(pack_id)

    return get_pack_details_locally(pack_id)


def get_pack_details_locally(pack_id: str) -> PackResponse:
    """Read a pack's metadata from the local `pack_metadata.json` file in the autopack dir.

    Raises:
        AutoPackFetchError: If the metadata file is missing, unparseable, or lacks this pack.
    """
    metadata_dir = find_or_create_autopack_dir()

    metadata_file = os.path.join(metadata_dir, "pack_metadata.json")

    if not os.path.exists(metadata_file):
        raise AutoPackFetchError(f"Metadata file does not exist, please install or re-install {pack_id}.")

    with open(metadata_file, "r") as f:
        try:
            metadata = json.load(f)
        except JSONDecodeError as e:
            # Fixed garbled message ("Could can't fetch locally")
            raise AutoPackFetchError(f"Couldn't fetch pack metadata locally. Please install or re-install {pack_id}. {e}")

    pack_metadata = metadata.get(pack_id)
    if not pack_metadata:
        # Fixed garbled message ("Could can't find pack locally")
        raise AutoPackFetchError(f"Couldn't find pack locally. Please install {pack_id}")

    return PackResponse(**pack_metadata)


def get_pack_details_remotely(pack_id: str) -> PackResponse:
    """Fetch a pack's metadata from the AutoPack API's /api/details endpoint.

    Raises:
        AutoPackFetchError: On any non-200 response or invalid payload.
    """
    endpoint = "/api/details"

    url = urljoin(PackConfig.global_config().api_url, endpoint)
    params = {"id": pack_id}

    response = requests.get(url, params=params)
    if response.status_code == 200:
        data = response.json()

        try:
            return PackResponse(**data)
        except (ValidationError, TypeError) as e:
            raise AutoPackFetchError(f"Pack fetch received invalid data: {e}")

    # The original `elif status <= 500 / else` branches raised identical errors; collapsed.
    raise AutoPackFetchError(f"Error: {response.status_code}")


def pack_search(query: str) -> list[PackResponse]:
    """Search the AutoPack API for packs matching `query` (empty query returns all packs).

    Returns:
        list[PackResponse]: Matching packs. Returns [] for non-200 responses up to 500.

    Raises:
        AutoPackFetchError: On invalid payloads or status codes above 500.
    """
    endpoint = "/api/search"
    url = urljoin(PackConfig.global_config().api_url, endpoint)
    params = {"query": query}

    response = requests.get(url, params=params)
    if response.status_code == 200:
        data = response.json()

        try:
            return [PackResponse(**datum) for datum in data["packs"]]
        except (ValidationError, TypeError) as e:
            message = f"Pack fetch received invalid data: {e}"
            print(message)
            raise AutoPackFetchError(message)

    # NOTE(review): `<= 500` swallows a 500 server error into an empty result; presumably
    # only client errors (< 500) were meant to be non-fatal — confirm before changing.
    if response.status_code <= 500:
        print(f"Error: {response.status_code}")
        return []

    error_message = f"Error: {response.status_code}"
    print(error_message)
    raise AutoPackFetchError(error_message)
def get_all_pack_info():
    """Return PackResponse metadata for every pack known to the AutoPack API.

    Results are cached on disk (`pack_info_cache.json` in the autopack dir) for one hour
    to avoid repeated network round-trips.

    Returns:
        list[PackResponse]: Metadata for all known packs.
    """
    # Removed needless f-string around a constant filename.
    cache_file = os.path.join(find_or_create_autopack_dir(), "pack_info_cache.json")
    cache_is_fresh = os.path.exists(cache_file) and datetime.now() - datetime.fromtimestamp(
        os.path.getmtime(cache_file)
    ) < timedelta(hours=1)
    if cache_is_fresh:
        with open(cache_file, "r") as f:
            results = json.load(f)
        return [PackResponse(**result) for result in results]

    # Cache missing or stale: refresh from the API (empty query returns everything).
    results = pack_search("")
    with open(cache_file, "w") as f:
        json.dump([result.__dict__ for result in results], f)
    return results


def get_all_installed_packs():
    """Return the Pack classes for every pack recorded in the local metadata file."""
    metadata = load_metadata_file()
    pack_ids = list(metadata.keys())
    return try_get_packs(pack_ids, remote=False)


def try_get_packs(pack_ids: list[str], remote=False) -> list[type[Pack]]:
    """
    Get a list of packs based on their IDs, silently skipping any that fail to load.

    Args:
        pack_ids (list[str]): The IDs of the packs to fetch.
        remote (bool, Optional): If True, will make network requests to fetch pack metadata

    Returns:
        list[Pack]: The successfully fetched packs
    """
    # (Docstring fix: removed a stale `quiet` parameter that this function never had.)
    packs = []
    for pack_id in pack_ids:
        pack = try_get_pack(pack_id, remote)
        if pack:
            packs.append(pack)

    return packs


def get_pack(pack_id: str, remote=False) -> type[Pack]:
    """
    Get a pack based on its ID.

    Args:
        pack_id (str): The ID of the pack to fetch.
        remote (bool, Optional): If True, will make network requests to fetch pack metadata

    Returns:
        Pack: The fetched pack

    Raises:
        AutoPackFetchError: If there was an error during the data fetch.
        AutoPackNotInstalledError: If the pack was found but not installed.
        AutoPackNotFoundError: If no pack matching that ID was found.
        AutoPackLoadError: If the pack was found but there was an error importing or finding the pack class.
    """
    pack_data = get_pack_details(pack_id, remote=remote)

    if not pack_data:
        raise AutoPackNotFoundError()

    return fetch_pack_object(pack_data)
This will either be set by:
    - A custom instantiated class or subclass, and then passed to the various autopack functions
    - A custom instantiated class or subclass, and then set as the global config
    - An automatically-configured global config based on environment variables and hard-coded defaults

    For each of these configuration settings there is a corresponding environment variable prefixed with `AUTOPACK_`,
    e.g.: AUTOPACK_FILESYSTEM_TYPE, AUTOPACK_WORKSPACE_PATH
    """

    # Process-wide default config; None until first set/read via set_global_config()/global_config().
    _global_config: ClassVar[Optional["PackConfig"]] = None
    # Filesystem emulation layer; populated lazily by init_filesystem_manager().
    filesystem_manager: Optional[FileManager] = None

    class Config:
        # pydantic BaseSettings: every field can be overridden by an AUTOPACK_-prefixed env var.
        env_prefix = "AUTOPACK_"

    workspace_path: str = Field(
        description="The directory for artifact storage and storage of working files.", default="workspace"
    )
    installer_style: InstallerStyle = Field(
        description="Style and permissiveness of Pack installation", default=InstallerStyle.automatic
    )
    restrict_code_execution: bool = Field(
        description="If True will signal to Packs that they should not execute code", default=False
    )
    api_url: str = Field(
        description="Scheme, hostname, and port of the AutoPack API you wish to use.", default="https://autopack.ai/"
    )
    # Not implemented yet
    local_packs: list[type["Pack"]] = Field(
        description="A list of local Pack classes that you wish to be included in the selection process",
        default_factory=list,
    )

    @classmethod
    def set_global_config(cls, config_obj: "PackConfig" = None) -> "PackConfig":
        """
        Set the default global config to either a PackConfig object of your choice, or `None` to use the default.
60 | """ 61 | cls._global_config = config_obj or cls() 62 | return cls._global_config 63 | 64 | @classmethod 65 | def global_config(cls) -> "PackConfig": 66 | return cls._global_config or cls.set_global_config() 67 | 68 | def init_filesystem_manager(self, file_manager: type["FileManager"] = None): 69 | if file_manager: 70 | # Override previous / set new manager 71 | self.filesystem_manager = file_manager(self) 72 | elif not self.filesystem_manager: 73 | # Default 74 | from autopack.filesystem_emulation.workspace_file_manager import WorkspaceFileManager 75 | 76 | self.filesystem_manager = WorkspaceFileManager(self) 77 | 78 | @classmethod 79 | def create(cls, **kwargs): 80 | instance = cls(**kwargs) 81 | instance.init_filesystem_manager() 82 | return instance 83 | 84 | @property 85 | def automatically_install_dependencies(self) -> bool: 86 | return self.installer_style == InstallerStyle.automatic 87 | -------------------------------------------------------------------------------- /autopack/filesystem_emulation/ram_file_manager.py: -------------------------------------------------------------------------------- 1 | from autopack.filesystem_emulation.file_manager import FileManager 2 | from autopack.pack_config import PackConfig 3 | 4 | 5 | class RAMFileManager(FileManager): 6 | """ 7 | This class emulates a filesystem in RAM, storing files in a dictionary where keys are file paths and values are 8 | file content. Recommended for sandboxing or for testing. 9 | """ 10 | 11 | def __init__(self, config: PackConfig = PackConfig.global_config()): 12 | super().__init__(config) 13 | self.files = {} 14 | 15 | def read_file(self, file_path: str) -> str: 16 | """Reads a file from the virtual file system in RAM. 17 | 18 | Args: 19 | file_path (str): The path to the file to be read. 20 | 21 | Returns: 22 | str: The content of the file. If the file does not exist, returns an error message. 
class RAMFileManager(FileManager):
    """
    This class emulates a filesystem in RAM, storing files in a dictionary where keys are file paths and values are
    file content. Recommended for sandboxing or for testing.
    """

    def __init__(self, config: PackConfig = None):
        # Bug fix: the old default (`config=PackConfig.global_config()`) was evaluated once at
        # import time, so a config installed later via `set_global_config` was silently ignored.
        # Resolve the global config at call time instead.
        super().__init__(config or PackConfig.global_config())
        # Maps file path -> file content; this dict IS the emulated filesystem.
        self.files = {}

    def read_file(self, file_path: str) -> str:
        """Reads a file from the virtual file system in RAM.

        Args:
            file_path (str): The path to the file to be read.

        Returns:
            str: The content of the file. If the file does not exist, returns an error message.
        """
        if file_path in self.files:
            return self.files[file_path]
        else:
            return "Error: File not found"

    async def aread_file(self, file_path: str) -> str:
        return self.read_file(file_path)

    def write_file(self, file_path: str, content: str) -> str:
        """Writes to a file in the virtual file system in RAM.

        Args:
            file_path (str): The path to the file to be written to.
            content (str): The content to be written to the file.

        Returns:
            str: A success message indicating the file was written.
        """
        self.files[file_path] = content
        return f"Successfully wrote {len(content.encode('utf-8'))} bytes to {file_path}"

    async def awrite_file(self, file_path: str, content: str) -> str:
        return self.write_file(file_path, content)

    def delete_file(self, file_path: str) -> str:
        """Deletes a file from the virtual file system in RAM.

        Args:
            file_path (str): The path to the file to be deleted.

        Returns:
            str: A success message indicating the file was deleted. If the file does not exist, returns an error message.
        """
        if file_path in self.files:
            del self.files[file_path]
            return f"Successfully deleted file {file_path}."
        else:
            return f"Error: File not found '{file_path}'"

    async def adelete_file(self, file_path: str) -> str:
        return self.delete_file(file_path)

    def list_files(self, dir_path: str) -> str:
        """Lists all files in the specified directory in the virtual file system in RAM.

        Args:
            dir_path (str): The path to the directory to list files from.

        Returns:
            str: A list of all files in the directory. If the directory does not exist, returns an error message.
        """
        # For simplicity, let's assume that all keys in `self.files` are file paths, and to list files in a directory,
        # we just need to find all keys that start with `dir_path`.
        # NOTE: IGNORE_FILES is inherited from FileManager — presumably a list of bookkeeping
        # paths to hide from listings; confirm against the base class.
        files_in_dir = [
            file_path
            for file_path in self.files.keys()
            if file_path.startswith(dir_path) and file_path not in self.IGNORE_FILES
        ]
        if files_in_dir:
            return "\n".join(files_in_dir)
        else:
            return f"Error: No such directory {dir_path}."

    async def alist_files(self, dir_path: str) -> str:
        # Bug fix: this was a plain `def`, unlike every other `a*` wrapper in this class,
        # so `await manager.alist_files(...)` raised a TypeError.
        return self.list_files(dir_path)
def test_fetch_remote_pack_data_invalid_response(mock_requests_get, valid_pack_data):
    # Dropping a required field should make PackResponse construction fail and surface
    # to the caller as an AutoPackFetchError.
    valid_pack_data.pop("repo_url")

    mock_response = Mock()
    mock_response.status_code = 200
    mock_response.json.return_value = valid_pack_data
    mock_requests_get.return_value = mock_response

    with pytest.raises(AutoPackFetchError):
        get_pack_details("pack_id", remote=True)

    api_url = PackConfig.global_config().api_url
    mock_requests_get.assert_called_once_with(f"{api_url}api/details", params={"id": "pack_id"})


def test_fetch_remote_pack_data_error_response(mock_requests_get, valid_pack_data):
    # A 503 server error response should raise rather than return partial data.
    mock_response = Mock()
    mock_response.status_code = 503
    mock_requests_get.return_value = mock_response

    with pytest.raises(AutoPackFetchError):
        get_pack_details("pack_id", remote=True)

    api_url = PackConfig.global_config().api_url
    mock_requests_get.assert_called_once_with(f"{api_url}api/details", params={"id": "pack_id"})


def test_fetch_remote_pack_data_not_found_response(mock_requests_get, valid_pack_data):
    # An unknown pack (404) is also surfaced as an AutoPackFetchError.
    mock_response = Mock()
    mock_response.status_code = 404
    mock_requests_get.return_value = mock_response

    with pytest.raises(AutoPackFetchError):
        get_pack_details("pack_id", remote=True)

    api_url = PackConfig.global_config().api_url
    mock_requests_get.assert_called_once_with(f"{api_url}api/details", params={"id": "pack_id"})


def test_fetch_local_not_found(valid_pack_data):
    # With no local metadata file present, a local lookup must raise.
    with pytest.raises(AutoPackFetchError):
        get_pack_details("pack_id")
def is_dependency_installed(dependency: str) -> bool:
    """Return True if `dependency` is importable in the current environment."""
    try:
        importlib.import_module(dependency)
        return True
    except ImportError:
        return False


def install_dependency(dependency: str, quiet=True):
    """pip-install a single dependency.

    Args:
        dependency (str): The pip package name to install.
        quiet (bool, Optional): If True, suppress all console output (including failures).
    """
    try:
        subprocess.check_output(
            ["pip", "install", dependency],
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )
        if not quiet:
            print(f"{dependency} has been successfully installed.")
    except subprocess.CalledProcessError as e:
        # Failures are deliberately non-fatal; install_pack surfaces missing deps later.
        if not quiet:
            print(f"Installation of {dependency} failed with the following error:")
            print(e.output)


def ask_to_install_dependencies(dependencies: list[str], force=False, quiet=True):
    """Install any missing dependencies, prompting the user unless `force` is set.

    Args:
        dependencies (list[str]): Package names the pack requires.
        force (bool, Optional): If True, install without prompting.
        quiet (bool, Optional): If True, suppress console output.
    """
    for dependency in dependencies:
        if is_dependency_installed(dependency):
            continue

        if force:
            # Fix: propagate `quiet` — previously the default (quiet=True) was always used
            # here, so forced installs never reported success or failure even with quiet=False.
            install_dependency(dependency, quiet=quiet)
        else:
            if not quiet:
                print(f"This pack requires the dependency {dependency} to be installed. Continue?")
            agree = input("[Yn]")
            if agree.lower() == "y" or agree == "":
                install_dependency(dependency, quiet=quiet)
            elif not quiet:
                print(f"Skipping install of {dependency}")


def install_from_git(pack_data: PackResponse, quiet=True) -> str:
    """Clone (or update) the pack's git repo into the autopack directory.

    Returns:
        str: The local path the repo was cloned/pulled into.
    """
    autopack_dir = find_or_create_autopack_dir()

    url = pack_data.repo_url
    pack_path = os.path.join(autopack_dir, *extract_unique_directory_name(pack_data.repo_url).split("/"))

    if os.path.exists(pack_path):
        if not quiet:
            print("Repo already exists, pulling updates")
        Repo(pack_path).remotes.origin.pull()
    else:
        if not quiet:
            print(f"Cloning repo into {pack_path}")
        Repo.clone_from(url, pack_path)

    return pack_path


def update_metadata_file(pack_id: str, pack_response: PackResponse):
    """Record `pack_response` under `pack_id` in the local pack metadata file."""
    metadata = load_metadata_file()
    metadata[pack_id] = pack_response.__dict__

    write_metadata_file(metadata)


def _cleanup_failed_install(git_dir):
    """Remove a cloned repo directory so failed installs don't leave bad packs in .autopack."""
    if git_dir and os.path.isdir(git_dir):
        shutil.rmtree(git_dir)


def install_pack(pack_id: str, quiet=True, config: PackConfig = None) -> type[Pack]:
    """Fetch, clone, and register the pack identified by `pack_id`.

    Args:
        pack_id (str): The ID of the pack to install.
        quiet (bool, Optional): If True, suppress console output.
        config (PackConfig, Optional): Config to consult; defaults to the global config.
            (Fix: the default is now resolved at call time — the old
            `config=PackConfig.global_config()` default was bound once at import time,
            so later `set_global_config` calls were ignored.)

    Returns:
        type[Pack]: The installed pack class.

    Raises:
        AutoPackInstallationError: If the pack could not be fetched, cloned, or loaded.
    """
    if config is None:
        config = PackConfig.global_config()

    if not quiet:
        print(f"Installing pack: {pack_id}")

    find_or_create_autopack_dir()

    pack = try_get_pack(pack_id)
    if pack:
        if not quiet:
            print(f"Pack {pack_id} already installed.")
        return pack

    try:
        pack_data = get_pack_details(pack_id, remote=True)

        if not pack_data:
            raise AutoPackInstallationError("Could not find pack details")
    except Exception as e:
        # Collapsed the redundant `except AutoPackError` / `except BaseException` pair;
        # Exception (not BaseException) so KeyboardInterrupt/SystemExit still propagate.
        raise AutoPackInstallationError(f"Could not install pack {e}")

    git_dir = None
    try:
        git_dir = install_from_git(pack_data, quiet=quiet)

        update_metadata_file(pack_id, pack_data)
        pack = get_pack(pack_id)

        if pack:
            if pack.dependencies:
                ask_to_install_dependencies(
                    pack.dependencies, force=config.automatically_install_dependencies, quiet=quiet
                )
            return pack
    except Exception as e:
        # Fix: the stated "clean up bad repo directories" intent was unreachable on this
        # path — the re-raise skipped the cleanup below. Clean up before re-raising.
        _cleanup_failed_install(git_dir)
        raise AutoPackInstallationError(f"Couldn't install pack due to error {e}")

    # Clean up bad repo directories to make sure there aren't bad packs in the .autopack dir
    _cleanup_failed_install(git_dir)

    raise AutoPackInstallationError("Error: Installation completed but pack could still not be found.")
def select_packs_prompt(
    packs: list[Union[Pack, PackResponse]], task_description: str, function_request: Optional[str] = None
) -> str:
    """
    Generate a prompt for the pack selection process based on the task description and an optional function request.

    Args:
        packs: (list[Pack | PackResponse]): Packs to include in selection
        task_description (str): A description of the task to be used when selecting tools.
        function_request (Optional[str]): A specific type of function asked for (e.g. a `get_more_tools` function).

    Returns:
        str: A prompt that can be fed to the LLM for pack selection.
    """
    # Bug fix: the two branches were swapped. GET_MORE_TOOLS_TEMPLATE is the template that
    # embeds {functions_request}, so it must be used when a request is present; the plain
    # TOOL_SELECTION_TEMPLATE is for the initial selection with no specific request.
    # (Previously a missing request formatted "None" into the get-more-tools prompt.)
    if function_request:
        return GET_MORE_TOOLS_TEMPLATE.format(
            task=task_description,
            functions=functions_bulleted_list(packs),
            functions_request=function_request,
        )

    return TOOL_SELECTION_TEMPLATE.format(task=task_description, functions=functions_bulleted_list(packs))


def select_packs(
    task_description: str,
    llm: Union[BaseChatModel, Callable],
    function_request: Optional[str] = None,
    config: Optional[PackConfig] = None,
) -> list[type[Pack]]:
    """Given a user input describing the task they wish to accomplish, return a list of Pack classes that the given
    LLM thinks will be suitable for this task.

    This is good for a "pre-processing" step after receiving the task but before trying to solve it. This allows you
    to benefit from the wide tool selection while keeping your token usage low.

    You can then further filter, install the packs if desired, and then fetch them using get_pack().

    # TODO: Include user-provided packs into selection

    Args:
        task_description (str): A description of the task to be used when selecting tools
        llm (BaseChatModel): An LLM which will be used to evaluate the selection
        function_request (Optional[str]): A specific type of function asked for (e.g. a `get_more_tools` function)
        config (Optional[PackConfig]): Custom config to use; defaults to the global config.
            (Fix: the default is now resolved at call time — the old import-time-bound
            `PackConfig.global_config()` default ignored later `set_global_config` calls.)

    Returns:
        list[type[Pack]]: The selected Pack classes
    """
    config = config or PackConfig.global_config()

    if config.installer_style == InstallerStyle.manual:
        # Manual installs: only offer what is already on disk.
        selection_pool = get_all_installed_packs()
    else:
        selection_pool = get_all_pack_info()

    prompt = select_packs_prompt(selection_pool, task_description, function_request)

    response = call_llm(prompt, llm)

    return parse_selection_response(response)


def parse_selection_response(response: str) -> list[type[Pack]]:
    """
    Parse the response from the LLM and extract the corresponding installed Pack classes.

    The response is split by commas and newlines, and any arguments provided in the response are removed.

    Args:
        response (str): The response from the LLM.

    Returns:
        list[type[Pack]]: The installed Pack classes matching the names the LLM selected.
    """
    # Strip any "(arg: type)" suffixes the LLM may have echoed back.
    pack_names = [r.split("(")[0].strip() for r in re.split(r"(?<=\w),|\n", response)]

    installed_packs = get_all_installed_packs()
    selected_packs = []
    for pack_name in pack_names:
        match = next((pack for pack in installed_packs if pack.name == pack_name), None)
        if match is not None:
            selected_packs.append(match)
        # else: the selected pack is not installed; that error is handled elsewhere.

    return selected_packs
@patch("autopack.get_pack.get_pack_details")
def test_get_pack_success(mock_get_pack_details, pack_response_valid, installed_valid_pack):
    # Happy path: metadata resolves and the module is installed, so the class loads.
    pack_id = "noop_pack"
    mock_get_pack_details.return_value = pack_response_valid

    result = get_pack(pack_id)

    assert result == NoopPack
    mock_get_pack_details.assert_called_once_with(pack_id, remote=False)


@patch("autopack.get_pack.get_pack_details")
def test_get_pack_not_found(mock_get_pack_details):
    # Unknown pack ID: no metadata is returned, so the lookup raises.
    pack_id = "asdf"
    mock_get_pack_details.return_value = None

    with pytest.raises(AutoPackNotFoundError):
        get_pack(pack_id)

    mock_get_pack_details.assert_called_once_with(pack_id, remote=False)


@patch("autopack.get_pack.get_pack_details")
def test_get_pack_module_not_found(mock_get_pack_details, pack_response_invalid_path):
    # Metadata points at a package path that cannot be imported.
    pack_id = "invalidmodule"
    mock_get_pack_details.return_value = pack_response_invalid_path

    with pytest.raises(AutoPackLoadError):
        get_pack(pack_id)

    mock_get_pack_details.assert_called_once_with(pack_id, remote=False)


@patch("autopack.get_pack.get_pack_details")
def test_get_pack_invalid_class(mock_get_pack_details, pack_response_invalid_class):
    # Metadata names a class that does not exist in the imported module.
    pack_id = "invalidclass"
    mock_get_pack_details.return_value = pack_response_invalid_class

    with pytest.raises(AutoPackLoadError):
        get_pack(pack_id)

    mock_get_pack_details.assert_called_once_with(pack_id, remote=False)


@patch("autopack.get_pack.get_pack_details")
def test_try_get_pack_success(mock_get_pack_details, pack_response_valid, installed_valid_pack):
    # try_get_pack mirrors get_pack on the happy path.
    pack_id = "noop_pack"
    mock_get_pack_details.return_value = pack_response_valid

    # TODO: Why does this work without it being installed lol
    result = try_get_pack(pack_id)

    assert result == NoopPack
    mock_get_pack_details.assert_called_once_with(pack_id, remote=False)
@patch("autopack.get_pack.get_pack_details")
def test_try_get_pack_not_found(mock_get_pack_details):
    # try_get_pack swallows the not-found error and returns None instead of raising.
    pack_id = "some_author/my_packs/NoopPack"
    mock_get_pack_details.return_value = None

    assert try_get_pack(pack_id) is None

    mock_get_pack_details.assert_called_once_with(pack_id, remote=False)


@patch("autopack.get_pack.get_pack_details")
def test_try_get_packs_success(
    mock_get_pack_details,
    pack_response_valid,
    pack_response_invalid_class,
    installed_valid_pack,
):
    # One loadable response and one broken one: the broken pack is dropped silently.
    mock_get_pack_details.side_effect = [
        pack_response_valid,
        pack_response_invalid_class,
    ]

    valid_pack_id = "valid pack id"
    invalid_pack_id = "bogus pack id"
    results = try_get_packs([valid_pack_id, invalid_pack_id])

    assert len(results) == 1
    result = results[0]

    assert result == NoopPack
    mock_get_pack_details.assert_has_calls(
        [
            call(valid_pack_id, remote=False),
            call(invalid_pack_id, remote=False),
        ]
    )


@patch("autopack.get_pack.get_pack_details")
def test_try_get_all_installed_packs(mock_get_pack_details, pack_response_valid, installed_valid_pack):
    # Every install recorded in the metadata file should be loadable afterwards.
    mock_get_pack_details.return_value = pack_response_valid

    results = get_all_installed_packs()

    assert len(results) == 1
    result = results[0]

    assert result == NoopPack
    mock_get_pack_details.assert_called_with("", remote=False)
class Pack(BaseModel):
    """Base class for all AutoPack tools ("packs").

    Subclasses must define ``name`` and ``description`` as class variables and
    implement ``_run``/``_arun``. When ``args_schema`` is provided, arguments
    are validated against it before either entry point executes.
    """

    class Config:
        # Fields such as filesystem managers are not pydantic models.
        arbitrary_types_allowed = True

    ## Required

    # The name of the tool that will be provided to the LLM
    name: ClassVar[str]
    # The description of the tool which is passed to the LLM
    description: ClassVar[str]

    ## Optional

    # Any pip packages this Pack depends on.
    dependencies: ClassVar[Optional[list[str]]] = None
    # A list of Pack IDs needed for this tool to function effectively (e.g. `write_file` depends on `read_file`)
    depends_on: ClassVar[Optional[list[str]]] = None
    # Enhances tool selection by grouping this tool with other tools of the same category
    categories: ClassVar[Optional[list[str]]] = None
    # If this tool has side effects that cannot be undone (e.g. sending an email)
    reversible: ClassVar[bool] = True
    # A Pydantic BaseModel describing the Pack's run arguments
    args_schema: ClassVar[Optional[type[BaseModel]]] = None

    llm: Optional[Callable[[str], str]] = Field(
        None, description="A callable function to call an LLM (string in string out)"
    )
    allm: Union[None, Callable[[str], str], Coroutine[Any, Any, str]] = Field(
        None, description="An asynchronous callable function to call an LLM (string in string out)"
    )
    config: PackConfig = Field(default_factory=PackConfig.global_config)

    def __init__(self, **data):
        super().__init__(**data)
        if not self.config.filesystem_manager:
            self.config.init_filesystem_manager()

        if self.llm and not callable(self.llm):
            raise TypeError(f"LLM object {self.llm} must be callable")

        # Bug fix: this message previously interpolated ``self.llm`` instead of
        # the async LLM that actually failed validation.
        if self.allm and not iscoroutinefunction(self.allm):
            raise TypeError(f"Async LLM object {self.allm} must be async and callable")

        # getattr: a subclass that never declares these ClassVars would raise a
        # bare AttributeError on plain attribute access, hiding this clearer error.
        if getattr(self, "name", None) is None:
            raise TypeError(f"Class {self.__class__.__name__} must define 'name' as a class variable")
        if getattr(self, "description", None) is None:
            raise TypeError(f"Class {self.__class__.__name__} must define 'description' as a class variable")

    def run(self, *args, **kwargs) -> str:
        """Execute the _run function of the subclass, verifying the arguments. (Will eventually do
        callbacks or some such)

        Args:
            **kwargs (dict): The arguments to pass to _run. Each key should be the name of an
                argument, and the value should be the value of the argument.

        Returns:
            str: The response from the _run function of the subclass, or an error string when
                the arguments fail validation.
        """
        try:
            # Validate the arguments
            self.validate_tool_args(**kwargs)
        except ValidationError as e:
            # If a ValidationError is raised, the arguments are invalid
            error_list = ". ".join([f"{err['loc'][0]}: {err['msg']}" for err in e.errors()])
            return f"Error: Invalid arguments. Details: {error_list}"

        return self._run(*args, **kwargs)

    async def arun(self, *args, **kwargs):
        """Asynchronously execute the _arun function of the subclass, verifying the arguments.
        (Will eventually do callbacks or some such)

        Args:
            **kwargs (dict): The arguments to pass to _arun. Each key should be the name of an
                argument, and the value should be the value of the argument.

        Returns:
            str: The response from the _arun function of the subclass, or an error string when
                the arguments fail validation.
        """
        try:
            # Validate the arguments
            self.validate_tool_args(**kwargs)
        except ValidationError as e:
            # If a ValidationError is raised, the arguments are invalid
            error_list = ". ".join([f"{err['loc'][0]}: {err['msg']}" for err in e.errors()])
            return f"Error: Invalid arguments. Details: {error_list}"

        return await self._arun(*args, **kwargs)

    @abstractmethod
    def _run(self, *args, **kwargs):
        # Synchronous implementation supplied by the subclass.
        pass

    @abstractmethod
    def _arun(self, *args, **kwargs):
        # Asynchronous implementation supplied by the subclass.
        pass

    @property
    def args(self) -> dict:
        """Turn the args schema into a dict that's easier to work with"""
        if not self.args_schema:
            return {}
        return run_args_from_args_schema(self.args_schema)

    def call_llm(self, prompt: str) -> str:
        # Degrade gracefully when no LLM was injected.
        if self.llm is None:
            return "No LLM available, cannot proceed"
        return call_llm(prompt, self.llm)

    async def acall_llm(self, prompt: str) -> str:
        # Fall back to the synchronous LLM when no async one was injected.
        if self.allm is None:
            return self.call_llm(prompt)
        return await acall_llm(prompt, self.allm)

    @property
    def filesystem_manager(self) -> FileManager:
        # Convenience accessor for the manager held by the active config.
        return self.config.filesystem_manager

    def validate_tool_args(self, **kwargs):
        """Validate the arguments against the args_schema model.

        Args:
            kwargs (dict[str, Any]): The arguments to validate.
        Returns:
            True if the arguments are valid.
        Raises:
            ValidationError: If any arguments are invalid.
        """
        # Bug fix: ``args_schema`` is Optional (see the ``args`` property); calling
        # it unconditionally raised TypeError ("'NoneType' object is not callable")
        # for packs that declare no schema.
        if self.args_schema:
            self.args_schema(**kwargs)

        return True

    def init_langchain_tool(self):
        # Imported lazily to avoid a circular import with the wrapper module.
        from autopack.langchain_wrapper import LangchainWrapper

        return LangchainWrapper(pack=self)


# autopack/filesystem_emulation/filesystem_file_manager.py
import os
from pathlib import Path

import aiofiles

from autopack.filesystem_emulation.file_manager import FileManager
from autopack.pack_config import PackConfig


class FileSystemManager(FileManager):
    """
    This class provides unrestricted file operations on the local file system.
    """
13 | """ 14 | 15 | def __init__(self, config: PackConfig = PackConfig.global_config()): 16 | super().__init__(config) 17 | 18 | def read_file(self, file_path: str) -> str: 19 | """Reads a file from the local file system. 20 | 21 | Args: 22 | file_path (str): The absolute path to the file to be read. 23 | 24 | Returns: 25 | str: The content of the file. If the file does not exist, returns an error message. 26 | """ 27 | absolute_path = Path(file_path) 28 | if absolute_path.exists(): 29 | with open(absolute_path, "r") as file: 30 | return file.read() 31 | else: 32 | return "Error: File not found" 33 | 34 | async def aread_file(self, file_path: str) -> str: 35 | """Reads a file from the local file system asynchronously. 36 | 37 | Args: 38 | file_path (str): The absolute path to the file to be read. 39 | 40 | Returns: 41 | str: The content of the file. If the file does not exist, returns an error message. 42 | """ 43 | absolute_path = Path(file_path) 44 | if absolute_path.exists(): 45 | async with aiofiles.open(absolute_path, mode="r") as file: 46 | return await file.read() 47 | else: 48 | return "Error: File not found" 49 | 50 | def write_file(self, file_path: str, content: str) -> str: 51 | """Writes to a file on the local file system. 52 | 53 | Args: 54 | file_path (str): The absolute path to the file to be written to. 55 | content (str): The content to be written to the file. 56 | 57 | Returns: 58 | str: A success message indicating the file was written. 59 | """ 60 | absolute_path = Path(file_path) 61 | absolute_path.parent.mkdir(parents=True, exist_ok=True) 62 | 63 | with open(absolute_path, "w") as file: 64 | file.write(content) 65 | 66 | return f"Successfully wrote {len(content.encode('utf-8'))} bytes to {file_path}" 67 | 68 | async def awrite_file(self, file_path: str, content: str) -> str: 69 | """Writes to a file on the local file system. 70 | 71 | Args: 72 | file_path (str): The absolute path to the file to be written to. 
73 | content (str): The content to be written to the file. 74 | 75 | Returns: 76 | str: A success message indicating the file was written. 77 | """ 78 | absolute_path = Path(file_path) 79 | absolute_path.parent.mkdir(parents=True, exist_ok=True) 80 | 81 | async with aiofiles.open(absolute_path, "w") as file: 82 | await file.write(content) 83 | 84 | return f"Successfully wrote {len(content.encode('utf-8'))} bytes to {file_path}" 85 | 86 | def delete_file(self, file_path: str) -> str: 87 | """Deletes a file from the local file system. 88 | 89 | Args: 90 | file_path (str): The absolute path to the file to be deleted. 91 | 92 | Returns: 93 | str: A success message indicating the file was deleted. If the file does not exist, returns an error message. 94 | """ 95 | absolute_path = Path(file_path) 96 | if absolute_path.exists(): 97 | os.remove(absolute_path) 98 | return f"Successfully deleted file {file_path}." 99 | else: 100 | return f"Error: File not found '{file_path}'" 101 | 102 | async def adelete_file(self, file_path: str) -> str: 103 | """Deletes a file from the local file system asynchronously. 104 | 105 | Args: 106 | file_path (str): The absolute path to the file to be deleted. 107 | 108 | Returns: 109 | str: A success message indicating the file was deleted. If the file does not exist, returns an error message. 110 | """ 111 | absolute_path = Path(file_path) 112 | if absolute_path.exists(): 113 | os.remove(absolute_path) 114 | return f"Successfully deleted file {file_path}." 115 | else: 116 | return f"Error: File not found '{file_path}'" 117 | 118 | def list_files(self, dir_path: str) -> str: 119 | """Lists all files in the specified directory on the local file system. 120 | 121 | Args: 122 | dir_path (str): The absolute path to the directory to list files from. 123 | 124 | Returns: 125 | str: A list of all files in the directory. If the directory does not exist, returns an error message. 
126 | """ 127 | absolute_dir_path = Path(dir_path) 128 | if absolute_dir_path.exists() and absolute_dir_path.is_dir(): 129 | files_in_dir = absolute_dir_path.glob("*") 130 | return "\n".join(str(file) for file in files_in_dir if file not in self.IGNORE_FILES) 131 | else: 132 | return f"Error: No such directory {dir_path}." 133 | 134 | async def alist_files(self, dir_path: str) -> str: 135 | """Lists all files in the specified directory on the local file system asynchronously. 136 | 137 | Args: 138 | dir_path (str): The absolute path to the directory to list files from. 139 | 140 | Returns: 141 | str: A list of all files in the directory. If the directory does not exist, returns an error message. 142 | """ 143 | absolute_dir_path = Path(dir_path) 144 | if absolute_dir_path.exists() and absolute_dir_path.is_dir(): 145 | files_in_dir = absolute_dir_path.glob("*") 146 | return "\n".join(str(file) for file in files_in_dir if file not in self.IGNORE_FILES) 147 | else: 148 | return f"Error: No such directory {dir_path}." 149 | -------------------------------------------------------------------------------- /autopack/filesystem_emulation/workspace_file_manager.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | import aiofiles as aiofiles 5 | 6 | from autopack.filesystem_emulation.file_manager import FileManager 7 | from autopack.pack_config import PackConfig 8 | 9 | 10 | class WorkspaceFileManager(FileManager): 11 | """ 12 | This class provides file operations restricted to a workspace directory on the local file system. 13 | """ 14 | 15 | def __init__(self, config: PackConfig = PackConfig.global_config()): 16 | super().__init__(config) 17 | 18 | @property 19 | def workspace_dir(self) -> Path: 20 | return Path(self.config.workspace_path) 21 | 22 | def read_file(self, file_path: str) -> str: 23 | """Reads a file from the workspace directory on the local file system. 
24 | 25 | Args: 26 | file_path (str): The path to the file to be read, relative to the workspace directory. 27 | 28 | Returns: 29 | str: The content of the file. If the file does not exist, returns an error message. 30 | """ 31 | absolute_path = self.workspace_dir / file_path 32 | if absolute_path.exists(): 33 | with open(absolute_path, "r") as file: 34 | return file.read() 35 | else: 36 | return "Error: File not found" 37 | 38 | async def aread_file(self, file_path: str) -> str: 39 | """Reads a file from the workspace directory on the local file system asynchronously. 40 | 41 | Args: 42 | file_path (str): The path to the file to be read, relative to the workspace directory. 43 | 44 | Returns: 45 | str: The content of the file. If the file does not exist, returns an error message. 46 | """ 47 | absolute_path = self.workspace_dir / file_path 48 | if absolute_path.exists(): 49 | async with aiofiles.open(absolute_path, mode="r") as file: 50 | return await file.read() 51 | else: 52 | return "Error: File not found" 53 | 54 | def write_file(self, file_path: str, content: str) -> str: 55 | """Writes to a file in the workspace directory on the local file system. 56 | 57 | Args: 58 | file_path (str): The path to the file to be written to, relative to the workspace directory. 59 | content (str): The content to be written to the file. 60 | 61 | Returns: 62 | str: A success message indicating the file was written. 63 | """ 64 | absolute_path = self.workspace_dir / file_path 65 | absolute_path.parent.mkdir(parents=True, exist_ok=True) 66 | 67 | with open(absolute_path, "w+") as file: 68 | file.write(content) 69 | 70 | return f"Successfully wrote {len(content.encode('utf-8'))} bytes to {file_path}" 71 | 72 | async def awrite_file(self, file_path: str, content: str) -> str: 73 | """Writes to a file in the workspace directory on the local file system. 74 | 75 | Args: 76 | file_path (str): The path to the file to be written to, relative to the workspace directory. 
77 | content (str): The content to be written to the file. 78 | 79 | Returns: 80 | str: A success message indicating the file was written. 81 | """ 82 | absolute_path = self.workspace_dir / file_path 83 | absolute_path.parent.mkdir(parents=True, exist_ok=True) 84 | 85 | async with aiofiles.open(absolute_path, "w+") as file: 86 | await file.write(content) 87 | 88 | return f"Successfully wrote {len(content.encode('utf-8'))} bytes to {file_path}" 89 | 90 | def delete_file(self, file_path: str) -> str: 91 | """Deletes a file from the workspace directory on the local file system. 92 | 93 | Args: 94 | file_path (str): The path to the file to be deleted, relative to the workspace directory. 95 | 96 | Returns: 97 | str: A success message indicating the file was deleted. If the file does not exist, returns an error message. 98 | """ 99 | absolute_path = self.workspace_dir / file_path 100 | if absolute_path.exists(): 101 | os.remove(absolute_path) 102 | return f"Successfully deleted file {file_path}." 103 | else: 104 | return f"Error: File not found '{file_path}'" 105 | 106 | async def adelete_file(self, file_path: str) -> str: 107 | """Deletes a file from the workspace directory on the local file system asynchronously. 108 | 109 | Args: 110 | file_path (str): The path to the file to be deleted, relative to the workspace directory. 111 | 112 | Returns: 113 | str: A success message indicating the file was deleted. If the file does not exist, returns an error message. 114 | """ 115 | absolute_path = self.workspace_dir / file_path 116 | if absolute_path.exists(): 117 | os.remove(absolute_path) 118 | return f"Successfully deleted file {file_path}." 119 | else: 120 | return f"Error: File not found '{file_path}'" 121 | 122 | def list_files(self, dir_path: str) -> str: 123 | """Lists all files in the specified directory in the workspace directory on the local file system. 
124 | 125 | Args: 126 | dir_path (str): The path to the directory to list files from, relative to the workspace directory. 127 | 128 | Returns: 129 | str: A list of all files in the directory. If the directory does not exist, returns an error message. 130 | """ 131 | absolute_dir_path = self.workspace_dir / dir_path 132 | if absolute_dir_path.exists() and absolute_dir_path.is_dir(): 133 | files_in_dir = absolute_dir_path.glob("*") 134 | return "\n".join(str(file) for file in files_in_dir if file not in self.IGNORE_FILES) 135 | else: 136 | return f"Error: No such directory {dir_path}." 137 | 138 | async def alist_files(self, dir_path: str) -> str: 139 | """Lists all files in the specified directory in the workspace directory on the local file system asynchronously. 140 | 141 | Args: 142 | dir_path (str): The path to the directory to list files from, relative to the workspace directory. 143 | 144 | Returns: 145 | str: A list of all files in the directory. If the directory does not exist, returns an error message. 146 | """ 147 | absolute_dir_path = self.workspace_dir / dir_path 148 | if absolute_dir_path.exists() and absolute_dir_path.is_dir(): 149 | files_in_dir = absolute_dir_path.glob("*") 150 | return "\n".join(str(file) for file in files_in_dir if file not in self.IGNORE_FILES) 151 | else: 152 | return f"Error: No such directory {dir_path}." 
153 | -------------------------------------------------------------------------------- /autopack/utils.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import json 3 | import os 4 | import re 5 | import sys 6 | from asyncio import iscoroutinefunction 7 | from json import JSONDecodeError 8 | from types import ModuleType 9 | from typing import Callable 10 | from typing import TYPE_CHECKING, Any, Union, Coroutine 11 | 12 | from langchain.chat_models.base import BaseChatModel 13 | from langchain.schema import SystemMessage, BaseMessage 14 | from pydantic import BaseModel 15 | 16 | from autopack.errors import AutoPackLoadError 17 | from autopack.pack_response import PackResponse 18 | 19 | if TYPE_CHECKING: 20 | from autopack.pack import Pack 21 | 22 | 23 | def find_or_create_autopack_dir(depth=0) -> str: 24 | """Try to find a suitable .autopack directory. Tries in this order: 25 | 1. Directory specified in environment variable AUTOPACK_DIR 26 | 2. Existing .autopack directory in current directory 27 | 3. Existing .autopack directory up to 3 directories up 28 | 4. 
def find_or_create_autopack_dir(depth=0) -> str:
    """Try to find a suitable .autopack directory. Tries in this order:
    1. Directory specified in environment variable AUTOPACK_DIR
    2. Existing .autopack directory in current directory
    3. Existing .autopack directory up to 3 directories up
    4. Creates an .autopack directory in current directory
    """
    env_dir = os.environ.get("AUTOPACK_DIR")
    if env_dir:
        # NOTE(review): the env override is returned without checking that it
        # exists — confirm callers tolerate a missing directory.
        return env_dir

    autopack_dir = os.path.abspath(os.path.join(os.getcwd(), *[os.pardir] * depth, ".autopack"))

    if not os.path.exists(autopack_dir) or not os.path.isdir(autopack_dir):
        if depth > 3:
            os.makedirs(".autopack", exist_ok=True)
            return ".autopack"
        return find_or_create_autopack_dir(depth=depth + 1)

    return autopack_dir


def load_metadata_file() -> dict[str, Any]:
    """Return the parsed contents of the metadata file, returning an empty dict if not found or otherwise failed"""
    metadata_dir = find_or_create_autopack_dir()
    metadata_file = os.path.join(metadata_dir, "pack_metadata.json")

    if not os.path.exists(metadata_file):
        return {}

    with open(metadata_file, "r") as f:
        try:
            return json.load(f)
        except JSONDecodeError:
            # Corrupt metadata is treated the same as no metadata.
            return {}


def write_metadata_file(data: dict[str, Any]):
    """Serialize ``data`` as JSON into the .autopack metadata file, replacing any previous contents."""
    metadata_dir = find_or_create_autopack_dir()
    metadata_file = os.path.join(metadata_dir, "pack_metadata.json")

    with open(metadata_file, "w+") as f:
        json.dump(data, f)


def find_module(pack_data: PackResponse) -> ModuleType:
    """Import the pack's package with the install directories temporarily on ``sys.path``.

    Args:
        pack_data (PackResponse): Metadata naming the package path and repo URL.

    Returns:
        ModuleType: The imported module.

    Raises:
        ImportError: If the package path cannot be imported.
    """
    autopack_dir = find_or_create_autopack_dir()
    package_path = pack_data.package_path
    pack_module_path = os.path.join(autopack_dir, extract_unique_directory_name(pack_data.repo_url))

    sys.path.insert(0, autopack_dir)
    sys.path.insert(0, pack_module_path)

    try:
        return importlib.import_module(package_path)
    finally:
        # Always restore sys.path, even when the import fails.
        sys.path.remove(autopack_dir)
        sys.path.remove(pack_module_path)


def fetch_pack_object(pack_data: PackResponse) -> type["Pack"]:
    """Resolve the Pack class described by ``pack_data``.

    Raises:
        AutoPackLoadError: If the module cannot be imported, the class is
            missing, or the attribute is not a usable class.
    """
    package_path = pack_data.package_path
    class_name = pack_data.class_name
    try:
        module = find_module(pack_data)
        pack_class = getattr(module, class_name)
        return pack_class
    except ImportError:
        raise AutoPackLoadError(
            f"Could not import module '{package_path}'. The path is incorrect or module does not exist."
        )
    except AttributeError:
        raise AutoPackLoadError(
            f"Module '{package_path}' does not contain a class named '{class_name}'. The class name is incorrect."
        )
    except TypeError:
        raise AutoPackLoadError(
            f"'{class_name}' in module '{package_path}' is not a class or cannot be instantiated without arguments."
        )


def format_packs_to_openai_functions(packs: list["Pack"]) -> list[dict[str, Any]]:
    """Format every pack as an OpenAI function-calling spec dict."""
    return [format_pack_to_openai_function(pack) for pack in packs]


def format_pack_to_openai_function(pack: "Pack") -> dict[str, Any]:
    """Build an OpenAI function-calling spec dict from a pack's name, description and args."""
    # Change this if/when other LLMs support functions
    required = []
    run_args = pack.args
    for arg_name, arg in run_args.items():
        arg_required = arg.pop("required", [])
        if arg_required:
            required.append(arg_name)
        run_args[arg_name] = arg

    # NOTE(review): the OpenAI function-calling schema nests "required" inside
    # "parameters"; here it sits at the top level — confirm against the spec
    # and against whatever consumes this dict before changing it.
    return {
        "name": pack.name,
        "description": pack.description,
        "parameters": {"type": "object", "properties": run_args},
        "required": required,
    }


def run_args_from_args_schema(args_schema: type[BaseModel]) -> dict[str, dict[str, str]]:
    """Flatten a pydantic args schema into ``{name: {type, name, description, required}}``.

    Returns an empty dict when ``args_schema`` is falsy or its schema has no
    properties.
    """
    run_args: dict[str, Any] = {}
    if not args_schema:
        return run_args

    schema = args_schema.schema()
    if not schema:
        return run_args

    # Bug fix: the fallback must be a dict — `.items()` on the previous `[]`
    # default raised AttributeError for schemas without a "properties" key.
    for param_name, param in schema.get("properties", {}).items():
        run_args[param_name] = {
            "type": param.get("type", param.get("anyOf", "string")),
            "name": param_name,
            "description": param.get("description", ""),
            "required": param_name in schema.get("required", []),
        }
    return run_args


def functions_bulleted_list(packs: list[Union[PackResponse, type["Pack"]]]) -> str:
    """Render packs as a markdown bulleted list grouped by category.

    Packs without categories are omitted entirely; within a category packs are
    sorted by name.
    """
    functions_string = []
    grouped_packs: dict[str, list[type[Pack]]] = {}
    for pack in packs:
        if not pack.categories:
            continue

        for category in pack.categories:
            if category not in grouped_packs:
                grouped_packs[category] = []
            grouped_packs[category].append(pack)

    for category, category_packs in grouped_packs.items():
        functions_string.append(f"\n## {category}")
        sorted_by_name = sorted(category_packs, key=lambda p: p.name)
        for pack in sorted_by_name:
            # PackResponse carries precomputed run_args; Pack classes carry an args_schema.
            if hasattr(pack, "run_args"):
                args = pack.run_args
            elif hasattr(pack, "args_schema") and pack.args_schema:
                args = run_args_from_args_schema(pack.args_schema)
            else:
                args = {}

            args_signature = ", ".join([f"{name}: {arg.get('type')}" for name, arg in args.items()])
            args_descriptions = (
                "; ".join([f"{name} ({arg.get('type')}): {arg.get('description')}" for name, arg in args.items()])
                or "None."
            )
            functions_string.append(
                f"- {pack.name}({args_signature}): {pack.description} | Arguments: {args_descriptions}"
            )

    return "\n".join(functions_string)


def functions_summary(packs: list["Pack"]) -> str:
    """Return a comma-separated list of pack names."""
    return ", ".join([f"{pack.name}" for pack in packs])


def extract_unique_directory_name(repo_url: str) -> str:
    """Derive a filesystem-safe directory name from a git repo URL."""
    repo_name = repo_url.split("/")[-1].replace(".git", "")
    # Replace any non-alphanumeric characters with underscores
    return re.sub(r"[^a-zA-Z0-9]", "_", repo_name)


def call_llm(prompt: str, llm: Union[BaseChatModel, Callable[[str], str]]) -> str:
    """
    Call the given LLM with the specified prompt.

    The function supports both callable LLMs and LLMs that are instances of the BaseChatModel class.

    Args:
        prompt (str): The prompt to feed to the LLM.
        llm (Union[BaseChatModel, Callable]): The LLM to call.

    Returns:
        str: The response from the LLM, or an empty string when ``llm`` is not usable.
    """
    if isinstance(llm, BaseChatModel):
        message = SystemMessage(content=prompt)
        response = llm(messages=[message])
        if isinstance(response, BaseMessage):
            return response.content
        else:
            return response
    elif callable(llm):
        return llm(prompt)

    return ""


async def acall_llm(prompt: str, llm: Union[BaseChatModel, Callable[[str], str], Coroutine[Any, Any, str]]) -> str:
    """
    Asynchronously call the given LLM with the specified prompt.

    The function supports both callable LLMs and LLMs that are instances of the BaseChatModel class.
    Synchronous callables fall through to :func:`call_llm`.

    Args:
        prompt (str): The prompt to feed to the LLM.
        llm (Union[BaseChatModel, Callable[[str], str], Coroutine[Any, Any, str]]): The LLM to call.

    Returns:
        str: The response from the LLM.
    """
    if isinstance(llm, BaseChatModel):
        # NOTE(review): ``_call_async`` is a private langchain API and may break
        # across langchain versions — confirm before upgrading.
        message = SystemMessage(content=prompt)
        response = await llm._call_async(messages=[message])
        if isinstance(response, BaseMessage):
            return response.content
        else:
            return response
    elif callable(llm) and iscoroutinefunction(llm):
        return await llm(prompt)

    return call_llm(prompt, llm)  # type: ignore