├── .flake8
├── .github
└── workflows
│ ├── build.yml
│ ├── integration_test.yml
│ ├── integration_test_minimal.yml
│ ├── integration_test_workflow.yml
│ ├── publish.yml
│ └── test.yml
├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── Makefile
├── README.md
├── docs
├── Makefile
├── make.bat
└── source
│ ├── _static
│ └── .gitkeep
│ ├── _templates
│ ├── .gitkeep
│ └── autosummary
│ │ ├── base.rst
│ │ └── module.rst
│ ├── agents.rst
│ ├── api.rst
│ ├── autogen.rst
│ ├── caching_observability.nblink
│ ├── choosing_llms.rst
│ ├── conf.py
│ ├── examples.rst
│ ├── examples
│ ├── advanced_output_handling.nblink
│ ├── blog_with_images.nblink
│ ├── customer_support.rst
│ ├── event_driven.nblink
│ ├── integrating_autogen.nblink
│ ├── math_single_agent.nblink
│ ├── research_agent.nblink
│ ├── streaming_agent_output.nblink
│ └── validating_agent_output.nblink
│ ├── images
│ └── crew_diagram.png
│ ├── index.rst
│ ├── install_pandoc.py
│ ├── installation.rst
│ ├── key_concepts.rst
│ ├── key_value_store.rst
│ ├── knowledge_graph.nblink
│ ├── quickstart.rst
│ └── usage.rst
├── examples
├── Advanced output handling.ipynb
├── Blog with Images.ipynb
├── Caching and observability.ipynb
├── Event-driven orchestration for AI systems.ipynb
├── Interaction with the knowledge graph.ipynb
├── Math via python code with a single agent.ipynb
├── Multi-step research agent.ipynb
├── Streaming agent output.ipynb
├── Using AutoGen with motleycrew.ipynb
├── Validating agent output.ipynb
├── __init__.py
├── aider_code_generation.py
├── blog_post
│ ├── blog_post.py
│ └── blog_post_input.py
├── data
│ ├── groupchat
│ │ ├── fetch_arxiv_gpt4.py
│ │ └── fetch_latest_gpt4_paper.py
│ └── research_agent_storage
│ │ ├── default__vector_store.json
│ │ ├── docstore.json
│ │ ├── graph_store.json
│ │ ├── image__vector_store.json
│ │ └── index_store.json
├── delegation_demo.py
├── hacking dependencies.ipynb
├── images
│ ├── 0b96405f.png
│ ├── 651cf780.png
│ └── cc33d04d.png
├── key_value_store.py
├── llama_index_output_handler.py
└── tool_calling_with_memory.py
├── motleycrew
├── __init__.py
├── agents
│ ├── __init__.py
│ ├── abstract_parent.py
│ ├── crewai
│ │ ├── __init__.py
│ │ ├── agent_with_config.py
│ │ ├── crewai.py
│ │ └── crewai_agent.py
│ ├── langchain
│ │ ├── __init__.py
│ │ ├── langchain.py
│ │ ├── legacy_react.py
│ │ ├── tool_calling_react.py
│ │ └── tool_calling_react_prompts.py
│ ├── llama_index
│ │ ├── __init__.py
│ │ ├── llama_index.py
│ │ └── llama_index_react.py
│ ├── mixins.py
│ └── parent.py
├── applications
│ ├── __init__.py
│ ├── customer_support
│ │ ├── README.md
│ │ ├── communication.py
│ │ ├── example_issues.csv
│ │ ├── issue_tree.py
│ │ ├── ray_serve_app.py
│ │ ├── requirements.txt
│ │ ├── static
│ │ │ └── index.html
│ │ └── support_agent.py
│ ├── expenses
│ │ ├── expenses.py
│ │ ├── schema_delta.py
│ │ └── sql_tools.py
│ ├── faust_workflow
│ │ ├── __init__.py
│ │ ├── faust_workflow.py
│ │ └── visualize.py
│ └── research_agent
│ │ ├── __init__.py
│ │ ├── answer_task.py
│ │ ├── question.py
│ │ ├── question_answerer.py
│ │ ├── question_generator.py
│ │ ├── question_prioritizer.py
│ │ └── question_task.py
├── common
│ ├── __init__.py
│ ├── aux_prompts.py
│ ├── defaults.py
│ ├── enums.py
│ ├── exceptions.py
│ ├── llms.py
│ ├── logging.py
│ ├── types.py
│ └── utils.py
├── crew
│ ├── __init__.py
│ ├── crew.py
│ └── crew_threads.py
├── storage
│ ├── __init__.py
│ ├── graph_node.py
│ ├── graph_store.py
│ ├── graph_store_utils.py
│ ├── kuzu_graph_store.py
│ └── kv_store_domain.py
├── tasks
│ ├── __init__.py
│ ├── simple.py
│ ├── task.py
│ └── task_unit.py
├── tools
│ ├── __init__.py
│ ├── agentic_validation_loop.py
│ ├── autogen_chat_tool.py
│ ├── code
│ │ ├── __init__.py
│ │ ├── aider_tool.py
│ │ ├── postgresql_linter.py
│ │ ├── python_linter.py
│ │ └── python_repl.py
│ ├── html_render_tool.py
│ ├── image
│ │ ├── __init__.py
│ │ ├── dall_e.py
│ │ ├── download_image.py
│ │ └── replicate_tool.py
│ ├── llm_tool.py
│ ├── mermaid_evaluator_tool.py
│ ├── simple_retriever_tool.py
│ ├── structured_passthrough.py
│ └── tool.py
└── tracking
│ ├── __init__.py
│ ├── callbacks.py
│ └── utils.py
├── poetry.lock
├── pyproject.toml
├── pytest.ini
├── requirements-extra.txt
└── tests
├── __init__.py
├── conftest.py
├── run_integration_tests.py
├── test_agents
├── __init__.py
├── conftest.py
├── test_agent_chain.py
├── test_agents.py
├── test_langchain_output_handler.py
├── test_llama_index_output_handler.py
└── test_llms.py
├── test_crew
├── __init__.py
├── test_crew.py
└── test_crew_threads.py
├── test_storage
├── __init__.py
├── test_graph_store.py
└── test_kuzu_graph_store.py
├── test_tasks
├── __init__.py
├── test_simple_task.py
├── test_task.py
└── test_task_unit.py
├── test_tools
├── __init__.py
├── test_html_render_tool.py
├── test_linter_tools.py
├── test_repl_tool.py
├── test_structured_passthrough_tool.py
├── test_tool.py
└── test_tool_chain.py
└── tools
└── test_agentic_validation_loop.py
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length=88
3 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Build
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 |
11 | # If another push to the same PR or branch happens while this workflow is still running,
12 | # cancel the earlier run in favor of the next run.
13 | #
14 | # There's no point in testing an outdated version of the code. GitHub only allows
15 | # a limited number of job runners to be active at the same time, so it's better to cancel
16 | # pointless jobs early so that more useful jobs can run sooner.
17 | concurrency:
18 | group: ${{ github.workflow }}-${{ github.ref }}
19 | cancel-in-progress: true
20 |
21 | jobs:
22 | build:
23 | strategy:
24 | matrix:
25 | python-version: [ "3.10", "3.11", "3.12" ]
26 | os: [ ubuntu-latest, macos-latest, windows-latest ]
27 | runs-on: ${{ matrix.os }}
28 | steps:
29 | - uses: actions/checkout@v4
30 |
31 | - name: Set up Python ${{ matrix.python-version }}
32 | id: setup-python
33 | uses: actions/setup-python@v4
34 | with:
35 | python-version: ${{ matrix.python-version }}
36 |
37 | - name: Install poetry
38 | run: pip install -U poetry
39 |
40 | - name: Configure poetry
41 | run: |
42 | poetry config virtualenvs.create true
43 | poetry config virtualenvs.in-project true
44 | poetry config installer.parallel true
45 |
46 | - name: Load cached venv
47 | id: cached-poetry-dependencies
48 | uses: actions/cache@v4
49 | with:
50 | path: .venv
51 | key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
52 |
53 | - name: Install project
54 | run: poetry install --no-interaction --with dev --all-extras
55 |
56 | - name: Run build
57 | run: poetry build
58 |
59 | - name: Install pandoc
60 | working-directory: ./docs/source
61 | run: poetry run python install_pandoc.py
62 |
63 | - name: Run docs build
64 | env:
65 | TZ: UTC
66 | working-directory: ./docs
67 | run: poetry run make html
68 |
--------------------------------------------------------------------------------
/.github/workflows/integration_test.yml:
--------------------------------------------------------------------------------
1 | name: Integration test
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 |
11 | concurrency:
12 | group: ${{ github.workflow }}-${{ github.ref }}
13 | cancel-in-progress: true
14 |
15 | jobs:
16 | pre-test:
17 | # default smoke test against python-3.10 on ubuntu-latest
18 | uses: ./.github/workflows/integration_test_workflow.yml
19 | secrets:
20 | openai-api-key: ${{ secrets.OPENAI_API_KEY }}
21 |
22 | integration-test:
23 | needs: pre-test
24 | strategy:
25 | matrix:
26 | python-version: ["3.10", "3.11", "3.12"]
27 | os: [ubuntu-latest, macos-latest, windows-latest]
28 | uses: ./.github/workflows/integration_test_workflow.yml
29 | with:
30 | os: ${{ matrix.os }}
31 | python-version: ${{ matrix.python-version }}
32 | secrets:
33 | openai-api-key: ${{ secrets.OPENAI_API_KEY }}
34 |
--------------------------------------------------------------------------------
/.github/workflows/integration_test_minimal.yml:
--------------------------------------------------------------------------------
1 | name: Minimal integration test
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 |
11 | concurrency:
12 | group: ${{ github.workflow }}-${{ github.ref }}
13 | cancel-in-progress: true
14 |
15 | jobs:
16 | minimal-test:
17 | strategy:
18 | matrix:
19 | python-version: ["3.10", "3.11", "3.12"]
20 | os: [ubuntu-latest, macos-latest, windows-latest]
21 | uses: ./.github/workflows/integration_test_workflow.yml
22 | with:
23 | os: ${{ matrix.os }}
24 | python-version: ${{ matrix.python-version }}
25 | minimal-only: true
26 | secrets:
27 | openai-api-key: ${{ secrets.OPENAI_API_KEY }}
28 |
--------------------------------------------------------------------------------
/.github/workflows/integration_test_workflow.yml:
--------------------------------------------------------------------------------
1 | name: Integration test workflow
2 |
3 | on:
4 | workflow_call:
5 | inputs:
6 | os:
7 | default: ubuntu-latest
8 | type: string
9 | python-version:
10 | default: "3.12"
11 | type: string
12 | minimal-only:
13 | default: false
14 | type: boolean
15 | secrets:
16 | openai-api-key:
17 | required: true
18 |
19 |
20 | jobs:
21 | test:
22 | environment: integration_test
23 | runs-on: ${{ inputs.os }}
24 | steps:
25 | - uses: actions/checkout@v4
26 |
27 | - name: Set up Python ${{ inputs.python-version }}
28 | id: setup-python
29 | uses: actions/setup-python@v4
30 | with:
31 | python-version: ${{ inputs.python-version }}
32 |
33 | - name: Install poetry
34 | run: pip install -U poetry
35 |
36 | - name: Configure poetry
37 | run: |
38 | poetry config virtualenvs.create true
39 | poetry config virtualenvs.in-project true
40 | poetry config installer.parallel true
41 |
42 | - name: Load cached venv
43 | id: cached-poetry-dependencies
44 | uses: actions/cache@v4
45 | with:
46 | path: .venv
47 | key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
48 |
49 | - name: Load test cache
50 | uses: actions/cache@v4
51 | with:
52 | path: tests/itest_cache
53 | enableCrossOsArchive: true
54 | key: integration-tests-cache
55 | restore-keys: |
56 | integration-tests-cache
57 |
58 | - name: Install extra dependencies
59 | run: poetry run pip install -r requirements-extra.txt
60 |
61 | - name: Install minimal dependencies
62 | if: ${{ inputs.minimal-only }}
63 | run: poetry install --no-interaction
64 |
65 | - name: Install dependencies
66 | if: ${{ !inputs.minimal-only }}
67 | run: poetry install --no-interaction --all-extras
68 |
69 | - name: Run minimal integration tests
70 | env:
71 | OPENAI_API_KEY: ${{ secrets.openai-api-key }}
72 | TIKTOKEN_CACHE_DIR: tests/itest_cache/tiktoken_cache
73 | if: ${{ inputs.minimal-only }}
74 | run: poetry run python tests/run_integration_tests.py --minimal-only --os ${{ runner.os }}
75 |
76 | - name: Run integration tests
77 | env:
78 | OPENAI_API_KEY: ${{ secrets.openai-api-key }}
79 | TIKTOKEN_CACHE_DIR: tests/itest_cache/tiktoken_cache
80 | if: ${{ !inputs.minimal-only }}
81 | run: poetry run python tests/run_integration_tests.py --os ${{ runner.os }}
82 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish
2 |
3 | on:
4 | release:
5 | types: [published]
6 |
7 | permissions:
8 | contents: write
9 |
10 | jobs:
11 | publish:
12 | runs-on: ubuntu-latest
13 |
14 | permissions:
15 | # This permission is used for trusted publishing:
16 | # https://blog.pypi.org/posts/2023-04-20-introducing-trusted-publishers/
17 | #
18 | # Trusted publishing has to also be configured on PyPI for each package:
19 | # https://docs.pypi.org/trusted-publishers/adding-a-publisher/
20 | id-token: write
21 |
22 | steps:
23 | - uses: actions/checkout@v4
24 |
25 | - name: Set up Python
26 | id: setup-python
27 | uses: actions/setup-python@v4
28 | with:
29 | python-version: "3.12"
30 |
31 | - name: Install poetry
32 | run: pip install -U poetry
33 |
34 | - name: Configure poetry
35 | run: |
36 | poetry config virtualenvs.create true
37 | poetry config virtualenvs.in-project true
38 | poetry config installer.parallel true
39 | poetry config pypi-token.pypi "${{ secrets.PYPI_API_KEY }}"
40 |
41 | - name: Load cached venv
42 | id: cached-poetry-dependencies
43 | uses: actions/cache@v4
44 | with:
45 | path: .venv
46 | key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
47 |
48 | - name: Install dependencies
49 | run: poetry install --no-interaction
50 |
51 | - name: Build using poetry
52 | run: poetry build
53 |
54 | - name: Publish package distributions to PyPI
55 | uses: pypa/gh-action-pypi-publish@release/v1
56 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 |
11 | # If another push to the same PR or branch happens while this workflow is still running,
12 | # cancel the earlier run in favor of the next run.
13 | #
14 | # There's no point in testing an outdated version of the code. GitHub only allows
15 | # a limited number of job runners to be active at the same time, so it's better to cancel
16 | # pointless jobs early so that more useful jobs can run sooner.
17 | concurrency:
18 | group: ${{ github.workflow }}-${{ github.ref }}
19 | cancel-in-progress: true
20 |
21 | jobs:
22 | test:
23 | strategy:
24 | matrix:
25 | python-version: ["3.10", "3.11", "3.12"]
26 | os: [ubuntu-latest, macos-latest, windows-latest]
27 | runs-on: ${{ matrix.os }}
28 | steps:
29 | - uses: actions/checkout@v4
30 |
31 | - name: Set up Python ${{ matrix.python-version }}
32 | id: setup-python
33 | uses: actions/setup-python@v4
34 | with:
35 | python-version: ${{ matrix.python-version }}
36 |
37 | - name: Install poetry
38 | run: pip install -U poetry
39 |
40 | - name: Configure poetry
41 | run: |
42 | poetry config virtualenvs.create true
43 | poetry config virtualenvs.in-project true
44 | poetry config installer.parallel true
45 |
46 | - name: Load cached venv
47 | id: cached-poetry-dependencies
48 | uses: actions/cache@v4
49 | with:
50 | path: .venv
51 | key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
52 |
53 | - name: Install extra dependencies
54 | run: poetry run pip install -r requirements-extra.txt
55 |
56 | - name: Install dependencies
57 | run: poetry install --no-interaction --all-extras --with dev
58 |
59 | - name: Run tests
60 | run: poetry run pytest
61 |
62 | - name: Tests coverage
63 | run: poetry run pytest --cov=motleycrew tests/
64 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Editors
2 | .vscode/
3 | .idea/
4 |
5 | # Vagrant
6 | .vagrant/
7 |
8 | # Mac/OSX
9 | .DS_Store
10 |
11 | # Windows
12 | Thumbs.db
13 |
14 | # Byte-compiled / optimized / DLL files
15 | __pycache__/
16 | *.py[cod]
17 | *$py.class
18 |
19 | # C extensions
20 | *.so
21 |
22 | # Distribution / packaging
23 | .Python
24 | build/
25 | develop-eggs/
26 | dist/
27 | downloads/
28 | eggs/
29 | .eggs/
30 | lib/
31 | lib64/
32 | parts/
33 | sdist/
34 | var/
35 | wheels/
36 | *.egg-info/
37 | .installed.cfg
38 | *.egg
39 | MANIFEST
40 |
41 | # PyInstaller
42 | # Usually these files are written by a python script from a template
43 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
44 | *.manifest
45 | *.spec
46 |
47 | # Installer logs
48 | pip-log.txt
49 | pip-delete-this-directory.txt
50 |
51 | # Unit test / coverage reports
52 | htmlcov/
53 | .tox/
54 | .nox/
55 | .coverage
56 | .coverage.*
57 | .cache
58 | nosetests.xml
59 | coverage.xml
60 | *.cover
61 | .hypothesis/
62 | .pytest_cache/
63 |
64 | # Translations
65 | *.mo
66 | *.pot
67 |
68 | # IPython
69 | profile_default/
70 | ipython_config.py
71 |
72 | # Environments
73 | .env
74 | .venv
75 | env/
76 | venv/
77 | ENV/
78 | env.bak/
79 | venv.bak/
80 | .python-version
81 |
82 | # mypy
83 | .mypy_cache/
84 | .dmypy.json
85 | dmypy.json
86 | *.pyc
87 |
88 | scripts/
89 |
90 | # docs-related stuff
91 | docs/source/_autosummary
92 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | # Required
5 | version: 2
6 |
7 | # Set the OS, Python version and other tools you might need
8 | build:
9 | os: ubuntu-22.04
10 | tools:
11 | python: "3.12"
12 | # You can also specify other tool versions:
13 | # nodejs: "19"
14 | # rust: "1.64"
15 | # golang: "1.19"
16 | apt_packages:
17 | - pandoc
18 | jobs:
19 | post_create_environment:
20 | # Install poetry
21 | # https://python-poetry.org/docs/#installing-manually
22 | - python -m pip install poetry
23 | post_install:
24 | # Install dependencies with 'docs' dependency group
25 | - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install
26 |
27 | # Build documentation in the "docs/" directory with Sphinx
28 | sphinx:
29 | configuration: docs/source/conf.py
30 |
31 | # Optionally build your docs in additional formats such as PDF and ePub
32 | # formats:
33 | # - pdf
34 | # - epub
35 |
36 | # Optional but recommended, declare the Python requirements required
37 | # to build your documentation
38 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
39 | #python:
40 | # install:
41 | # - requirements: docs/requirements.txt
42 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 motleycrew
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all
2 | all: clean cov build
3 |
4 | .PHONY: clean
5 | clean:
6 | rm -rf dist/
7 |
8 | .PHONY: test
9 | test:
10 | poetry run pytest
11 |
12 | .PHONY: cov
13 | cov:
14 | poetry run pytest --cov --cov-report=term-missing
15 |
16 | .PHONY: build
17 | build:
18 | poetry build
19 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/source/_static/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/docs/source/_static/.gitkeep
--------------------------------------------------------------------------------
/docs/source/_templates/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/docs/source/_templates/.gitkeep
--------------------------------------------------------------------------------
/docs/source/_templates/autosummary/base.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline }}
2 |
3 | .. currentmodule:: {{ module }}
4 |
5 |
6 | .. auto{{ objtype }}:: {{ objname }}
7 |
--------------------------------------------------------------------------------
/docs/source/_templates/autosummary/module.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline}}
2 |
3 |
4 | .. automodule:: {{ fullname }}
5 |
6 | {% block attributes %}
7 | {% if attributes %}
8 | .. rubric:: {{ _('Module Attributes') }}
9 |
10 | .. autosummary::
11 | {% for item in attributes %}
12 | {{ item }}
13 | {%- endfor %}
14 | {% endif %}
15 | {% endblock %}
16 |
17 | {% block functions %}
18 | {% if functions %}
19 | .. rubric:: {{ _('Functions') }}
20 |
21 | .. autosummary::
22 | {% for item in functions %}
23 | {{ item }}
24 | {%- endfor %}
25 | {% endif %}
26 | {% endblock %}
27 |
28 | {% block classes %}
29 | {% if classes %}
30 | .. rubric:: {{ _('Classes') }}
31 |
32 | .. autosummary::
33 | {% for item in classes %}
34 | {{ item }}
35 | {%- endfor %}
36 | {% endif %}
37 | {% endblock %}
38 |
39 | {% block exceptions %}
40 | {% if exceptions %}
41 | .. rubric:: {{ _('Exceptions') }}
42 |
43 | .. autosummary::
44 | {% for item in exceptions %}
45 | {{ item }}
46 | {%- endfor %}
47 | {% endif %}
48 | {% endblock %}
49 |
50 | {% block modules %}
51 | {% if modules %}
52 | .. rubric:: Modules
53 |
54 | .. autosummary::
55 | :toctree:
56 | :recursive:
57 | {% for item in modules %}
58 | {{ item }}
59 | {%- endfor %}
60 | {% endif %}
61 | {% endblock %}
62 |
--------------------------------------------------------------------------------
/docs/source/agents.rst:
--------------------------------------------------------------------------------
1 | Agents
2 | ======
3 |
4 | MotleyCrew is first and foremost a multi-agent framework, so the concept of an agent is central to it.
5 |
6 | An agent is essentially an actor that can perform tasks. Usually, it contains some kind of loop
7 | that interacts with an LLM and performs actions based on the data it receives.
8 |
9 |
10 | ReAct tool calling agent
11 | ------------------------
12 | MotleyCrew provides a robust general-purpose agent that implements
13 | `ReAct prompting <https://arxiv.org/abs/2210.03629>`_: :class:`motleycrew.agents.langchain.ReActToolCallingMotleyAgent`.
14 | This agent is probably a good starting point for most tasks.
15 |
16 | .. code-block:: python
17 |
18 | from motleycrew.agents.langchain import ReActToolCallingMotleyAgent
19 | from langchain_community.tools import DuckDuckGoSearchRun
20 |
21 | agent = ReActToolCallingMotleyAgent(tools=[DuckDuckGoSearchRun()])
22 | agent.invoke({"prompt": "Which country currently has more population, China or India?"})
23 |
24 |
25 | ``ReActToolCallingMotleyAgent`` was tested with the newer OpenAI and Anthropic models, and it should work
26 | with any model that supports function calling. If you want a similar agent for models without
27 | function calling support, look at :class:`motleycrew.agents.langchain.LegacyReActMotleyAgent`
28 | or :class:`motleycrew.agents.llama_index.ReActLlamaIndexMotleyAgent`.
29 |
30 |
31 | Using agents from other frameworks
32 | ----------------------------------
33 | For many tasks, it's reasonable to use a pre-built agent from some framework,
34 | like Langchain, LlamaIndex, CrewAI etc. MotleyCrew provides adapters for these frameworks,
35 | which allow you to mix and match different agents together and easily provide them with any tools.
36 |
37 | * :class:`motleycrew.agents.langchain.LangchainMotleyAgent`
38 | * :class:`motleycrew.agents.llama_index.LlamaIndexMotleyAgent`
39 |
40 |
41 | 2025 update on CrewAI agents
42 | ----------------------------
43 |
44 | Lately, CrewAI has been increasingly complex and hard to integrate, as it's driving towards a more
45 | opinionated and closed environment.
46 | For this reason, we're deprecating the :class:`motleycrew.agents.crewai.CrewAIMotleyAgent`.
47 | Please use :class:`motleycrew.agents.langchain.ReActToolCallingMotleyAgent` instead.
48 |
49 |
50 | Creating your own agent
51 | -----------------------
52 | The simplest way to create your own agent is to subclass :class:`motleycrew.agents.parent.MotleyAgentParent`.
53 |
54 | Note that in a `crew `_,
55 | not only an agent can be a `worker `_.
56 | A worker is basically any `Runnable <https://python.langchain.com/docs/concepts/runnables/>`_,
57 | and all agents and tools implement the Runnable interface in motleycrew.
58 |
--------------------------------------------------------------------------------
/docs/source/api.rst:
--------------------------------------------------------------------------------
1 | API
2 | ===
3 |
4 | .. autosummary::
5 | :toctree: _autosummary
6 | :recursive:
7 |
8 | motleycrew
9 |
--------------------------------------------------------------------------------
/docs/source/autogen.rst:
--------------------------------------------------------------------------------
1 | AutoGen-related Examples
2 | ========================
3 |
4 | Here are some examples that firstly, show how some AutoGen patterns translate into motleycrew (in particular,
5 | how cases where UserProxy is only used as an AgentExecutor don't need multiple agents in other frameworks),
6 | and secondly, how to use motleycrew together with autogen, both by wrapping a collection of autogen agents as
7 | a motleycrew tool, and by giving motleycrew tools and agents as tools to autogen.
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 |
12 | examples/math_single_agent
13 | examples/integrating_autogen
14 |
--------------------------------------------------------------------------------
/docs/source/caching_observability.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../examples/Caching and observability.ipynb"
3 | }
--------------------------------------------------------------------------------
/docs/source/choosing_llms.rst:
--------------------------------------------------------------------------------
1 | Choosing LLMs
2 | ====================
3 |
4 | Generally, the interaction with an LLM is up to the agent implementation.
5 | However, as motleycrew integrates with several agent frameworks, there is some common ground for how to choose LLMs.
6 |
7 |
8 | Providing an LLM to an agent
9 | ----------------------------
10 |
11 | In general, you can pass a specific LLM to the agent you're using.
12 |
13 | .. code-block:: python
14 |
15 | from motleycrew.agents.langchain import ReActToolCallingMotleyAgent
16 | from langchain_openai import ChatOpenAI
17 |
18 | llm = ChatOpenAI(model="gpt-4o", temperature=0)
19 | agent = ReActToolCallingMotleyAgent(llm=llm, tools=[...])
20 |
21 |
22 | The LLM class depends on the agent framework you're using.
23 | That's why we have an ``init_llm`` function to help you set up the LLM.
24 |
25 | .. code-block:: python
26 |
27 | from motleycrew.common.llms import init_llm
28 | from motleycrew.common import LLMFramework, LLMProvider
29 |
30 | llm = init_llm(
31 | llm_framework=LLMFramework.LANGCHAIN,
32 | llm_provider=LLMProvider.ANTHROPIC,
33 | llm_name="claude-3-5-sonnet-latest",
34 | llm_temperature=0
35 | )
36 | agent = ReActToolCallingMotleyAgent(llm=llm, tools=[...])
37 |
38 |
39 | The currently supported frameworks (:py:class:`motleycrew.common.enums.LLMFramework`) are:
40 |
41 | - :py:class:`Langchain <motleycrew.common.enums.LLMFramework.LANGCHAIN>` for Langchain-based agents from Langchain, CrewAI, motleycrew etc.
42 | - :py:class:`LlamaIndex <motleycrew.common.enums.LLMFramework.LLAMA_INDEX>` for LlamaIndex-based agents.
43 |
44 | The currently supported LLM providers (:py:class:`motleycrew.common.enums.LLMProvider`) are:
45 |
46 | - :py:class:`OpenAI <motleycrew.common.enums.LLMProvider.OPENAI>`
47 | - :py:class:`Anthropic <motleycrew.common.enums.LLMProvider.ANTHROPIC>`
48 | - :py:class:`Groq <motleycrew.common.enums.LLMProvider.GROQ>`
49 | - :py:class:`Together <motleycrew.common.enums.LLMProvider.TOGETHER>`
50 | - :py:class:`Replicate <motleycrew.common.enums.LLMProvider.REPLICATE>`
51 | - :py:class:`Ollama <motleycrew.common.enums.LLMProvider.OLLAMA>`
52 | - :py:class:`Azure OpenAI <motleycrew.common.enums.LLMProvider.AZURE_OPENAI>`
53 |
54 | Please raise an issue if you need to add support for another LLM provider.
55 |
56 |
57 | Default LLM
58 | -----------
59 |
60 | At present, we default to OpenAI's latest ``gpt-4o`` model for our agents,
61 | and rely on the user to set the ``OPENAI_API_KEY`` environment variable.
62 |
63 | You can control the default LLM as follows:
64 |
65 | .. code-block:: python
66 |
67 | from motleycrew.common import Defaults
68 | Defaults.DEFAULT_LLM_PROVIDER = "the_new_default_LLM_provider"
69 | Defaults.DEFAULT_LLM_NAME = "name_of_the_new_default_model_from_the_provider"
70 |
71 |
72 | Using custom LLMs
73 | -----------------
74 |
75 | To use a custom LLM provider as the default or via the ``init_llm`` function,
76 | you need to make sure that for all the frameworks you're using (currently at most Langchain, LlamaIndex),
77 | the ``LLM_MAP`` has an entry for the LLM provider, for example as follows:
78 |
79 | .. code-block:: python
80 |
81 | from motleycrew.common import LLMFramework, LLMProvider
82 | from motleycrew.common.llms import LLM_MAP
83 |
84 | LLM_MAP[(LLMFramework.LANGCHAIN, "MyLLMProvider")] = my_langchain_llm_factory
85 | LLM_MAP[(LLMFramework.LLAMA_INDEX, "MyLLMProvider")] = my_llamaindex_llm_factory
86 |
87 | Here each llm factory is a function with a signature
88 | ``def llm_factory(llm_name: str, llm_temperature: float, **kwargs)`` that returns the model object for the relevant framework.
89 |
90 | For example, this is the built-in OpenAI model factory for Langchain:
91 |
92 | .. code-block:: python
93 |
94 | def langchain_openai_llm(
95 | llm_name: str = Defaults.DEFAULT_LLM_NAME,
96 | llm_temperature: float = Defaults.DEFAULT_LLM_TEMPERATURE,
97 | **kwargs,
98 | ):
99 | from langchain_openai import ChatOpenAI
100 |
101 | return ChatOpenAI(model=llm_name, temperature=llm_temperature, **kwargs)
102 |
103 |
104 | You can also overwrite the `LLM_MAP` values for e.g. the OpenAI models if, for example,
you want to use an in-house wrapper for Langchain or LlamaIndex model adapters
106 | (for example, to use an internal gateway instead of directly hitting the OpenAI endpoints).
107 |
108 | Note that at present, if you use Autogen with motleycrew, you will need to separately control
109 | the models that Autogen uses, using the Autogen-specific APIs.
110 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

import os
import sys

# Make the motleycrew package (two levels up from docs/source) importable for autodoc.
sys.path.append(os.path.abspath("../.."))

project = "motleycrew"
copyright = "2024, motleycrew"
author = "motleycrew"

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.napoleon",
    "sphinx_rtd_theme",
    "nbsphinx",
    "nbsphinx_link",
]

templates_path = ["_templates", "_templates/autosummary"]
exclude_patterns = []

# Default options applied to every autodoc directive.
autodoc_default_options = {
    "members": True,
    "member-order": "bysource",
    "special-members": "__init__",
    "show-inheritance": True,
    "inherited-members": False,
    "undoc-members": True,
}

# Accept both Google-style and NumPy-style docstrings via napoleon.
napoleon_google_docstring = True
napoleon_numpy_docstring = True


# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]

# Notebooks are included via nbsphinx-link and are never executed at build time.
nbsphinx_allow_errors = True
nbsphinx_execute = "never"

# Additional configuration for better auto-generated documentation
autosummary_generate = True  # Turn on autosummary

# Create separate .rst files for each module
autosummary_generate_overwrite = False

# Make sure that the generated files are included in the toctree
autosummary_generate_include_files = True
--------------------------------------------------------------------------------
/docs/source/examples.rst:
--------------------------------------------------------------------------------
1 | Examples
2 | ========
3 |
4 |
5 | .. toctree::
6 | :maxdepth: 2
7 |
8 | examples/blog_with_images
9 | examples/research_agent
10 | examples/validating_agent_output
11 | examples/advanced_output_handling
12 | examples/customer_support
13 | examples/streaming_agent_output
14 | examples/event_driven
15 | autogen
16 |
--------------------------------------------------------------------------------
/docs/source/examples/advanced_output_handling.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../../examples/Advanced output handling.ipynb"
3 | }
4 |
--------------------------------------------------------------------------------
/docs/source/examples/blog_with_images.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../../examples/Blog with Images.ipynb",
3 | "extra-media": [
4 | "../../../examples/images"
5 | ]
6 | }
7 |
--------------------------------------------------------------------------------
/docs/source/examples/customer_support.rst:
--------------------------------------------------------------------------------
1 | Customer support chatbot with Ray Serve
2 | =======================================
3 |
4 | This example demonstrates how to build a customer support chatbot using MotleyCrew and Ray Serve.
5 | The chatbot is designed to answer customer queries based on a database of past issues and their resolutions.
6 |
7 | The code for this example can be found `here `_.
8 | Also, see the `blog post `_ about this app.
9 |
10 | Key Components
11 | --------------
12 |
13 | 1. Issue Database
14 |
15 | - Stores information about past issues and their solutions in a tree structure
16 | - Intermediate nodes represent issue categories
17 | - Leaf nodes represent individual issues
18 | - Uses Kuzu to store and query the issue tree through our OGM (see :doc:`../knowledge_graph` for more details)
19 |
20 | 2. AI Support Agent
21 |
22 | - Attempts to resolve customer issues based on past solutions
23 | - Navigates the issue tree to find relevant information
24 | - Can ask clarifying questions to the customer
25 | - Proposes solutions or escalates to a human agent if necessary
26 |
27 | 3. Agent Tools
28 |
29 | - IssueTreeViewTool: Allows the agent to navigate the issue tree
30 | - CustomerChatTool: Enables the agent to ask additional questions to the customer
31 | - ResolveIssueTool: Used to submit a solution or escalate to a human agent
32 |
33 | 4. Ray Serve Deployment
34 |
35 | - Exposes the chatbot as an API
36 | - Allows multiple customers to connect simultaneously
37 | - Uses WebSockets over FastAPI for communication
38 |
39 | Implementation Details
40 | ----------------------
41 |
42 | The support agent is implemented using the "every response is a tool call" design.
43 | The agent loop can only end with a ResolveIssueTool call or when a constraint (e.g., number of iterations) is reached.
44 | This is achieved by making the ResolveIssueTool an output handler.
45 |
46 | The Ray Serve deployment is configured using a simple decorator:
47 |
48 | .. code-block:: python
49 |
50 | @serve.deployment(num_replicas=3, ray_actor_options={"num_cpus": 1, "num_gpus": 0})
51 | class SupportAgentDeployment:
52 | ...
53 |
54 | This setup allows for easy scaling and supports multiple simultaneous sessions balanced between replicas.
55 |
56 | Running the Example
57 | -------------------
58 |
59 | The project includes sample issue data that can be used to populate the issue tree.
60 |
61 | To run this example:
62 |
63 | .. code-block:: bash
64 |
65 | git clone https://github.com/ShoggothAI/motleycrew.git
66 | cd motleycrew
67 | pip install -r requirements.txt
68 |
69 | python -m motleycrew.applications.customer_support.issue_tree # populate the issue tree
70 | ray start --head
71 | python -m motleycrew.applications.customer_support.ray_serve_app
72 |
73 | This example showcases the flexibility of MotleyCrew for building agent-based applications, allowing you to choose your preferred agent framework, orchestration model, and deployment solution.
74 |
--------------------------------------------------------------------------------
/docs/source/examples/event_driven.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../../examples/Event-driven orchestration for AI systems.ipynb"
3 | }
4 |
--------------------------------------------------------------------------------
/docs/source/examples/integrating_autogen.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../../examples/Using AutoGen with motleycrew.ipynb"
3 | }
--------------------------------------------------------------------------------
/docs/source/examples/math_single_agent.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../../examples/Math via python code with a single agent.ipynb"
3 | }
--------------------------------------------------------------------------------
/docs/source/examples/research_agent.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../../examples/Multi-step research agent.ipynb"
3 | }
4 |
--------------------------------------------------------------------------------
/docs/source/examples/streaming_agent_output.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../../examples/Streaming agent output.ipynb"
3 | }
4 |
--------------------------------------------------------------------------------
/docs/source/examples/validating_agent_output.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../../examples/Validating agent output.ipynb"
3 | }
4 |
--------------------------------------------------------------------------------
/docs/source/images/crew_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/docs/source/images/crew_diagram.png
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. motleycrew documentation master file, created by
2 | sphinx-quickstart on Fri May 3 12:05:08 2024.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to motleycrew's documentation!
7 | ======================================
8 |
.. toctree::
   :maxdepth: 3
   :caption: Contents:

   Home <self>
   installation
   usage
   examples
   api
17 |
18 |
19 | Indices and tables
20 | ==================
21 |
22 | * :ref:`genindex`
23 | * :ref:`modindex`
24 | * :ref:`search`
25 |
--------------------------------------------------------------------------------
/docs/source/install_pandoc.py:
--------------------------------------------------------------------------------
1 | """
2 | Script for installing Pandoc in GitHub Actions CI environments.
3 | """
4 |
5 | import os
6 | import shutil
7 | from pypandoc.pandoc_download import download_pandoc
8 |
9 | pandoc_location = os.path.abspath("../../.venv/_pandoc")
10 |
11 | with open(os.environ["GITHUB_PATH"], "a") as path:
12 | path.write(str(pandoc_location) + "\n")
13 |
14 | if not shutil.which("pandoc"):
15 | download_pandoc(targetfolder=pandoc_location)
16 |
--------------------------------------------------------------------------------
/docs/source/installation.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
Installation using pip
----------------------
6 |
7 | .. code-block:: console
8 |
9 | pip install motleycrew
10 |
11 | Installation from source
12 | ------------------------
13 | | Motleycrew uses Poetry to manage its dependencies. We suggest you use it if you want to install motleycrew from source.
14 | | For installation instructions, see https://python-poetry.org/docs/#installation.
15 |
16 | Clone the repository_ and install the dependencies:
17 |
18 | .. code-block:: console
19 |
20 | git clone https://github.com/ShoggothAI/motleycrew.git
21 | cd motleycrew
22 | poetry install
23 |
24 | This will create a virtual environment and install the required dependencies.
25 |
26 | If you want to install extra dependencies for development, you can use the following command:
27 |
28 | .. code-block:: console
29 |
30 | poetry install --all-extras --with dev
31 |
32 | .. _repository: https://github.com/ShoggothAI/motleycrew
33 |
--------------------------------------------------------------------------------
/docs/source/key_value_store.rst:
--------------------------------------------------------------------------------
1 | Key-value store for manipulating objects
2 | ========================================
3 |
4 | In many real-world tasks, the agent needs to deal with different types of data.
5 | Even assuming that the LLM can reliably manipulate text, you often need to work
6 | with other objects, such as structured data in the form of dataframes, JSONs,
7 | or just any Python objects.
8 |
9 | MotleyCrew provides a simple way to store and retrieve objects using a key-value store,
10 | allowing the agent to use them in its operations, like passing them to tools
11 | (e.g. calling a query tool to get some statistics about a particular dataset).
12 |
13 | The key-value store is a dictionary that can be accessed at ``agent.kv_store``.
14 |
15 |
.. code-block:: python

    class ObjectWriterTool(MotleyTool):
18 | def run(self, key: str, value: Any):
19 | """Write an object to the key-value store."""
20 | self.agent.kv_store[key] = value
21 |
22 |
23 | class ObjectReaderTool(MotleyTool):
24 | def run(self, key: str) -> Any:
25 | """Read an object from the key-value store."""
26 | return self.agent.kv_store[key]
27 |
28 |
29 | A simple example of using the key-value store can be found
30 | `here `_.
31 |
--------------------------------------------------------------------------------
/docs/source/knowledge_graph.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../examples/Interaction with the knowledge graph.ipynb"
3 | }
--------------------------------------------------------------------------------
/docs/source/usage.rst:
--------------------------------------------------------------------------------
1 | Usage
2 | =====
3 |
4 |
5 | .. toctree::
6 | :maxdepth: 2
7 |
8 | quickstart
9 | key_concepts
10 | agents
11 | knowledge_graph
12 | choosing_llms
13 | caching_observability
14 | key_value_store
15 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
1 | # Has to be here to make the examples package importable for integration tests
2 |
--------------------------------------------------------------------------------
/examples/aider_code_generation.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import sys
4 | from pathlib import Path
5 |
6 | from dotenv import load_dotenv
7 | from langchain_community.tools import ShellTool
8 | from motleycache import logger
9 |
10 | from motleycrew.agents.langchain.tool_calling_react import ReActToolCallingMotleyAgent
11 | from motleycrew.common import configure_logging
12 | from motleycrew.tasks import SimpleTask
13 | from motleycrew.tools.code.aider_tool import AiderTool
14 |
# Show motleycache activity at INFO level.
logger.setLevel(logging.INFO)
# Directory the script is launched from; used to locate a source checkout.
WORKING_DIR = Path(os.path.realpath("."))

try:
    from motleycrew import MotleyCrew
except ImportError:
    # if we are running this from source, add the repository root to sys.path
    # NOTE(review): the top-of-file `from motleycrew...` imports would already
    # have raised before this fallback runs — confirm this path still works.
    motleycrew_location = os.path.realpath(WORKING_DIR / "..")
    sys.path.append(motleycrew_location)
24 |
25 |
26 | # run instruction
27 | # to run the example, you need to clone the repository for the example at the same level as your project
28 | # cd ../../
29 | # git clone https://github.com/ShoggothAI/motleycrew-code-generation-example.git
30 |
31 |
def main():
    """Run the code generation example.

    Creates a crew with a single ReAct tool-calling agent that uses the Aider
    tool (to write code) and a shell tool (to run the generated tests).

    Returns:
        The output of the unit test creation task.
    """
    crew = MotleyCrew()

    # Path to the cloned example repository (see the run instructions above).
    git_repo_path = r"../../motleycrew-code-generation-example"
    tests_file = os.path.join(git_repo_path, "test_math_functions.py")
    target_files = [tests_file]

    # auto_commits is disabled so the example does not touch git history.
    aider_tool = AiderTool(fnames=target_files, git_dname=git_repo_path, auto_commits=False)
    shell_tool = ShellTool()

    developer = ReActToolCallingMotleyAgent(
        prompt_prefix="You are a lead software engineer working in a big tech company.",
        verbose=True,
        tools=[aider_tool, shell_tool],
    )

    create_unit_tests_task = SimpleTask(
        crew=crew,
        name="Adding a unit test",
        description=f"Generate unit tests for the module math_functions.py using pytest. "
        f"You should also add test cases for possible exceptions "
        f"and write comments to the tests. You should also use test parameterization. "
        f"After go to the directory {git_repo_path} and run created unit tests. "
        f"If the tests were executed successfully, return the result of execution, "
        f"if not, rewrite the tests and rerun them until they are working.",
        additional_params={"expected_output": "result of tests execution"},
        agent=developer,
    )

    # The run result was previously bound to an unused local; the task output
    # is what callers actually need.
    crew.run()

    # Get the outputs of the task
    print(create_unit_tests_task.output)
    return create_unit_tests_task.output
66 |
67 |
68 | if __name__ == "__main__":
69 | configure_logging(verbose=True)
70 | load_dotenv()
71 | main()
72 |
--------------------------------------------------------------------------------
/examples/blog_post/blog_post_input.py:
--------------------------------------------------------------------------------
1 | text = """
2 | Wise Pizza: A library for automated figuring out most unusual segments
3 | WisePizza is a library to find and visualise the most interesting slices in multidimensional data based on Lasso and LP solvers, which provides different functions to find segments whose average is most different from the global one or find segments most useful in explaining the difference between two datasets.
4 |
5 | The approach
6 | WisePizza assumes you have a dataset with a number of discrete dimensions (could be currency, region, etc). For each combination of dimensions, the dataset must have a total value (total of the metric over that segment, for example the total volume in that region and currency), and an optional size value (set to 1 if not specified), this could for example be the total number of customers for that region and currency. The average value of the outcome for the segment is defined as total divided by size, in this example it would be the average volume per customer.
7 |
8 | explain_levels takes such a dataset and looks for a small number of 'simple' segments (each only constraining a small number of dimensions) that between them explain most of the variation in the averages; you could also think of them as the segments whose size-weighted deviation from the overall dataset average is the largest. This trades off unusual averages (which will naturally occur more for smaller segments) against segment size.
9 |
10 | Yet another way of looking at it is that we look for segments which, if their average was reset to the overall dataset average, would move overall total the most.
11 |
12 | explain_changes_in_totals and explain_changes_in_average take two datasets of the kind described above, with the same column names, and apply the same kind of logic to find the segments that contribute most to the difference (in total or average, respectively) between the two datasets, optionally splitting that into contributions from changes in segment size and changes in segment total.
13 |
14 | Sometimes, rather than explaining the change in totals from one period to the next, one wishes to explain a change in averages. The analytics of this are a little different - for example, while (as long as all weights and totals are positive) increasing a segment size (other things remaining equal) always increases the overall total, it can increase or decrease the pverall average, depending on whether the average value of that segment is below or above the overall average.
15 |
16 | Table of Contents
17 | What can this do for you?
18 | Find interesting slices
19 | Comparison between two datasets
20 | Installation
21 | Quick Start
22 | For Developers
23 | Tests
24 | What can this do for you?
25 | The automated search for interesting segments can give you the following:
26 |
27 | 1. Better information about segments and subsegments in your data
28 | By using WisePizza and defining initial segments, you can find a segment which maximizes a specific outcome, such as adoption rates.
29 |
30 | 2. Understanding differences in two time periods or two dataframes
31 | If you have two time periods or two datasets, you can find segments that experience the largest change in the totals from previous period/dataset.
32 |
33 | Installation
34 | You can always get the newest wise_pizza release using pip: https://pypi.org/project/wise-pizza/
35 |
36 | pip install wise-pizza
37 | From the command line (another way):
38 |
39 | pip install git+https://github.com/transferwise/wise-pizza.git
40 | From Jupyter notebook (another way):
41 |
42 | !pip install git+https://github.com/transferwise/wise-pizza.git
43 | Or you can clone and run from source, in which case you should pip -r requirements.txt before running.
44 |
45 | Quick Start
46 | The wisepizza package can be used for finding segments with unusual average:
47 |
48 | sf = explain_levels(
49 | df=data,
50 | dims=dims,
51 | total_name=totals,
52 | size_name=size,
53 | max_depth=2,
54 | min_segments=20,
55 | solver="lasso"
56 | )
57 | plot
58 |
59 | Or for finding changes between two datasets in totals:
60 |
61 | sf1 = explain_changes_in_totals(
62 | df1=pre_data,
63 | df2=data,
64 | dims=dims,
65 | total_name=totals,
66 | size_name=size,
67 | max_depth=2,
68 | min_segments=20,
69 | how="totals",
70 | solver="lasso"
71 | )
72 | plot
73 |
74 | Or for finding changes between two datasets in average:
75 |
76 | sf1 = explain_changes_in_average(
77 | df1=pre_data,
78 | df2=data,
79 | dims=dims,
80 | total_name=totals,
81 | size_name=size,
82 | max_depth=2,
83 | min_segments=20,
84 | how="totals",
85 | solver="lasso"
86 | )
87 | plot
88 |
89 | And then you can visualize differences:
90 |
91 | sf.plot()
92 | And check segments:
93 |
94 | sf.segments
95 | Please see the full example here
96 |
97 | For Developers
98 | Testing
99 | We use PyTest for testing. If you want to contribute code, make sure that the tests in tests/ run without errors.
100 |
101 | Wise-pizza is open sourced and maintained by Wise Plc. Copyright 2023 Wise Plc.
102 | """
103 |
--------------------------------------------------------------------------------
/examples/data/groupchat/fetch_arxiv_gpt4.py:
--------------------------------------------------------------------------------
1 | # filename: fetch_arxiv_gpt4.py
2 | import urllib.request
3 | import urllib.parse
4 | import xml.etree.ElementTree as ET
5 |
def search_arxiv(query):
    """Query the arXiv API and return the raw Atom response bytes.

    Fetches the five most recently submitted papers matching *query*,
    newest first.
    """
    base_url = 'http://export.arxiv.org/api/query?'
    query_string = urllib.parse.urlencode({
        'search_query': query,
        'start': 0,
        'max_results': 5,
        'sortBy': 'submittedDate',
        'sortOrder': 'descending'
    })
    request_url = base_url + query_string
    with urllib.request.urlopen(request_url) as response:
        return response.read()
20 |
def parse_response(response):
    """Extract paper metadata from an arXiv Atom feed.

    Args:
        response: Raw Atom XML (bytes or str) as returned by the arXiv API.

    Returns:
        A list of dicts with 'title', 'published' and 'summary' keys,
        one per feed entry, in feed order.
    """
    atom = '{http://www.w3.org/2005/Atom}'
    root = ET.fromstring(response)
    return [
        {
            'title': entry.find(atom + 'title').text,
            'published': entry.find(atom + 'published').text,
            'summary': entry.find(atom + 'summary').text,
        }
        for entry in root.findall(atom + 'entry')
    ]
30 |
def main():
    """Search arXiv for GPT-4 papers and print the most recent one."""
    response = search_arxiv('all:"GPT-4"')
    papers = parse_response(response)
    if not papers:
        print("No papers found.")
        return
    newest = papers[0]
    print("Most Recent Paper on GPT-4:")
    print("Title:", newest['title'])
    print("Published Date:", newest['published'])
    print("Summary:", newest['summary'])
42 |
# Script entry point.
if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/examples/data/groupchat/fetch_latest_gpt4_paper.py:
--------------------------------------------------------------------------------
1 | # filename: fetch_latest_gpt4_paper.py
2 | import requests
3 | from datetime import datetime
4 |
def fetch_latest_paper():
    """Fetch the most recent GPT-4-related paper from the arXiv API and print
    its title, publication date and abstract to stdout."""
    # Define the API endpoint
    url = "http://export.arxiv.org/api/query"

    # Set the search parameters to find papers related to GPT-4
    params = {
        "search_query": "all:GPT-4",
        "sortBy": "submittedDate",
        "sortOrder": "descending",
        "max_results": 1
    }

    # Send a GET request to the API
    response = requests.get(url, params=params)

    if response.status_code == 200:
        # Parse the response XML
        from xml.etree import ElementTree as ET
        root = ET.fromstring(response.content)

        # Navigate to the entry element
        entry = root.find('{http://www.w3.org/2005/Atom}entry')
        if entry is not None:
            # Extract title and summary (abstract)
            title = entry.find('{http://www.w3.org/2005/Atom}title').text
            summary = entry.find('{http://www.w3.org/2005/Atom}summary').text
            published_date = entry.find('{http://www.w3.org/2005/Atom}published').text

            # Convert published date to a readable format
            published_datetime = datetime.strptime(published_date, '%Y-%m-%dT%H:%M:%SZ')

            print("Title:", title)
            print("Published Date:", published_datetime.strftime('%Y-%m-%d'))
            print("Abstract:", summary.strip())
        else:
            print("No GPT-4 papers found.")
    else:
        print("Failed to fetch data from arXiv. Status code:", response.status_code)
43 |
44 | fetch_latest_paper()
--------------------------------------------------------------------------------
/examples/data/research_agent_storage/graph_store.json:
--------------------------------------------------------------------------------
1 | {"graph_dict": {}}
--------------------------------------------------------------------------------
/examples/data/research_agent_storage/image__vector_store.json:
--------------------------------------------------------------------------------
1 | {"embedding_dict": {}, "text_id_to_ref_doc_id": {}, "metadata_dict": {}}
--------------------------------------------------------------------------------
/examples/delegation_demo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import platform
3 | import sys
4 | from pathlib import Path
5 |
6 | import kuzu
7 | from dotenv import load_dotenv
8 | from langchain_community.tools import DuckDuckGoSearchRun
9 |
10 | from motleycrew.agents.crewai import CrewAIMotleyAgent
11 | from motleycrew.agents.langchain import ReActToolCallingMotleyAgent
12 | from motleycrew.agents.llama_index import ReActLlamaIndexMotleyAgent
13 | from motleycrew.common import configure_logging
14 | from motleycrew.storage import MotleyKuzuGraphStore
15 | from motleycrew.tasks import SimpleTask
16 | from motleycrew.tools.image.dall_e import DallEImageGeneratorTool
17 |
18 | WORKING_DIR = Path(os.path.realpath("."))
19 |
20 | try:
21 | from motleycrew import MotleyCrew
22 | except ImportError:
23 | # if we are running this from source
24 | motleycrew_location = os.path.realpath(WORKING_DIR / "..")
25 | sys.path.append(motleycrew_location)
26 |
27 | if __name__ == "__main__":
28 | if "Dropbox" in WORKING_DIR.parts and platform.system() == "Windows":
29 | # On Windows, kuzu has file locking issues with Dropbox
30 | DB_PATH = os.path.realpath(os.path.expanduser("~") + "/Documents/research_db")
31 | else:
32 | DB_PATH = os.path.realpath(WORKING_DIR / "research_db")
33 |
34 | else:
35 | DB_PATH = os.path.realpath(WORKING_DIR / "tests/research_db")
36 |
37 |
def main():
    """Run the delegation demo.

    A writer agent (which can delegate research to a CrewAI researcher agent
    given to it as a tool) produces a blog post, and a LlamaIndex illustrator
    agent then creates images for it.

    Returns:
        The output of the illustration task.
    """
    db = kuzu.Database(DB_PATH)
    graph_store = MotleyKuzuGraphStore(db)
    crew = MotleyCrew(graph_store=graph_store)

    search_tool = DuckDuckGoSearchRun()

    researcher = CrewAIMotleyAgent(
        role="Senior Research Analyst",
        goal="Uncover cutting-edge developments in AI and data science, doing web search if necessary",
        backstory="""You work at a leading tech think tank.
        Your expertise lies in identifying emerging trends.
        You have a knack for dissecting complex data and presenting actionable insights.""",
        verbose=True,
        tools=[search_tool],
    )

    # You can give agents as tools to other agents
    writer = ReActToolCallingMotleyAgent(
        name="AI writer agent",
        prompt_prefix="You are an experienced writer with a passion for technology.",
        description="Experienced writer with a passion for technology.",
        tools=[researcher],
        verbose=True,
    )

    # Illustrator
    illustrator = ReActLlamaIndexMotleyAgent(
        name="Illustrator",
        prompt_prefix="Create beautiful and insightful illustrations for a blog post",
        tools=[DallEImageGeneratorTool(os.path.realpath("./images"))],
    )

    blog_post_task = SimpleTask(
        crew=crew,
        name="produce blog post on AI advancements",
        description="""Using the insights provided by a thorough web search, develop an engaging blog
        post that highlights the most significant AI advancements.
        Your post should be informative yet accessible, catering to a tech-savvy audience.
        Make it sound cool, avoid complex words so it doesn't sound like AI.
        Create a blog post of at least 4 paragraphs, in markdown format.""",
        agent=writer,
    )

    illustration_task = SimpleTask(
        crew=crew,
        name="create an illustration for the blog post",
        description="""Create beautiful and insightful illustrations to accompany the blog post on AI advancements.
        The blog post will be provided to you in markdown format.
        Make sure to use the illustration tool provided to you, once per illustration, and embed the URL provided by
        the tool into the blog post.""",
        agent=illustrator,
    )

    # Make sure the illustration task runs only once the blog post task is complete, and gets its input
    blog_post_task >> illustration_task

    # Get your crew to work! The run result was previously bound to an unused
    # local; the task outputs are what callers actually need.
    crew.run()

    # Get the outputs of the task
    print(blog_post_task.output)
    print(illustration_task.output)
    return illustration_task.output
103 |
104 |
105 | if __name__ == "__main__":
106 | configure_logging(verbose=True)
107 |
108 | load_dotenv()
109 | main()
110 | print("yay!")
111 |
--------------------------------------------------------------------------------
/examples/images/0b96405f.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/examples/images/0b96405f.png
--------------------------------------------------------------------------------
/examples/images/651cf780.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/examples/images/651cf780.png
--------------------------------------------------------------------------------
/examples/images/cc33d04d.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/examples/images/cc33d04d.png
--------------------------------------------------------------------------------
/examples/key_value_store.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from motleycrew import MotleyCrew
4 | from motleycrew.agents.langchain import ReActToolCallingMotleyAgent
5 | from motleycrew.storage.kv_store_domain import SimpleRetrievableObject
6 | from motleycrew.tasks import SimpleTask
7 | from motleycrew.tools import MotleyTool
8 |
9 |
class ObjectInsertionTool(MotleyTool):
    """Demo tool that caches a couple of dummy objects in the agent's key-value store."""

    def run(self, query: str) -> List[str]:
        """Insert two dummy retrievable objects and return their summaries."""
        cached_objects = [
            SimpleRetrievableObject(
                id="321",
                name="test",
                description="test",
                payload={"test": 2.35, "blah": 3.14},
            ),
            SimpleRetrievableObject(
                id="aaa",
                name="test1",
                description="another dummy object",
                payload={"test": 2.35, "blah": 3.14},
            ),
        ]
        for obj in cached_objects:
            self.agent.kv_store[obj.id] = obj
        return [obj.summary for obj in cached_objects]
27 |
28 |
class ObjectFetcherTool(MotleyTool):
    """Demo tool that looks up a cached object by id and prints its payload."""

    def run(self, object_id: str) -> str:
        cached = self.agent.kv_store[object_id]
        print(cached.payload)
        return "success!"
34 |
35 |
if __name__ == "__main__":
    from motleycrew.common.logging import configure_logging

    configure_logging()

    instructions = """Your job is to first call the Object Insertion Tool and get back from it the metadata
of some data objects; each metadata will include an id.
You should then call the Object Fetcher Tool with the id of one of the objects you got back from the Object Insertion Tool.
"""

    # Build the two demo tools up front, then hand them to the agent
    insertion_tool = ObjectInsertionTool(
        name="Object_Insertion_tool",
        description="When called with a query, returns back the "
        "metadata of one or several relevant cached objects ",
    )
    fetcher_tool = ObjectFetcherTool(
        name="Object_Fetcher_tool",
        description="When called with an object id, prints the payload of the object",
    )

    my_agent = ReActToolCallingMotleyAgent(
        tools=[insertion_tool, fetcher_tool],
        description="Object retrieval agent",
        name="Object retrieval agent",
        verbose=True,
    )

    crew = MotleyCrew()
    task = SimpleTask(crew=crew, agent=my_agent, description=instructions)
    crew.run()

    print("yay!")
67 |
--------------------------------------------------------------------------------
/examples/llama_index_output_handler.py:
--------------------------------------------------------------------------------
1 | from dotenv import load_dotenv
2 | from langchain_community.tools import DuckDuckGoSearchRun
3 |
4 |
5 | from motleycrew import MotleyCrew
6 | from motleycrew.agents.llama_index import ReActLlamaIndexMotleyAgent
7 | from motleycrew.common import configure_logging
8 | from motleycrew.tasks import SimpleTask
9 | from motleycrew.common.exceptions import InvalidOutput
10 | from motleycrew.common import AsyncBackend
11 | from motleycrew.tools import MotleyTool
12 |
13 |
def main():
    """Run the example: a LlamaIndex ReAct agent whose answer is validated by an output handler."""
    search_tool = DuckDuckGoSearchRun()

    class OutputHandler(MotleyTool):
        def run(self, output: str):
            # Accept the draft only if it mentions medicine; otherwise send it back
            if "medicine" in output.lower():
                return {"checked_output": output}
            raise InvalidOutput("Add more information about AI applications in medicine.")

    output_handler = OutputHandler(
        name="output_handler",
        description="Output handler",
        return_direct=True,
    )

    # TODO: add LlamaIndex native tools
    researcher = ReActLlamaIndexMotleyAgent(
        prompt_prefix="Your goal is to uncover cutting-edge developments in AI and data science",
        tools=[search_tool, output_handler],
        force_output_handler=True,
        verbose=True,
        # default is 10; allow more because the output handler may reject drafts
        max_iterations=16,
    )

    crew = MotleyCrew(async_backend=AsyncBackend.NONE)

    # Create the task for the agent
    task = SimpleTask(
        crew=crew,
        name="produce comprehensive analysis report on AI advancements",
        description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
Identify key trends, breakthrough technologies, and potential industry impacts.
Your final answer MUST be a full analysis report""",
        agent=researcher,
    )

    # Get your crew to work!
    crew.run()

    print(task.output)
    return task.output
55 |
56 |
if __name__ == "__main__":
    configure_logging(verbose=True)

    # Load environment variables (e.g. LLM API keys) from a .env file
    load_dotenv()
    main()
62 |
--------------------------------------------------------------------------------
/examples/tool_calling_with_memory.py:
--------------------------------------------------------------------------------
1 | from dotenv import load_dotenv
2 | from langchain_community.tools import DuckDuckGoSearchRun
3 |
4 | from motleycrew import MotleyCrew
5 | from motleycrew.agents.langchain.tool_calling_react import ReActToolCallingMotleyAgent
6 | from motleycrew.common import configure_logging
7 | from motleycrew.tasks import SimpleTask
8 |
9 |
def main():
    """Run two sequential tasks with the same agent to show chat memory persisting across tasks.

    The first task produces a report; the second asks the agent to summarize
    "the report you just wrote", which only works if chat history is kept.
    """
    search_tool = DuckDuckGoSearchRun()

    tools = [search_tool]

    researcher = ReActToolCallingMotleyAgent(
        tools=tools,
        verbose=True,
        chat_history=True,  # keep conversation history between task runs
        # Example: pass a custom LLM instead of the default one
        # llm=init_llm(
        #     llm_framework=LLMFramework.LANGCHAIN,
        #     llm_family=LLMFamily.ANTHROPIC,
        #     llm_name="claude-3-sonnet-20240229",
        # ),
    )

    crew = MotleyCrew()
    task = SimpleTask(
        crew=crew,
        name="produce comprehensive analysis report on AI advancements",
        description="""Conduct a comprehensive analysis of the latest advancements in AI.
Identify key trends, breakthrough technologies, and potential industry impacts.
Your final answer MUST be a full analysis report""",
        agent=researcher,
    )
    crew.run()
    print(task.output)

    # See whether the researcher's memory persists across tasks
    tldr_task = SimpleTask(
        crew=crew,
        name="provide a TLDR summary of the report",
        description="Write a short summary of the comprehensive analysis report on AI advancements that you just wrote.",
        agent=researcher,
    )

    crew.run()
    print(tldr_task.output)
50 |
51 |
if __name__ == "__main__":
    configure_logging(verbose=True)

    # Load environment variables (e.g. LLM API keys) from a .env file
    load_dotenv()
    main()
57 |
--------------------------------------------------------------------------------
/motleycrew/__init__.py:
--------------------------------------------------------------------------------
1 | """MotleyCrew root package."""
2 | from importlib import metadata
3 |
4 | from .crew import MotleyCrew
5 | from .tasks import Task
6 |
try:
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # No installed distribution metadata (e.g. running from a source checkout):
    # fall back to an empty version string.
    __version__ = ""
11 |
--------------------------------------------------------------------------------
/motleycrew/agents/__init__.py:
--------------------------------------------------------------------------------
1 | """Everything agent-related: wrappers, pre-made agents, output handlers etc."""
2 |
3 | from .abstract_parent import MotleyAgentAbstractParent
4 | from .parent import MotleyAgentParent
5 | from .langchain import LangchainMotleyAgent
6 |
7 | __all__ = [
8 | "MotleyAgentAbstractParent",
9 | "MotleyAgentParent",
10 | "LangchainMotleyAgent",
11 | ]
12 |
--------------------------------------------------------------------------------
/motleycrew/agents/abstract_parent.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod, abstractproperty
2 | from typing import Optional, Any, TYPE_CHECKING
3 |
4 | from langchain_core.runnables import Runnable, RunnableConfig
5 |
6 | if TYPE_CHECKING:
7 | from motleycrew.tools import MotleyTool
8 |
9 |
class MotleyAgentAbstractParent(Runnable, ABC):
    """Abstract class for describing agents.

    Agents in motleycrew implement the Langchain Runnable interface.
    """

    @property
    @abstractmethod
    def kv_store(self) -> dict:
        """Mutable mapping the agent exposes to its tools for sharing data
        (tools access it as ``self.agent.kv_store``)."""
        pass

    @abstractmethod
    def invoke(
        self,
        input: dict,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> Any:
        """Run the agent on the given input (Langchain Runnable interface)."""
        pass

    @abstractmethod
    def as_tool(self, **kwargs) -> Any:
        """Convert the agent to a tool to be used by other agents via delegation.

        Args:
            kwargs: Additional arguments to pass to the tool.
                See :class:`motleycrew.tools.tool.MotleyTool` for more details.
        """
        pass
39 |
--------------------------------------------------------------------------------
/motleycrew/agents/crewai/__init__.py:
--------------------------------------------------------------------------------
1 | from .agent_with_config import CrewAIAgentWithConfig
2 | from .crewai import CrewAIMotleyAgentParent
3 | from .crewai_agent import CrewAIMotleyAgent
4 |
5 | __all__ = [
6 | "CrewAIAgentWithConfig",
7 | "CrewAIMotleyAgentParent",
8 | "CrewAIMotleyAgent",
9 | ]
10 |
--------------------------------------------------------------------------------
/motleycrew/agents/crewai/agent_with_config.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional, List
2 |
3 | from langchain.tools.render import render_text_description
4 | from langchain_core.runnables import RunnableConfig
5 |
6 | from motleycrew.common.utils import ensure_module_is_installed
7 |
8 | try:
9 | from crewai import Agent
10 | from crewai.memory.contextual.contextual_memory import ContextualMemory
11 | except ImportError:
12 | Agent = object
13 | ContextualMemory = object
14 |
15 |
class CrewAIAgentWithConfig(Agent):
    def __init__(self, *args, **kwargs):
        """Subclass for CrewAI Agent that overrides the execute_task method to include a config parameter.

        Args:
            *args: Positional arguments, passed through to ``crewai.Agent``.
            **kwargs: Keyword arguments, passed through to ``crewai.Agent``.

        Todo:
            * get rid of this when https://github.com/joaomdmoura/crewAI/pull/483 is merged.
        """
        ensure_module_is_installed("crewai")
        super(CrewAIAgentWithConfig, self).__init__(*args, **kwargs)

    def execute_task(
        self,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
        config: Optional[RunnableConfig] = None,
    ) -> str:
        """Execute a task with the agent.

        Args:
            task: Task to execute.
            context: Context to execute the task in.
            tools: Tools to use for the task.
            config: Langchain config forwarded to the agent executor's
                ``invoke`` call (the reason this subclass exists).

        Returns:
            Output of the agent
        """
        if self.tools_handler:
            # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")
            self.tools_handler.last_used_tool = {}

        task_prompt = task.prompt()

        if context:
            task_prompt = self.i18n.slice("task_with_context").format(
                task=task_prompt, context=context
            )

        # Prepend relevant crew memories (short-term, long-term, entity) to the prompt
        if self.crew and self.crew.memory:
            contextual_memory = ContextualMemory(
                self.crew._short_term_memory,
                self.crew._long_term_memory,
                self.crew._entity_memory,
            )
            memory = contextual_memory.build_context_for_task(task, context)
            if memory.strip() != "":
                task_prompt += self.i18n.slice("memory").format(memory=memory)

        tools = tools or self.tools
        # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]"
        parsed_tools = self._parse_tools(tools or [])
        self.create_agent_executor(tools=tools)
        self.agent_executor.tools = parsed_tools
        self.agent_executor.task = task

        self.agent_executor.tools_description = render_text_description(parsed_tools)
        self.agent_executor.tools_names = self.__tools_names(parsed_tools)

        if self.crew and self.crew._train:
            task_prompt = self._training_handler(task_prompt=task_prompt)
        else:
            task_prompt = self._use_trained_data(task_prompt=task_prompt)

        result = self.agent_executor.invoke(
            {
                "input": task_prompt,
                "tool_names": self.agent_executor.tools_names,
                "tools": self.agent_executor.tools_description,
            },
            config=config,  # forwarded config — the behavior added by this subclass
        )["output"]
        if self.max_rpm:
            self._rpm_controller.stop_rpm_counter()
        return result

    @staticmethod
    def __tools_names(tools) -> str:
        return ", ".join([t.name for t in tools])
98 |
--------------------------------------------------------------------------------
/motleycrew/agents/crewai/crewai_agent.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Optional, Any, Sequence
4 |
5 | from motleycrew.agents.crewai import CrewAIAgentWithConfig
6 | from motleycrew.agents.crewai import CrewAIMotleyAgentParent
7 | from motleycrew.common import LLMFramework
8 | from motleycrew.common import MotleySupportedTool
9 | from motleycrew.common.llms import init_llm
10 | from motleycrew.tools import MotleyTool
11 |
12 |
class CrewAIMotleyAgent(CrewAIMotleyAgentParent):
    """MotleyCrew wrapper for a CrewAI Agent.

    The constructor deliberately mirrors the CrewAI agent's own interface
    (role / goal / backstory etc.) so that CrewAI users feel at home.
    """

    def __init__(
        self,
        role: str,
        goal: str,
        backstory: str,
        prompt_prefix: str | None = None,
        description: str | None = None,
        delegation: bool = False,
        tools: Sequence[MotleySupportedTool] | None = None,
        force_output_handler: bool = False,
        llm: Optional[Any] = None,
        verbose: bool = False,
    ):
        """
        Args:
            role: ``role`` param of the CrewAI Agent; also used as the agent's name.
            goal: ``goal`` param of the CrewAI Agent.
            backstory: ``backstory`` param of the CrewAI Agent.
            prompt_prefix: Prefix to the agent's prompt, for providing
                additional context such as the agent's role or backstory.
            description: Describes the agent's purpose when it is given as a
                tool to other agents; not included in the prompt itself.
            delegation: **Not supported in this wrapper.** Pass the agents you
                want to delegate to as tools instead.
            tools: Tools to add to the agent.
            force_output_handler: Whether to force the use of an output handler.
                NOTE: This is currently not supported for CrewAI agents.
            llm: LLM instance to use (a Langchain LLM by default).
            verbose: Whether to log verbose output.
        """
        if tools is None:
            tools = []

        if llm is None:
            # CrewAI uses Langchain LLMs by default
            llm = init_llm(llm_framework=LLMFramework.LANGCHAIN)

        if delegation:
            raise ValueError(
                "'delegation' is not supported, pass the agents you want to delegate to as tools instead."
            )

        def agent_factory(tools: dict[str, MotleyTool]):
            # The underlying CrewAI agent consumes Langchain-style tools
            crewai_tools = [tool.to_langchain_tool() for tool in tools.values()]
            return CrewAIAgentWithConfig(
                role=role,
                goal=goal,
                backstory=backstory,
                verbose=verbose,
                allow_delegation=False,
                tools=crewai_tools,
                llm=llm,
            )

        super().__init__(
            goal=goal,
            prompt_prefix=prompt_prefix,
            description=description,
            name=role,
            agent_factory=agent_factory,
            tools=tools,
            force_output_handler=force_output_handler,
            verbose=verbose,
        )
98 |
--------------------------------------------------------------------------------
/motleycrew/agents/langchain/__init__.py:
--------------------------------------------------------------------------------
1 | from .langchain import LangchainMotleyAgent
2 |
3 | from .legacy_react import LegacyReActMotleyAgent
4 | from .tool_calling_react import ReActToolCallingMotleyAgent
5 |
6 | __all__ = [
7 | "LangchainMotleyAgent",
8 | "LegacyReActMotleyAgent",
9 | "ReActToolCallingMotleyAgent",
10 | ]
11 |
--------------------------------------------------------------------------------
/motleycrew/agents/langchain/legacy_react.py:
--------------------------------------------------------------------------------
from __future__ import annotations

from typing import Optional, Sequence

from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.history import GetSessionHistoryCallable

from motleycrew.agents.langchain import LangchainMotleyAgent
from motleycrew.common import LLMFramework, MotleySupportedTool
from motleycrew.common.llms import init_llm
from motleycrew.tools import MotleyTool
14 |
# Classic text-based ReAct prompt (Thought / Action / Observation loop) for
# models without native tool-calling support. The Langchain ReAct agent fills
# in {tools}, {tool_names}, {input} and {agent_scratchpad}.
DEFAULT_REACT_PROMPT = PromptTemplate.from_template(
    """Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought: {agent_scratchpad}
"""
)
37 |
38 |
class LegacyReActMotleyAgent(LangchainMotleyAgent):
    """Basic ReAct agent compatible with older models without dedicated tool calling support.

    It's probably better to use the more advanced
    :class:`motleycrew.agents.langchain.tool_calling_react.ReActToolCallingAgent` with newer models.
    """

    def __init__(
        self,
        tools: Sequence[MotleySupportedTool],
        description: str | None = None,
        name: str | None = None,
        prompt_prefix: str | None = None,
        chat_history: bool | GetSessionHistoryCallable = True,
        force_output_handler: bool = False,
        prompt: str | None = None,
        handle_parsing_errors: bool = True,
        handle_tool_errors: bool = True,
        llm: BaseLanguageModel | None = None,
        runnable_config: RunnableConfig | None = None,
        verbose: bool = False,
    ):
        """
        Args:
            tools: Tools to add to the agent. At least one tool is required.
            description: Description of the agent.
            name: Name of the agent.
            prompt_prefix: Prefix to the agent's prompt.
            chat_history: Whether to use chat history or not.
            force_output_handler: Whether to force the agent to return through an output handler.
                Not supported by this agent; passing True raises an exception.
            prompt: Custom prompt to use with the agent.
                Defaults to ``DEFAULT_REACT_PROMPT``.
            handle_parsing_errors: Whether to handle parsing errors.
            handle_tool_errors: Whether to handle tool errors.
            llm: Language model to use.
            runnable_config: Default Langchain config to use when invoking the agent.
                It can be used to add callbacks, metadata, etc.
            verbose: Whether to log verbose output.
        """
        if force_output_handler:
            raise Exception("Forced output handler is not supported with legacy ReAct agent.")

        if prompt is None:
            prompt = DEFAULT_REACT_PROMPT

        if llm is None:
            llm = init_llm(llm_framework=LLMFramework.LANGCHAIN)

        if not tools:
            raise ValueError("You must provide at least one tool to the ReActMotleyAgent")

        def agent_factory(
            tools: dict[str, MotleyTool], output_handler: Optional[MotleyTool] = None
        ) -> AgentExecutor:
            # Convert motleycrew tools to Langchain tools for the executor
            langchain_tools = [t.to_langchain_tool() for t in tools.values()]
            if output_handler:
                langchain_tools.append(output_handler.to_langchain_tool())

            if handle_tool_errors:
                for tool in langchain_tools:
                    tool.handle_tool_error = True
                    tool.handle_validation_error = True

            agent = create_react_agent(llm=llm, tools=langchain_tools, prompt=prompt)
            agent_executor = AgentExecutor(
                agent=agent,
                tools=langchain_tools,
                handle_parsing_errors=handle_parsing_errors,
                verbose=verbose,
            )
            return agent_executor

        super().__init__(
            prompt_prefix=prompt_prefix,
            description=description,
            name=name,
            agent_factory=agent_factory,
            tools=tools,
            chat_history=chat_history,
            runnable_config=runnable_config,
            verbose=verbose,
        )
121 |
--------------------------------------------------------------------------------
/motleycrew/agents/llama_index/__init__.py:
--------------------------------------------------------------------------------
1 | from .llama_index import LlamaIndexMotleyAgent
2 | from .llama_index_react import ReActLlamaIndexMotleyAgent
3 |
4 | __all__ = [
5 | "LlamaIndexMotleyAgent",
6 | "ReActLlamaIndexMotleyAgent",
7 | ]
8 |
--------------------------------------------------------------------------------
/motleycrew/agents/llama_index/llama_index_react.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Sequence
4 |
5 | try:
6 | from llama_index.core.agent import ReActAgent
7 | from llama_index.core.llms import LLM
8 | from llama_index.core.callbacks import CallbackManager
9 | except ImportError:
10 | LLM = object
11 |
12 | from motleycrew.agents.llama_index import LlamaIndexMotleyAgent
13 | from motleycrew.tools import MotleyTool
14 | from motleycrew.common import MotleySupportedTool
15 | from motleycrew.common import LLMFramework
16 | from motleycrew.common.llms import init_llm
17 | from motleycrew.tracking import get_default_callbacks_list
18 | from motleycrew.common.utils import ensure_module_is_installed
19 |
20 |
class ReActLlamaIndexMotleyAgent(LlamaIndexMotleyAgent):
    """Wrapper around LlamaIndex's built-in ReAct agent implementation."""

    def __init__(
        self,
        prompt_prefix: str | None = None,
        description: str | None = None,
        name: str | None = None,
        tools: Sequence[MotleySupportedTool] | None = None,
        force_output_handler: bool = False,
        llm: LLM | None = None,
        verbose: bool = False,
        max_iterations: int = 10,
    ):
        """
        Args:
            prompt_prefix: Prefix to the agent's prompt, for providing
                additional context such as the agent's role or backstory.
            description: Describes the agent's purpose when it is given as a
                tool to other agents; not included in the prompt itself.
            name: Identifier used for logging and for delegation as a tool;
                not included in the agent's prompt.
            tools: Tools to add to the agent.
            force_output_handler: Whether to force the agent to return through
                an output handler. If True, at least one tool must have
                return_direct set to True.
            llm: LLM instance to use.
            verbose: Whether to log verbose output.
            max_iterations: Maximum number of agent iterations, forwarded to
                the ReActAgent's ``max_iterations`` parameter.
        """
        ensure_module_is_installed("llama_index")

        if llm is None:
            llm = init_llm(llm_framework=LLMFramework.LLAMA_INDEX)

        def agent_factory(tools: dict[str, MotleyTool]) -> ReActAgent:
            # Convert motleycrew tools to the LlamaIndex tool format
            converted_tools = [tool.to_llama_index_tool() for tool in tools.values()]
            default_callbacks = get_default_callbacks_list(LLMFramework.LLAMA_INDEX)
            return ReActAgent.from_tools(
                tools=converted_tools,
                llm=llm,
                verbose=verbose,
                max_iterations=max_iterations,
                callback_manager=CallbackManager(default_callbacks),
            )

        super().__init__(
            prompt_prefix=prompt_prefix,
            description=description,
            name=name,
            agent_factory=agent_factory,
            tools=tools,
            force_output_handler=force_output_handler,
            verbose=verbose,
        )
89 |
--------------------------------------------------------------------------------
/motleycrew/applications/__init__.py:
--------------------------------------------------------------------------------
1 | """Miscellaneous motleycrew applications."""
2 |
--------------------------------------------------------------------------------
/motleycrew/applications/customer_support/README.md:
--------------------------------------------------------------------------------
1 | # Customer support agent demo
2 |
3 | This is a demo of a customer support app built using motleycrew and Ray.
4 |
5 | It includes sample data for populating the issue tree.
6 |
7 |
8 | ## Installation and usage
9 | We suggest you set up a virtualenv for managing the environment.
10 |
11 | ```
12 | git clone https://github.com/ShoggothAI/motleycrew.git
13 | cd motleycrew
14 | pip install -r requirements.txt
15 |
16 | python -m motleycrew.applications.customer_support.issue_tree # populate the issue tree
17 | ray start --head
18 | python -m motleycrew.applications.customer_support.ray_serve_app
19 | ```
20 |
21 | Navigate to http://127.0.0.1:8000/ and have fun!
22 | Also, check out the Ray dashboard for the app logs etc.
23 |
24 | ## Example screenshot
25 |
26 |
--------------------------------------------------------------------------------
/motleycrew/applications/customer_support/communication.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | import asyncio
3 |
4 |
class CommunicationInterface(ABC):
    """Abstract channel for communicating with a customer during a support session."""

    @abstractmethod
    async def send_message_to_customer(self, message: str) -> str:
        """
        Send a message to the customer and return their response.

        Args:
            message (str): The message to send to the customer.

        Returns:
            str: The customer's response.
        """
        pass

    @abstractmethod
    def escalate_to_human_agent(self) -> None:
        """
        Escalate the current issue to a human agent.
        """
        pass

    @abstractmethod
    def resolve_issue(self, resolution: str) -> str:
        """
        Resolve the current issue.

        Args:
            resolution (str): The resolution to the issue.

        Returns:
            str: The resolution to the issue.
        """
        pass
38 |
39 |
class DummyCommunicationInterface(CommunicationInterface):
    """Console-based communication interface for local testing.

    Messages are printed to stdout and customer replies are read from stdin.
    """

    async def send_message_to_customer(self, message: str) -> str:
        """Print the message and read the customer's reply.

        ``input`` runs in a worker thread so the event loop is not blocked.
        """
        print(f"Message sent to customer: {message}")
        return await asyncio.to_thread(input, "Enter customer's response: ")

    def escalate_to_human_agent(self) -> None:
        print("Issue escalated to human agent.")

    def resolve_issue(self, resolution: str) -> str:
        """Propose a resolution and ask for confirmation on the console.

        Fix: the escalation branch previously fell off the end of the
        function and implicitly returned None despite the ``-> str``
        annotation; it now returns an explicit status string.
        """
        print(f"Proposed resolution: {resolution}")
        confirmation = input("Is the issue resolved? (y/n): ")
        if confirmation.lower().startswith("y"):
            return "Issue resolved"
        self.escalate_to_human_agent()
        return "Issue escalated to human agent"
55 |
56 |
# Placeholder for future implementation
class RealCommunicationInterface(CommunicationInterface):
    """Stub for a production communication channel (chat, email, etc.).

    NOTE(review): ``resolve_issue`` is not implemented here, so this class
    still has an abstract method and cannot be instantiated as-is.
    """

    async def send_message_to_customer(self, message: str) -> str:
        # TODO: Implement real asynchronous communication with the customer
        # This could involve integrating with a chat system, email, or other communication channels
        pass

    def escalate_to_human_agent(self) -> None:
        # TODO: Implement real escalation to a human agent
        # This could involve creating a ticket in a support system or notifying a human agent directly
        pass
68 |
--------------------------------------------------------------------------------
/motleycrew/applications/customer_support/ray_serve_app.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import ray
4 | from dotenv import load_dotenv
5 | from fastapi import FastAPI, WebSocket
6 | from fastapi.responses import FileResponse
7 | from ray import serve
8 |
9 | from motleycrew.applications.customer_support.support_agent import (
10 | CustomerChatTool,
11 | IssueTreeViewTool,
12 | ResolveIssueTool,
13 | SupportAgent,
14 | SupportAgentContext,
15 | )
16 | from motleycrew.common import configure_logging, logger
17 | from motleycrew.common.llms import LLMFramework, init_llm
18 | from motleycrew.storage import MotleyKuzuGraphStore
19 |
20 | app = FastAPI()
21 |
22 |
class WebSocketCommunicationInterface:
    """Customer communication channel backed by a FastAPI WebSocket.

    Mirrors the ``CommunicationInterface`` method names, sending JSON
    messages of the form ``{"type": ..., "content": ...}`` to the client.
    """

    def __init__(self, websocket: WebSocket):
        # The open websocket connection for this chat session
        self.websocket = websocket

    async def send_message_to_customer(self, message: str) -> str:
        """Send an agent message and wait for the customer's text reply."""
        await self.websocket.send_json({"type": "agent_message", "content": message})
        response = await self.websocket.receive_text()
        return response

    async def escalate_to_human_agent(self) -> None:
        """Notify the customer that their issue was escalated."""
        await self.websocket.send_json(
            {"type": "escalation", "content": "Your issue has been escalated to a human agent."}
        )

    async def resolve_issue(self, resolution: str) -> str:
        """Send the resolution text to the customer.

        NOTE(review): annotated ``-> str`` but nothing is returned — confirm
        whether any caller uses the return value.
        """
        await self.websocket.send_json({"type": "resolution", "content": resolution})
39 |
40 |
@serve.deployment(num_replicas=3, ray_actor_options={"num_cpus": 1, "num_gpus": 0})
@serve.ingress(app)
class SupportAgentDeployment:
    """Ray Serve deployment that serves the support-agent chat page and websocket."""

    def __init__(self):
        configure_logging(verbose=True)
        load_dotenv()

        # The issue tree database is pre-populated (see the issue_tree module);
        # replicas only query it, hence read_only=True.
        database = MotleyKuzuGraphStore.from_persist_dir(
            str(Path(__file__).parent / "issue_tree_db"), read_only=True
        )
        self.graph_store = database
        self.llm = init_llm(LLMFramework.LANGCHAIN)

    @app.get("/")
    async def root(self):
        """Serve the static chat UI page."""
        return FileResponse(Path(__file__).parent / "static" / "index.html")

    @app.websocket("/ws")
    async def websocket_endpoint(self, websocket: WebSocket):
        """Drive one customer chat session over a websocket connection."""
        await websocket.accept()
        await websocket.send_json(
            {"type": "agent_message", "content": "Hello! How can I help you?"}
        )

        communication_interface = WebSocketCommunicationInterface(websocket)

        # Each session gets its own context and tool instances bound to it
        context = SupportAgentContext(self.graph_store, communication_interface)
        issue_tree_view_tool = IssueTreeViewTool(context)
        customer_chat_tool = CustomerChatTool(context)
        resolve_issue_tool = ResolveIssueTool(context)

        agent = SupportAgent(
            issue_tree_view_tool=issue_tree_view_tool,
            customer_chat_tool=customer_chat_tool,
            resolve_issue_tool=resolve_issue_tool,
            llm=self.llm,
        )

        try:
            while True:
                data = await websocket.receive_text()
                resolution = await agent.ainvoke({"prompt": data})
                # The agent signals escalation via additional_kwargs
                if resolution.additional_kwargs.get("escalate"):
                    await communication_interface.escalate_to_human_agent()
                else:
                    await communication_interface.resolve_issue(resolution.content)
        except Exception as e:
            # Connection closed or agent failure: log and end the session
            logger.error(f"WebSocket error: {e}")
89 |
90 |
def main():
    """Deploy the support agent app to an already-running Ray cluster."""
    configure_logging(verbose=True)

    deployment = SupportAgentDeployment.bind()

    # Connect to an existing Ray cluster (started e.g. with `ray start --head`)
    # rather than launching a new local one.
    ray.init(address="auto")
    serve.run(deployment)


if __name__ == "__main__":
    main()
102 |
--------------------------------------------------------------------------------
/motleycrew/applications/customer_support/requirements.txt:
--------------------------------------------------------------------------------
1 | motleycrew
2 | ray[serve]
3 | pandas
4 |
--------------------------------------------------------------------------------
/motleycrew/applications/customer_support/static/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Customer Support Chat
7 |
58 |
59 |
60 | Customer Support Chat
61 |
62 |
63 |
64 |
65 |
66 |
67 |
127 |
128 |
129 |
--------------------------------------------------------------------------------
/motleycrew/applications/expenses/expenses.py:
--------------------------------------------------------------------------------
1 | from motleycrew.agents.langchain import ReActToolCallingMotleyAgent
2 | from schema_delta import delta_generator
3 | from sql_tools import SQLExpression
4 |
# Prompt for an agent that reviews whether a generated SQL check expresses the
# written expense policy. Placeholders {policy}, {schema} and {query} are
# filled from the agent inputs (see input_schema="auto" below).
schema_critic_prompt = """ You are an experienced data engineer and you have been tasked with reviewing whether a
SQL query correctly checks whether expenses conform to policy. You are given a schema, a query against it, and the
expenses policy section text to verify against it. If the query correctly expresses the policy, return an empty string.
If it doesn't, call the exception tool, passing it a detailed explanation of why it doesn't.
Policy section:
{policy}
Schema:
{schema}
Query:
{query}
"""

# NOTE(review): design-sketch code — ExceptionTool is not imported in this file
# and input_schema="auto" relies on prompt parsing; confirm against the real agent API.
schema_critic = ReActToolCallingMotleyAgent(
    description=schema_critic_prompt,
    input_schema="auto",  # Input schema should be auto-parsed from the prompt, with string types
    tools=[ExceptionTool],
)
22 |
23 |
class SchemaDesignerOutput(BaseModel):
    """Result of the schema designer: a policy-violation query plus the (possibly extended) schema."""

    # Pydantic fields must be declared as `name: Type = Field(...)`.
    # The previous `name: Field(Type, ...)` form puts a FieldInfo in the
    # annotation slot, which is invalid and breaks model creation.
    query: SQLExpression = Field(
        description="A valid PGSQL query that returns rows that violate the policy"
    )
    latest_schema: SQLExpression = Field(
        description="The expenses table schema, represented as a valid PGSQL query that will create the table, "
        "with any new columns you added to represent the policy",
    )
33 |
34 |
class SchemaDesignerExtendedOutput(SchemaDesignerOutput):
    """Schema designer output extended with generated example data."""

    # Declared as `name: Type = Field(...)` — the annotation must be the type,
    # not a Field(...) call, for pydantic to accept the model.
    data_examples: SQLExpression = Field(
        description="Examples of valid and invalid rows in the expenses table according to the policy",
    )
40 |
41 |
class SchemaDesignerInput(BaseModel):
    """Input to the schema designer: the current schema and the policy section to encode."""

    # Fields declared as `name: Type = Field(...)`; the previous
    # `name: Field(Type, ...)` form is invalid pydantic syntax.
    schema: SQLExpression = Field(
        description="The expenses table schema so far, represented as a valid PGSQL query that will create the table",
    )
    policy: str = Field(description="The policy section to be represented in the schema")
48 |
49 |
class VerifyPolicyRepresentation(MotleyTool):
    """Output handler that validates the schema designer's result.

    It checks that the proposed query expresses the policy, that the new schema
    is a strict extension of the original one, and generates example data.

    NOTE(review): design-sketch code — `retrieve_input`, `data_example_generator`
    and the keyword-style `invoke` calls are not defined in this file; confirm
    against the real MotleyTool/agent APIs before use.
    """

    def invoke(self, latest_schema: str, query: str):
        # First check whether the inputs are valid SQL
        latest_schema = SQLExpression(latest_schema)
        query = SQLExpression(query)

        # Retrieve the inputs to the calling agent - how can we do this gracefully?
        original_schema = self.retrieve_input("schema")
        policy = self.retrieve_input("policy")

        # Now check whether the query correctly expresses the policy
        # This will raise an exception if the query doesn't correctly express the policy
        schema_critic.invoke(schema=latest_schema, query=query, policy=policy)

        # This will raise an exception if the latest schema is not a strict extension of the original one
        # Is there an easier way to do this?
        schema_change = delta_generator.invoke(
            latest_schema=SQLExpression(latest_schema),
            original_schema=SQLExpression(original_schema),
        )

        # TODO: write a data generator, separate agent with validation of output
        # For the first time in this flow, actually call postgres to verify?
        data_examples = data_example_generator.invoke(schema=latest_schema, query=query)

        return SchemaDesignerExtendedOutput(
            query=query, latest_schema=latest_schema, data_examples=data_examples
        )
78 |
79 |
# Should this be a task?
# parametrize tasks

# Prompt for the schema designer agent. Placeholders {policy} and {schema} are
# filled from SchemaDesignerInput. Fixed: "confirm to policy" -> "conform to
# policy", and completed the previously truncated last instruction sentence.
schema_designer_prompt = """ You are an experienced data engineer and you have been tasked with designing a schema and
a validation query to check whether expenses conform to policy. You are given a draft schema and the description of
a section of the expense policy.

You are allowed to add new columns to the schema ONLY IF NECESSARY TO REPRESENT THE POLICY SECTION
YOU ARE GIVEN. ONLY add new columns if the current policy section can't be expressed with existing columns.

Policy section:
{policy}
Schema:
{schema}
"""

# The output handler validates the result and feeds errors back to the agent.
schema_designer = ReActToolCallingMotleyAgent(
    description=schema_designer_prompt,
    input_schema=SchemaDesignerInput,
    output_handler=VerifyPolicyRepresentation(),
)
100 |
--------------------------------------------------------------------------------
/motleycrew/applications/expenses/schema_delta.py:
--------------------------------------------------------------------------------
1 | from sql_tools import SQLExpression
2 |
3 |
class DeltaGeneratorInput(BaseModel):
    """Input for the schema-delta generator: the schema before and after column additions."""

    original_schema: SQLExpression  # schema before new columns were added
    latest_schema: SQLExpression  # schema after additions; must only add columns
7 |
8 |
# Prompt for an agent that emits an ALTER-style query turning the original
# schema into the latest one, adding columns only. Placeholders
# {original_schema} and {latest_schema} come from DeltaGeneratorInput.
delta_schema_prompt = """ You are an experienced data engineer. You are given two database schemas, the original schema
and the latest schema. The latest schema is the original schema with some columns added.

Your job is to write a sql query that will alter the original schema to turn it into the latest schema, ONLY
ADDING THE NEW COLUMNS. You are not allowed to modify the schema in any other way.
If the original schema is empty, you should write a query that will create the latest schema from scratch.
If the latest schema is the same as the original schema, return a query that does nothing.
If fulfilling these instructions is not possible, call the exception tool,
passing it a detailed explanation of why it is not possible.

(this should be prepended to the tool description of the output_handler)
ONLY return your input by calling the output handler tool provided. Your response MUST be a tool call to
either the exception tool or the output handler tool.


Original schema:
{original_schema}

Latest schema:
{latest_schema}
"""

# NOTE(review): design-sketch code — ReactToolsMotleyAgent and ExceptionTool
# are not imported in this file; confirm the intended agent class name.
delta_generator = ReactToolsMotleyAgent(
    description=delta_schema_prompt,  # Should be auto-parsed into a prompt template
    input_schema=DeltaGeneratorInput,
    output_handler=SQLExpression,  # Or should it be output_tool=SQLExpression?
    tools=[ExceptionTool],  # Always include by default?
)
37 |
--------------------------------------------------------------------------------
/motleycrew/applications/expenses/sql_tools.py:
--------------------------------------------------------------------------------
class SQLExpression(BaseModel):
    """Wrapper around a SQL expression string."""

    # TODO: when creating it, validate that it's a valid pgsql expression!
    expression: str

    def __str__(self) -> str:
        # Return the bare expression so instances interpolate cleanly into prompts
        # (implements the TODO that previously documented this intent).
        return self.expression
5 |
--------------------------------------------------------------------------------
/motleycrew/applications/faust_workflow/__init__.py:
--------------------------------------------------------------------------------
1 | from .faust_workflow import Event, FaustWorkflow, step
2 | from .visualize import draw_faust_workflow
3 |
4 | __all__ = ["Event", "FaustWorkflow", "step", "draw_faust_workflow"]
5 |
--------------------------------------------------------------------------------
/motleycrew/applications/faust_workflow/visualize.py:
--------------------------------------------------------------------------------
1 | from typing import get_args, get_origin
2 |
3 | from pyvis.network import Network
4 |
5 | from .faust_workflow import Event, FaustWorkflow
6 |
7 |
def draw_faust_workflow(
    workflow: FaustWorkflow,
    filename: str = "faust_workflow.html",
    notebook: bool = False,
) -> None:
    """Draw the Faust workflow as an interactive pyvis graph.

    Steps are rendered as boxes and event types as ellipses; the result event
    type (if any) is highlighted and linked to a terminal "_done" node.

    Args:
        workflow: The workflow instance to visualize.
        filename: Name of the HTML file to write.
        notebook: Whether to render inline in a Jupyter notebook.
    """
    net = Network(directed=True, height="750px", width="100%")

    # Add the result event node + a terminal "_done" node for it
    if workflow.result_event_type is not None:
        net.add_node(
            workflow.result_event_type.__name__,
            label=workflow.result_event_type.__name__,
            color="#FFA07A",  # salmon marks the result event
            shape="ellipse",
        )
        net.add_node("_done", label="_done", color="#ADD8E6", shape="box")
        net.add_edge(workflow.result_event_type.__name__, "_done")

    # Collect step methods, marked by the `_is_step` attribute set by @step
    steps = {
        name: func
        for name, func in workflow.__class__.__dict__.items()
        if hasattr(func, "_is_step")
    }

    for step_name, step_func in steps.items():
        net.add_node(
            step_name, label=step_name, color="#ADD8E6", shape="box"
        )  # Light blue for steps

        # Input event type(s) come from the `ev` parameter annotation;
        # a Union annotation contributes one node per member type.
        # (Removed a dead pre-computation of output_types here: it scanned all
        # annotation values but was unconditionally overwritten below.)
        input_type = step_func.__annotations__.get("ev")
        if input_type:
            input_types = [input_type] if not get_origin(input_type) else get_args(input_type)
            for t in input_types:
                net.add_node(
                    t.__name__,
                    label=t.__name__,
                    color="#90EE90",
                    shape="ellipse",
                )
                net.add_edge(t.__name__, step_name)

        # Output event type(s) come from the return annotation;
        # NoneType members of Optional returns are skipped.
        output_type = step_func.__annotations__.get("return")
        if output_type:
            output_types = [output_type] if not get_origin(output_type) else get_args(output_type)
            for t in output_types:
                if t != type(None):
                    net.add_node(
                        t.__name__,
                        label=t.__name__,
                        color="#90EE90" if t != workflow.result_event_type else "#FFA07A",
                        shape="ellipse",
                    )
                    net.add_edge(step_name, t.__name__)

    net.show(filename, notebook=notebook)
72 |
--------------------------------------------------------------------------------
/motleycrew/applications/research_agent/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/motleycrew/applications/research_agent/__init__.py
--------------------------------------------------------------------------------
/motleycrew/applications/research_agent/answer_task.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from langchain_core.runnables import Runnable
4 | from langchain_core.language_models import BaseLanguageModel
5 |
6 | from motleycrew.applications.research_agent.question import Question
7 | from motleycrew.applications.research_agent.question_answerer import AnswerSubQuestionTool
8 | from motleycrew.common import logger
9 | from motleycrew.crew import MotleyCrew
10 | from motleycrew.tasks import Task, TaskUnit
11 | from motleycrew.tools import MotleyTool
12 |
13 |
class QuestionAnsweringTaskUnit(TaskUnit):
    """Task unit carrying the question to be answered."""

    question: Question  # graph node of the question being answered
16 |
17 |
class AnswerTask(Task):
    """Task that answers questions once their context is ready.

    A question qualifies when it has gathered context and has no subquestions
    that are still unanswered.
    """

    def __init__(
        self,
        crew: MotleyCrew,
        answer_length: int = 1000,
        llm: Optional[BaseLanguageModel] = None,
    ):
        super().__init__(
            name="AnswerTask",
            task_unit_class=QuestionAnsweringTaskUnit,
            crew=crew,
            allow_async_units=True,
        )
        self.answer_length = answer_length
        self.answerer = AnswerSubQuestionTool(
            graph=self.graph_store, answer_length=self.answer_length, llm=llm
        )

    def get_next_unit(self) -> QuestionAnsweringTaskUnit | None:
        """Pick an unanswered question that has context and no pending subquestions."""
        query = (
            "MATCH (n1:{}) "
            "WHERE n1.answer IS NULL AND n1.context IS NOT NULL "
            "AND NOT EXISTS {{MATCH (n1)-[]->(n2:{}) "
            "WHERE n2.answer IS NULL AND n2.context IS NOT NULL}} "
            "RETURN n1"
        ).format(Question.get_label(), Question.get_label())

        candidates = self.graph_store.run_cypher_query(query, container=Question)
        logger.info("Available questions: %s", candidates)

        # Skip questions for which a unit has already been created.
        dispatched = {unit.question.question for unit in self.get_units()}
        for candidate in candidates:
            if candidate.question not in dispatched:
                return QuestionAnsweringTaskUnit(question=candidate)

        return None

    def get_worker(self, tools: Optional[List[MotleyTool]]) -> Runnable:
        """Return the tool that answers the chosen question."""
        return self.answerer
63 |
--------------------------------------------------------------------------------
/motleycrew/applications/research_agent/question.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | from motleycrew.storage.graph_node import MotleyGraphNode
4 |
5 | REPR_CONTEXT_LENGTH_LIMIT = 30
6 |
7 |
class Question(MotleyGraphNode):
    """Represents a question node in the graph."""

    question: str
    answer: Optional[str] = None
    context: Optional[list[str]] = None

    def __repr__(self):
        # Render the context as a short quoted list, truncated for readability.
        if self.context:
            joined = '", "'.join(self.context)
            if len(joined) > REPR_CONTEXT_LENGTH_LIMIT:
                context_repr = '["' + joined[:REPR_CONTEXT_LENGTH_LIMIT] + "...]"
            else:
                context_repr = '["' + joined + '"]'
        else:
            # None or empty list: fall back to the plain str() form.
            context_repr = str(self.context)

        return f"Question(id={self.id}, question={self.question}, answer={self.answer}, context={context_repr})"
28 |
--------------------------------------------------------------------------------
/motleycrew/applications/research_agent/question_generator.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | from langchain_core.language_models import BaseLanguageModel
4 | from langchain_core.prompts import PromptTemplate
5 | from langchain_core.prompts.base import BasePromptTemplate
6 | from langchain_core.runnables import RunnableLambda, RunnablePassthrough
7 | from langchain_core.tools import Tool
8 | from pydantic import BaseModel, Field
9 |
10 | from motleycrew.applications.research_agent.question import Question
11 | from motleycrew.common import LLMFramework, logger
12 | from motleycrew.common.llms import init_llm
13 | from motleycrew.common.utils import print_passthrough
14 | from motleycrew.storage import MotleyGraphStore
15 | from motleycrew.tools import MotleyTool
16 |
17 | IS_SUBQUESTION_PREDICATE = "is_subquestion"
18 |
19 | default_prompt = PromptTemplate.from_template(
20 | """
21 | You are a part of a team. The ultimate goal of your team is to
22 | answer the following Question: '{question}'.\n
23 | Your team has discovered some new text (delimited by ```) that may be relevant to your ultimate goal.
24 | text: \n ``` {context} ``` \n
25 | Your task is to ask new questions that may help your team achieve the ultimate goal.
26 | If you think that the text is relevant to your ultimate goal, then ask new questions.
27 | New questions should be based only on the text and the goal Question and no other previous knowledge.
28 |
29 | You can ask up to {num_questions} new questions.
30 | Return the questions each on a new line and ending with a single question mark.
31 | Don't return anything else except these questions.
32 | """
33 | )
34 |
35 |
class QuestionGeneratorTool(MotleyTool):
    """Tool that expands a question into subquestions stored in the graph.

    Given a question node, it retrieves relevant context with the query tool,
    asks the LLM for follow-up questions, and inserts them into the knowledge
    graph as children of the input question.
    """

    def __init__(
        self,
        query_tool: MotleyTool,
        graph: MotleyGraphStore,
        max_questions: int = 3,
        llm: Optional[BaseLanguageModel] = None,
        prompt: str | BasePromptTemplate = None,
    ):
        # Delegate construction of the underlying Langchain tool,
        # then wrap it as a MotleyTool.
        tool = create_question_generator_langchain_tool(
            query_tool=query_tool,
            graph=graph,
            max_questions=max_questions,
            llm=llm,
            prompt=prompt,
        )
        super().__init__(tool)
64 |
65 |
# arbitrary_types_allowed lets the non-pydantic Question node type be used as a field.
class QuestionGeneratorToolInput(BaseModel, arbitrary_types_allowed=True):
    """Input for the Question Generator Tool."""

    question: Question = Field(description="The input question for which to generate subquestions.")
70 |
71 |
def create_question_generator_langchain_tool(
    query_tool: MotleyTool,
    graph: MotleyGraphStore,
    max_questions: int = 3,
    llm: Optional[BaseLanguageModel] = None,
    prompt: str | BasePromptTemplate = None,
):
    """Build the Langchain tool that generates subquestions for a question node.

    The tool runs a pipeline that (1) retrieves context with ``query_tool``,
    (2) asks the LLM for up to ``max_questions`` new questions, and
    (3) inserts them into ``graph`` as subquestions of the input question.

    Args:
        query_tool: Tool used to retrieve context relevant to the question.
        graph: Graph store where generated questions are inserted.
        max_questions: Maximum number of subquestions to request from the LLM.
        llm: Language model to use; the Langchain default is initialized if None.
        prompt: Prompt template or template string; the default prompt is used if None.
    """
    if llm is None:
        llm = init_llm(llm_framework=LLMFramework.LANGCHAIN)

    # NOTE(review): `bind` returns a new runnable and its result is discarded,
    # so this line currently has no effect. Confirm whether `llm = llm.bind(...)`
    # was intended — enabling JSON mode would conflict with the newline-based
    # parsing in insert_questions below.
    llm.bind(json_mode=True)

    if prompt is None:
        prompt = default_prompt
    elif isinstance(prompt, str):
        prompt = PromptTemplate.from_template(prompt)

    assert isinstance(prompt, BasePromptTemplate), "Prompt must be a string or a BasePromptTemplate"

    def insert_questions(input_dict) -> None:
        # The LLM returns one question per line; drop empty/one-char lines.
        questions_raw = input_dict["subquestions"].content
        questions = [q.strip() for q in questions_raw.split("\n") if len(q.strip()) > 1]
        for q in questions:
            logger.info("Inserting question: %s", q)
            subquestion = graph.insert_node(Question(question=q))
            graph.create_relation(input_dict["question"], subquestion, IS_SUBQUESTION_PREDICATE)
        logger.info("Inserted %s questions", len(questions))

    def set_context(input_dict: dict):
        # Store the retrieved context on the input question node.
        node = input_dict["question"]
        node.context = input_dict["context"]

    # Pipeline: fetch context -> generate subquestions -> persist both results.
    # (print_passthrough stages log the intermediate dicts for debugging.)
    pipeline = (
        RunnableLambda(print_passthrough)
        | RunnablePassthrough().assign(context=query_tool.to_langchain_tool())
        | RunnableLambda(print_passthrough)
        | RunnablePassthrough().assign(
            subquestions=prompt.partial(num_questions=str(max_questions)) | llm
        )
        | RunnableLambda(print_passthrough)
        | {
            "set_context": RunnableLambda(set_context),
            "insert_questions": RunnableLambda(insert_questions),
        }
    )

    return Tool.from_function(
        func=lambda q: pipeline.invoke({"question": q}),
        name="Question Generator Tool",
        description="""Generate a list of questions based on the input question,
    and insert them into the knowledge graph.""",
        args_schema=QuestionGeneratorToolInput,
    )
125 |
--------------------------------------------------------------------------------
/motleycrew/applications/research_agent/question_prioritizer.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | from langchain.prompts import PromptTemplate
4 | from langchain_core.prompts.base import BasePromptTemplate
5 | from langchain_core.runnables import RunnableLambda, RunnablePassthrough, chain
6 | from langchain_core.tools import StructuredTool
7 | from langchain_core.language_models import BaseLanguageModel
8 | from pydantic import BaseModel, Field
9 |
10 | from motleycrew.applications.research_agent.question import Question
11 | from motleycrew.common.utils import print_passthrough
12 | from motleycrew.tools import MotleyTool
13 | from motleycrew.tools.llm_tool import LLMTool
14 |
15 |
class QuestionPrioritizerTool(MotleyTool):
    """Tool to prioritize subquestions based on the original question."""

    def __init__(
        self,
        prompt: str | BasePromptTemplate = None,
        llm: Optional[BaseLanguageModel] = None,
    ):
        """
        Args:
            prompt: Custom prompt (string or template); the default prompt is used if None.
            llm: Language model to use; the framework default is used if None.
        """
        langchain_tool = create_question_prioritizer_langchain_tool(prompt=prompt, llm=llm)

        super().__init__(langchain_tool)
27 |
28 |
# Default prompt for choosing the most pertinent question from a numbered list.
_default_prompt = PromptTemplate(
    template=(
        "You are provided with the following list of questions:"
        " {unanswered_questions_text} \n"
        " Your task is to choose one question from the above list"
        " that is the most pertinent to the following query:\n"
        " '{original_question_text}' \n"
        " Respond with a single number the chosen question out of the provided list of questions."
        " Return only the number as it is without any edits."
    ),
    # Must match the placeholder names in the template: the chain supplies the
    # `*_text` keys (the previous declaration listed the un-suffixed names).
    input_variables=["unanswered_questions_text", "original_question_text"],
)
41 |
42 |
# arbitrary_types_allowed lets the non-pydantic Question node type be used as a field.
class QuestionPrioritizerInput(BaseModel, arbitrary_types_allowed=True):
    """Input for the QuestionPrioritizerTool."""

    original_question: Question = Field(description="The original question.")
    unanswered_questions: list[Question] = Field(
        description="Questions to pick the most pertinent to the original question from.",
    )
50 |
51 |
def create_question_prioritizer_langchain_tool(
    prompt: str | BasePromptTemplate = None,
    llm: Optional[BaseLanguageModel] = None,
) -> StructuredTool:
    """Create the Langchain tool that picks the most pertinent question.

    The tool formats the candidate questions as a numbered list, asks the LLM
    for the number of the most relevant one, and returns the corresponding
    Question object.

    Args:
        prompt: Prompt template or template string; the default is used if None.
        llm: Language model to use; the framework default is used if None.
    """
    if prompt is None:
        prompt = _default_prompt

    question_prioritizer = LLMTool(
        prompt=prompt,
        name="Question prioritizer",
        description="Takes the original question and a list of derived questions, "
        "and selects from the latter the one most pertinent to the former",
        llm=llm,
    )

    # (Removed an unused `get_original_question_text` chain here — the pipeline
    # below extracts the question text with an inline lambda instead.)

    @chain
    def format_unanswered_questions(input_dict: dict):
        # Render the candidates as a 1-based numbered list for the LLM.
        unanswered_questions: list[Question] = input_dict["unanswered_questions"]
        return "\n".join(
            "{}. {}".format(i + 1, question.question)
            for i, question in enumerate(unanswered_questions)
        )

    @chain
    def get_most_pertinent_question(input_dict: dict):
        # The LLM answers with a 1-based number; convert it back to an index.
        unanswered_questions: list[Question] = input_dict["unanswered_questions"]
        most_pertinent_question_id = (
            int(input_dict["most_pertinent_question_id_message"].content.strip(" \n.")) - 1
        )
        assert most_pertinent_question_id < len(unanswered_questions)
        return unanswered_questions[most_pertinent_question_id]

    this_chain = (
        RunnablePassthrough.assign(
            original_question_text=lambda x: x["original_question"].question,
            unanswered_questions_text=format_unanswered_questions,
        )
        | RunnableLambda(print_passthrough)
        | RunnablePassthrough.assign(
            most_pertinent_question_id_message=question_prioritizer.to_langchain_tool()
        )
        | RunnableLambda(print_passthrough)
        | get_most_pertinent_question
    )

    langchain_tool = StructuredTool.from_function(
        func=lambda original_question, unanswered_questions: this_chain.invoke(
            {"original_question": original_question, "unanswered_questions": unanswered_questions}
        ),
        name=question_prioritizer.name,
        description=question_prioritizer.tool.description,
        args_schema=QuestionPrioritizerInput,
    )

    return langchain_tool
111 |
--------------------------------------------------------------------------------
/motleycrew/applications/research_agent/question_task.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from langchain_core.language_models import BaseLanguageModel
4 | from langchain_core.runnables import Runnable
5 |
6 | from motleycrew.common import logger
7 | from motleycrew.crew import MotleyCrew
8 | from motleycrew.tasks import Task, TaskUnit
9 | from motleycrew.tasks.task_unit import TaskUnitType
10 | from motleycrew.tools import MotleyTool
11 | from .question import Question
12 | from .question_generator import QuestionGeneratorTool
13 | from .question_prioritizer import QuestionPrioritizerTool
14 |
15 |
class QuestionGenerationTaskUnit(TaskUnit):
    """Task unit carrying the question whose subquestions will be generated."""

    question: Question  # graph node to expand into subquestions
18 |
19 |
class QuestionTask(Task):
    """Task to generate subquestions based on a given question."""

    def __init__(
        self,
        question: str,
        query_tool: MotleyTool,
        crew: MotleyCrew,
        max_iter: int = 10,
        allow_async_units: bool = False,
        llm: Optional[BaseLanguageModel] = None,
        name: str = "QuestionTask",
    ):
        """
        Args:
            question: Text of the root question to expand.
            query_tool: Tool used to retrieve context for question generation.
            crew: The crew this task belongs to.
            max_iter: Maximum number of question-generation iterations.
            allow_async_units: Whether units may be processed in parallel.
            llm: Language model to use; the framework default is used if None.
            name: Task name.
        """
        super().__init__(
            name=name,
            task_unit_class=QuestionGenerationTaskUnit,
            crew=crew,
            allow_async_units=allow_async_units,
        )

        self.max_iter = max_iter
        self.n_iter = 0  # counts dispatched units; see on_unit_dispatch
        # The root question is inserted into the graph right away so that
        # get_next_unit can find it.
        self.question = Question(question=question)
        self.graph_store.insert_node(self.question)
        self.question_prioritization_tool = QuestionPrioritizerTool(llm=llm)
        self.question_generation_tool = QuestionGeneratorTool(
            query_tool=query_tool, graph=self.graph_store, llm=llm
        )

    def get_next_unit(self) -> QuestionGenerationTaskUnit | None:
        """Choose the most pertinent question to generate subquestions for.

        Returns None when the task is done, the iteration budget is exhausted,
        or every candidate already has a unit."""

        if self.done or self.n_iter >= self.max_iter:
            return None

        # Only leaf questions (without subquestions yet) are candidates.
        unanswered_questions = self.get_unanswered_questions(only_without_children=True)
        logger.info("Loaded unanswered questions: %s", unanswered_questions)

        # Skip questions that already have a dispatched unit.
        existing_units = self.get_units()
        question_candidates = []
        for question in unanswered_questions:
            if not any(unit.question.question == question.question for unit in existing_units):
                question_candidates.append(question)

        if not len(question_candidates):
            return None

        most_pertinent_question = self.question_prioritization_tool.invoke(
            {
                "original_question": self.question,
                "unanswered_questions": question_candidates,
            }
        )
        logger.info("Most pertinent question according to the tool: %s", most_pertinent_question)
        return QuestionGenerationTaskUnit(question=most_pertinent_question)

    def on_unit_dispatch(self, unit: TaskUnitType) -> None:
        """Increment the iteration count when a unit is dispatched."""

        logger.info("==== Started iteration %s of %s ====", self.n_iter + 1, self.max_iter)
        self.n_iter += 1

    def on_unit_completion(self, unit: TaskUnitType) -> None:
        """Check if the task is done after each unit completion.

        The task is done if the maximum number of iterations is reached."""

        if self.n_iter >= self.max_iter:
            self.set_done(True)

    def get_worker(self, tools: Optional[List[MotleyTool]]) -> Runnable:
        """Return the worker that will process the task units."""

        return self.question_generation_tool

    def get_unanswered_questions(self, only_without_children: bool = False) -> list[Question]:
        """Fetch unanswered questions from the graph.

        Args:
            only_without_children: If True, only return questions that have no
                subquestions yet (leaves of the question tree).
        """
        if only_without_children:
            query = (
                "MATCH (n1:{}) WHERE n1.answer IS NULL AND NOT (n1)-[]->(:{}) RETURN n1;".format(
                    Question.get_label(), Question.get_label()
                )
            )
        else:
            query = "MATCH (n1:{}) WHERE n1.answer IS NULL RETURN n1;".format(Question.get_label())

        query_result = self.graph_store.run_cypher_query(query, container=Question)
        return query_result
107 |
--------------------------------------------------------------------------------
/motleycrew/common/__init__.py:
--------------------------------------------------------------------------------
1 | """Common utilities, types, enums, exceptions, loggers etc."""
2 |
3 | from .aux_prompts import AuxPrompts
4 | from .defaults import Defaults
5 | from .enums import AsyncBackend
6 | from .enums import GraphStoreType
7 | from .enums import LLMFramework
8 | from .enums import LLMProvider
9 | from .enums import LunaryEventName
10 | from .enums import LunaryRunType
11 | from .enums import TaskUnitStatus
12 | from .logging import logger, configure_logging
13 | from .types import MotleyAgentFactory
14 | from .types import MotleySupportedTool
15 |
16 | __all__ = [
17 | "AuxPrompts",
18 | "Defaults",
19 | "MotleySupportedTool",
20 | "MotleyAgentFactory",
21 | "logger",
22 | "configure_logging",
23 | "AsyncBackend",
24 | "GraphStoreType",
25 | "LLMProvider",
26 | "LLMFramework",
27 | "LunaryEventName",
28 | "LunaryRunType",
29 | "TaskUnitStatus",
30 | ]
31 |
--------------------------------------------------------------------------------
/motleycrew/common/aux_prompts.py:
--------------------------------------------------------------------------------
1 | from typing import List, TYPE_CHECKING
2 |
3 | if TYPE_CHECKING:
4 | from motleycrew.common import MotleyTool
5 |
6 |
class AuxPrompts:
    """Singleton containing miscellaneous auxiliary prompts.
    In rare cases where you need to customize these, you can modify them before instantiating your agents.
    """

    DIRECT_OUTPUT_ERROR_WITH_SINGLE_OUTPUT_HANDLER = (
        "You made a mistake by returning plain text instead of calling a tool. "
        "If you need to finish, you must call the `{output_handler}` tool."
    )
    DIRECT_OUTPUT_ERROR_WITH_MULTIPLE_OUTPUT_HANDLERS = (
        "You made a mistake by returning plain text instead of calling a tool. "
        "You must call one of the following tools to return the final output: {output_handlers}"
    )
    AMBIGUOUS_OUTPUT_HANDLER_CALL_ERROR = (
        "You attempted to return output by calling `{current_output_handler}` tool, "
        "but included other tool calls in your response. "
        "You must only call one of the following tools to return: {output_handlers}."
    )

    @staticmethod
    def get_direct_output_error_message(output_handlers: List["MotleyTool"]) -> str:
        """Error text for when the agent replied with plain text instead of an output handler call."""
        if len(output_handlers) == 1:
            return AuxPrompts.DIRECT_OUTPUT_ERROR_WITH_SINGLE_OUTPUT_HANDLER.format(
                output_handler=output_handlers[0].name
            )

        handler_names = ", ".join(f"`{handler.name}`" for handler in output_handlers)
        return AuxPrompts.DIRECT_OUTPUT_ERROR_WITH_MULTIPLE_OUTPUT_HANDLERS.format(
            output_handlers=handler_names
        )

    @staticmethod
    def get_ambiguous_output_handler_call_error_message(
        current_output_handler: "MotleyTool", output_handlers: List["MotleyTool"]
    ) -> str:
        """Error text for when an output handler call was mixed with other tool calls."""
        handler_names = ", ".join(f"`{handler.name}`" for handler in output_handlers)
        return AuxPrompts.AMBIGUOUS_OUTPUT_HANDLER_CALL_ERROR.format(
            current_output_handler=current_output_handler.name,
            output_handlers=handler_names,
        )
47 |
--------------------------------------------------------------------------------
/motleycrew/common/defaults.py:
--------------------------------------------------------------------------------
1 | from motleycrew.common.enums import GraphStoreType, LLMProvider
2 |
3 |
class Defaults:
    """Default values for various settings."""

    # Agent behavior
    DEFAULT_REACT_AGENT_MAX_ITERATIONS = 15

    # LLM settings
    DEFAULT_LLM_PROVIDER = LLMProvider.OPENAI
    DEFAULT_LLM_NAME = "gpt-4o"
    DEFAULT_LLM_TEMPERATURE = 0.0

    DEFAULT_GRAPH_STORE_TYPE = GraphStoreType.KUZU

    # pip commands suggested to the user when an optional dependency is missing.
    MODULE_INSTALL_COMMANDS = {
        "crewai": "pip install crewai",
        "llama_index": "pip install llama-index",
        "autogen": "pip install pyautogen",
        "lunary": "pip install lunary",
        "aider": "pip install aider-chat",
        "pglast": "pip install pglast",
        "crewai_tools": "pip install 'crewai[tools]'",
        "replicate": "pip install replicate",
        "ray": "pip install 'ray[default]'",
    }

    # Crew execution settings
    DEFAULT_NUM_THREADS = 4
    DEFAULT_EVENT_LOOP_SLEEP = 1  # presumably seconds — TODO confirm unit
    DEFAULT_OUTPUT_HANDLER_MAX_ITERATIONS = 5
29 |
--------------------------------------------------------------------------------
/motleycrew/common/enums.py:
--------------------------------------------------------------------------------
1 | """Various enums used in the project."""
2 |
3 |
class LLMProvider:
    """Identifiers of supported LLM providers."""

    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    REPLICATE = "replicate"
    TOGETHER = "together"
    GROQ = "groq"
    OLLAMA = "ollama"
    AZURE_OPENAI = "azure_openai"

    # Set of all valid provider identifiers.
    ALL = {OPENAI, ANTHROPIC, REPLICATE, TOGETHER, GROQ, OLLAMA, AZURE_OPENAI}
14 |
15 |
class LLMFramework:
    """Identifiers of supported agent/LLM frameworks."""

    LANGCHAIN = "langchain"
    LLAMA_INDEX = "llama_index"

    # Set of all valid framework identifiers.
    ALL = {LANGCHAIN, LLAMA_INDEX}
21 |
22 |
class GraphStoreType:
    """Identifiers of supported graph store backends."""

    KUZU = "kuzu"

    # Set of all valid graph store identifiers.
    ALL = {KUZU}
27 |
28 |
class TaskUnitStatus:
    """Lifecycle states of a task unit."""

    PENDING = "pending"
    RUNNING = "running"
    DONE = "done"

    # Set of all valid statuses.
    ALL = {PENDING, RUNNING, DONE}
35 |
36 |
class LunaryRunType:
    """Run type identifiers used for Lunary observability events."""

    LLM = "llm"
    AGENT = "agent"
    TOOL = "tool"
    CHAIN = "chain"
    EMBED = "embed"

    # Set of all valid run types.
    ALL = {LLM, AGENT, TOOL, CHAIN, EMBED}
45 |
46 |
class LunaryEventName:
    """Event name identifiers used for Lunary observability events."""

    START = "start"
    END = "end"
    UPDATE = "update"
    ERROR = "error"

    # Set of all valid event names.
    ALL = {START, END, UPDATE, ERROR}
54 |
55 |
class AsyncBackend:
    """Backends for parallel crew execution.

    Attributes:
        ASYNCIO: Asynchronous execution using asyncio.
        THREADING: Parallel execution using threads.
        RAY: Parallel execution using Ray.
        NONE: Synchronous execution.
    """

    ASYNCIO = "asyncio"
    THREADING = "threading"
    RAY = "ray"
    NONE = "none"

    # Set of all valid backend identifiers.
    ALL = {ASYNCIO, THREADING, RAY, NONE}
72 |
--------------------------------------------------------------------------------
/motleycrew/common/exceptions.py:
--------------------------------------------------------------------------------
1 | """Exceptions for motleycrew"""
2 |
3 | from typing import Any, Dict, Optional
4 |
5 | from motleycrew.common import Defaults
6 |
7 |
class LLMProviderNotSupported(Exception):
    """Raised when the requested LLM provider has no integration for the given framework."""

    def __init__(self, llm_framework: str, llm_provider: str):
        self.llm_framework = llm_framework
        self.llm_provider = llm_provider

    def __str__(self) -> str:
        return "LLM provider `{}` is not supported via the framework `{}`".format(
            self.llm_provider, self.llm_framework
        )
17 |
18 |
class LLMFrameworkNotSupported(Exception):
    """Raised when the requested agent framework has no motleycrew integration."""

    def __init__(self, llm_framework: str):
        self.llm_framework = llm_framework

    def __str__(self) -> str:
        return "LLM framework `{}` is not supported".format(self.llm_framework)
27 |
28 |
class AgentNotMaterialized(Exception):
    """Raised on an attempt to use an agent before it has been materialized."""

    def __init__(self, agent_name: str):
        self.agent_name = agent_name

    def __str__(self) -> str:
        return "Agent `{}` is not yet materialized".format(self.agent_name)
37 |
38 |
39 | class CannotModifyMaterializedAgent(Exception):
40 | """Raised when an attempt is made to modify a materialized agent, e.g. to add tools."""
41 |
42 | def __init__(self, agent_name: str | None):
43 | self.agent_name = agent_name
44 |
45 | def __str__(self) -> str:
46 | return "Cannot modify agent{} as it is already materialized".format(
47 | f" '{self.agent_name}'" if self.agent_name is not None else ""
48 | )
49 |
50 |
class TaskDependencyCycleError(Exception):
    """Raised when a task is set to depend on itself, which would create a dependency cycle."""
53 |
54 |
class IntegrationTestException(Exception):
    """One or more integration tests failed."""

    def __init__(self, test_names: list[str]):
        """
        Args:
            test_names: List of names of failed integration tests.
        """
        self.test_names = test_names

    def __str__(self):
        return f"Some integration tests failed: {self.test_names}"
67 |
68 |
class IpynbIntegrationTestResultNotFound(Exception):
    """Raised when the result file of an ipynb integration test run is missing."""

    def __init__(self, ipynb_path: str, result_path: str):
        self.ipynb_path = ipynb_path
        self.result_path = result_path

    def __str__(self):
        return (
            f"File {self.result_path} with result of the ipynb "
            f"{self.ipynb_path} execution not found."
        )
80 |
81 |
class ModuleNotInstalled(Exception):
    """Raised when trying to use some functionality that requires a module that is not installed."""

    def __init__(self, module_name: str, install_command: Optional[str] = None):
        """
        Args:
            module_name: Name of the module.
            install_command: Command to install the module.
                If not provided, it is looked up in ``Defaults.MODULE_INSTALL_COMMANDS``
                (and may remain None if unknown).
        """
        self.module_name = module_name
        # `or` short-circuits: Defaults is only consulted when no explicit command is given.
        self.install_command = install_command or Defaults.MODULE_INSTALL_COMMANDS.get(
            module_name, None
        )

    def __str__(self):
        msg = "{} is not installed".format(self.module_name)

        if self.install_command is not None:
            msg = "{}, please install ({})".format(msg, self.install_command)

        return "{}.".format(msg)
104 |
105 |
class InvalidToolInput(Exception):
    """Raised when a tool receives input it cannot process."""

    def __init__(self, tool: Any, input: Any, message: Optional[str] = None):
        self.tool = tool
        self.input = input
        self.message = message

    def __str__(self):
        base = f"Invalid input `{self.input}` for tool `{self.tool.name}`"
        if self.message:
            return f"{base}: {self.message}"
        return base
119 |
120 |
class InvalidOutput(Exception):
    """Raised in output handlers when an agent's output is not accepted."""

    # NOTE(review): presumably the agent loop catches this to re-prompt the agent
    # with the rejection reason — confirm against the output handler implementation.
    pass
125 |
--------------------------------------------------------------------------------
/motleycrew/common/logging.py:
--------------------------------------------------------------------------------
1 | """Project logger configuration module
2 |
3 | Attributes:
4 | logger (logging.Logger): project logger
5 | """
import logging


# Package-level logger with its own stream handler; propagation is disabled
# so motleycrew records are not duplicated by the root logger.
logger = logging.getLogger("motleycrew")
stream_handler = logging.StreamHandler()
formatter = logging.Formatter(fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.propagate = False


def configure_logging(verbose: bool = False, debug: bool = False):
    """Set the level of the project logger.

    Args:
        verbose: If True, set the level to INFO.
        debug: If True, set the level to DEBUG (takes precedence over ``verbose``).

    With neither flag set, the level is WARNING.
    """
    if debug:
        level = logging.DEBUG
    elif verbose:
        level = logging.INFO
    else:
        level = logging.WARNING
    logger.setLevel(level)
34 |
--------------------------------------------------------------------------------
/motleycrew/common/types.py:
--------------------------------------------------------------------------------
1 | """Various types and type protocols used in motleycrew.
2 |
3 | Attributes:
4 | MotleySupportedTool: Type that represents a tool that is supported by motleycrew.
5 | It includes tools from motleycrew, langchain, llama_index, and motleycrew agents.
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | from typing import TYPE_CHECKING, Union, Optional, Protocol, TypeVar
11 |
if TYPE_CHECKING:
    from langchain.tools import BaseTool

    # llama_index is an optional dependency; fall back to a string forward
    # reference so the Union below still resolves when it is absent.
    try:
        from llama_index.core.tools import BaseTool as LlamaIndex__BaseTool
    except ImportError:
        LlamaIndex__BaseTool = "LlamaIndex__BaseTool"

    from motleycrew.tools import MotleyTool
    from motleycrew.agents.abstract_parent import MotleyAgentAbstractParent

else:
    # At runtime the heavy imports are skipped; the names are bound to strings
    # so that the Union below is built from (valid) string forward references.
    MotleyTool = "MotleyTool"
    BaseTool = "BaseTool"
    LlamaIndex__BaseTool = "LlamaIndex__BaseTool"
    MotleyAgentAbstractParent = "MotleyAgentAbstractParent"


# Any tool-like object motleycrew accepts: a MotleyTool, a langchain tool,
# a llama_index tool, or a motleycrew agent used as a tool.
MotleySupportedTool = Union[MotleyTool, BaseTool, LlamaIndex__BaseTool, MotleyAgentAbstractParent]


# Type variable for the concrete agent class produced by a MotleyAgentFactory.
AgentType = TypeVar("AgentType")
34 |
35 |
class MotleyAgentFactory(Protocol[AgentType]):
    """Type protocol for an agent factory.

    It is a function that accepts tools as an argument and returns an agent instance
    of an appropriate class.

    Agent factory is typically needed because the agent may need the list of available tools
    or other context at the time of its creation (e.g. to compose the prompt),
    and it may not be available at the time of the agent wrapper initialization.

    Args:
        tools: Tools to provide to the agent, keyed by tool name.

    Returns:
        Constructed agent instance.
    """

    def __call__(
        self,
        tools: dict[str, MotleyTool],
    ) -> AgentType: ...
51 |
--------------------------------------------------------------------------------
/motleycrew/common/utils.py:
--------------------------------------------------------------------------------
1 | """Various helpers and utility functions used throughout the project."""
2 |
3 | import hashlib
4 | import sys
5 | from typing import Optional, Sequence
6 | from urllib.parse import urlparse
7 |
8 | from langchain_core.messages import BaseMessage
9 |
10 | from motleycrew.common.exceptions import ModuleNotInstalled
11 |
12 |
13 | def to_str(value: str | BaseMessage | Sequence[str] | Sequence[BaseMessage]) -> str:
14 | """Converts a message to a string."""
15 |
16 | if isinstance(value, str):
17 | return value
18 | elif isinstance(value, BaseMessage):
19 | return value.content
20 | else:
21 | try:
22 | return "\n".join([to_str(v) for v in value])
23 | except TypeError:
24 | raise TypeError(f"Expected str, BaseMessage, or an iterable of them, got {type(value)}")
25 |
26 |
def is_http_url(url):
    """Return True if the string parses as a URL with an http or https scheme."""
    try:
        scheme = urlparse(url).scheme
    except ValueError:
        return False
    return scheme in ("http", "https")
35 |
36 |
def generate_hex_hash(data: str, length: Optional[int] = None):
    """Return the SHA256 hex digest of *data*, optionally truncated to *length* chars."""
    digest = hashlib.sha256(data.encode("utf-8")).hexdigest()
    return digest if length is None else digest[:length]
47 |
48 |
def print_passthrough(x):
    """A helper function useful for debugging LCEL chains. It just returns the input value.

    You can put a breakpoint in this function to debug a chain.

    Args:
        x: Any value; returned unchanged.
    """
    return x
55 |
56 |
def ensure_module_is_installed(module_name: str, install_command: str = None) -> None:
    """Ensure that the given module is installed.

    Args:
        module_name: Name of the module to check for.
        install_command: Optional install hint passed to the raised exception.

    Raises:
        ModuleNotInstalled: If the module is not found.

    NOTE(review): this checks ``sys.modules``, i.e. modules that have already been
    imported somewhere — not whether the package is installed on disk. Callers
    appear to rely on the module having been imported (or its import having
    failed) at file top; confirm before using this for anything else.
    """
    module_path = sys.modules.get(module_name, None)
    if module_path is None:
        raise ModuleNotInstalled(module_name, install_command)
63 |
--------------------------------------------------------------------------------
/motleycrew/crew/__init__.py:
--------------------------------------------------------------------------------
1 | """MotleyCrew class, orchestration and related functionality."""
2 |
3 | from .crew import MotleyCrew
4 |
5 | __all__ = ["MotleyCrew"]
6 |
--------------------------------------------------------------------------------
/motleycrew/crew/crew_threads.py:
--------------------------------------------------------------------------------
1 | """Thread pool module for running agents."""
2 |
3 | import threading
4 | from enum import Enum
5 | from queue import Queue
6 | from typing import TYPE_CHECKING, Tuple, Any, List
7 |
8 | from langchain_core.runnables import Runnable
9 |
10 | from motleycrew.common import Defaults
11 |
12 | if TYPE_CHECKING:
13 | from motleycrew import Task
14 | from motleycrew.tasks import TaskUnit
15 |
16 |
class TaskUnitThreadState(Enum):
    """Lifecycle states of a task unit worker thread."""

    BUSY = "busy"
    WAITING = "waiting"
    EXITED = "exited"


SENTINEL = object()  # sentinel object for closing threads


class TaskUnitThread(threading.Thread):
    """Worker thread that runs agents on task units taken from a queue."""

    def __init__(self, input_queue: Queue, output_queue: Queue, *args, **kwargs):
        """Initialize the thread.

        Args:
            input_queue: Queue of task units to complete.
            output_queue: Queue of completed task units.
            *args: threading.Thread arguments.
            **kwargs: threading.Thread keyword arguments.
        """
        self.input_queue = input_queue
        self.output_queue = output_queue
        self._state = TaskUnitThreadState.WAITING

        super().__init__(*args, **kwargs)

    @property
    def state(self):
        """Current state of the thread."""
        return self._state

    def run(self) -> None:
        """Main loop of the thread.

        Repeatedly takes a task unit from the input queue, runs the agent on it,
        and puts either the result triple or the raised exception on the output
        queue. Exits when the sentinel object is received.
        """
        while True:
            item = self.input_queue.get()
            self._state = TaskUnitThreadState.BUSY

            if item is SENTINEL:
                self._state = TaskUnitThreadState.EXITED
                self.input_queue.task_done()
                return

            agent, task, unit = item
            try:
                result = agent.invoke(unit.as_dict())
            except Exception as exc:
                # Exceptions are shipped to the consumer instead of killing the thread.
                self.output_queue.put(exc)
            else:
                self.output_queue.put((task, unit, result))
            finally:
                self._state = TaskUnitThreadState.WAITING
                self.input_queue.task_done()
74 |
75 |
class TaskUnitThreadPool:
    """Pool of TaskUnitThread workers for running agents on task units."""

    def __init__(self, num_threads: int = Defaults.DEFAULT_NUM_THREADS):
        """Create the queues and start the worker threads.

        Args:
            num_threads: Number of threads to create.
        """
        self.num_threads = num_threads

        self.input_queue = Queue()
        self.output_queue = Queue()

        self._threads = []
        for _ in range(self.num_threads):
            worker = TaskUnitThread(self.input_queue, self.output_queue)
            worker.start()
            self._threads.append(worker)

        # (task, unit) pairs that were queued and not yet collected as completed.
        self._task_units_in_progress = []

    def add_task_unit(self, agent: Runnable, task: "Task", unit: "TaskUnit") -> None:
        """Queue a task unit for execution.

        Args:
            agent: Agent to run the task unit.
            task: Task to which the unit belongs.
            unit: Task unit to run.
        """
        self._task_units_in_progress.append((task, unit))
        self.input_queue.put((agent, task, unit))

    def get_completed_task_units(self) -> List[Tuple["Task", "TaskUnit", Any]]:
        """Drain the output queue and return the finished units with their results.

        Raises:
            Exception: Re-raises any exception a worker thread reported.

        Returns:
            List of triplets of (task, task unit, result).
        """
        finished = []
        while not self.output_queue.empty():
            item = self.output_queue.get()
            if isinstance(item, Exception):
                raise item

            task, unit, result = item
            finished.append((task, unit, result))
            self._task_units_in_progress.remove((task, unit))
        return finished

    def wait_and_close(self):
        """Wait for all task units to complete and close the threads."""
        # One sentinel per live worker tells each thread to exit its loop.
        for worker in self._threads:
            if worker.is_alive():
                self.input_queue.put(SENTINEL)
        self.input_queue.join()

        for worker in self._threads:
            worker.join()

    @property
    def is_completed(self) -> bool:
        """Whether every queued task unit has been collected as completed."""
        return not self._task_units_in_progress
139 |
--------------------------------------------------------------------------------
/motleycrew/storage/__init__.py:
--------------------------------------------------------------------------------
1 | """Graph storage."""
2 |
3 | from .graph_node import MotleyGraphNode
4 | from .graph_node import MotleyGraphNodeType
5 |
6 | from .graph_store import MotleyGraphStore
7 |
8 | from .kuzu_graph_store import MotleyKuzuGraphStore
9 |
--------------------------------------------------------------------------------
/motleycrew/storage/graph_node.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, Any, TypeVar, TYPE_CHECKING
2 | from abc import ABC
3 | from pydantic import BaseModel
4 |
5 | if TYPE_CHECKING:
6 | from motleycrew.storage import MotleyGraphStore
7 |
8 |
class MotleyGraphNode(BaseModel, ABC):
    """Base class for describing nodes in the graph.

    Attributes:
        __label__: Label of the node in the graph. If not set, the class name is used.
        __graph_store__: Graph store in which the node is stored.
    """

    # Q: KuzuGraphNode a better name? Because def id is specific?
    # A: No, I think _id attribute is pretty universal
    __label__: Optional[str] = None
    __graph_store__: Optional["MotleyGraphStore"] = None

    @property
    def id(self) -> Optional[Any]:
        """Identifier of the node in the graph.

        The identifier is unique **among nodes of the same label**.
        If the node is not inserted in the graph, the identifier is None.
        """
        return getattr(self, "_id", None)

    @property
    def is_inserted(self) -> bool:
        """Whether the node is inserted in the graph."""
        return self.id is not None

    @classmethod
    def get_label(cls) -> str:
        """Get the label of the node.

        Labels can be viewed as node types in the graph.
        Generally, the label is the class name,
        but it can be overridden by setting the __label__ attribute.

        Returns:
            Label of the node.
        """

        # Q: why not @property def label(cls) -> str: return cls.__label__ or cls.__name__ ?
        # A: Because we want to be able to call this method without an instance
        # and properties can't be class methods since Python 3.12
        if cls.__label__:
            return cls.__label__
        return cls.__name__

    def __setattr__(self, name, value):
        """Set the attribute value
        and update the property in the graph store if the node is inserted.

        Args:
            name: Name of the attribute.
            value: Value of the attribute.
        """
        super().__setattr__(name, value)

        if name not in self.model_fields:
            # Q: Should we not raise an error here instead?
            # A: No, there are technical attributes like __graph_store__ that are not in the model
            return  # Non-pydantic field => not in the DB

        if self.__graph_store__ and self.is_inserted:
            self.__graph_store__.update_property(self, name)

    def __eq__(self, other):
        """Comparison operator for nodes.

        Two nodes are considered equal if they have the same label and identifier.

        Args:
            other: Node to compare with.

        Returns:
            Whether the nodes are equal, or NotImplemented for non-node operands.
        """
        # Robustness fix: comparing with a non-node used to raise AttributeError
        # (no get_label attribute); defer to Python's default handling instead.
        if not isinstance(other, MotleyGraphNode):
            return NotImplemented
        return self.is_inserted and self.get_label() == other.get_label() and self.id == other.id
85 |
86 |
87 | MotleyGraphNodeType = TypeVar("MotleyGraphNodeType", bound=MotleyGraphNode)
88 |
--------------------------------------------------------------------------------
/motleycrew/storage/graph_store_utils.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | import tempfile
3 | import os
4 |
5 | from motleycrew.common import Defaults
6 | from motleycrew.common import GraphStoreType
7 | from motleycrew.common import logger
8 | from motleycrew.storage import MotleyKuzuGraphStore, MotleyGraphStore
9 |
10 |
def init_graph_store(
    graph_store_type: str = Defaults.DEFAULT_GRAPH_STORE_TYPE,
    db_path: Optional[str] = None,
) -> MotleyGraphStore:
    """Create and initialize a graph store with the given parameters.

    Args:
        graph_store_type: Type of the graph store to use.
        db_path: Path to the database for the graph store.

    Returns:
        Initialized graph store.

    Raises:
        ValueError: If the graph store type is not recognized.
    """
    if graph_store_type != GraphStoreType.KUZU:
        raise ValueError(f"Unknown graph store type: {graph_store_type}")

    # Imported lazily so that kuzu is only required when actually used.
    import kuzu

    if db_path is None:
        logger.info("No db_path provided, creating temporary directory for database")
        db_path = os.path.join(tempfile.mkdtemp(), "kuzu_db")

    logger.info("Using Kuzu graph store with path: %s", db_path)
    return MotleyKuzuGraphStore(kuzu.Database(db_path))
36 |
--------------------------------------------------------------------------------
/motleycrew/storage/kv_store_domain.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional
2 | from abc import ABC, abstractmethod
3 | from pprint import pprint
4 |
5 |
class RetrievableObjectParent(ABC):
    """Base class for objects retrievable from the key-value store.

    Args:
        id: Unique identifier of the object.
        name: Human-readable name of the object.
        description: Optional description of the object.

    Raises:
        ValueError: If id or name is None.
    """

    def __init__(self, id: str, name: str, description: Optional[str] = None):
        # Raise instead of assert: assertions are stripped under `python -O`,
        # so they must not be relied on for input validation.
        if id is None:
            raise ValueError("id must be provided")
        if name is None:
            raise ValueError("name must be provided")

        self.id = id
        self.name = name
        self.description = description

    @property
    @abstractmethod
    def summary(self) -> str:
        """Short human-readable summary of the object."""


class SimpleRetrievableObject(RetrievableObjectParent):
    """A retrievable object that carries an arbitrary payload."""

    def __init__(
        self, id: str, name: str, payload: Any, description: Optional[str] = None
    ):
        super().__init__(id, name, description)
        self.payload = payload

    @property
    def summary(self) -> str:
        return f"""SimpleRetrievableObject: {self.name}
        id: {self.id}
        description: {self.description}
        payload: {self.payload}"""
34 |
--------------------------------------------------------------------------------
/motleycrew/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | """Tasks and task units."""
2 |
3 | from motleycrew.tasks.simple import SimpleTask
4 | from motleycrew.tasks.task import Task
5 | from motleycrew.tasks.task_unit import TaskUnit
6 | from motleycrew.tasks.task_unit import TaskUnitType
7 |
8 | __all__ = [
9 | "Task",
10 | "SimpleTask",
11 | "TaskUnit",
12 | ]
13 |
--------------------------------------------------------------------------------
/motleycrew/tasks/task_unit.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from abc import ABC
4 | from typing import Optional, Any, TypeVar
5 |
6 | from motleycrew.common import TaskUnitStatus
7 | from motleycrew.storage import MotleyGraphNode
8 |
9 |
class TaskUnit(MotleyGraphNode, ABC):
    """Base class for describing task units.
    A task unit should contain all the input data for the worker (usually an agent).
    When a task unit is dispatched by the crew, it is converted to a dictionary
    and passed to the worker's ``invoke()`` method.

    Attributes:
        status: Status of the task unit.
        output: Output of the task unit.

    """

    status: str = TaskUnitStatus.PENDING
    output: Optional[Any] = None

    def __repr__(self) -> str:
        return f"TaskUnit(status={self.status})"

    def __str__(self) -> str:
        return self.__repr__()

    def __eq__(self, other: TaskUnit):
        """Two task units are equal if both are inserted and share label and id.

        Bug fix: ``other.get_label`` was previously compared without being
        called, so the bound method never equaled the label string and
        equality was always False.
        """
        return (
            self.id is not None
            and self.get_label() == other.get_label()
            and self.id == other.id
        )

    @property
    def pending(self):
        """Whether the task unit is pending."""
        return self.status == TaskUnitStatus.PENDING

    @property
    def running(self):
        """Whether the task unit is running."""
        return self.status == TaskUnitStatus.RUNNING

    @property
    def done(self):
        """Whether the task unit is done."""
        return self.status == TaskUnitStatus.DONE

    def set_pending(self):
        """Set the task unit status to pending."""
        self.status = TaskUnitStatus.PENDING

    def set_running(self):
        """Set the task unit status to running."""
        self.status = TaskUnitStatus.RUNNING

    def set_done(self):
        """Set the task unit status to done."""
        self.status = TaskUnitStatus.DONE

    def as_dict(self):
        """Represent the task as a dictionary for passing to invoke() methods of runnables."""
        return dict(self)
64 |
65 |
66 | TaskUnitType = TypeVar("TaskUnitType", bound=TaskUnit)
67 |
--------------------------------------------------------------------------------
/motleycrew/tools/__init__.py:
--------------------------------------------------------------------------------
1 | """MotleyTool class and tools library."""
2 |
3 | from motleycrew.tools.tool import DirectOutput, MotleyTool, RetryConfig
4 |
5 | __all__ = ["MotleyTool", "RetryConfig", "DirectOutput"]
6 |
--------------------------------------------------------------------------------
/motleycrew/tools/agentic_validation_loop.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Any, Callable, Type, Dict
2 |
3 | from pydantic import BaseModel, Field, create_model
4 | from langchain_core.prompts import PromptTemplate
5 | from langchain_core.prompts.base import BasePromptTemplate
6 |
7 | from motleycrew.agents.langchain import ReActToolCallingMotleyAgent
8 | from motleycrew.common import LLMFramework
9 | from motleycrew.common.llms import init_llm
10 | from motleycrew.tools import MotleyTool
11 | from motleycrew.tools.structured_passthrough import StructuredPassthroughTool
12 |
13 |
class PivotConfigToolInputSchema(BaseModel):
    # NOTE(review): this schema is not referenced anywhere in this module, and its
    # name/fields refer to pivot charts rather than validation loops — it looks
    # like a copy-paste leftover. Confirm it is not imported elsewhere before removing.
    question: str = Field(description="The question to answer with the pivot chart.")
    datasource_kv_store_keys: List[str] = Field(
        description="The key(s) of the datasource(s) to use in the KV store."
    )
19 |
20 |
class AgenticValidationLoop(MotleyTool):
    """Tool that formats a prompt, runs an agent on it, and validates the agent's
    output against a schema via a structured passthrough output handler."""

    def __init__(
        self,
        name: str,
        description: str,
        prompt: str | BasePromptTemplate,
        schema: Optional[Type[BaseModel]] = None,
        post_process: Optional[Callable] = None,
        llm: Optional[Any] = None,
    ):
        """
        Args:
            name: Name of the tool.
            description: Description of the tool.
            prompt: Prompt string or template to present to the inner agent.
            schema: Expected output schema. If omitted, a string field is
                generated for every input variable of the prompt.
            post_process: Optional callable applied to the structured output.
            llm: LLM to use; defaults to the langchain framework default.
        """
        super().__init__(
            name=name,
            description=description,
        )
        self.llm = llm or init_llm(LLMFramework.LANGCHAIN)

        # Normalize the prompt into a template object.
        if isinstance(prompt, BasePromptTemplate):
            self.prompt_template = prompt
        else:
            self.prompt_template = PromptTemplate.from_template(prompt)

        # Auto-create a schema from the prompt variables when none was given.
        if schema is None and self.prompt_template.input_variables:
            schema = create_model(
                "ValidationLoopInput",
                **{
                    var: (str, Field(description=f"Input {var} for validation."))
                    for var in self.prompt_template.input_variables
                },
            )

        self.schema = schema
        self.post_process = post_process

    def run(self, **kwargs) -> Any:
        """Format the prompt with the provided inputs and run the validation agent."""
        formatted_prompt = self.prompt_template.format(**kwargs)

        output_tool = StructuredPassthroughTool(
            schema=self.schema,
            post_process=self.post_process,
            exceptions_to_reflect=[Exception],
        )

        agent = ReActToolCallingMotleyAgent(
            tools=[output_tool],
            llm=self.llm,
            name=self.name + "_agent",
            force_output_handler=True,
            prompt_prefix=formatted_prompt,
        )

        return agent.invoke({})
79 |
--------------------------------------------------------------------------------
/motleycrew/tools/autogen_chat_tool.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Callable, List, Optional, Type
2 |
3 | from langchain_core.prompts import PromptTemplate
4 | from langchain_core.prompts.base import BasePromptTemplate
5 | from langchain_core.tools import StructuredTool
6 | from pydantic import BaseModel, Field, create_model
7 |
8 | try:
9 | from autogen import ChatResult, ConversableAgent
10 | except ImportError:
11 | ConversableAgent = None
12 | ChatResult = None
13 |
14 | from motleycrew.common.utils import ensure_module_is_installed
15 | from motleycrew.tools import MotleyTool
16 |
17 |
def get_last_message(chat_result: ChatResult) -> str:
    """Return the most recent chat message that has content and is not a TERMINATE marker.

    Returns None implicitly if no such message exists.
    """
    for entry in reversed(chat_result.chat_history):
        content = entry.get("content")
        if content and "TERMINATE" not in content:
            return content
22 |
23 |
class AutoGenChatTool(MotleyTool):
    """A tool for incorporating AutoGen chats into MotleyCrew."""

    def __init__(
        self,
        name: str,
        description: str,
        prompt: str | BasePromptTemplate,
        initiator: ConversableAgent,
        recipient: ConversableAgent,
        result_extractor: Callable[[ChatResult], Any] = get_last_message,
        input_schema: Optional[Type[BaseModel]] = None,
        return_direct: bool = False,
        exceptions_to_reflect: Optional[List[Exception]] = None,
    ):
        """
        Args:
            name: Name of the tool.
            description: Description of the tool.
            prompt: Prompt to use for the tool. Can be a string or a PromptTemplate object.
            initiator: The agent initiating the chat.
            recipient: The first recipient agent.
                This is the agent that you would specify in ``initiate_chat`` arguments.
            result_extractor: Function to extract the result from the chat result.
            input_schema: Input schema for the tool.
                The input variables should match the variables in the prompt.
                If not provided, a schema will be generated based on the input variables
                in the prompt, if any, with string fields.
            return_direct: Passed through to MotleyTool.
            exceptions_to_reflect: Passed through to MotleyTool.
        """
        ensure_module_is_installed("autogen")

        super().__init__(
            tool=create_autogen_chat_tool(
                name=name,
                description=description,
                prompt=prompt,
                initiator=initiator,
                recipient=recipient,
                result_extractor=result_extractor,
                input_schema=input_schema,
            ),
            return_direct=return_direct,
            exceptions_to_reflect=exceptions_to_reflect,
        )
68 |
69 |
def create_autogen_chat_tool(
    name: str,
    description: str,
    prompt: str | BasePromptTemplate,
    initiator: ConversableAgent,
    recipient: ConversableAgent,
    result_extractor: Callable[[ChatResult], Any],
    input_schema: Optional[Type[BaseModel]] = None,
):
    """Build a StructuredTool that runs an AutoGen chat and extracts its result."""
    if not isinstance(prompt, BasePromptTemplate):
        prompt = PromptTemplate.from_template(prompt)

    if input_schema is None:
        # Derive a string field per prompt variable for the dynamic input model.
        input_schema = create_model(
            "AutoGenChatToolInput",
            **{
                var: (str, Field(description=f"Input {var} for the tool."))
                for var in prompt.input_variables
            },
        )

    def run_autogen_chat(**kwargs) -> Any:
        chat_result = initiator.initiate_chat(recipient, message=prompt.format(**kwargs))
        return result_extractor(chat_result)

    return StructuredTool.from_function(
        func=run_autogen_chat,
        name=name,
        description=description,
        args_schema=input_schema,
    )
102 |
--------------------------------------------------------------------------------
/motleycrew/tools/code/__init__.py:
--------------------------------------------------------------------------------
1 | from .aider_tool import AiderTool
2 | from .postgresql_linter import PostgreSQLLinterTool
3 | from .python_linter import PythonLinterTool
4 | from .python_repl import PythonREPLTool
5 |
6 | __all__ = ["PythonLinterTool", "PostgreSQLLinterTool", "AiderTool", "PythonREPLTool"]
7 |
--------------------------------------------------------------------------------
/motleycrew/tools/code/aider_tool.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from motleycrew.common.utils import ensure_module_is_installed
4 |
5 | try:
6 | from aider.coders import Coder
7 | from aider.models import Model
8 | except ImportError:
9 | Coder = None
10 | Model = None
11 |
12 | from langchain.tools import Tool
13 | from pydantic import BaseModel, Field
14 |
15 | from motleycrew.common import Defaults
16 | from motleycrew.tools import MotleyTool
17 |
18 |
class AiderTool(MotleyTool):
    """Tool for code generation using Aider."""

    def __init__(
        self,
        model: Optional[str] = None,
        return_direct: bool = False,
        exceptions_to_reflect: Optional[List[Exception]] = None,
        **kwargs,
    ):
        """
        Args:
            model: Name of the LLM to use. Defaults to ``Defaults.DEFAULT_LLM_NAME``.
            return_direct: Passed through to MotleyTool.
            exceptions_to_reflect: Passed through to MotleyTool.
            **kwargs: Additional arguments for aider's ``Coder.create``.
        """
        ensure_module_is_installed("aider")

        model = model or Defaults.DEFAULT_LLM_NAME
        llm_model = Model(model=model)
        coder = Coder.create(main_model=llm_model, **kwargs)

        langchain_tool = create_aider_tool(coder)
        super().__init__(
            tool=langchain_tool,
            return_direct=return_direct,
            exceptions_to_reflect=exceptions_to_reflect,
        )
41 |
42 |
class AiderToolInput(BaseModel):
    """Input for the Aider tool."""

    # Forwarded as the argument of coder.run (see create_aider_tool below).
    with_message: str = Field(description="instructions for code generation")
47 |
48 |
def create_aider_tool(coder: Coder):
    """Wrap an aider Coder instance into a langchain Tool."""
    tool_description = (
        "Tool for code generation that has access to the provided repository. "
        "Ask it to make changes in the code: fix bugs, add features, write tests etc. "
        "It doesn't run the code by itself."
    )
    return Tool.from_function(
        func=coder.run,
        name="aider_tool",
        description=tool_description,
        args_schema=AiderToolInput,
    )
58 |
--------------------------------------------------------------------------------
/motleycrew/tools/code/postgresql_linter.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from langchain_core.tools import Tool
4 | from pydantic import BaseModel, Field
5 |
6 | try:
7 | from pglast import parse_sql, prettify
8 | from pglast.parser import ParseError
9 | except ImportError:
10 | parse_sql = None
11 | prettify = None
12 | ParseError = None
13 |
14 | from motleycrew.common.utils import ensure_module_is_installed
15 | from motleycrew.tools import MotleyTool
16 |
17 |
class PostgreSQLLinterTool(MotleyTool):
    """PostgreSQL code verification tool.

    Parses SQL with pglast and returns either the prettified query or the
    parser's error message.
    """

    def __init__(
        self,
        return_direct: bool = False,
        exceptions_to_reflect: Optional[List[Exception]] = None,
    ):
        """
        Args:
            return_direct: Passed through to ``MotleyTool``.
            exceptions_to_reflect: Passed through to ``MotleyTool``.
        """
        ensure_module_is_installed("pglast")

        langchain_tool = create_pgsql_linter_tool()
        super().__init__(
            tool=langchain_tool,
            return_direct=return_direct,
            exceptions_to_reflect=exceptions_to_reflect,
        )
34 |
35 |
class PostgreSQLLinterInput(BaseModel):
    """Input for the PostgreSQLLinterTool."""

    # Raw SQL text; parsed (and prettified on success) by pglast.
    query: str = Field(description="SQL code for validation")
40 |
41 |
def create_pgsql_linter_tool() -> Tool:
    """Build a langchain Tool that validates and pretty-prints PostgreSQL code."""

    def validate_and_prettify(query: str) -> str:
        # Returns the prettified query on success; on a parse failure the
        # parser's error message is returned instead of raising.
        try:
            parse_sql(query)
            return prettify(query)
        except ParseError as exc:
            return str(exc)

    return Tool.from_function(
        func=validate_and_prettify,
        name="postgresql_linter",
        description="Tool for validating PostgreSQL code",
        args_schema=PostgreSQLLinterInput,
    )
56 |
--------------------------------------------------------------------------------
/motleycrew/tools/code/python_linter.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import List, Optional, Union
3 |
4 | from langchain_core.tools import StructuredTool
5 | from pydantic import BaseModel, Field
6 |
7 | try:
8 | from aider.linter import Linter
9 | except ImportError:
10 | Linter = None
11 |
12 | from motleycrew.common.utils import ensure_module_is_installed
13 | from motleycrew.tools import MotleyTool
14 |
15 |
class PythonLinterTool(MotleyTool):
    """Python code verification tool.

    Lints Python code using aider's Linter (code is written to a temporary
    file, since the linter operates on files).
    """

    def __init__(
        self,
        return_direct: bool = False,
        exceptions_to_reflect: Optional[List[Exception]] = None,
    ):
        """
        Args:
            return_direct: Passed through to ``MotleyTool``.
            exceptions_to_reflect: Passed through to ``MotleyTool``.
        """
        ensure_module_is_installed("aider")

        langchain_tool = create_python_linter_tool()
        super().__init__(
            tool=langchain_tool,
            return_direct=return_direct,
            exceptions_to_reflect=exceptions_to_reflect,
        )
32 |
33 |
class PythonLinterInput(BaseModel):
    """Input for the PythonLinterTool."""

    code: str = Field(description="Python code for linting")
    # Must end in ".py"; used as the on-disk name of the temporary file.
    file_name: str = Field(description="file name for the code", default="code.py")
39 |
40 |
def create_python_linter_tool() -> StructuredTool:
    """Build a StructuredTool that lints Python code using aider's Linter.

    The code is written to a temporary file in the current working directory
    (the linter operates on file paths), linted, and the file is removed.
    """

    def lint(code: str, file_name: Optional[str] = None) -> Union[str, None]:
        """Lint ``code``, returning the linter's report or an error message.

        Raises:
            ValueError: If ``file_name`` does not have a .py extension.
        """
        temp_file_name = file_name or "code.py"
        _, file_ext = os.path.splitext(temp_file_name)
        if file_ext != ".py":
            raise ValueError("The file extension must be .py")

        # NOTE(review): this overwrites (and afterwards deletes) any existing
        # file with the same name in the current working directory.
        # Explicit UTF-8 so non-ASCII code survives on platforms where the
        # default encoding is not UTF-8.
        with open(temp_file_name, "w", encoding="utf-8") as f:
            f.write(code)

        try:
            linter = Linter()
            return linter.lint(temp_file_name)
        except Exception as e:
            # Surface linter failures as text rather than crashing the agent run.
            return str(e)
        finally:
            os.remove(temp_file_name)

    return StructuredTool.from_function(
        func=lint,
        name="python_linter",
        description="Tool for validating Python code",
        args_schema=PythonLinterInput,
    )
67 |
--------------------------------------------------------------------------------
/motleycrew/tools/code/python_repl.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from langchain_experimental.utilities import PythonREPL
4 | from pydantic import BaseModel, Field
5 |
6 | from motleycrew.tools import MotleyTool
7 |
8 |
class MissingPrintStatementError(Exception):
    """Raised when a command passed to the REPL contains no print statement.

    The offending command is kept on the ``command`` attribute so callers
    (and reflection logic) can inspect it.
    """

    def __init__(self, command: str):
        self.command = command
        # Plain string literal: the original used an f-string with no placeholders.
        super().__init__(
            "Command must contain at least one print statement. "
            "Remember to print the results you want to see using print(...)."
        )
17 |
18 |
class PythonREPLTool(MotleyTool):
    """Python REPL tool. Use this to execute python commands.

    Note that the tool's output is the content printed to stdout by the executed code.
    Because of this, any data you want to be in the output should be printed using `print(...)`.
    """

    def __init__(
        self, return_direct: bool = False, exceptions_to_reflect: Optional[List[Exception]] = None
    ):
        # MissingPrintStatementError is always reflected so the agent can retry
        # with a print statement instead of the run failing outright.
        exceptions_to_reflect = (exceptions_to_reflect or []) + [MissingPrintStatementError]
        super().__init__(
            name="python_repl",
            description="A Python shell. Use this to execute python commands. Input should be a valid python command. "
            "MAKE SURE TO PRINT OUT THE RESULTS YOU CARE ABOUT USING `print(...)`.",
            return_direct=return_direct,
            exceptions_to_reflect=exceptions_to_reflect,
            # REPLToolInput is defined below in this module; it is referenced at
            # construction time, after the module has fully loaded.
            args_schema=REPLToolInput,
        )

    def run(self, command: str) -> str:
        """Validate and execute ``command``, returning the captured stdout.

        A fresh PythonREPL is created per call, so no state persists between runs.
        """
        self.validate_input(command)
        return PythonREPL().run(command)

    def validate_input(self, command: str):
        """Raise MissingPrintStatementError if ``command`` contains no print call."""
        if "print(" not in command:
            raise MissingPrintStatementError(command)
46 |
47 |
class REPLToolInput(BaseModel):
    """Input for the REPL tool."""

    # Must contain at least one print(...) call — enforced by PythonREPLTool.validate_input.
    command: str = Field(description="code to execute")
52 |
--------------------------------------------------------------------------------
/motleycrew/tools/image/__init__.py:
--------------------------------------------------------------------------------
1 | from .dall_e import DallEImageGeneratorTool
2 | from .replicate_tool import ReplicateImageGeneratorTool
3 |
4 | __all__ = ["DallEImageGeneratorTool", "ReplicateImageGeneratorTool"]
5 |
--------------------------------------------------------------------------------
/motleycrew/tools/image/download_image.py:
--------------------------------------------------------------------------------
1 | import mimetypes
2 | import os
3 | from typing import Optional
4 |
5 | import requests
6 |
7 | from motleycrew.common import logger, utils as motley_utils
8 |
9 |
def download_image(url: str, file_path: str, timeout: int = 30) -> Optional[str]:
    """Download an image from ``url`` to ``file_path`` plus a guessed extension.

    Args:
        url: URL of the image to download.
        file_path: Destination path without extension; the extension is guessed
            from the response content type, falling back to ".png".
        timeout: Request timeout in seconds, so a stalled server cannot hang
            the caller indefinitely (new, backward-compatible parameter).

    Returns:
        The path the image was saved to, or None if the download failed
        (the failure is logged).
    """
    response = requests.get(url, stream=True, timeout=timeout)
    if response.status_code == requests.codes.ok:
        try:
            content_type = response.headers.get("content-type", "")
            # Strip parameters like "; charset=..." which would make
            # mimetypes.guess_extension fail to recognize the type.
            extension = mimetypes.guess_extension(content_type.split(";")[0].strip())
        except Exception as e:
            logger.error("Failed to guess content type: %s", e)
            extension = None

        if not extension:
            extension = ".png"

        file_path_with_extension = file_path + extension
        logger.info("Downloading image %s to %s", url, file_path_with_extension)

        with open(file_path_with_extension, "wb") as f:
            for chunk in response:
                f.write(chunk)

        return file_path_with_extension
    else:
        logger.error("Failed to download image. Status code: %s", response.status_code)
33 |
34 |
def download_url_to_directory(url: str, images_directory: str, file_name_length: int = 8) -> str:
    """Download ``url`` into ``images_directory``, naming the file by a hash of the URL.

    Args:
        url: URL of the image to download.
        images_directory: Directory to save the image into (created if missing).
        file_name_length: Length of the hex hash used as the file name.

    Returns:
        The saved file path with forward slashes.

    Raises:
        RuntimeError: If the image could not be downloaded.
    """
    os.makedirs(images_directory, exist_ok=True)
    file_name = motley_utils.generate_hex_hash(url, length=file_name_length)
    file_path = os.path.join(images_directory, file_name)

    downloaded_path = download_image(url=url, file_path=file_path)
    if downloaded_path is None:
        # download_image logs the failure and returns None; previously this
        # surfaced here as an opaque AttributeError on `.replace`.
        raise RuntimeError(f"Failed to download image from {url}")

    return downloaded_path.replace(os.sep, "/")
42 |
--------------------------------------------------------------------------------
/motleycrew/tools/image/replicate_tool.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from langchain.agents import Tool
4 | from pydantic import BaseModel, Field
5 |
6 | import motleycrew.common.utils as motley_utils
7 | from motleycrew.common import logger
8 | from motleycrew.common.utils import ensure_module_is_installed
9 | from motleycrew.tools.image.download_image import download_url_to_directory
10 | from motleycrew.tools.tool import MotleyTool
11 |
12 | try:
13 | import replicate
14 | except ImportError:
15 | replicate = None
16 |
17 |
# Short aliases for popular image models, mapped to their full replicate.com ids.
model_map = {
    "sdxl": "stability-ai/sdxl:39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
    "flux-pro": "black-forest-labs/flux-pro",
    "flux-dev": "black-forest-labs/flux-dev",
    "flux-schnell": "black-forest-labs/flux-schnell",
}
24 |
25 | # Each model has a different set of extra parameters, documented at pages like
26 | # https://replicate.com/black-forest-labs/flux-dev/api/schema
27 |
28 |
def run_model_in_replicate(model_name: str, prompt: str, **kwargs) -> str | List[str]:
    """Run a replicate.com model on a prompt and return its raw output.

    ``model_name`` may be a short alias from ``model_map`` or a full replicate id.
    """
    # Resolve a short alias to the full model id; unknown names pass through unchanged.
    full_model_name = model_map.get(model_name, model_name)
    return replicate.run(full_model_name, input={"prompt": prompt, **kwargs})
34 |
35 |
def run_model_in_replicate_and_save_images(
    model_name: str, prompt: str, directory_name: Optional[str] = None, **kwargs
) -> List[str]:
    """Generate images via replicate and save them locally, returning file paths.

    If ``directory_name`` is None the remote URLs are returned unchanged.
    """
    urls = run_model_in_replicate(model_name, prompt, **kwargs)
    if isinstance(urls, str):
        urls = [urls]

    if directory_name is None:
        logger.info("Images directory is not provided, returning URLs")
        return urls

    # Entries that are not HTTP URLs are skipped, matching the original behavior.
    return [
        download_url_to_directory(url, directory_name)
        for url in urls
        if motley_utils.is_http_url(url)
    ]
50 |
51 |
class ImageToolInput(BaseModel):
    """Input for the image generation tool."""
    # (The previous docstring said "Dall-E tool" — this schema belongs to the
    # replicate image generator.)

    description: str = Field(description="image description")
56 |
57 |
class ReplicateImageGeneratorTool(MotleyTool):
    """A tool for generating images from text descriptions using the Replicate API."""

    def __init__(
        self,
        model_name: str,
        images_directory: Optional[str] = None,
        return_direct: bool = False,
        exceptions_to_reflect: Optional[List[Exception]] = None,
        **kwargs,
    ):
        """
        :param model_name: one of "sdxl", "flux-pro", "flux-dev", "flux-schnell", or a full model name supported by replicate
        :param images_directory: the directory to save the images to; if None, URLs are returned instead
        :param return_direct: passed through to MotleyTool
        :param exceptions_to_reflect: passed through to MotleyTool
        :param kwargs: model-specific parameters, from pages such as https://replicate.com/black-forest-labs/flux-dev/api/schema
        """
        ensure_module_is_installed("replicate")

        self.model_name = model_name
        self.kwargs = kwargs
        langchain_tool = create_replicate_image_generator_langchain_tool(
            model_name=model_name, images_directory=images_directory, **kwargs
        )

        super().__init__(
            tool=langchain_tool,
            return_direct=return_direct,
            exceptions_to_reflect=exceptions_to_reflect,
        )
86 |
87 |
def create_replicate_image_generator_langchain_tool(
    model_name: str, images_directory: Optional[str] = None, **kwargs
):
    """Build the langchain Tool wrapping a replicate image generation model."""

    def generate(description: str):
        # Closure over the model name, target directory, and model-specific kwargs.
        return run_model_in_replicate_and_save_images(
            model_name=model_name,
            prompt=description,
            directory_name=images_directory,
            **kwargs,
        )

    tool_description = (
        f"A wrapper around the {model_name} image generation model. "
        "Useful for when you need to generate images from a text description. "
        "Input should be an image description."
    )
    return Tool(
        name=f"{model_name}_image_generator",
        func=generate,
        description=tool_description,
        args_schema=ImageToolInput,
    )
106 |
107 |
if __name__ == "__main__":
    # Manual smoke test: generate one image with flux-pro and save it under ~/images.
    # Requires the replicate package and a configured Replicate API token.
    import os

    image_dir = os.path.join(os.path.expanduser("~"), "images")
    tool = ReplicateImageGeneratorTool("flux-pro", image_dir, aspect_ratio="3:2")
    output = tool.invoke(
        "A beautiful sunset over the mountains, with a dragon flying into the sunset, photorealistic style."
    )
    print(output)
    print("yay!")
117 |
--------------------------------------------------------------------------------
/motleycrew/tools/llm_tool.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Type
2 |
3 | from langchain_core.language_models import BaseLanguageModel
4 | from langchain_core.prompts import PromptTemplate
5 | from langchain_core.prompts.base import BasePromptTemplate
6 | from langchain_core.tools import StructuredTool
7 | from pydantic import BaseModel, Field, create_model
8 |
9 | from motleycrew.common import LLMFramework
10 | from motleycrew.common.llms import init_llm
11 | from motleycrew.tools import MotleyTool
12 |
13 |
class LLMTool(MotleyTool):
    """A tool that uses a language model to generate output based on a prompt."""

    def __init__(
        self,
        name: str,
        description: str,
        prompt: str | BasePromptTemplate,
        llm: Optional[BaseLanguageModel] = None,
        input_schema: Optional[Type[BaseModel]] = None,
        return_direct: bool = False,
        exceptions_to_reflect: Optional[List[Exception]] = None,
    ):
        """
        Args:
            name: Name of the tool.
            description: Description of the tool.
            prompt: Prompt to use for the tool. Can be a string or a PromptTemplate object.
            llm: Language model to use for the tool.
            input_schema: Input schema for the tool.
                The input variables should match the variables in the prompt.
                If not provided, a schema will be generated based on the input variables
                in the prompt, if any, with string fields.
            return_direct: Passed through to MotleyTool.
            exceptions_to_reflect: Passed through to MotleyTool.
        """
        langchain_tool = create_llm_langchain_tool(
            name=name,
            description=description,
            prompt=prompt,
            llm=llm,
            input_schema=input_schema,
        )
        super().__init__(
            tool=langchain_tool,
            return_direct=return_direct,
            exceptions_to_reflect=exceptions_to_reflect,
        )
50 |
51 |
def create_llm_langchain_tool(
    name: str,
    description: str,
    prompt: str | BasePromptTemplate,
    llm: Optional[BaseLanguageModel] = None,
    input_schema: Optional[Type[BaseModel]] = None,
):
    """Build a StructuredTool that renders ``prompt`` and feeds it to ``llm``.

    When no input schema is given, one is generated dynamically with a string
    field per prompt input variable.
    """
    if llm is None:
        llm = init_llm(llm_framework=LLMFramework.LANGCHAIN)

    if not isinstance(prompt, BasePromptTemplate):
        prompt = PromptTemplate.from_template(prompt)

    if input_schema is None:
        # Create the LLMToolInput schema dynamically from the prompt's variables.
        input_schema = create_model(
            "LLMToolInput",
            **{
                variable: (str, Field(description=f"Input {variable} for the tool."))
                for variable in prompt.input_variables
            },
        )

    def call_llm(**kwargs) -> str:
        return (prompt | llm).invoke(kwargs)

    return StructuredTool.from_function(
        func=call_llm,
        name=name,
        description=description,
        args_schema=input_schema,
    )
84 |
--------------------------------------------------------------------------------
/motleycrew/tools/mermaid_evaluator_tool.py:
--------------------------------------------------------------------------------
1 | # https://nodejs.org/en/download
2 | # npm install -g @mermaid-js/mermaid-cli
3 | import io
4 | import os.path
5 | import subprocess
6 | import tempfile
7 | from typing import List, Optional
8 |
9 | from langchain_core.tools import Tool
10 | from pydantic import Field, create_model
11 |
12 | from motleycrew.tools import MotleyTool
13 |
14 |
class MermaidEvaluatorTool(MotleyTool):
    """Tool that renders Mermaid diagram code via the mermaid CLI (mmdc).

    Requires Node.js and @mermaid-js/mermaid-cli (see the install notes at the
    top of this module). On a rendering failure, eval_mermaid returns the CLI's
    error text (a str) rather than a BytesIO.
    """

    def __init__(
        self,
        format: Optional[str] = "svg",
        return_direct: bool = False,
        exceptions_to_reflect: Optional[List[Exception]] = None,
    ):
        def eval_mermaid_partial(mermaid_code: str):
            # Bind the configured output format; the tool input is only the code.
            return eval_mermaid(mermaid_code, format)

        langchain_tool = Tool.from_function(
            func=eval_mermaid_partial,
            name="mermaid_evaluator_tool",
            description="Evaluates Mermaid code and returns the output as a BytesIO object.",
            args_schema=create_model(
                "MermaidEvaluatorToolInput",
                mermaid_code=(str, Field(description="The Mermaid code to evaluate.")),
            ),
        )
        super().__init__(
            tool=langchain_tool,
            return_direct=return_direct,
            exceptions_to_reflect=exceptions_to_reflect,
        )
39 |
40 |
def eval_mermaid(mermaid_code: str, format: Optional[str] = "svg") -> io.BytesIO:
    """Render Mermaid code with the mermaid CLI (mmdc).

    Args:
        mermaid_code: The Mermaid diagram source.
        format: Output format; one of "svg", "png", "pdf".

    Returns:
        A BytesIO with the rendered output on success, or the CLI's error
        message (a str) on failure.

    Raises:
        NotImplementedError: If a markdown output format is requested.
    """
    with tempfile.NamedTemporaryFile(delete=True, mode="w+", suffix=".mmd") as temp_in:
        temp_in.write(mermaid_code)
        temp_in.flush()  # Ensure all data is written to disk

        if format in ["md", "markdown"]:
            raise NotImplementedError("Markdown format is not yet supported in this wrapper.")
        assert format in [
            "svg",
            "png",
            "pdf",
        ], "Invalid format specified, must be svg, png, or pdf."
        out_file = f"output.{format}"

        # Invoke the mermaid CLI with an argument list (shell=False): the
        # previous shell=True call interpolated file paths into a shell string,
        # which breaks on paths with spaces and is an injection hazard.
        command = ["mmdc", "-i", temp_in.name, "-o", out_file, "-b", "transparent"]

        try:
            subprocess.run(
                command,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            # If the process succeeds, read the output file into BytesIO
            with open(out_file, "rb") as f:
                return io.BytesIO(f.read())
        except subprocess.CalledProcessError as e:
            # mmdc exited non-zero: return its error output as text
            return e.stderr.decode()
        except FileNotFoundError as e:
            # mmdc is not installed / not on PATH (with shell=True this used to
            # surface as a CalledProcessError from the shell)
            return str(e)
        finally:
            # Clean up the output file if it exists
            try:
                os.remove(out_file)
            except FileNotFoundError:
                pass
80 |
81 |
82 | # Define the Mermaid code for the flowchart
83 |
if __name__ == "__main__":
    # Manual smoke test. The diagram below is intentionally malformed (trailing
    # "[["), so eval_mermaid is expected to return the CLI's error message.
    mermaid_code = """
    graph TD;
        A[Start] --> B[Decision]
        B -- Yes --> C[Option 1]
        B -- No --> D[Option 2]
        C --> E[End]
        D --> E
        E[End] --> F[End]

        [[
    """

    out1 = eval_mermaid(mermaid_code)
    output_file_path = "output_file.bin"
    if isinstance(out1, str):
        # Rendering failed: the error text was returned instead of bytes.
        print(out1)
        exit(1)
    # Ensure the pointer is at the beginning of the BytesIO object
    out1.seek(0)

    # Open the output file in binary write mode and write the contents of the BytesIO object
    with open(output_file_path, "wb") as file_output:
        file_output.write(out1.read())
    tool = MermaidEvaluatorTool()
    out2 = tool.invoke({"mermaid_code": mermaid_code})
    print(out2)
111 |
--------------------------------------------------------------------------------
/motleycrew/tools/simple_retriever_tool.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import List, Optional
3 |
4 | from langchain_core.tools import StructuredTool
5 | from llama_index.core import (
6 | SimpleDirectoryReader,
7 | StorageContext,
8 | VectorStoreIndex,
9 | load_index_from_storage,
10 | )
11 | from llama_index.core.node_parser import SentenceSplitter
12 | from llama_index.core.embeddings import BaseEmbedding
13 | from llama_index.embeddings.openai import OpenAIEmbedding
14 | from pydantic import BaseModel, Field
15 |
16 | from motleycrew.applications.research_agent.question import Question
17 | from motleycrew.tools import MotleyTool
18 |
19 |
class SimpleRetrieverTool(MotleyTool):
    """A simple retriever tool that retrieves relevant documents from a local knowledge base."""

    def __init__(
        self,
        data_dir: str,
        persist_dir: str,
        return_strings_only: bool = False,
        return_direct: bool = False,
        exceptions_to_reflect: Optional[List[Exception]] = None,
        embeddings: Optional[BaseEmbedding] = None,
    ):
        """
        Args:
            data_dir: Path to the directory containing the documents.
            persist_dir: Path to the directory to store the index.
            return_strings_only: Whether to return only the text of the retrieved documents.
            return_direct: Passed through to MotleyTool.
            exceptions_to_reflect: Passed through to MotleyTool.
            embeddings: Embedding model to use; defaults to OpenAI's
                text-embedding-ada-002 if not provided.
        """
        tool = make_retriever_langchain_tool(
            data_dir, persist_dir, return_strings_only=return_strings_only, embeddings=embeddings
        )
        super().__init__(
            tool=tool, return_direct=return_direct, exceptions_to_reflect=exceptions_to_reflect
        )
44 |
45 |
class RetrieverToolInput(BaseModel, arbitrary_types_allowed=True):
    """Input for the retriever tool."""

    # arbitrary_types_allowed lets the schema carry the project's Question type.
    question: Question = Field(
        description="The input question for which to retrieve relevant data."
    )
52 |
53 |
def make_retriever_langchain_tool(
    data_dir,
    persist_dir,
    return_strings_only: bool = False,
    embeddings: Optional[BaseEmbedding] = None,
):
    """Build a StructuredTool that retrieves documents from a llama_index vector index.

    The index is built from the files in ``data_dir`` on first use and persisted
    to ``persist_dir``; subsequent calls load the persisted index instead.
    """
    if embeddings is None:
        # Default embedding model when none is supplied.
        text_embedding_model = "text-embedding-ada-002"
        embeddings = OpenAIEmbedding(model=text_embedding_model)

    if not os.path.exists(persist_dir):
        # load the documents and create the index
        documents = SimpleDirectoryReader(data_dir).load_data()
        index = VectorStoreIndex.from_documents(
            documents,
            # NOTE(review): the embedding model is also passed as a transformation
            # here in addition to embed_model — confirm this duplication is intended.
            transformations=[SentenceSplitter(chunk_size=512), embeddings],
            embed_model=embeddings,
        )
        # store it for later
        index.storage_context.persist(persist_dir=persist_dir)
    else:
        # load the existing index
        storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
        index = load_index_from_storage(storage_context, embed_model=embeddings)

    retriever = index.as_retriever(
        similarity_top_k=10,
        embed_model=embeddings,
    )

    def call_retriever(question: Question) -> list:
        """Retrieve nodes relevant to the question, optionally as plain strings."""
        out = retriever.retrieve(question.question)
        if return_strings_only:
            return [node.text for node in out]
        return out

    retriever_tool = StructuredTool.from_function(
        func=call_retriever,
        name="information_retriever_tool",
        description="Useful for running a natural language query against a"
        " knowledge base and retrieving a set of relevant documents.",
        args_schema=RetrieverToolInput,
    )
    return retriever_tool
98 |
--------------------------------------------------------------------------------
/motleycrew/tools/structured_passthrough.py:
--------------------------------------------------------------------------------
1 | from typing import Type, Any, Optional, Callable
2 |
3 | from pydantic import BaseModel
4 |
5 | from motleycrew.tools.tool import MotleyTool
6 |
7 |
class StructuredPassthroughTool(MotleyTool):
    """
    A tool that enforces a certain output shape, raising an error if the output is not as expected.
    """

    def __init__(
        self,
        schema: Type[BaseModel],
        post_process: Optional[Callable] = None,
        return_direct: bool = True,
        **kwargs
    ):
        """
        Args:
            schema: Pydantic model the inputs are validated against.
            post_process: Optional callable applied to the validated object
                before it is returned.
            return_direct: Passed through to MotleyTool; defaults to True so the
                validated output is returned directly.
            kwargs: Extra arguments forwarded to MotleyTool.
        """
        super().__init__(
            name="structured_passthrough_tool",
            description="A tool that checks output validity.",
            args_schema=schema,
            return_direct=return_direct,
            **kwargs
        )

        self.schema = schema
        self.post_process = post_process

    def run(self, **kwargs) -> Any:
        """
        Run the tool with the provided inputs.

        Returns the schema instance (possibly transformed by ``post_process``).
        Validation errors raised by the schema propagate to the caller.
        """
        # Validate the input against the schema
        validated_input = self.schema(**kwargs)

        if self.post_process:
            # Apply the post-processing function if provided
            validated_input = self.post_process(validated_input)

        # Return the validated input
        return validated_input
44 |
--------------------------------------------------------------------------------
/motleycrew/tracking/__init__.py:
--------------------------------------------------------------------------------
1 | """Observability-related functionality."""
2 |
3 | from .utils import add_default_callbacks_to_langchain_config, get_default_callbacks_list
4 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "motleycrew"
3 | version = "0.2.5"
4 | description = "A lightweight agent interaction framework."
5 | authors = ["MotleyCrew "]
6 | readme = "README.md"
7 |
8 | [tool.poetry.dependencies]
9 | python = "^3.10"
10 | langchain = "^0.3"
11 | langchain-experimental = "*"
12 | langchain-openai = "*"
13 | python-dotenv = "^1.0.0"
14 | kuzu = "^0.8"
15 | cloudpickle = "^3.0.0"
16 | platformdirs = "^4.2.1"
17 | pydantic = "^2.7.1"
18 |
19 | [tool.poetry.group.dev.dependencies]
20 | black = "^24.2.0"
21 | pytest = "^8.0.2"
22 | pytest-cov = "^4.1.0"
23 | flake8 = "^7.0.0"
24 | mypy = "^1.8.0"
25 | isort = "^5.13.2"
26 | sphinx = "^7.3.7"
27 | sphinx-rtd-theme = "^2.0.0"
28 | nbsphinx = "^0.9.4"
29 | pypandoc_binary = "^1.13"
30 | ipykernel = "^6.29.4"
31 | nbsphinx-link = "^1.3.0"
32 | nbformat = "^5.10.4"
33 |
34 | [build-system]
35 | requires = ["poetry-core"]
36 | build-backend = "poetry.core.masonry.api"
37 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | markers =
3 | fat: marks fat tests
4 | addopts = -m "not fat"
5 |
--------------------------------------------------------------------------------
/requirements-extra.txt:
--------------------------------------------------------------------------------
1 | motleycache
2 | lunary
3 | llama-index
4 | duckduckgo-search
5 | pglast
6 | ray[default]
7 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
import os

# Placeholder key so modules that read OPENAI_API_KEY at import time work in tests.
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
4 |
--------------------------------------------------------------------------------
/tests/test_agents/__init__.py:
--------------------------------------------------------------------------------
1 | from typing import Type
2 |
3 | from langchain_core.tools import BaseTool
4 | from pydantic import BaseModel, Field
5 |
6 |
class MockToolInput(BaseModel):
    """Input for the MockTool tool."""

    # Echoed back unchanged by MockTool._run.
    tool_input: str = Field(description="tool_input")
11 |
12 |
class MockTool(BaseTool):
    """Mock tool for run agent tests.

    Echoes its input back so tests can assert that tool calls pass through.
    """

    name: str = "mock tool"
    description: str = "Mock tool for tests"

    args_schema: Type[BaseModel] = MockToolInput

    def _run(self, tool_input: str, *args, **kwargs):
        # Identity behavior: return the input unchanged.
        return tool_input
23 |
--------------------------------------------------------------------------------
/tests/test_agents/conftest.py:
--------------------------------------------------------------------------------
import os

# Placeholder key so modules that read OPENAI_API_KEY at import time work in tests.
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
4 |
--------------------------------------------------------------------------------
/tests/test_agents/test_agent_chain.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from motleycrew.agents.parent import MotleyAgentParent
4 |
5 |
class AgentMock:
    """Minimal agent stub whose invoke() echoes its input dict back."""

    def invoke(self, input_dict: dict, *args, **kwargs):
        return input_dict
9 |
10 |
class MotleyAgentMock(MotleyAgentParent):
    """MotleyAgentParent subclass that materializes lazily on invoke."""

    def invoke(self, *args, **kwargs):
        # Materialize the underlying agent (via the factory), then delegate to it.
        self.materialize()
        return self.agent.invoke(*args, **kwargs)
16 |
17 |
def agent_factory(*args, **kwargs):
    """Factory passed to the mock agents; ignores its arguments and returns a stub."""
    return AgentMock()
20 |
21 |
@pytest.fixture
def motley_agents():
    """Two mock agents built from the stub factory, for chaining tests."""
    agent1 = MotleyAgentMock("agent1 description", agent_factory=agent_factory)
    agent2 = MotleyAgentMock("agent2 description", agent_factory=agent_factory)
    return [agent1, agent2]
27 |
28 |
def test_agent_chain(motley_agents):
    """Piping two agents yields an invokable chain that passes the prompt through."""
    agent1, agent2 = motley_agents
    agent_chain = agent1 | agent2
    assert hasattr(agent_chain, "invoke")
    prompt = {"prompt": "test_prompt"}
    # Both mocks echo their input, so the chain must return the prompt unchanged.
    assert agent_chain.invoke(prompt) == prompt
35 |
--------------------------------------------------------------------------------
/tests/test_agents/test_agents.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from langchain_core.prompts.chat import ChatPromptTemplate
5 |
6 | from motleycrew.agents.langchain.tool_calling_react import ReActToolCallingMotleyAgent
7 | from motleycrew.agents.llama_index.llama_index_react import ReActLlamaIndexMotleyAgent
8 | from motleycrew.common.exceptions import (
9 | AgentNotMaterialized,
10 | CannotModifyMaterializedAgent,
11 | )
12 | from tests.test_agents import MockTool
13 |
14 | os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
15 |
16 | test_agents_names = ("langchain", "llama_index")
17 |
18 |
class TestAgents:
    """Shared behavior tests for the langchain and llama_index agent wrappers."""

    @pytest.fixture(scope="class")
    def langchain_agent(self):
        """ReActToolCallingMotleyAgent with a single mock tool."""
        agent = ReActToolCallingMotleyAgent(
            name="AI writer agent",
            prompt_prefix="Generate AI-generated content",
            description="AI-generated content",
            tools=[MockTool()],
            verbose=True,
        )
        return agent

    @pytest.fixture(scope="class")
    def llama_index_agent(self):
        """ReActLlamaIndexMotleyAgent with a single mock tool."""
        agent = ReActLlamaIndexMotleyAgent(
            prompt_prefix="Uncover cutting-edge developments in AI and data science",
            description="AI researcher",
            tools=[MockTool()],
            verbose=True,
        )
        return agent

    @pytest.fixture(scope="class")
    def agent(self, request, langchain_agent, llama_index_agent):
        # Indirect parametrization: request.param selects which agent to test.
        agents = {
            "langchain": langchain_agent,
            "llama_index": llama_index_agent,
        }
        return agents.get(request.param)

    @pytest.mark.parametrize("agent", test_agents_names, indirect=True)
    def test_add_tools(self, agent):
        """Adding a tool that duplicates an existing one leaves the count unchanged."""
        assert len(agent.tools) == 1
        tools = [MockTool()]
        agent.add_tools(tools)
        assert len(agent.tools) == 1

    @pytest.mark.parametrize("agent", test_agents_names, indirect=True)
    def test_materialized(self, agent):
        """Agent access before materialize() raises; modification after raises."""
        with pytest.raises(AgentNotMaterialized):
            agent.agent

        assert not agent.is_materialized
        agent.materialize()
        assert agent.is_materialized

        with pytest.raises(CannotModifyMaterializedAgent):
            agent.add_tools([MockTool(name="another_tool")])

    @pytest.mark.parametrize("agent", test_agents_names, indirect=True)
    def test_compose_prompt(self, agent):
        """The composed prompt contains both the prefix and the rendered task prompt."""
        task_prompt = ChatPromptTemplate.from_template("What are the latest {topic} trends?")
        task_dict = {"topic": "AI"}
        prompt = agent.compose_prompt(input_dict=task_dict, prompt=task_prompt)

        assert str(agent.prompt_prefix) in prompt
        assert "What are the latest AI trends?" in prompt
76 |
--------------------------------------------------------------------------------
/tests/test_agents/test_langchain_output_handler.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from langchain_core.agents import AgentAction, AgentFinish
3 |
4 | from motleycrew.agents.langchain.tool_calling_react import ReActToolCallingMotleyAgent
5 | from motleycrew.common import AuxPrompts
6 | from motleycrew.common.exceptions import InvalidOutput
7 | from motleycrew.tools import DirectOutput, MotleyTool
8 | from tests.test_agents import MockTool
9 |
10 | invalid_output = "Add more information about AI applications in medicine."
11 |
12 |
class ReportOutputHandler(MotleyTool):
    """Output handler that only accepts outputs mentioning "medical"."""

    def __init__(self):
        super().__init__(
            name="output_handler",
            description="Output handler",
            return_direct=True,
        )

    def run(self, output: str):
        # Reject non-matching outputs so tests can exercise the InvalidOutput path.
        if "medical" not in output.lower():
            raise InvalidOutput(invalid_output)

        return {"checked_output": output}
26 |
27 |
def fake_agent_plan(intermediate_steps, step, **kwargs):
    """Stubbed plan() that returns the injected step unchanged."""
    return step
30 |
31 |
def fake_agent_take_next_step(
    name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager
):
    """Stubbed _take_next_step that routes inputs straight to the output handler."""
    output_handler = name_to_tool_map.get("output_handler")
    result = output_handler._run(inputs, config=None)

    # DirectOutput is raised (not returned) to mimic the direct-return control flow.
    if isinstance(result, DirectOutput):
        raise result

    return result
43 |
44 |
@pytest.fixture
def agent():
    """ReAct agent whose plan and _take_next_step are replaced with the stubs above.

    The decorators under test (agent_plan_decorator, take_next_step_decorator)
    are then re-applied around the stubs so the tests exercise only the
    decorator logic, not the real LLM-driven agent.
    """
    agent = ReActToolCallingMotleyAgent(
        tools=[MockTool(), ReportOutputHandler()],
        verbose=True,
        chat_history=True,
        force_output_handler=True,
    )
    agent.materialize()
    # object.__setattr__ bypasses the pydantic model's attribute protection on
    # the underlying runnable; order matters: install the stub first, then
    # wrap it with the decorator being tested.
    object.__setattr__(agent._agent, "plan", fake_agent_plan)
    object.__setattr__(agent.agent, "plan", agent.agent_plan_decorator(agent.agent.plan))

    object.__setattr__(agent._agent, "_take_next_step", fake_agent_take_next_step)
    object.__setattr__(
        agent._agent,
        "_take_next_step",
        agent.take_next_step_decorator(agent.agent._take_next_step),
    )
    return agent
64 |
65 |
@pytest.fixture
def run_kwargs(agent):
    """Keyword arguments for invoking _take_next_step on the agent executor."""
    # NOTE(review): this attribute chain digs through LangChain runnable
    # internals to reach the AgentExecutor; it is tightly coupled to the
    # installed langchain version and will break if the wrapping changes.
    agent_executor = agent.agent.bound.bound.last.bound.deps[0].bound

    run_kwargs = {
        "name_to_tool_map": {tool.name: tool for tool in agent_executor.tools},
        "color_mapping": {},
        "inputs": {},
        "intermediate_steps": [],
    }
    return run_kwargs
77 |
78 |
def test_agent_plan(agent):
    """Actions pass through the plan decorator; AgentFinish becomes an error-tool action."""
    executor = agent.agent

    actions = [AgentAction("tool", "tool_input", "tool_log")]
    assert executor.plan([], actions) == actions

    finish = AgentFinish(return_values={"output": "test_output"}, log="test_output")
    converted = executor.plan([], finish)
    assert isinstance(converted, AgentAction)
    assert converted.tool == agent._agent_error_tool.name
    expected_input = {
        "error_message": AuxPrompts.get_direct_output_error_message(agent.get_output_handlers()),
        "message": "test_output",
    }
    assert converted.tool_input == expected_input
95 |
96 |
def test_agent_take_next_step(agent, run_kwargs):
    """Invalid output yields an error string; valid output yields an AgentFinish."""
    # Output lacking the word "medical" is rejected by ReportOutputHandler.
    run_kwargs["inputs"] = "Latest advancements in AI in 2024."
    result = agent.agent._take_next_step(**run_kwargs)
    assert result == f"{InvalidOutput.__name__}: {invalid_output}"

    # Output mentioning "medical" passes validation and ends the run.
    valid_input = "Latest advancements in medical AI in 2024."
    run_kwargs["inputs"] = valid_input
    result = agent.agent._take_next_step(**run_kwargs)
    assert isinstance(result, AgentFinish)
    assert isinstance(result.return_values, dict)
    assert result.return_values.get("output") == {"checked_output": valid_input}
113 |
--------------------------------------------------------------------------------
/tests/test_agents/test_llms.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from langchain_openai import ChatOpenAI
3 | from llama_index.llms.openai import OpenAI
4 |
5 | from motleycrew.common import LLMProvider, LLMFramework
6 | from motleycrew.common.exceptions import LLMProviderNotSupported
7 | from motleycrew.common.llms import init_llm
8 |
9 |
@pytest.mark.parametrize(
    "llm_provider, llm_framework, expected_class",
    [
        (LLMProvider.OPENAI, LLMFramework.LANGCHAIN, ChatOpenAI),
        (LLMProvider.OPENAI, LLMFramework.LLAMA_INDEX, OpenAI),
    ],
)
def test_init_llm(llm_provider, llm_framework, expected_class):
    """init_llm returns the framework-specific LLM class for each provider/framework pair."""
    created = init_llm(llm_provider=llm_provider, llm_framework=llm_framework)
    assert isinstance(created, expected_class)
20 |
21 |
def test_raise_init_llm():
    """init_llm must reject an unknown framework name with LLMProviderNotSupported."""
    # The return value was previously bound to an unused local (flake8 F841);
    # the call is only expected to raise, so no binding is needed.
    with pytest.raises(LLMProviderNotSupported):
        init_llm(llm_provider=LLMProvider.OPENAI, llm_framework="unknown_framework")
25 |
--------------------------------------------------------------------------------
/tests/test_crew/__init__.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from motleycrew.crew import MotleyCrew
4 | from motleycrew.tasks import SimpleTask
5 |
6 |
class AgentMock:
    """Minimal agent stand-in: echoes back the relevant parts of its input as a string."""

    def invoke(self, input_dict) -> str:
        return str(self.clear_input_dict(input_dict))

    async def ainvoke(self, input_dict) -> str:
        # The async entry point simply delegates to the sync one.
        return self.invoke(input_dict)

    @staticmethod
    def clear_input_dict(input_dict: dict) -> dict:
        """Keep only "name" and "prompt" keys whose values are not None."""
        return {
            key: input_dict[key]
            for key in ("name", "prompt")
            if input_dict.get(key) is not None
        }
23 |
24 |
class CrewFixtures:
    """Shared pytest fixtures for crew tests."""

    # Class-level counter so task descriptions stay unique across every test
    # that shares the class-scoped crew below.
    num_task = 0

    @pytest.fixture(scope="class")
    def crew(self):
        # One crew instance is shared by all tests in the class (scope="class"),
        # so tests in a class see each other's registered tasks.
        obj = MotleyCrew()
        return obj

    @pytest.fixture
    def agent(self):
        return AgentMock()

    @pytest.fixture
    def tasks(self, request, crew, agent):
        # request.param is supplied via indirect parametrization; fall back to
        # a single task when the param is falsy.
        num_tasks = request.param or 1
        tasks = []
        for i in range(num_tasks):
            description = "task{} description".format(self.num_task)
            tasks.append(SimpleTask(description=description, agent=agent, crew=crew))
            CrewFixtures.num_task += 1
        return tasks
46 |
--------------------------------------------------------------------------------
/tests/test_crew/test_crew.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from motleycrew.tasks.simple import SimpleTask, SimpleTaskUnit
4 | from tests.test_crew import CrewFixtures
5 |
6 |
class TestCrew(CrewFixtures):
    """Tests for MotleyCrew task registration and execution.

    NOTE(review): these tests share one class-scoped crew and rely on running
    in definition order (test_get_available_task expects exactly the tasks
    created by the earlier tests) -- confirm before reordering.
    """

    def test_create_simple_task(self, crew, agent):
        assert len(crew.tasks) == 0
        simple_task = SimpleTask(crew=crew, description="task description", agent=agent)
        assert len(crew.tasks) == 1
        # Creating the task must also persist its node in the graph store.
        node = simple_task.node
        assert crew.graph_store.get_node_by_class_and_id(type(node), node.id) == node

    @pytest.mark.parametrize("tasks", [2], indirect=True)
    def test_add_dependency(self, crew, tasks):
        task1, task2 = tasks
        crew.add_dependency(task1, task2)
        assert crew.graph_store.check_relation_exists(task1.node, task2.node)

    @pytest.mark.parametrize("tasks", [1], indirect=True)
    def test_register_added_task(self, tasks, crew):
        # Re-registering an already-registered task must not create a duplicate.
        task = tasks[0]
        len_tasks = len(crew.tasks)
        crew.register_tasks([task])
        assert len(crew.tasks) == len_tasks

    def test_get_available_task(self, crew):
        # Count reflects the tasks accumulated by the preceding tests.
        tasks = crew.get_available_tasks()
        assert len(tasks) == 3

    def test_get_extra_tools(self, crew):
        tasks = crew.get_available_tasks()
        assert not crew.get_extra_tools(tasks[-1])

    def test_run(self, crew, agent):
        available_tasks = crew.get_available_tasks()
        crew.run()
        for task in crew.tasks:
            assert task.done
            assert task.node.done
            unit = SimpleTaskUnit(
                name=task.name,
                prompt=task.description,
            )
            if task in available_tasks:
                # Available (dependency-free) tasks are invoked with exactly
                # this unit, so the mock agent reproduces their output.
                assert agent.invoke(unit.as_dict()) == task.output
            else:
                # Blocked tasks get a prompt augmented with upstream results,
                # so a bare-description unit cannot reproduce their output.
                assert agent.invoke(unit.as_dict()) != task.output
51 |
--------------------------------------------------------------------------------
/tests/test_crew/test_crew_threads.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from motleycrew.crew.crew_threads import TaskUnitThreadPool, TaskUnitThreadState
4 | from motleycrew.common import Defaults
5 | from tests.test_crew import CrewFixtures
6 |
7 |
class TestInvokeThreadPool(CrewFixtures):
    """Tests for TaskUnitThreadPool lifecycle: submit, drain, error handling, shutdown."""

    @pytest.fixture
    def thread_pool(self):
        obj = TaskUnitThreadPool()
        yield obj
        # Always join the worker threads, even if the test failed.
        obj.wait_and_close()

    # NOTE(review): pytest marks applied to fixtures have no effect, so this
    # parametrize does nothing, and the fixture is not used by any test in
    # this class -- candidate for removal.
    @pytest.mark.parametrize("tasks", [4], indirect=True)
    @pytest.fixture
    def thread_pool_with_tasks(self, tasks, thread_pool, agent):
        for task in tasks:
            unit = task.get_next_unit()
            thread_pool.add_task_unit(agent, task, unit)

        return thread_pool

    def test_init_thread_pool(self, thread_pool):
        # A fresh pool has all workers alive, empty queues, and nothing pending.
        assert len(thread_pool._threads) == Defaults.DEFAULT_NUM_THREADS
        assert all([t.is_alive() for t in thread_pool._threads])
        assert thread_pool.input_queue.empty()
        assert thread_pool.output_queue.empty()
        assert thread_pool.is_completed

    @pytest.mark.parametrize("tasks", [4], indirect=True)
    def test_put(self, thread_pool, agent, tasks):
        for task in tasks:
            unit = task.get_next_unit()
            thread_pool.add_task_unit(agent, task, unit)

        assert not thread_pool.is_completed
        assert len(thread_pool._task_units_in_progress) == 4

    @pytest.mark.parametrize("tasks", [4], indirect=True)
    def test_get_completed_tasks(self, thread_pool, agent, tasks):
        for task in tasks:
            unit = task.get_next_unit()
            thread_pool.add_task_unit(agent, task, unit)

        thread_pool.wait_and_close()
        completed_tasks = thread_pool.get_completed_task_units()

        assert len(completed_tasks) == 4
        assert len(thread_pool._task_units_in_progress) == 0
        assert thread_pool.is_completed
        assert all([t.state == TaskUnitThreadState.EXITED for t in thread_pool._threads])

    @pytest.mark.parametrize("tasks", [1], indirect=True)
    def test_get_completed_task_exception(self, thread_pool, agent, tasks):
        # A None unit makes the worker raise; the exception surfaces when
        # completed units are collected.
        for task in tasks:
            thread_pool.add_task_unit(agent, task, None)
        thread_pool.wait_and_close()

        with pytest.raises(AttributeError):
            thread_pool.get_completed_task_units()

        assert not thread_pool.is_completed

    def test_close(self, thread_pool):
        thread_pool.wait_and_close()
        assert all([not t.is_alive() for t in thread_pool._threads])
        assert all([t.state == TaskUnitThreadState.EXITED for t in thread_pool._threads])
70 |
--------------------------------------------------------------------------------
/tests/test_storage/__init__.py:
--------------------------------------------------------------------------------
1 | import kuzu
2 | import pytest
3 |
4 | from motleycrew.storage import MotleyKuzuGraphStore
5 |
6 |
class GraphStoreFixtures:
    """Fixtures providing graph store backends selectable by name."""

    @pytest.fixture
    def kuzu_graph_store(self, tmpdir):
        database = kuzu.Database(str(tmpdir / "test_db"))
        return MotleyKuzuGraphStore(database)

    @pytest.fixture
    def graph_store(self, request, kuzu_graph_store):
        # request.param (via indirect parametrization) selects the backend.
        available = {"kuzu": kuzu_graph_store}
        return available.get(request.param)
22 |
--------------------------------------------------------------------------------
/tests/test_storage/test_kuzu_graph_store.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import kuzu
4 | import pytest
5 |
6 | from motleycrew.common import GraphStoreType
7 | from motleycrew.storage import MotleyGraphNode
8 | from motleycrew.storage import MotleyKuzuGraphStore
9 | from tests.test_storage import GraphStoreFixtures
10 |
11 |
class Entity(MotleyGraphNode):
    """Graph node with one required and two optional properties, for storage tests."""

    int_param: int
    optional_str_param: Optional[str] = None
    optional_list_str_param: Optional[list[str]] = None


@pytest.fixture
def database(tmpdir):
    """A fresh Kuzu database rooted in a per-test temporary directory."""
    return kuzu.Database(str(tmpdir / "test_db"))
23 |
24 |
class TestMotleyKuzuGraphStore(GraphStoreFixtures):
    """Tests for node-id handling in the Kuzu-backed graph store."""

    def test_set_get_node_id(self):
        node = Entity(int_param=1)
        MotleyKuzuGraphStore._set_node_id(node=node, node_id=2)
        assert node.id == 2

    @pytest.mark.parametrize("graph_store", [GraphStoreType.KUZU], indirect=True)
    def test_insert_node_with_id_already_set(self, graph_store):
        node = Entity(int_param=1)
        MotleyKuzuGraphStore._set_node_id(node=node, node_id=2)
        # A node that already carries an id must be refused on insert.
        with pytest.raises(AssertionError):
            graph_store.insert_node(node)
37 |
--------------------------------------------------------------------------------
/tests/test_tasks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/tests/test_tasks/__init__.py
--------------------------------------------------------------------------------
/tests/test_tasks/test_simple_task.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from langchain_community.tools import DuckDuckGoSearchRun
3 |
4 | from motleycrew.agents.langchain.tool_calling_react import ReActToolCallingMotleyAgent
5 | from motleycrew.crew import MotleyCrew
6 | from motleycrew.storage.graph_store_utils import init_graph_store
7 | from motleycrew.tasks.simple import (
8 | SimpleTask,
9 | SimpleTaskUnit,
10 | compose_simple_task_prompt_with_dependencies,
11 | )
12 |
13 |
@pytest.fixture(scope="session")
def graph_store():
    """One graph store shared across the whole test session."""
    return init_graph_store()


@pytest.fixture
def crew(graph_store):
    return MotleyCrew(graph_store=graph_store)


@pytest.fixture
def agent():
    return ReActToolCallingMotleyAgent(
        name="AI writer agent",
        tools=[DuckDuckGoSearchRun()],
        verbose=True,
    )


@pytest.fixture
def tasks(crew, agent):
    # The second task deliberately has no agent so worker resolution failure
    # can be exercised.
    task_with_agent = SimpleTask(crew=crew, description="task1 description", agent=agent)
    task_without_agent = SimpleTask(crew=crew, description="task2 description")
    crew.register_tasks([task_with_agent, task_without_agent])
    return [task_with_agent, task_without_agent]
41 |
42 |
class TestSimpleTask:
    """Tests for SimpleTask unit lifecycle, prompt composition and worker lookup."""

    def test_register_completed_unit(self, tasks, crew):
        task1, task2 = tasks
        assert not task1.done
        assert task1.output is None
        unit = task1.get_next_unit()
        unit.output = task1.description

        # A unit must be marked done before the task will accept its completion.
        with pytest.raises(AssertionError):
            task1.on_unit_completion(unit)
        unit.set_done()
        task1.on_unit_completion(unit)
        # Completing the unit completes the task and propagates to its node.
        assert task1.done
        assert task1.output == unit.output
        assert task1.node.done

    def test_get_next_unit(self, tasks, crew):
        task1, task2 = tasks
        crew.add_dependency(task1, task2)
        # task2 is blocked by its upstream dependency, so it has no next unit.
        assert task2.get_next_unit() is None
        prompt = compose_simple_task_prompt_with_dependencies(
            description=task1.description,
            upstream_task_units=task1.get_units(),
            prompt_template_with_upstreams=task1.prompt_template_with_upstreams,
        )
        expected_unit = SimpleTaskUnit(
            name=task1.name,
            prompt=prompt,
        )
        next_unit = task1.get_next_unit()
        assert next_unit.prompt == expected_unit.prompt

    def test_get_worker(self, tasks, agent):
        task1, task2 = tasks
        assert task1.get_worker([]) == agent
        # task2 was created without an agent, so no worker can be resolved.
        with pytest.raises(ValueError):
            task2.get_worker([])
80 |
--------------------------------------------------------------------------------
/tests/test_tasks/test_task.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | import pytest
4 |
5 | from langchain_core.runnables import Runnable
6 |
7 | from motleycrew import MotleyCrew
8 | from motleycrew.tools import MotleyTool
9 | from motleycrew.tasks import Task, TaskUnitType, TaskUnit
10 | from motleycrew.storage.graph_store_utils import init_graph_store
11 | from motleycrew.common.exceptions import TaskDependencyCycleError
12 |
13 |
class TaskMock(Task):
    """Task stub satisfying the abstract interface with no-op implementations."""

    def get_next_unit(self) -> List[TaskUnitType]:
        # Stub: never yields units (returns None despite the annotation).
        pass

    def get_worker(self, tools: Optional[List[MotleyTool]]) -> Runnable:
        # Stub: no worker is ever provided.
        pass


def create_dummy_task(crew: MotleyCrew, name: str):
    """Create a TaskMock with the given name, registered on the crew."""
    return TaskMock(
        name=name,
        task_unit_class=TaskUnit,
        crew=crew,
    )
28 |
29 |
@pytest.fixture(scope="session")
def graph_store():
    # Session-wide store: all crews in this module share one backing graph.
    graph_store = init_graph_store()
    return graph_store


@pytest.fixture
def crew(graph_store):
    return MotleyCrew(graph_store=graph_store)


# Three independent dummy tasks for dependency-wiring tests.
@pytest.fixture
def task_1(crew):
    return create_dummy_task(crew, "1")


@pytest.fixture
def task_2(crew):
    return create_dummy_task(crew, "2")


@pytest.fixture
def task_3(crew):
    return create_dummy_task(crew, "3")
54 |
55 |
class TestSetUpstream:
    """Tests for wiring task dependencies via set_upstream and the >> operator."""

    def test_set_upstream_returns_self(self, task_1, task_2):
        returned = task_2.set_upstream(task_1)
        assert returned is task_2

    def test_set_upstream_sets_upstream(self, task_1, task_2):
        task_2.set_upstream(task_1)
        assert [] == task_1.get_upstream_tasks()
        assert [task_1] == task_2.get_upstream_tasks()

    def test_set_upstream_sets_downstreams(self, task_1, task_2):
        task_2.set_upstream(task_1)
        assert [task_2] == task_1.get_downstream_tasks()
        assert [] == task_2.get_downstream_tasks()

    def test_rshift_returns_left(self, task_1, task_2):
        returned = task_1 >> task_2
        assert returned is task_1

    def test_rshift_sets_downstream(self, task_1, task_2):
        task_1 >> task_2
        assert [task_2] == task_1.get_downstream_tasks()
        assert [] == task_2.get_downstream_tasks()

    def test_rshift_sets_upstream(self, task_1, task_2):
        task_1 >> task_2
        assert [] == task_1.get_upstream_tasks()
        assert [task_1] == task_2.get_upstream_tasks()

    def test_rshift_set_multiple_downstream(self, task_1, task_2, task_3):
        task_1 >> [task_2, task_3]
        assert {task_2, task_3} == set(task_1.get_downstream_tasks())
        assert [] == task_2.get_downstream_tasks()
        assert [] == task_3.get_downstream_tasks()

    def test_rshift_set_multiple_upstream(self, task_1, task_2, task_3):
        task_1 >> [task_2, task_3]
        assert [] == task_1.get_upstream_tasks()
        assert [task_1] == task_2.get_upstream_tasks()
        assert [task_1] == task_3.get_upstream_tasks()

    def test_sequence_on_left_returns_sequence(self, task_1, task_2, task_3):
        returned = [task_1, task_2] >> task_3
        assert returned == [task_1, task_2]

    def test_sequence_on_left_sets_downstream(self, task_1, task_2, task_3):
        [task_1, task_2] >> task_3
        assert [task_3] == task_1.get_downstream_tasks()
        assert [task_3] == task_2.get_downstream_tasks()
        assert [] == task_3.get_downstream_tasks()

    def test_sequence_on_left_sets_upstream(self, task_1, task_2, task_3):
        [task_1, task_2] >> task_3
        assert [] == task_1.get_upstream_tasks()
        assert [] == task_2.get_upstream_tasks()
        assert {task_1, task_2} == set(task_3.get_upstream_tasks())

    def test_deduplicates(self, task_1, task_2):
        # Listing the same task twice must create only one edge.
        task_1 >> [task_2, task_2]
        assert [task_2] == task_1.get_downstream_tasks()

    def test_error_on_direct_dependency_cycle(self, task_1):
        # A task may not depend on itself.
        with pytest.raises(TaskDependencyCycleError):
            task_1 >> task_1
132 |
133 |
class TestTask:
    """Tests for basic Task state transitions."""

    def test_set_done(self, task_1):
        # Both the task and its graph node start out not-done...
        assert not task_1.done
        assert not task_1.node.done
        task_1.set_done()
        # ...and set_done flips both flags.
        assert task_1.done
        assert task_1.node.done
141 |
--------------------------------------------------------------------------------
/tests/test_tasks/test_task_unit.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from motleycrew.tasks import TaskUnit
4 |
5 |
class TestTaskUnit:
    """Tests for TaskUnit status transitions and dict conversion."""

    @pytest.fixture(scope="class")
    def unit(self):
        # One unit shared by all tests in this class.
        return TaskUnit()

    def test_set_pending(self, unit):
        unit.set_pending()
        assert unit.pending

    def test_set_running(self, unit):
        unit.set_running()
        assert unit.running

    def test_set_done(self, unit):
        unit.set_done()
        assert unit.done

    def test_as_dict(self, unit):
        assert unit.as_dict() == dict(unit)
26 |
--------------------------------------------------------------------------------
/tests/test_tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShoggothAI/motleycrew/413d2c3ae9c5497229d784b9da0ca64cca7103cd/tests/test_tools/__init__.py
--------------------------------------------------------------------------------
/tests/test_tools/test_html_render_tool.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 |
5 | from motleycrew.tools.html_render_tool import HTMLRenderTool
6 |
7 |
@pytest.mark.fat
@pytest.mark.parametrize(
    "html_code",
    [
        # NOTE(review): these literals look garbled by extraction (HTML tags
        # appear stripped, leaving raw line breaks inside the strings) --
        # restore the original markup before relying on this test.
        "Test html
",
        "Test html
",
    ],
)
def test_render_tool(tmpdir, html_code):
    """HTMLRenderTool renders HTML to an image and keeps the source HTML beside it."""
    html_render_tool = HTMLRenderTool(work_dir=str(tmpdir), window_size=(800, 600), headless=False)

    image_path = html_render_tool.invoke(html_code)
    assert os.path.exists(image_path)
    image_dir, image_file_name = os.path.split(image_path)
    # The saved HTML shares the image's base name, under the "html" subdirectory.
    image_name = ".".join(image_file_name.split(".")[:-1])
    html_file_name = "{}.html".format(image_name)
    html_file_path = os.path.join(tmpdir, "html", html_file_name)
    assert os.path.exists(html_file_path)
26 |
--------------------------------------------------------------------------------
/tests/test_tools/test_linter_tools.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from motleycrew.tools.code import PostgreSQLLinterTool, PythonLinterTool
4 | from motleycrew.common.exceptions import ModuleNotInstalled
5 |
6 |
@pytest.fixture
def pgsql_linter_tool():
    return PostgreSQLLinterTool()


@pytest.fixture
def python_linter_tool():
    """PythonLinterTool instance, or None when its optional dependency is missing."""
    try:
        return PythonLinterTool()
    except ModuleNotInstalled:
        return None
20 |
21 |
@pytest.mark.parametrize(
    "query, expected",
    [
        ("select a from table_name", "SELECT a\nFROM table_name"),
        ("select a from table_name where a = 1", "SELECT a\nFROM table_name\nWHERE a = 1"),
        ("selec a from table_name where a = 1", 'syntax error at or near "selec", at index 0'),
    ],
)
def test_pgsql_tool(pgsql_linter_tool, query, expected):
    """The PostgreSQL linter pretty-prints valid SQL and reports syntax errors."""
    assert pgsql_linter_tool.invoke({"query": query}) == expected
33 |
34 |
@pytest.mark.parametrize(
    "code, file_name, valid_code, raises",
    [
        ("def plus(a, b):\n\treturn a + b", None, True, False),
        ("def plus(a):\n\treturn a + b", "test_code.py", False, False),
        ("def plus(a, b):\nreturn a + b", "test_code.py", False, False),
        ("def plus(a, b):\n\treturn a + b", "code.js", True, True),
    ],
)
def test_python_tool(python_linter_tool, code, file_name, valid_code, raises):
    """The Python linter returns None for clean code, a report string for bad
    code, and raises ValueError for non-.py file names.
    """
    if python_linter_tool is None:
        # Previously this returned silently, so the test "passed" without
        # checking anything; skipping is honest about what actually ran.
        pytest.skip("PythonLinterTool dependencies are not installed")

    params = {"code": code}
    if file_name:
        params["file_name"] = file_name

    if raises:
        with pytest.raises(ValueError):
            python_linter_tool.invoke(params)
    else:
        linter_result = python_linter_tool.invoke(params)
        if valid_code:
            assert linter_result is None
        else:
            assert isinstance(linter_result, str)
61 |
--------------------------------------------------------------------------------
/tests/test_tools/test_repl_tool.py:
--------------------------------------------------------------------------------
1 | from motleycrew.tools.code import PythonREPLTool
2 |
3 |
class TestREPLTool:
    """Smoke test for the Python REPL tool."""

    def test_repl_tool(self):
        repl_tool = PythonREPLTool()
        input_fields = list(repl_tool.tool.args_schema.__fields__.keys())

        # The tool must expose exactly one input field, "command".
        assert input_fields == ["command"]
        output = repl_tool.invoke({input_fields[0]: "print(1)"})
        assert output.strip() == "1"
11 |
--------------------------------------------------------------------------------
/tests/test_tools/test_structured_passthrough_tool.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pydantic import BaseModel, ValidationError
3 |
4 | from motleycrew.tools.structured_passthrough import StructuredPassthroughTool
5 |
6 |
class SampleSchema(BaseModel):
    """Simple schema used to exercise the passthrough tool."""

    name: str
    age: int


@pytest.fixture
def sample_schema():
    # The fixture yields the class itself, not an instance.
    return SampleSchema


@pytest.fixture
def structured_passthrough_tool(sample_schema):
    return StructuredPassthroughTool(schema=sample_schema)
20 |
21 |
def test_structured_passthrough_tool_initialization(structured_passthrough_tool, sample_schema):
    """The tool exposes its schema and the default name/description."""
    assert structured_passthrough_tool.schema == sample_schema
    assert structured_passthrough_tool.name == "structured_passthrough_tool"
    assert structured_passthrough_tool.description == "A tool that checks output validity."


def test_structured_passthrough_tool_run_valid_input(structured_passthrough_tool):
    """Valid input is parsed into the schema and returned unchanged."""
    parsed = structured_passthrough_tool.run(name="John Doe", age=30)
    assert (parsed.name, parsed.age) == ("John Doe", 30)


def test_structured_passthrough_tool_run_invalid_input(structured_passthrough_tool):
    """A non-integer age must fail schema validation."""
    with pytest.raises(ValidationError):
        structured_passthrough_tool.run(name="John Doe", age="thirty")


def test_structured_passthrough_tool_post_process(structured_passthrough_tool):
    """A post_process callback can transform the validated output."""
    def upper_case_name(data):
        data.name = data.name.upper()
        return data

    tool = StructuredPassthroughTool(
        schema=structured_passthrough_tool.schema, post_process=upper_case_name
    )

    parsed = tool.run(name="John Doe", age=30)
    assert (parsed.name, parsed.age) == ("JOHN DOE", 30)


def test_structured_passthrough_tool_post_process_noop(structured_passthrough_tool):
    """An identity post_process leaves the validated output untouched."""
    tool = StructuredPassthroughTool(
        schema=structured_passthrough_tool.schema, post_process=lambda data: data
    )

    parsed = tool.run(name="John Doe", age=30)
    assert (parsed.name, parsed.age) == ("John Doe", 30)
68 |
--------------------------------------------------------------------------------
/tests/test_tools/test_tool_chain.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from motleycrew.tools import MotleyTool
4 |
5 |
class ToolMock:
    """Identity tool: both entry points return the input unchanged."""

    def invoke(self, input: dict, *args, **kwargs):
        return input

    def _run(self, input: dict, *args, **kwargs):
        return input
12 |
13 |
@pytest.fixture
def tools():
    return [MotleyTool(ToolMock()), MotleyTool(ToolMock())]


def test_tool_chain(tools):
    """Piping two tools with | yields an invocable chain that passes data through."""
    first, second = tools
    chain = first | second
    assert hasattr(chain, "invoke")
    payload = {"prompt": "test prompt"}
    assert chain.invoke(payload) == payload
27 |
--------------------------------------------------------------------------------