├── .env.template ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── documentation.md │ └── feature_request.md ├── pull_request_template.md └── workflows │ ├── docs-deploy.yml │ └── publish.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── docker-compose.override.yaml ├── docker-compose.yaml ├── docs ├── DOCUMENTATION_GUIDE.md ├── contributing │ ├── architecture.md │ ├── index.md │ ├── project-structure.md │ ├── setup-development.md │ └── troubleshooting.md ├── core-concepts │ ├── agents │ │ ├── agent-communication.md │ │ ├── agent-definition.md │ │ ├── agent-state.md │ │ └── system-prompts.md │ ├── providers │ │ ├── provider-configuration.md │ │ └── supported-providers.md │ └── tools │ │ ├── built-in-tools.md │ │ ├── parameter-injection.md │ │ ├── tool-decorators.md │ │ └── tool-development.md ├── examples │ ├── basic-examples.md │ └── chatbot.md ├── getting-started │ ├── basic-concepts.md │ ├── first-agent.md │ ├── installation.md │ └── quick-start.md ├── index.md ├── release-notes │ ├── roadmap.md │ └── version-history.md └── stylesheets │ ├── content.css │ ├── footer.css │ ├── navbar.css │ ├── sidebar.css │ └── theme.css ├── examples ├── agents │ ├── basic_agent.py │ ├── dynamic_prompt_agent.py │ ├── gemini_agent.py │ ├── groq_agent.py │ ├── ollama_agent.py │ └── schema_agent.py ├── blocks │ └── basic_blocks.py ├── chains │ ├── basic_chain.py │ └── mixed_chain.py ├── graph │ └── basic_graph.py ├── teams │ ├── basic_team.py │ └── collaborative_team.py └── tools │ ├── basic_tools.py │ └── injected_tools.py ├── legion ├── __init__.py ├── agents │ ├── __init__.py │ ├── base.py │ └── decorators.py ├── blocks │ ├── __init__.py │ ├── base.py │ └── decorators.py ├── errors │ ├── __init__.py │ └── exceptions.py ├── exceptions.py ├── graph │ ├── __init__.py │ ├── builder.py │ ├── channel_manager.py │ ├── channels.py │ ├── checkpointing.py │ ├── component_coordinator.py │ ├── coordinator.py │ ├── decorators.py │ ├── edges │ │ ├── __init__.py │ │ ├── base.py │ │ ├── conditional.py │ │ ├── registry.py │ │ ├── routing.py │ │ └── validator.py │ ├── graph.py │ ├── nodes │ │ ├── __init__.py │ │ ├── agent.py │ │ ├── base.py │ │ ├── block.py │ │ ├── chain.py │ │ ├── decorators.py │ │ ├── execution.py │ │ ├── graph.py │ │ ├── registry.py │ │ └── team.py │ ├── retry.py │ ├── state.py │ └── update_protocol.py ├── groups │ ├── __init__.py │ ├── base.py │ ├── chain.py │ ├── decorators.py │ ├── team.py │ ├── team_tools.py │ └── types.py ├── interface │ ├── __init__.py │ ├── base.py │ ├── decorators.py │ ├── schemas.py │ └── tools.py ├── memory │ ├── base.py │ └── providers │ │ └── memory.py ├── monitoring │ ├── __init__.py │ ├── analysis.py │ ├── collectors.py │ ├── decorators.py │ ├── events │ │ ├── __init__.py │ │ ├── agent.py │ │ ├── base.py │ │ ├── chain.py │ │ └── team.py │ ├── metrics.py │ ├── monitors.py │ ├── registry.py │ └── storage │ │ ├── __init__.py │ │ ├── base.py │ │ ├── config.py │ │ ├── factory.py │ │ ├── memory.py │ │ └── sqlite.py └── providers │ ├── __init__.py │ ├── anthropic.py │ ├── bedrock.py │ ├── factory.py │ ├── gemini.py │ ├── groq.py │ ├── huggingface.py │ ├── ollama.py │ └── openai.py ├── mkdocs.yml ├── mypy.ini ├── poetry.lock ├── pyproject.toml ├── pytest.ini ├── requirements.txt ├── scripts ├── security.py ├── setup_env.py ├── setup_hooks.py ├── sync_requirements.py └── typecheck.py ├── setup.py.bak ├── tests ├── __init__.py ├── agents │ ├── __init__.py │ ├── run_tests.py │ ├── test_agent_base.py │ └── test_decorators.py ├── blocks │ ├── __init__.py │ ├── run_tests.py │ └── test_blocks.py ├── conftest.py ├── graph │ ├── __init__.py │ ├── nodes │ │ ├── __init__.py │ │ ├── test_agent.py │ │ ├── test_base.py │ │ ├── test_block.py │ │ ├── test_chain.py │ │ ├── test_decorators.py │ │ ├── test_execution.py │ │ ├── test_execution_retry.py │ │ ├── test_graph_node.py │ │ ├── test_registry.py │ │ └── test_team.py │ ├── run_tests.py │ ├── test_builder.py │ ├── test_channel_manager.py │ ├── test_channels.py │ ├── test_checkpointing.py │ ├── test_component_coordinator.py │ ├── test_conditional_edge.py │ ├── test_coordinator.py │ ├── test_decorators.py │ ├── test_edges.py │ ├── test_graph.py │ ├── test_retry.py │ ├── test_routing.py │ ├── test_state.py │ └── test_update_protocol.py ├── groups │ ├── __init__.py │ ├── run_tests.py │ ├── test_chain.py │ ├── test_chain_decorators.py │ └── test_team.py ├── interface │ ├── __init__.py │ ├── run_tests.py │ └── test_tools.py ├── monitoring │ ├── __init__.py │ ├── events │ │ ├── run_tests.py │ │ ├── test_agent.py │ │ ├── test_chain_events.py │ │ ├── test_event_base.py │ │ └── test_team_events.py │ ├── run_tests.py │ ├── storage │ │ ├── test_base.py │ │ ├── test_config.py │ │ ├── test_factory.py │ │ ├── test_memory.py │ │ └── test_sqlite.py │ ├── test_analysis.py │ ├── test_collectors.py │ ├── test_metrics.py │ ├── test_monitoring_decorators.py │ ├── test_monitors.py │ └── test_registry.py ├── providers │ ├── __init__.py │ ├── run_tests.py │ ├── test_anthropic.py │ ├── test_bedrock.py │ ├── test_gemini.py │ ├── test_groq.py │ ├── test_huggingface.py │ ├── test_model_colon.py │ ├── test_ollama.py │ └── test_openai.py ├── run_tests.py └── utils.py └── utils └── scan_codebase.py /.env.template: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | OPENAI_ORG_ID= 3 | ANTHROPIC_API_KEY= 4 | GEMINI_API_KEY= 5 | GROQ_API_KEY= 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Create a report to help us improve Legion 4 | title: '[BUG] ' 5 | labels: bug 6 | assignees: '' 7 | --- 8 | 9 | ## Bug Description 10 | 11 | 12 | ## Steps to Reproduce 13 | 1. 14 | 2. 15 | 3. 16 | 17 | ## Expected Behavior 18 | 19 | 20 | ## Actual Behavior 21 | 22 | 23 | ## Environment 24 | - OS: [e.g. Ubuntu 22.04] 25 | - Python Version: [e.g. 3.11.0] 26 | - Legion Version: [e.g. 1.0.0] 27 | - Other relevant dependencies: 28 | 29 | ## Additional Context 30 | 31 | 32 | ## Possible Solution 33 | 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/documentation.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Documentation 3 | about: Suggest improvements or report issues in documentation 4 | title: '[DOCS] ' 5 | labels: documentation 6 | assignees: '' 7 | --- 8 | 9 | ## Documentation Location 10 | 11 | 12 | ## Issue Type 13 | 14 | - [ ] Missing documentation 15 | - [ ] Incorrect documentation 16 | - [ ] Unclear documentation 17 | - [ ] Outdated documentation 18 | - [ ] Other 19 | 20 | ## Current State 21 | 22 | 23 | ## Suggested Changes 24 | 25 | 26 | ## Additional Context 27 | 28 | 29 | ## Would You Be Willing to Help? 30 | 31 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Suggest an idea for Legion 4 | title: '[FEATURE] ' 5 | labels: enhancement 6 | assignees: '' 7 | --- 8 | 9 | ## Problem Statement 10 | 11 | 12 | ## Proposed Solution 13 | 14 | 15 | ## Alternative Solutions 16 | 17 | 18 | ## Additional Context 19 | 20 | 21 | ## Implementation Ideas 22 | 23 | 24 | ## Benefits 25 | 26 | 27 | ## Potential Challenges 28 | 29 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Summary of Changes 2 | 3 | 4 | ### Files Changed 5 | - `file1.py` 6 | - Change description 1 7 | - Change description 2 8 | - `file2.py` 9 | - Change description 1 10 | - Change description 2 11 | 12 | ## Related Issue 13 | 14 | Fixes # 15 | 16 | ## Type of Change 17 | 18 | - [ ] Bug fix (non-breaking change which fixes an issue) 19 | - [ ] New feature (non-breaking change which adds functionality) 20 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 21 | - [ ] Documentation update 22 | - [ ] Performance improvement 23 | - [ ] Code cleanup or refactor 24 | 25 | ## Implementation Details 26 | 27 | 28 | ## Testing 29 | 30 | - [ ] All existing tests pass 31 | - [ ] Added new tests for the changes 32 | 33 | 34 | ## Documentation Changes 35 | 36 | 37 | ## Checklist 38 | - [ ] My code follows the project's style guidelines 39 | - [ ] I have performed a self-review of my code 40 | - [ ] I have commented my code, particularly in hard-to-understand areas 41 | - [ ] I have updated the documentation accordingly 42 | - [ ] My changes generate no new warnings 43 | - [ ] I have added tests that prove my fix is effective or that my feature works 44 | - [ ] New and existing unit tests pass locally with my changes 45 | 46 | ## Screenshots (if applicable) 47 | 48 | 49 | ## Additional Notes 50 | -------------------------------------------------------------------------------- /.github/workflows/docs-deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Documentation 2 | on: 3 | pull_request: 4 | branches: 5 | - main 6 | paths: 7 | - 'docs/**' 8 | - 'mkdocs.yml' 9 | 10 | permissions: 11 | contents: write 12 | 13 | jobs: 14 | deploy: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Configure Git Credentials 20 | run: | 21 | git config user.name github-actions[bot] 22 | git config user.email 41898282+github-actions[bot]@users.noreply.github.com 23 | 24 | - uses: actions/setup-python@v5 25 | with: 26 | python-version: '3.11' 27 | 28 | - name: Install dependencies 29 | run: pip install mkdocs-material==9.5.49 30 | 31 | - name: Validate documentation build 32 | run: mkdocs build --strict 33 | 34 | - name: Deploy documentation 35 | run: mkdocs gh-deploy --force -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to PyPI 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | 14 | - name: Set up Python 15 | uses: actions/setup-python@v5 16 | with: 17 | python-version: '3.11' 18 | 19 | - name: Install dependencies 20 | run: | 21 | python -m pip install --upgrade pip 22 | pip install build twine 23 | 24 | - name: Build package 25 | run: python -m build 26 | 27 | - name: Publish to PyPI 28 | env: 29 | TWINE_USERNAME: __token__ 30 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} 31 | run: | 32 | python -m twine upload dist/* 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .DS_Store 3 | __pycache__/ 4 | .mypy_cache/ 5 | .ruff_cache/ 6 | .pytest_cache/ 7 | .vscode/ 8 | .venv/ 9 | venv/ 10 | 11 | *.egg-info/ 12 | build/ 13 | dist/ 14 | *.pyc 15 | *.pyo 16 | *.pyd 17 | .Python 18 | *.so 19 | *.egg 20 | *.egg-info 21 | *.eggs 22 | 23 | poetry.lock 24 | 25 | .coverage 26 | coverage.xml 27 | htmlcov/ 28 | 29 | .ipynb_checkpoints 30 | 31 | .idea/ 32 | *.swp 33 | *.swo 34 | *.sublime-workspace 35 | *.sublime-project 36 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # Pre-commit configuration 2 | default_language_version: 3 | python: python3 # Prefer python3 by default 4 | 5 | repos: 6 | - repo: local 7 | hooks: 8 | - id: pytest-non-integration 9 | name: Run non-integration tests 10 | entry: python3 -m pytest -v -m "not integration" --no-header --capture=no || python -m pytest -v -m "not integration" --no-header --capture=no 11 | language: python 12 | types: [python] 13 | pass_filenames: false 14 | verbose: true 15 | additional_dependencies: [pytest, python-dotenv] 16 | 17 | - id: security-check 18 | name: Security checking 19 | entry: python3 scripts/security.py || python scripts/security.py 20 | language: python 21 | types: [python] 22 | pass_filenames: false 23 | verbose: true 24 | additional_dependencies: [bandit, safety] 25 | 26 | - repo: https://github.com/pre-commit/pre-commit-hooks 27 | rev: v4.5.0 28 | hooks: 29 | - id: trailing-whitespace 30 | - id: end-of-file-fixer 31 | - id: check-yaml 32 | - id: check-added-large-files 33 | - id: check-ast 34 | - id: check-json 35 | - id: check-merge-conflict 36 | - id: detect-private-key 37 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11-slim 2 | 3 | WORKDIR /app 4 | 5 | RUN apt-get update && apt-get install -y --no-install-recommends \ 6 | gcc \ 7 | python3-dev \ 8 | build-essential \ 9 | libffi-dev \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | RUN pip install --no-cache-dir --upgrade pip \ 13 | && pip install --no-cache-dir packaging \ 14 | && pip install --no-cache-dir poetry==1.7.1 15 | 16 | RUN poetry config virtualenvs.create false 17 | 18 | COPY . . 19 | 20 | RUN poetry install --no-interaction --no-ansi 21 | 22 | ENV PYTHONPATH="/app:${PYTHONPATH}" 23 | 24 | EXPOSE 8000 25 | 26 | CMD ["poetry", "run", "pytest", "-v"] 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Hayden Smith, Zain Imdad, LLMP 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Environment Setup 2 | setup-venv: 3 | python3 -m venv venv 4 | . venv/bin/activate && pip install -r requirements.txt 5 | 6 | setup-conda: 7 | @echo "Setting up Conda environment..." 8 | conda create -n legion python=3.11 -y 9 | @echo "Installing dependencies with Poetry in Conda environment..." 10 | conda run -n legion POETRY_VIRTUALENVS_CREATE=false poetry install 11 | 12 | # Install requirements with pip or poetry 13 | install: 14 | @if [ "$(POETRY)" = "true" ]; then \ 15 | echo "Using Poetry to install dependencies..."; \ 16 | POETRY_VIRTUALENVS_CREATE=false poetry install; \ 17 | else \ 18 | echo "Using pip to install dependencies..."; \ 19 | pip install -r requirements.txt; \ 20 | fi 21 | 22 | # Install pre-commit hooks 23 | pre-commit: 24 | @if [ "$(POETRY)" = "true" ]; then \ 25 | POETRY_VIRTUALENVS_CREATE=false poetry run pre-commit install; \ 26 | else \ 27 | pre-commit install; \ 28 | fi 29 | 30 | # Full Setup: Environment + Dependencies 31 | setup: 32 | @if [ "$(ENV)" = "conda" ]; then \ 33 | echo "Setting up environment with Conda..."; \ 34 | make setup-conda POETRY=$(POETRY); \ 35 | elif [ "$(ENV)" = "venv" ]; then \ 36 | echo "Setting up environment with venv..."; \ 37 | make setup-venv POETRY=$(POETRY); \ 38 | else \ 39 | echo "Invalid or missing ENV. Use ENV=conda or ENV=venv."; \ 40 | exit 1; \ 41 | fi 42 | @echo "Installing dependencies..." 43 | @make install POETRY=$(POETRY) 44 | @make pre-commit POETRY=$(POETRY) 45 | @echo "You will still need to activate the environment with 'conda activate legion'." 46 | 47 | # Testing 48 | test: 49 | @if [ "$(POETRY)" = "true" ]; then \ 50 | poetry run pytest -v; \ 51 | else \ 52 | pytest -v; \ 53 | fi 54 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | | Version | Supported | 6 | | ------- | ------------------ | 7 | | Latest | :white_check_mark: | 8 | 9 | ## Reporting a Vulnerability 10 | 11 | We take the security of Legion seriously. If you believe you have found a security vulnerability, please report it to us as described below. 12 | 13 | ### Reporting Process 14 | 15 | **Please do not report security vulnerabilities through public GitHub issues.** 16 | 17 | Instead: 18 | 19 | 1. Email us at [hayden@llmp.io](mailto:hayden@llmp.io) 20 | 2. Include as much information as possible: 21 | - A clear description of the vulnerability 22 | - Steps to reproduce the issue 23 | - Versions affected 24 | - Potential impact 25 | - Suggested fixes (if any) 26 | 27 | ### What to Expect 28 | 29 | After you submit a report: 30 | 31 | 1. You'll receive an acknowledgment within 48 hours. 32 | 2. We'll investigate and keep you updated on our findings. 33 | 3. Once we've determined the impact and resolution: 34 | - We'll develop and test a fix 35 | - We'll establish a disclosure timeline 36 | - We'll notify affected users as appropriate 37 | 38 | ### Safe Harbor 39 | 40 | We support safe harbor for security research that: 41 | - Follows our reporting guidelines 42 | - Makes a good faith effort to avoid privacy violations, data destruction, service interruption, and other harm 43 | - Does not exploit findings beyond what's necessary to demonstrate the vulnerability 44 | 45 | ### Public Disclosure 46 | 47 | We aim to address critical vulnerabilities within 30 days. We request that you keep vulnerabilities private until we release fixes. We'll coordinate with you on a disclosure timeline that serves both the community's need to update and your recognition as the reporter. 48 | 49 | ## Security Best Practices for Contributors 50 | 51 | 1. **Dependency Management** 52 | - Keep dependencies up to date 53 | - Review dependency changes carefully 54 | - Use dependabot alerts 55 | 56 | 2. **Code Review** 57 | - Review for security implications 58 | - Follow secure coding guidelines 59 | - Use security linters when possible 60 | 61 | 3. **Secrets and Credentials** 62 | - Never commit secrets or credentials 63 | - Use environment variables for sensitive data 64 | - Review code for accidental credential exposure 65 | 66 | ## Security Updates 67 | 68 | Security updates will be released as: 69 | 1. Immediate patches for critical vulnerabilities 70 | 2. Regular updates for non-critical security improvements 71 | 3. Dependencies updates via automated tools 72 | 73 | Updates will be announced through: 74 | - GitHub Security Advisories 75 | - Release notes 76 | - Discord announcements channel 77 | 78 | ## Questions 79 | 80 | If you have questions about this policy or Legion's security practices, please reach out on our Discord server in the #help channel. 81 | -------------------------------------------------------------------------------- /docker-compose.override.yaml: -------------------------------------------------------------------------------- 1 | ### BEGIN COMMENT BLOCK ### - Uncomment to leave container running 2 | # services: 3 | # legion: 4 | # # Override the command to keep container running 5 | # command: tail -f /dev/null 6 | # # Add tty and stdin_open to make shell access work better 7 | # tty: true 8 | # stdin_open: true 9 | ### END COMMENT BLOCK ### 10 | 11 | # To shell into the container, run: 12 | # docker compose up -d 13 | # docker compose exec legion bash 14 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | legion: 3 | build: 4 | context: . 5 | dockerfile: Dockerfile 6 | volumes: 7 | - .:/app 8 | environment: 9 | - PYTHONPATH=/app 10 | command: poetry run pytest -v 11 | -------------------------------------------------------------------------------- /docs/DOCUMENTATION_GUIDE.md: -------------------------------------------------------------------------------- 1 | # Documentation 2 | 3 | This project uses [MkDocs-Material](https://squidfunk.github.io/mkdocs-material/) for documentation. 4 | 5 | ## Quick Start 6 | 7 | ```bash 8 | pip install mkdocs-material==9.5.49 9 | 10 | # Start dev server 11 | mkdocs serve 12 | 13 | # Build static site 14 | mkdocs build 15 | ``` 16 | 17 | Visit http://127.0.0.1:8000 during development. 18 | 19 | ## File Structure 20 | 21 | ``` 22 | docs/ 23 | ├── index.md # Homepage 24 | └── assets/ # Images, files, etc. 25 | ``` 26 | 27 | ## Writing Docs 28 | 29 | 1. Create `.md` files in the `docs/` directory 30 | 2. Update `mkdocs.yml` to include new pages 31 | 3. Use standard Markdown syntax 32 | 33 | See [MkDocs writing guide](https://www.mkdocs.org/user-guide/writing-your-docs/) for detailed information. -------------------------------------------------------------------------------- /docs/contributing/architecture.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/docs/contributing/architecture.md -------------------------------------------------------------------------------- /docs/contributing/index.md: -------------------------------------------------------------------------------- 1 | # Contributing to Legion 2 | 3 | We're excited that you're interested in contributing to Legion! This guide will help you get started with the contribution process. 4 | 5 | ## Ways to Contribute 6 | 7 | - **Bug Reports**: If you find a bug, please create an issue in our GitHub repository 8 | - **Feature Requests**: Have an idea for a new feature? Share it through GitHub issues 9 | - **Documentation**: Help improve our documentation by fixing typos, adding examples, or clarifying explanations 10 | - **Code Contributions**: Submit pull requests for bug fixes or new features 11 | 12 | ## Development Setup 13 | 14 | For information about setting up your development environment, please refer to our installation guide. 15 | 16 | ## Pull Request Process 17 | 18 | 1. **Create Pull Request** 19 | - Push your changes to your fork 20 | - Create a PR against the `main` branch 21 | - Fill out the PR template 22 | - Link any related issues 23 | 24 | 2. **PR Guidelines** 25 | - Keep changes focused and atomic 26 | - Provide clear description of changes 27 | - Include any necessary documentation updates 28 | - Add screenshots for UI changes 29 | - Reference any related issues 30 | 31 | 3. **Review Process** 32 | - Maintainers will review your PR 33 | - Address any requested changes 34 | - Once approved, maintainers will merge your PR 35 | 36 | ### **Collaboration Patterns** 37 | 38 | 1. **Direct to Upstream** (Recommended) 39 | - Each contributor maintains their fork 40 | - Create PRs directly to upstream 41 | - Sync fork: 42 | ```bash 43 | git fetch upstream 44 | git rebase upstream/main 45 | ``` 46 | 47 | 2. **Fork Collaboration** 48 | - Add collaborators to your fork's settings 49 | - Both can push branches 50 | - Create single PR to upstream 51 | - Maintain clear ownership of the PR 52 | 53 | 3. **Cross-fork PRs** 54 | - For non-collaborator contributions 55 | - Create PR between forks 56 | - Owner submits final PR upstream 57 | 58 | Note: Contributions are tracked through PRs, not individual commits due to squash merging. All contributors are credited in the PR history. 59 | 60 | ## Code Style 61 | 62 | - Follow the existing code style 63 | - Include comments where necessary 64 | - Write clear commit messages 65 | 66 | ## Questions? 67 | 68 | If you have questions, feel free to open a discussion on GitHub. 69 | -------------------------------------------------------------------------------- /docs/contributing/project-structure.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/docs/contributing/project-structure.md -------------------------------------------------------------------------------- /docs/contributing/setup-development.md: -------------------------------------------------------------------------------- 1 | # Development Environment Setup 2 | 3 | This guide provides comprehensive instructions for setting up your development environment for Legion. 4 | 5 | ## Development Environment Options 6 | 7 | Legion offers two primary methods for setting up your development environment: 8 | 9 | 1. Manual Installation 10 | 2. Nix Shell (Recommended for team consistency) 11 | 12 | ## Manual Installation 13 | 14 | ### Prerequisites 15 | 16 | - Windows 10/11 with WSL2 enabled 17 | - Ubuntu on WSL2 18 | - Git 19 | 20 | ### Step-by-Step Setup 21 | 22 | 1. Update WSL Ubuntu System 23 | ```bash 24 | sudo apt update && sudo apt upgrade -y 25 | ``` 26 | 27 | 2. Install Python and Development Tools 28 | ```bash 29 | sudo apt install -y python3.11 python3.11-venv python3-pip build-essential 30 | ``` 31 | 32 | 3. Create and Activate Virtual Environment 33 | ```bash 34 | python3.11 -m venv .venv 35 | source .venv/bin/activate 36 | ``` 37 | 38 | 4. Install Python Dependencies 39 | ```bash 40 | pip install -r requirements.txt 41 | ``` 42 | 43 | ## Nix Shell Setup 44 | 45 | Nix provides a fully reproducible development environment that ensures all team members have identical setups. 46 | 47 | ### What is Nix? 48 | 49 | Nix creates isolated development environments that bundle everything your project needs - from system packages to language-specific dependencies. Unlike traditional approaches, Nix manages all dependencies in isolation, ensuring consistent development environments across your team. 50 | 51 | ### Installation Steps 52 | 53 | 1. Install Nix Package Manager 54 | ```bash 55 | sh <(curl -L https://nixos.org/nix/install) --daemon 56 | ``` 57 | 58 | 2. Enable Flakes (Optional) 59 | Add to `~/.config/nix/nix.conf`: 60 | ``` 61 | experimental-features = nix-command flakes 62 | ``` 63 | 64 | 3. Using the Development Environment 65 | ```bash 66 | nix-shell shell.nix 67 | ``` 68 | 69 | ### Troubleshooting Nix Setup 70 | 71 | #### Permission Issues 72 | - Verify group membership: `groups` 73 | - Check socket permissions: `ls -l /nix/var/nix/daemon-socket/socket` 74 | - Restart daemon if needed: `sudo systemctl restart nix-daemon` 75 | 76 | #### Channel Update Failures 77 | - Check internet connection 78 | - Verify channel: `nix-channel --list` 79 | - Try removing and re-adding channel 80 | 81 | ## Environment Verification 82 | 83 | To verify your setup is working correctly: 84 | 85 | 1. Activate the environment (either venv or nix-shell) 86 | 2. Run the verification script: 87 | ```bash 88 | python verify_env.py 89 | ``` 90 | 91 | ## Included Tools and Dependencies 92 | 93 | - Python 3.11 94 | - pip 95 | - venv 96 | - build-essential 97 | - Development libraries 98 | 99 | ## Maintenance 100 | 101 | Keep your development environment up to date: 102 | 103 | 1. Regular Updates 104 | ```bash 105 | git pull origin main 106 | pip install -r requirements.txt # For manual setup 107 | # OR 108 | nix-shell # For Nix setup (will automatically update) 109 | ``` 110 | 111 | 2. Verify Environment 112 | ```bash 113 | python verify_env.py 114 | ``` 115 | 116 | ## Need Help? 117 | 118 | If you encounter any issues during setup: 119 | 120 | 1. Check our [Troubleshooting Guide](troubleshooting.md) 121 | 2. Open an issue on our [GitHub repository](https://github.com/LLMP-io/Legion) 122 | 3. Join our [Discord community](https://discord.gg/legion) for real-time support 123 | -------------------------------------------------------------------------------- /docs/contributing/troubleshooting.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/docs/contributing/troubleshooting.md -------------------------------------------------------------------------------- /docs/core-concepts/agents/agent-communication.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/core-concepts/agents/agent-definition.md: -------------------------------------------------------------------------------- 1 | # Agent Definition 2 | 3 | In Legion, agents are the fundamental building blocks of your multi-agent system. This guide explains how to define and customize agents. 4 | 5 | ## Basic Agent Structure 6 | 7 | An agent in Legion is defined using the `@agent` decorator: 8 | 9 | ```python 10 | from legion.agents import agent 11 | from legion.interface.decorators import tool 12 | 13 | @agent(model="openai:gpt-4-turbo", temperature=0.2) 14 | class MyAgent: 15 | """System prompt that defines the agent's role and behavior""" 16 | 17 | @tool 18 | def my_tool(self, param: str) -> str: 19 | """Tool description""" 20 | return f"Processed: {param}" 21 | ``` 22 | 23 | ## Agent Decorator Parameters 24 | 25 | - `model`: The LLM provider and model to use (e.g., "openai:gpt-4-turbo", "anthropic:claude-3") 26 | - `temperature`: Controls randomness in responses (0.0 to 1.0) 27 | - `tools`: List of tools available to the agent 28 | - `memory`: Memory provider for storing conversation history 29 | 30 | ## System Prompts 31 | 32 | The class docstring serves as the agent's system prompt: 33 | 34 | ```python 35 | @agent(model="openai:gpt-4-turbo") 36 | class AnalysisAgent: 37 | """You are an expert data analyst skilled in interpreting complex datasets. 38 | Always provide clear explanations and cite relevant statistics.""" 39 | ``` 40 | 41 | ## Adding Tools 42 | 43 | Tools can be added in two ways: 44 | 45 | 1. As class methods: 46 | ```python 47 | @agent(model="openai:gpt-4-turbo") 48 | class CalculatorAgent: 49 | @tool 50 | def add(self, a: float, b: float) -> float: 51 | """Add two numbers together""" 52 | return a + b 53 | ``` 54 | 55 | 2. As external functions: 56 | ```python 57 | @tool 58 | def multiply(a: float, b: float) -> float: 59 | """Multiply two numbers""" 60 | return a * b 61 | 62 | @agent(model="openai:gpt-4-turbo", tools=[multiply]) 63 | class MathAgent: 64 | """A mathematical agent with access to multiplication""" 65 | ``` 66 | 67 | ## Using Type Hints 68 | 69 | Legion uses type hints for input validation: 70 | 71 | ```python 72 | from typing import List, Optional 73 | from pydantic import Field 74 | 75 | @agent(model="openai:gpt-4-turbo") 76 | class DataProcessor: 77 | @tool 78 | def process_items( 79 | self, 80 | items: List[str], 81 | prefix: Optional[str] = None, 82 | max_items: int = Field(default=10, gt=0, le=100) 83 | ) -> List[str]: 84 | """Process a list of items with optional prefix""" 85 | results = items[:max_items] 86 | if prefix: 87 | results = [f"{prefix}: {item}" for item in results] 88 | return results 89 | ``` 90 | 91 | ## Best Practices 92 | 93 | 1. **Clear System Prompts**: Write detailed, specific system prompts 94 | 2. **Tool Documentation**: Always provide clear docstrings for tools 95 | 3. **Type Safety**: Use type hints and Pydantic fields for validation 96 | 4. **Modular Design**: Keep agents focused on specific tasks 97 | 5. **Error Handling**: Implement proper error handling in tools 98 | 99 | ## Next Steps 100 | 101 | - Learn about [System Prompts](system-prompts.md) 102 | - Understand [Agent State](agent-state.md) 103 | - Explore [Agent Communication](agent-communication.md) 104 | -------------------------------------------------------------------------------- /docs/core-concepts/agents/agent-state.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/core-concepts/agents/system-prompts.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/core-concepts/providers/provider-configuration.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/core-concepts/providers/supported-providers.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/core-concepts/tools/built-in-tools.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/core-concepts/tools/parameter-injection.md: -------------------------------------------------------------------------------- 1 | # Parameter Injection 2 | 3 | A function can be made into a tool that the agent can use using the `@tool` decorator. It can have the following: 4 | 5 | - A name 6 | - A description 7 | - Parameters with type hints and descriptions 8 | - Injectable parameters for dynamic configuration 9 | 10 | For example, we can define a tool called `process_api_query` that is supposed to ping some endpoint with a query authenticated by a given credential. We can define it like so - 11 | 12 | ```python 13 | from typing import Annotated 14 | from pydantic import Field 15 | from legion.interface.decorators import tool 16 | 17 | @tool( 18 | inject=["api_key", "endpoint"], 19 | description="Process a query using an external API", 20 | defaults={"api_key": "sk_test_default", "endpoint": "https://api.example.com/v1"} 21 | ) 22 | def process_api_query( 23 | query: Annotated[str, Field(description="The query to process")], 24 | api_key: Annotated[str, Field(description="API key for the service")], 25 | endpoint: Annotated[str, Field(description="API endpoint")] 26 | ) -> str: 27 | """Process a query using an external API""" 28 | # API call logic here 29 | return f"Processed '{query}' using API at {endpoint} with key {api_key[:4]}..." 30 | ``` 31 | 32 | This has three parameters, `query`, `api_key` and `endpoint`. In simpler examples of how tools are meant to be used, we say the LLM deciding the values of these parameters. However, in this case, `api_key` and `endpoint` needs to come from the user and we do not want to expose these details to the LLM. 33 | 34 | We can define parameter injection in this manner to pass on those details from the application side without exposing them to the LLM. 35 | 36 | During the invocation of the agent, we can inject these parameters like so - 37 | 38 | ```python 39 | response = await agent.aprocess( 40 | "Process the query 'test message' with production credentials.", 41 | injected_parameters=[ 42 | { 43 | "tool": process_api_query, 44 | "parameters": { 45 | "api_key": "sk_prod_key_123", 46 | "endpoint": "https://api.prod.example.com/v1" 47 | } 48 | } 49 | ] 50 | ) 51 | ``` 52 | Putting it all together, 53 | 54 | ```python 55 | from typing import Annotated 56 | 57 | from colorama import Fore, Style 58 | from dotenv import load_dotenv 59 | from pydantic import Field 60 | 61 | from legion.agents import agent 62 | from legion.interface.decorators import tool # noqa: F401 63 | 64 | load_dotenv() 65 | 66 | 67 | @tool( 68 | inject=["api_key", "endpoint"], 69 | description="Process a query using an external API", 70 | defaults={"api_key": "sk_test_default", "endpoint": "https://api.example.com/v1"} 71 | ) 72 | def process_api_query( 73 | query: Annotated[str, Field(description="The query to process")], 74 | api_key: Annotated[str, Field(description="API key for the service")], 75 | endpoint: Annotated[str, Field(description="API endpoint")] 76 | ) -> str: 77 | """Process a query using an external API""" 78 | # API call logic here 79 | response = f"Processed '{query}' using API at {endpoint} with key {api_key[:4]}..." 80 | print(response) 81 | return response 82 | 83 | 84 | @agent( 85 | model="openai:gpt-4o-mini", 86 | temperature=0.2, 87 | tools=[process_api_query], # Bind the tool 88 | ) 89 | class APIAgent: 90 | """An agent that demonstrates using tools with injected parameters. 91 | 92 | I can process queries using an external API without exposing sensitive credentials. 93 | Parameters are injected per-message with optional defaults. Simply execute the given 94 | process_api_query tool with the given parameters. 95 | """ 96 | 97 | 98 | async def main(): 99 | print(f"{Fore.GREEN}[MAIN]{Style.RESET_ALL} Creating agent instance") 100 | # Create an instance of our agent 101 | agent = APIAgent() 102 | 103 | response = await agent.aprocess( 104 | "Process the query 'test message' with production credentials.", 105 | injected_parameters=[ 106 | { 107 | "tool": process_api_query, 108 | "parameters": { 109 | "api_key": "sk_prod_key_123", 110 | "endpoint": "https://api.prod.example.com/v1" 111 | } 112 | } 113 | ] 114 | ) 115 | 116 | print(response.content) 117 | 118 | 119 | if __name__ == "__main__": 120 | import asyncio 121 | asyncio.run(main()) 122 | ``` 123 | 124 | This creates a tool with injected parameters determined at runtime. 125 | -------------------------------------------------------------------------------- /docs/core-concepts/tools/tool-decorators.md: -------------------------------------------------------------------------------- 1 | # The tool decorator 2 | 3 | The `@tool` decorator is a mechanism for defining specialized functions (tools) that can be called by an agent during runtime. Tools extend an agent's capabilities by allowing it to execute specific, structured tasks that might require external inputs or perform operations outside the agent's core model reasoning. These tasks can include API calls, pre-defined and specific computations, data processing, and more. 4 | 5 | ## Basic Structure of a Tool 6 | 7 | Here's a simple example of how a tool is defined and used: 8 | 9 | ```python 10 | from typing import Annotated 11 | from pydantic import Field 12 | from legion.interface.decorators import tool 13 | 14 | @tool 15 | def add_numbers( 16 | a: Annotated[int, Field(description="The first number")], 17 | b: Annotated[int, Field(description="The second number")] 18 | ) -> int: 19 | """Adds two numbers and returns the result.""" 20 | return a + b 21 | ``` 22 | Note that the tool has all the parameters type annotated. This is non-optional in Legion, since the framework uses these annotations to give more information to the the agent of the type of data that the tool expects in the middle of the computation cycle. 23 | 24 | ## Usage of the Tool in an Agent 25 | 26 | ```python 27 | from legion.agents import agent 28 | 29 | @agent( 30 | model="openai:gpt-4o-mini", 31 | tools=[add_numbers], # Bind the tool 32 | temperature=0.5 33 | ) 34 | class MathAgent: 35 | """An agent that can perform mathematical operations using tools.""" 36 | ``` 37 | 38 | ## Calling the Tool 39 | 40 | ```python 41 | async def main(): 42 | agent = MathAgent() 43 | response = await agent.aprocess("Add 7 and 3 using the tool.") 44 | print(response.content) # Output: The result of adding 7 and 3 is 10. 45 | ``` 46 | 47 | After this, the LLM will automatically try to understand what tool is meant to be used and how. It will invoke the function `add_numbers` with `a=7` and `b=3`. It's response will again be given back to the LLM for it to decide how to present the final answer of the computation to the user. 48 | 49 | The full code - 50 | 51 | ```python 52 | from typing import Annotated 53 | from pydantic import Field 54 | from legion.interface.decorators import tool 55 | from legion.agents import agent 56 | 57 | # Define the tool 58 | @tool 59 | def add_numbers( 60 | a: Annotated[int, Field(description="The first number")], 61 | b: Annotated[int, Field(description="The second number")] 62 | ) -> int: 63 | """Adds two numbers and returns the result.""" 64 | return a + b 65 | 66 | # Define the agent 67 | @agent( 68 | model="openai:gpt-4o-mini", # Specify the model 69 | tools=[add_numbers], # Bind the tool to the agent 70 | temperature=0.5 # Set the temperature for response generation 71 | ) 72 | class MathAgent: 73 | """An agent that can perform mathematical operations using tools.""" 74 | 75 | # Async main function to run the agent 76 | async def main(): 77 | agent = MathAgent() # Initialize the agent 78 | response = await agent.aprocess("Add 7 and 3 using the tool.") # Use the agent to process a request 79 | print(response.content) # Print the result 80 | 81 | # Run the main function in an async environment 82 | if __name__ == "__main__": 83 | import asyncio 84 | asyncio.run(main()) 85 | ``` 86 | -------------------------------------------------------------------------------- /docs/core-concepts/tools/tool-development.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/examples/basic-examples.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/docs/examples/basic-examples.md -------------------------------------------------------------------------------- /docs/examples/chatbot.md: -------------------------------------------------------------------------------- 1 | # Building a simple chatbot agent with Legion 2 | 3 | ## Simple conversational agent 4 | 5 | This guide will explain how to set up an chatbot agent with Legion. 6 | 7 | ```python 8 | from legion.agents import agent 9 | from dotenv import load_dotenv 10 | 11 | load_dotenv() 12 | 13 | @agent(model="openai:gpt-4o-mini", temperature=0.2) 14 | class LifeStoryInterviewer: 15 | """You are an expert interviewer that conducts interview of people about thier life.""" 16 | 17 | async def main(): 18 | agent = LifeStoryInterviewer() 19 | while True: 20 | message = input("You: ") 21 | response = await agent.aprocess(message=message, thread_id="abc") 22 | print(f"Agent:{response.content}") 23 | 24 | if __name__ == "__main__": 25 | import asyncio 26 | asyncio.run(main()) 27 | ``` 28 | 29 | > Note: Assumes that you have the API key set in a .env file. See quick start for details. 30 | 31 | This script sets up an interactive agent designed to engage in meaningful conversations about your life. 32 | 33 | The `thread_id` works as a tag for the conversation, letting the agent remember everything said during the session. This makes the agent’s responses more relevant and ensures the conversation feels natural and connected. Memory handling is built right into Legion, so it’s simple to create smooth, context-aware interactions without extra effort. 34 | -------------------------------------------------------------------------------- /docs/getting-started/basic-concepts.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/getting-started/first-agent.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/getting-started/installation.md: -------------------------------------------------------------------------------- 1 | # Installation Guide 2 | 3 | Legion can be installed using pip or through a development environment setup. Choose the method that best suits your needs. 4 | 5 | ## **Development Environment** 6 | 7 | If you're a contributor, check out our [Contribution Section](../contributing/setup-development.md). 8 | 9 | ## Standard Installation 10 | 11 | ### Using pip 12 | 13 | ```bash 14 | pip install legion 15 | ``` 16 | 17 | ### Using Poetry 18 | 19 | ```bash 20 | poetry add legion 21 | ``` 22 | 23 | ## Development Installation 24 | 25 | For development purposes, we recommend setting up a proper development environment. You have two options: 26 | 27 | ### Option 1: Manual Setup 28 | 29 | Follow our detailed [Development Environment Setup Guide](../contributing/setup-development.md#manual-installation) for step-by-step instructions. 30 | 31 | ### Option 2: Using Nix 32 | 33 | For a fully reproducible development environment, we recommend using Nix. Follow our [Nix Setup Guide](#option-2-using-nix) for instructions. 34 | 35 | ## Verifying Installation 36 | 37 | After installation, verify your setup: 38 | 39 | ```python 40 | import legion 41 | print(legion.__version__) 42 | ``` 43 | 44 | ## System Requirements 45 | 46 | - Python 3.11 or higher 47 | - Operating System: Linux, macOS, or Windows with WSL2 48 | - Recommended: 8GB RAM or more for running multiple agents 49 | 50 | ## Next Steps 51 | 52 | - Follow the [Quick Start Guide](quick-start.md) 53 | - Learn about [Basic Concepts](basic-concepts.md) 54 | - Try the [First Agent Example](first-agent.md) 55 | -------------------------------------------------------------------------------- /docs/getting-started/quick-start.md: -------------------------------------------------------------------------------- 1 | # Quick Start Guide 2 | 3 | This guide will help you get started with Legion quickly. We'll cover the basic setup and show you how to create your first agent. 4 | 5 | ## Prerequisites 6 | 7 | - Python 3.11 or higher 8 | - pip (Python package installer) 9 | 10 | ## Installation 11 | 12 | ```bash 13 | pip install legion 14 | ``` 15 | 16 | ## Basic Usage 17 | 18 | Save your OpenAI API key in a .env file. 19 | 20 | ```bash 21 | OPENAI_API_KEY='' 22 | ``` 23 | 24 | Here's a simple example of creating an agent: 25 | 26 | ```python 27 | from legion.agents import agent 28 | from legion.interface.decorators import tool 29 | from dotenv import load_dotenv 30 | 31 | load_dotenv() 32 | 33 | @agent(model="openai:gpt-4-turbo", temperature=0.2) 34 | class SimpleAgent: 35 | """A simple agent that can perform basic tasks""" 36 | 37 | @tool 38 | def greet(self, name: str) -> str: 39 | """Greet someone by name""" 40 | return f"Hello, {name}!" 41 | 42 | async def main(): 43 | agent = SimpleAgent() 44 | response = await agent.aprocess("Greet someone named Alice") 45 | print(response.content) 46 | 47 | if __name__ == "__main__": 48 | import asyncio 49 | asyncio.run(main()) 50 | ``` 51 | 52 | ## Next Steps 53 | 54 | - Read the [Installation Guide](installation.md) for detailed setup instructions 55 | - Learn about [Basic Concepts](basic-concepts.md) 56 | - Try the [First Agent Example](first-agent.md) 57 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Legion Documentation 2 | 3 | Welcome to the Legion documentation! This documentation will help you understand and use Legion effectively. 4 | 5 | ## Quick Links 6 | 7 | - 📚 [Installation Guide](getting-started/installation.md) 8 | - 🚀 [Quick Start](getting-started/quick-start.md) 9 | - 🤖 [Agent Definition](core-concepts/agents/agent-definition.md) 10 | - 🛠️ [Development Setup](contributing/setup-development.md) 11 | - 👥 [Contributing](contributing/index.md) 12 | 13 | ## What is Legion? 14 | 15 | Legion is a powerful framework for building and managing agents. This documentation will guide you through installation, basic concepts, and advanced usage. 16 | 17 | ## Getting Started 18 | 19 | To get started with Legion, follow these steps: 20 | 21 | 1. First, follow our [Installation Guide](getting-started/installation.md) 22 | 2. Then, check out the [Quick Start](getting-started/quick-start.md) guide 23 | 3. Learn about [Core Concepts](core-concepts/agents/agent-definition.md) 24 | 25 | ## Contributing 26 | 27 | We welcome contributions! Please see our [Contributing Guide](contributing/index.md) for details on how to get involved. 28 | -------------------------------------------------------------------------------- /docs/release-notes/roadmap.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/docs/release-notes/roadmap.md -------------------------------------------------------------------------------- /docs/release-notes/version-history.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/docs/release-notes/version-history.md -------------------------------------------------------------------------------- /docs/stylesheets/content.css: -------------------------------------------------------------------------------- 1 | /* Main content text sizing */ 2 | .md-typeset { 3 | font-size: 0.70rem; 4 | line-height: 1.5; 5 | } 6 | 7 | .md-typeset h1 { 8 | font-size: 1.4rem; 9 | } 10 | 11 | .md-typeset h2 { 12 | font-size: 1.1rem; 13 | } 14 | 15 | .md-typeset h3 { 16 | font-size: 0.9rem; 17 | } 18 | 19 | .md-typeset h4 { 20 | font-size: 0.8rem; 21 | } 22 | 23 | .md-typeset code { 24 | font-size: 0.70rem; 25 | } 26 | 27 | .md-typeset pre { 28 | font-size: 0.70rem; 29 | } 30 | 31 | /* Improve code block contrast */ 32 | [data-md-color-scheme="slate"] .highlight { 33 | background-color: #1a1a1a; 34 | } 35 | 36 | /* Better table contrast */ 37 | [data-md-color-scheme="slate"] table { 38 | background-color: var(--md-default-bg-color--light); 39 | } 40 | 41 | [data-md-color-scheme="slate"] th { 42 | background-color: var(--md-default-bg-color--lighter); 43 | } 44 | 45 | /* Better blockquote styling */ 46 | [data-md-color-scheme="slate"] blockquote { 47 | border-left-color: var(--md-accent-fg-color); 48 | background-color: var(--md-default-bg-color--light); 49 | } 50 | 51 | /* Selection highlight */ 52 | ::selection { 53 | background-color: var(--md-accent-fg-color--transparent); 54 | } 55 | -------------------------------------------------------------------------------- /docs/stylesheets/footer.css: -------------------------------------------------------------------------------- 1 | /* Footer customization */ 2 | .md-footer { 3 | background-color: var(--md-primary-fg-color); 4 | } 5 | 6 | .md-footer-meta { 7 | background-color: var(--md-primary-fg-color); 8 | } 9 | 10 | /* Make footer slimmer */ 11 | .md-footer-meta__inner { 12 | padding: 0.4rem 0.8rem; 13 | display: flex; 14 | justify-content: space-between; 15 | align-items: center; 16 | } 17 | 18 | /* Custom copyright text */ 19 | .md-footer-copyright { 20 | display: block !important; 21 | font-size: 0.60rem; 22 | margin: 0; 23 | padding: 0; 24 | color: var(--md-footer-fg-color--light); 25 | } 26 | 27 | .md-footer-social { 28 | padding: 0; 29 | margin: 0; 30 | } 31 | 32 | .md-footer-social__link { 33 | padding: 0.4rem; 34 | } 35 | 36 | /* Adjust icon sizes */ 37 | .md-footer-social__link svg { 38 | width: 1.2rem; 39 | height: 1.2rem; 40 | } 41 | 42 | /* Footer links hover effect */ 43 | .md-footer-social__link:hover { 44 | color: var(--md-accent-fg-color); 45 | } 46 | -------------------------------------------------------------------------------- /docs/stylesheets/navbar.css: -------------------------------------------------------------------------------- 1 | /* Header/Navbar customization */ 2 | .md-header { 3 | height: 2.4rem; 4 | } 5 | 6 | /* Search bar customization */ 7 | .md-search__inner { 8 | width: 12rem; 9 | margin-left: auto; 10 | } 11 | 12 | .md-search__input { 13 | height: 1.8rem !important; 14 | font-size: 0.8rem !important; 15 | } 16 | 17 | .md-search__form { 18 | height: 1.8rem; 19 | } 20 | 21 | .md-search__overlay { 22 | top: 2.4rem; 23 | } 24 | 25 | /* Theme toggle button */ 26 | .md-header__button.md-icon { 27 | padding: 0.4rem; 28 | margin: 0.2rem; 29 | } 30 | 31 | /* GitHub repository link */ 32 | .md-header__source { 33 | margin-left: 0.5rem; 34 | margin-right: 0.6rem; 35 | font-size: 0.60rem; 36 | order: 99; /* Push to the end */ 37 | } 38 | 39 | .md-header__source .md-source { 40 | padding: 0 0.4rem; 41 | } 42 | 43 | .md-header__source .md-source__icon { 44 | padding-right: 0.4rem; 45 | } 46 | 47 | .md-header__source .md-source__icon svg { 48 | width: 1rem; 49 | height: 1rem; 50 | } 51 | 52 | /* Header container */ 53 | .md-header__inner { 54 | display: flex; 55 | align-items: center; 56 | justify-content: flex-end; 57 | } 58 | 59 | /* Title positioning */ 60 | .md-header__title { 61 | margin-right: auto; /* Push everything else to the right */ 62 | line-height: 2.4rem; 63 | } 64 | 65 | /* Theme toggle and search positioning */ 66 | .md-header__option { 67 | order: 98; /* Place before GitHub link */ 68 | } 69 | -------------------------------------------------------------------------------- /docs/stylesheets/sidebar.css: -------------------------------------------------------------------------------- 1 | /* Navigation styling */ 2 | .md-nav__title { 3 | font-size: 0.90rem; 4 | font-weight: 600; 5 | color: var(--md-default-fg-color); 6 | margin: 1rem 0 0.4rem 0; 7 | } 8 | 9 | .md-nav__link { 10 | font-size: 0.70rem; 11 | color: var(--md-default-fg-color--light); 12 | } 13 | 14 | .md-nav__link:hover, 15 | .md-nav__link--active { 16 | color: var(--md-accent-fg-color) !important; 17 | } 18 | 19 | .md-nav__item .md-nav__item { 20 | padding-left: 0.6rem; 21 | } 22 | 23 | .md-nav__list { 24 | padding-left: 1rem; 25 | } 26 | 27 | .md-nav__item--active > .md-nav__link { 28 | color: var(--md-accent-fg-color) !important; 29 | } 30 | 31 | /* TOC active item */ 32 | .md-nav--secondary .md-nav__link--active { 33 | color: var(--md-accent-fg-color) !important; 34 | } 35 | 36 | /* Search result highlighting */ 37 | .md-search-result__link:hover, 38 | .md-search-result__link[data-md-state="active"] { 39 | background-color: var(--md-accent-fg-color--transparent); 40 | } 41 | -------------------------------------------------------------------------------- /docs/stylesheets/theme.css: -------------------------------------------------------------------------------- 1 | /* Light mode customization */ 2 | [data-md-color-scheme="default"] { 3 | --md-primary-fg-color: #25946d; 4 | --md-primary-fg-color--light: #2AAB86; 5 | --md-primary-fg-color--dark: #021207; 6 | --md-accent-fg-color: #3BD39F; 7 | --md-accent-fg-color--transparent: rgba(59, 211, 159, 0.1); 8 | } 9 | 10 | /* Dark mode customization */ 11 | [data-md-color-scheme="slate"] { 12 | --md-default-bg-color: #121212; 13 | --md-default-bg-color--light: #1e1e1e; 14 | --md-default-bg-color--lighter: #232323; 15 | --md-default-bg-color--lightest: #2a2a2a; 16 | 17 | --md-primary-fg-color: #2A9B76; 18 | --md-primary-fg-color--light: #1e1e1e; 19 | --md-primary-fg-color--dark: #0a0a0a; 20 | 21 | --md-accent-fg-color: #3BD39F; 22 | --md-accent-fg-color--transparent: rgba(59, 211, 159, 0.1); 23 | 24 | /* Increase contrast for text */ 25 | --md-typeset-color: rgba(255, 255, 255, 0.87); 26 | --md-typeset-a-color: #3BD39F; 27 | } 28 | -------------------------------------------------------------------------------- /examples/agents/basic_agent.py: -------------------------------------------------------------------------------- 1 | """Basic Agent Example 2 | 3 | This example demonstrates how to create a simple agent using Legion's decorator syntax. 4 | The agent can use both internal (nested) tools and external tools. 5 | """ 6 | 7 | from typing import Annotated, List 8 | 9 | from dotenv import load_dotenv 10 | from pydantic import Field 11 | 12 | from legion import agent, tool 13 | 14 | load_dotenv() 15 | 16 | 17 | @tool 18 | def add_numbers( 19 | numbers: Annotated[List[float], Field(description="List of numbers to add together")] 20 | ) -> float: 21 | """Add a list of numbers together and return the sum.""" 22 | return sum(numbers) 23 | 24 | 25 | @tool 26 | def multiply( 27 | a: Annotated[float, Field(description="First number to multiply")], 28 | b: Annotated[float, Field(description="Second number to multiply")] 29 | ) -> float: 30 | """Multiply two numbers together.""" 31 | return a * b 32 | 33 | 34 | @agent( 35 | model="openai:gpt-4o-mini", 36 | temperature=0.2, 37 | tools=[add_numbers, multiply] # Bind external tools 38 | ) 39 | class MathHelper: 40 | """An agent that helps with basic arithmetic and string operations. 41 | 42 | I can perform calculations and manipulate text based on your requests. 43 | I have both external tools for math operations and internal tools for formatting. 44 | """ 45 | 46 | @tool 47 | def format_result( 48 | self, 49 | number: Annotated[float, Field(description="Number to format")], 50 | prefix: Annotated[str, Field(description="Text to add before the number")] = ( 51 | "Result: " 52 | ) 53 | ) -> str: 54 | """Format a number with a custom prefix.""" 55 | return f"{prefix}{number:.2f}" 56 | 57 | 58 | async def main(): 59 | # Create an instance of our agent 60 | agent = MathHelper() 61 | 62 | # Example 1: Using external add_numbers tool with internal format_result 63 | response = await agent.aprocess( 64 | "I have the numbers 1.5, 2.5, and 3.5. Can you add them together and format " 65 | "the result nicely?" 66 | ) 67 | print("Example 1 Response:") 68 | print(response.content) 69 | print() 70 | 71 | # Example 2: Using external multiply tool with internal format_result 72 | response = await agent.aprocess( 73 | "Can you multiply 4.2 by 2.0 and then format the result with the prefix " 74 | "'The product is: '?" 75 | ) 76 | print("Example 2 Response:") 77 | print(response.content) 78 | print() 79 | 80 | # Example 3: Complex operation using both external and internal tools 81 | response = await agent.aprocess( 82 | "I need to add the numbers 10.5 and 20.5, then multiply the result by 2, " 83 | "and format it nicely." 84 | ) 85 | print("Example 3 Response:") 86 | print(response.content) 87 | 88 | 89 | if __name__ == "__main__": 90 | import asyncio 91 | asyncio.run(main()) 92 | -------------------------------------------------------------------------------- /examples/agents/dynamic_prompt_agent.py: -------------------------------------------------------------------------------- 1 | """Dynamic System Prompt Example 2 | 3 | This example demonstrates how to create an agent with a dynamic system prompt that changes 4 | based on context and user preferences. It shows how to provide dynamic values during 5 | process/aprocess calls. 6 | 7 | Best Practices: 8 | 1. When using a dynamic system prompt, don't include a docstring on the agent class 9 | 2. Use descriptive section IDs for dynamic fields 10 | 3. Provide meaningful default values 11 | 4. Use callable defaults for dynamic values that should be computed at runtime 12 | """ 13 | 14 | from datetime import datetime 15 | from typing import Annotated 16 | 17 | from dotenv import load_dotenv 18 | from pydantic import Field 19 | 20 | from legion import agent, tool, system_prompt 21 | 22 | load_dotenv() 23 | 24 | 25 | def get_current_time() -> str: 26 | """Get formatted current time.""" 27 | return datetime.now().strftime("%I:%M %p") 28 | 29 | 30 | # Create a dynamic system prompt with section IDs for runtime updates 31 | SYSTEM_PROMPT = system_prompt( 32 | sections=[ 33 | system_prompt.SystemPromptSection( 34 | content="I am a helpful assistant that adapts my communication style " 35 | "based on context.", 36 | is_dynamic=False 37 | ), 38 | system_prompt.SystemPromptSection( 39 | content="{mood}", 40 | is_dynamic=True, 41 | section_id="mood", 42 | default_value="neutral and ready to help" 43 | ), 44 | system_prompt.SystemPromptSection( 45 | content="{context}", 46 | is_dynamic=True, 47 | section_id="context", 48 | default_value="general assistance" 49 | ), 50 | system_prompt.SystemPromptSection( 51 | content="{time}", 52 | is_dynamic=True, 53 | section_id="time", 54 | default_value=get_current_time 55 | ) 56 | ] 57 | ) 58 | 59 | 60 | @tool 61 | def get_weather( 62 | location: Annotated[str, Field(description="Location to get weather for")] 63 | ) -> str: 64 | """Simulate getting weather (in a real app, you'd call a weather API).""" 65 | # This is just a mock response 66 | return f"It's sunny and 72°F in {location}" 67 | 68 | 69 | @agent( 70 | model="openai:gpt-4o-mini", 71 | temperature=0.7, 72 | system_prompt=SYSTEM_PROMPT, 73 | tools=[get_weather] # Binding external tools 74 | ) 75 | class DynamicAssistant: 76 | # Note: Since we are using a dynamic system prompt, we don't need to define 77 | # the system prompt using a docstring 78 | 79 | # Internal tool specific to this agent 80 | @tool 81 | def get_current_time(self) -> str: 82 | """Get the current time.""" 83 | return datetime.now().strftime("%I:%M %p") 84 | 85 | 86 | async def main(): 87 | # Create an instance 88 | assistant = DynamicAssistant() 89 | 90 | # Example 1: Using defaults (no dynamic values provided) 91 | print("Example 1 (Default Values):") 92 | print("System Prompt Before Process:") 93 | print(assistant._memory.messages[0].content) 94 | print("\nMaking Request...") 95 | 96 | response = await assistant.aprocess( 97 | "How are you feeling right now?", 98 | dynamic_values=None 99 | ) 100 | 101 | print("\nSystem Prompt During Process:") 102 | print(assistant._memory.messages[0].content) 103 | print("\nResponse:") 104 | print(response.content) 105 | print("\n" + "=" * 50 + "\n") 106 | 107 | # Example 2: Provide dynamic values during process 108 | print("Example 2 (With Dynamic Values):") 109 | dynamic_values = { 110 | "mood": "energetic and enthusiastic, you really like to speak like a pirate", 111 | "context": "casual conversation", 112 | "time": "9:00 AM" 113 | } 114 | 115 | print("System Prompt Before Process:") 116 | print(assistant._memory.messages[0].content) 117 | print("\nMaking Request with dynamic values:", dynamic_values) 118 | 119 | response = await assistant.aprocess( 120 | "How are you feeling right now?", 121 | dynamic_values=dynamic_values 122 | ) 123 | 124 | print("\nSystem Prompt During Process:") 125 | print(assistant._memory.messages[0].content) 126 | print("\nResponse:") 127 | print(response.content) 128 | print("\n" + "=" * 50 + "\n") 129 | 130 | # Example 3: Different context, different mood 131 | print("Example 3 (Different Context):") 132 | dynamic_values = { 133 | "mood": "professional and focused", 134 | "context": "weather reporting", 135 | "time": datetime.now().strftime("%I:%M %p") 136 | } 137 | 138 | print("System Prompt Before Process:") 139 | print(assistant._memory.messages[0].content) 140 | print("\nMaking Request with dynamic values:", dynamic_values) 141 | 142 | response = await assistant.aprocess( 143 | "What's the weather in San Francisco?", 144 | dynamic_values=dynamic_values 145 | ) 146 | 147 | print("\nSystem Prompt During Process:") 148 | print(assistant._memory.messages[0].content) 149 | print("\nResponse:") 150 | print(response.content) 151 | 152 | 153 | if __name__ == "__main__": 154 | import asyncio 155 | asyncio.run(main()) 156 | -------------------------------------------------------------------------------- /examples/agents/gemini_agent.py: -------------------------------------------------------------------------------- 1 | """Basic Agent Example with Gemini 2 | 3 | This example demonstrates how to create a simple agent using Legion's decorator syntax. 4 | The agent can use both internal (nested) tools and external tools. 5 | """ 6 | 7 | from typing import Annotated, List 8 | 9 | from dotenv import load_dotenv 10 | from pydantic import Field 11 | 12 | from legion import agent, tool 13 | 14 | load_dotenv() 15 | 16 | 17 | @tool 18 | async def add_numbers( 19 | numbers: Annotated[List[float], Field(description="List of numbers to add together")] 20 | ) -> float: 21 | """Add a list of numbers together and return the sum.""" 22 | return sum(numbers) 23 | 24 | 25 | @tool 26 | async def multiply( 27 | a: Annotated[float, Field(description="First number to multiply")], 28 | b: Annotated[float, Field(description="Second number to multiply")] 29 | ) -> float: 30 | """Multiply two numbers together.""" 31 | return a * b 32 | 33 | 34 | @agent( 35 | model="gemini:gemini-2.0-flash-exp", 36 | temperature=0.2, 37 | tools=[add_numbers, multiply], # Bind external tools 38 | debug=True 39 | ) 40 | class MathHelper: 41 | """An agent that helps with basic arithmetic and string operations. 42 | 43 | I can perform calculations and manipulate text based on your requests. 44 | I have both external tools for math operations and internal tools for formatting. 45 | """ 46 | 47 | @tool 48 | async def format_result( 49 | self, 50 | number: Annotated[float, Field(description="Number to format")], 51 | prefix: Annotated[str, Field(description="Text to add before the number")] = ( 52 | "Result: " 53 | ) 54 | ) -> str: 55 | """Format a number with a custom prefix.""" 56 | return f"{prefix}{number:.2f}" 57 | 58 | 59 | async def main(): 60 | # Create an instance of our agent 61 | agent = MathHelper() 62 | 63 | try: 64 | # Example 1: Using external add_numbers tool with internal format_result 65 | response = await agent.aprocess( 66 | "I have the numbers 1.5, 2.5, and 3.5. Can you add them together and format " 67 | "the result nicely?" 68 | ) 69 | print("Example 1 Response:") 70 | print(response.content if response and response.content else "No response") 71 | print() 72 | 73 | # Example 2: Using external multiply tool with internal format_result 74 | response = await agent.aprocess( 75 | "Can you multiply 4.2 by 2.0 and then format the result with the prefix " 76 | "'The product is: '?" 77 | ) 78 | print("Example 2 Response:") 79 | print(response.content if response and response.content else "No response") 80 | print() 81 | 82 | # Example 3: Complex operation using both external and internal tools 83 | response = await agent.aprocess( 84 | "I need to add the numbers 10.5 and 20.5, then multiply the result by 2, " 85 | "and format it nicely." 86 | ) 87 | print("Example 3 Response:") 88 | print(response.content if response and response.content else "No response") 89 | print() 90 | 91 | except Exception as e: 92 | print(f"Error: {str(e)}") 93 | 94 | 95 | if __name__ == "__main__": 96 | import asyncio 97 | asyncio.run(main()) 98 | -------------------------------------------------------------------------------- /examples/agents/groq_agent.py: -------------------------------------------------------------------------------- 1 | """Groq Agent Example 2 | 3 | This example demonstrates using Legion's agent system with the Groq provider. 4 | It tests various capabilities including: 5 | - Basic text completion 6 | - Tool usage (both external and internal tools) 7 | - JSON output with schema validation 8 | """ 9 | 10 | import asyncio 11 | import os 12 | from typing import Annotated, List, Optional 13 | 14 | from dotenv import load_dotenv 15 | from pydantic import BaseModel, Field 16 | 17 | from legion import agent, tool 18 | 19 | # Load environment variables 20 | load_dotenv() 21 | 22 | # Verify API key is present 23 | if not os.getenv("GROQ_API_KEY"): 24 | raise ValueError("GROQ_API_KEY environment variable is not set") 25 | 26 | 27 | # Define some tools for testing 28 | @tool 29 | def calculate_average( 30 | numbers: Annotated[List[float], Field(description="List of numbers to average")] 31 | ) -> float: 32 | """Calculate the average (mean) of a list of numbers.""" 33 | return sum(numbers) / len(numbers) 34 | 35 | 36 | @tool 37 | def analyze_text( 38 | text: Annotated[str, Field(description="Text to analyze")], 39 | include_word_count: Annotated[bool, Field(description="Whether to include word count")] = True 40 | ) -> dict: 41 | """Analyze text and return statistics.""" 42 | stats = { 43 | "length": len(text), 44 | "uppercase_count": sum(1 for c in text if c.isupper()) 45 | } 46 | if include_word_count: 47 | stats["word_count"] = len(text.split()) 48 | return stats 49 | 50 | 51 | # Define a schema for JSON output 52 | class WeatherInfo(BaseModel): 53 | """Schema for weather information""" 54 | 55 | temperature: float = Field(description="Temperature in Celsius") 56 | conditions: str = Field(description="Weather conditions (e.g., sunny, rainy)") 57 | precipitation_chance: Optional[float] = Field( 58 | description="Chance of precipitation (0-1)", 59 | ge=0, 60 | le=1 61 | ) 62 | 63 | 64 | @agent( 65 | model="groq:llama-3.3-70b-versatile", # Use Groq's LLaMA model 66 | temperature=0.2, 67 | tools=[calculate_average, analyze_text] # Bind external tools 68 | ) 69 | class GroqAssistant: 70 | """A versatile assistant powered by Groq's LLaMA model. 71 | 72 | I can help with various tasks including: 73 | - Mathematical calculations 74 | - Text analysis 75 | - Weather information (in JSON format) 76 | - Custom text formatting 77 | """ 78 | 79 | @tool 80 | def format_number( 81 | self, 82 | number: Annotated[float, Field(description="Number to format")], 83 | decimal_places: Annotated[int, Field(description="Number of decimal places")] = 2, 84 | prefix: Annotated[str, Field(description="Text to add before number")] = "" 85 | ) -> str: 86 | """Format a number with specified decimal places and optional prefix.""" 87 | return f"{prefix}{number:.{decimal_places}f}" 88 | 89 | async def get_weather(self, location: str) -> WeatherInfo: 90 | """Get weather information in a structured format.""" 91 | # Use JSON output with schema 92 | response = await self.aprocess( 93 | f"Generate realistic weather information for {location}", 94 | response_schema=WeatherInfo 95 | ) 96 | return WeatherInfo.model_validate_json(response.content) 97 | 98 | 99 | async def main(): 100 | # Create an instance of our Groq-powered agent 101 | agent = GroqAssistant() 102 | 103 | print("1. Testing basic text completion:") 104 | response = await agent.aprocess( 105 | "What are the key benefits of using Groq's LLM services?" 106 | ) 107 | print(response.content) 108 | print("\n" + "="*50 + "\n") 109 | 110 | print("2. Testing external tool (calculate_average):") 111 | response = await agent.aprocess( 112 | "Calculate the average of these numbers: 15.5, 20.5, 25.5, 30.5 " 113 | "and format the result to 1 decimal place" 114 | ) 115 | print(response.content) 116 | print("\n" + "="*50 + "\n") 117 | 118 | print("3. Testing external tool (analyze_text):") 119 | response = await agent.aprocess( 120 | "Analyze this text: 'The QUICK brown FOX jumps over the lazy dog'" 121 | ) 122 | print(response.content) 123 | print("\n" + "="*50 + "\n") 124 | 125 | print("4. Testing JSON output with schema:") 126 | weather = await agent.get_weather("San Francisco") 127 | print(f"Temperature: {weather.temperature}°C") 128 | print(f"Conditions: {weather.conditions}") 129 | if weather.precipitation_chance is not None: 130 | print(f"Precipitation chance: {weather.precipitation_chance*100:.1f}%") 131 | 132 | 133 | if __name__ == "__main__": 134 | asyncio.run(main()) 135 | -------------------------------------------------------------------------------- /examples/agents/ollama_agent.py: -------------------------------------------------------------------------------- 1 | """Ollama Agent Example 2 | 3 | This example demonstrates how to create a simple agent using Legion's decorator syntax. 4 | The agent can use both internal (nested) tools and external tools. 5 | """ 6 | 7 | from typing import Annotated, List 8 | 9 | from dotenv import load_dotenv 10 | from pydantic import Field 11 | 12 | from legion.agents import agent 13 | from legion.interface.decorators import tool 14 | 15 | load_dotenv() 16 | 17 | 18 | @tool 19 | def add_numbers( 20 | numbers: Annotated[List[float], Field(description="List of numbers to add together")] 21 | ) -> float: 22 | """Add a list of numbers together and return the sum.""" 23 | return sum(numbers) 24 | 25 | 26 | @tool 27 | def multiply( 28 | a: Annotated[float, Field(description="First number to multiply")], 29 | b: Annotated[float, Field(description="Second number to multiply")] 30 | ) -> float: 31 | """Multiply two numbers together.""" 32 | return a * b 33 | 34 | 35 | @agent( 36 | model="ollama:llama3.2", 37 | temperature=0.3, 38 | tools=[add_numbers, multiply], # Bind external tools 39 | ) 40 | class MathHelper: 41 | """An agent that helps with basic arithmetic and string operations. 42 | 43 | I can perform calculations and manipulate text based on your requests. 44 | I have both external tools for math operations and internal tools for formatting. 45 | I format arguments in JSON format and pay attention to decimal places. 46 | My answers are succinct and only contain the input arguments and the answer. 47 | """ 48 | 49 | @tool 50 | def format_result( 51 | self, 52 | number: Annotated[float, Field(description="Number to format")], 53 | prefix: Annotated[str, Field(description="Text to add before the number")] = ( 54 | "Result: " 55 | ) 56 | ) -> str: 57 | """Format a number with a custom prefix.""" 58 | return f"{prefix}{number:.2f}" 59 | 60 | 61 | async def main(): 62 | # Create an instance of our agent 63 | agent = MathHelper() 64 | 65 | # Example 1: Using external add_numbers tool with internal format_result 66 | response = await agent.aprocess( 67 | "I have the numbers 1.5, 2.5, and 3.5. Can you add them together and format " 68 | "the result nicely?" 69 | ) 70 | print("Example 1 Response:") 71 | print(response.content) 72 | print() 73 | 74 | # Example 2: Using external multiply tool with internal format_result 75 | response = await agent.aprocess( 76 | "I have the numbers 4.2 and 2.0. Can you multiply them and then format " 77 | "the result nicely?" 78 | ) 79 | print("Example 2 Response:") 80 | print(response.content) 81 | print() 82 | 83 | # Example 3: Complex operation using both external and internal tools 84 | response = await agent.aprocess( 85 | "I need to add the numbers 10.5 and 20.5, then multiply the result by 2" 86 | ) 87 | print("Example 3 Response:") 88 | print(response.content) 89 | 90 | 91 | if __name__ == "__main__": 92 | import asyncio 93 | asyncio.run(main()) 94 | -------------------------------------------------------------------------------- /examples/blocks/basic_blocks.py: -------------------------------------------------------------------------------- 1 | """Basic Blocks Example 2 | 3 | This example demonstrates how to create and use blocks in Legion. 4 | Blocks are enhanced functions that provide: 5 | - Input/output validation 6 | - Execution monitoring 7 | - Async compatibility 8 | - Chain integration 9 | """ 10 | 11 | import logging 12 | from typing import Any, Dict, List # noqa: F401 13 | 14 | from pydantic import BaseModel, Field 15 | 16 | from legion import block, chain 17 | 18 | # Set up logging 19 | logging.basicConfig( 20 | level=logging.INFO, 21 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" 22 | ) 23 | logger = logging.getLogger(__name__) 24 | 25 | 26 | # Define input/output schemas for type safety 27 | class TextInput(BaseModel): 28 | text: str = Field(description="Input text to process") 29 | 30 | 31 | class WordCountOutput(BaseModel): 32 | word_count: int = Field(description="Number of words in text") 33 | char_count: int = Field(description="Number of characters in text") 34 | 35 | 36 | class SentimentOutput(BaseModel): 37 | sentiment: str = Field(description="Detected sentiment (positive/negative/neutral)") 38 | confidence: float = Field(description="Confidence score of sentiment") 39 | 40 | 41 | # Create a simple word counter block using decorator syntax 42 | @block( 43 | input_schema=TextInput, 44 | output_schema=WordCountOutput, 45 | tags=["text", "analysis"] 46 | ) 47 | def count_words(input_data: TextInput) -> WordCountOutput: 48 | """Count words and characters in text.""" 49 | text = input_data.text 50 | words = len(text.split()) 51 | chars = len(text) 52 | return WordCountOutput(word_count=words, char_count=chars) 53 | 54 | 55 | # Create a mock sentiment analysis block using decorator syntax 56 | @block( 57 | input_schema=TextInput, 58 | output_schema=SentimentOutput, 59 | tags=["text", "sentiment", "nlp"] 60 | ) 61 | async def analyze_sentiment(input_data: TextInput) -> SentimentOutput: 62 | """Analyze sentiment of text (mock implementation).""" 63 | # In real usage, this would call an NLP model 64 | text = input_data.text.lower() 65 | positive_words = {"good", "great", "excellent", "happy", "wonderful"} 66 | negative_words = {"bad", "terrible", "awful", "sad", "horrible"} 67 | 68 | pos_count = sum(1 for word in text.split() if word in positive_words) 69 | neg_count = sum(1 for word in text.split() if word in negative_words) 70 | 71 | if pos_count > neg_count: 72 | return SentimentOutput(sentiment="positive", confidence=0.8) 73 | elif neg_count > pos_count: 74 | return SentimentOutput(sentiment="negative", confidence=0.8) 75 | return SentimentOutput(sentiment="neutral", confidence=0.6) 76 | 77 | 78 | # Define the chain using decorator syntax 79 | @chain 80 | class TextAnalysisChain: 81 | """A chain that analyzes text by counting words and determining sentiment.""" 82 | 83 | # List the blocks in processing order 84 | members = [ 85 | count_words, 86 | analyze_sentiment 87 | ] 88 | 89 | 90 | async def main(): 91 | # Create chain instance 92 | text_analysis_chain = TextAnalysisChain() 93 | 94 | # Process some text 95 | text = "This is a great example of using Legion blocks!" 96 | input_data = TextInput(text=text) 97 | 98 | print(f"\nAnalyzing text: '{text}'\n") 99 | 100 | # Execute individual blocks 101 | print("Individual block execution:") 102 | word_count_result = await count_words(input_data) 103 | print(f"Word count result: {word_count_result}") 104 | 105 | sentiment_result = await analyze_sentiment(input_data) 106 | print(f"Sentiment result: {sentiment_result}\n") 107 | 108 | # Execute chain 109 | print("Chain execution:") 110 | chain_result = await text_analysis_chain.aprocess(input_data) 111 | print(f"Chain results: {chain_result}") 112 | 113 | 114 | if __name__ == "__main__": 115 | import asyncio 116 | asyncio.run(main()) 117 | -------------------------------------------------------------------------------- /examples/chains/basic_chain.py: -------------------------------------------------------------------------------- 1 | """Basic Chain Example 2 | 3 | This example demonstrates how to create a simple chain of two agents using Legion's 4 | decorator syntax. The chain processes text through: 5 | 1. A summarizer that condenses the input 6 | 2. An analyzer that provides insights about the summary 7 | """ 8 | 9 | from typing import Annotated 10 | 11 | from dotenv import load_dotenv 12 | from pydantic import Field 13 | 14 | from legion import agent, chain, tool 15 | 16 | load_dotenv() 17 | 18 | 19 | # First agent: Summarizes text 20 | @agent( 21 | model="openai:gpt-4o-mini", 22 | temperature=0.3 # Lower temperature for more consistent summaries 23 | ) 24 | class Summarizer: 25 | """I am a text summarizer that creates concise summaries while preserving key information. 26 | I aim to reduce text length by 70-80% while maintaining the most important points. 27 | """ 28 | 29 | @tool 30 | def count_words(self, text: Annotated[str, Field(description="Text to count words in")]) -> int: 31 | """Count the number of words in a text""" 32 | return len(text.split()) 33 | 34 | 35 | # Second agent: Analyzes summaries 36 | @agent( 37 | model="openai:gpt-4o-mini", 38 | temperature=0.7 # Higher temperature for more creative analysis 39 | ) 40 | class Analyzer: 41 | """I am a text analyzer that provides insights about the content. 42 | I focus on identifying key themes, tone, and potential implications. 43 | """ 44 | 45 | @tool 46 | def identify_keywords( 47 | self, 48 | text: Annotated[str, Field(description="Text to extract keywords from")] 49 | ) -> list[str]: 50 | """Extract main keywords from text""" 51 | # This is just a simple example - in practice you might use NLP 52 | words = text.lower().split() 53 | return list(set(w for w in words if len(w) > 5))[:5] # Just a simple example 54 | 55 | 56 | # Create a chain that combines both agents 57 | @chain 58 | class TextAnalysisChain: 59 | """A chain that first summarizes text and then analyzes the summary.""" 60 | 61 | # Define the agents in the order they should process 62 | summarizer = Summarizer() 63 | analyzer = Analyzer() 64 | 65 | # Define the members list for the chain 66 | members = [Summarizer(), Analyzer()] 67 | 68 | 69 | async def main(): 70 | # Create an instance of our chain 71 | processor = TextAnalysisChain(verbose=True) # Enable verbose output to see chain progress 72 | 73 | # Example text to process 74 | long_text = """ 75 | Artificial Intelligence has transformed numerous industries in recent years. 76 | From healthcare to finance, AI systems are being deployed to automate tasks, 77 | analyze complex data, and make predictions. In healthcare, AI helps doctors 78 | diagnose diseases and plan treatments. In finance, it detects fraudulent 79 | transactions and predicts market trends. However, these advancements also 80 | raise important ethical considerations about privacy, bias, and the role of 81 | human oversight in AI-driven decisions. As we continue to develop more 82 | sophisticated AI systems, addressing these ethical concerns becomes increasingly 83 | crucial for responsible innovation. 84 | """ 85 | 86 | print("Original Text:") 87 | print(long_text.strip()) 88 | print("\n" + "=" * 50 + "\n") 89 | 90 | # Process the text through our chain 91 | response = await processor.aprocess(long_text) 92 | 93 | print("\nFinal Chain Output:") 94 | print(response.content) 95 | 96 | 97 | if __name__ == "__main__": 98 | import asyncio 99 | asyncio.run(main()) 100 | -------------------------------------------------------------------------------- /examples/chains/mixed_chain.py: -------------------------------------------------------------------------------- 1 | """Example demonstrating a chain with both blocks and agents. 2 | 3 | This example shows how to combine functional blocks with LLM-powered agents 4 | in a processing chain. The chain processes text through the following steps: 5 | 1. A block that normalizes text (removes extra whitespace, etc.) 6 | 2. An agent that summarizes the text 7 | 3. A block that extracts key metrics from the summary 8 | 9 | Note: This example requires an OpenAI API key. Set it in your environment: 10 | export OPENAI_API_KEY=your_api_key_here 11 | """ 12 | 13 | import asyncio 14 | import json 15 | import os 16 | from typing import Dict, List 17 | 18 | from dotenv import load_dotenv 19 | from pydantic import BaseModel 20 | 21 | from legion import agent, block, chain 22 | 23 | load_dotenv() 24 | 25 | # Check for OpenAI API key 26 | if not os.getenv("OPENAI_API_KEY"): 27 | print("\nError: OpenAI API key not found!") 28 | print("Please set your API key in the environment:") 29 | print(" export OPENAI_API_KEY=your_api_key_here") 30 | exit(1) 31 | 32 | 33 | # Output schemas for validation 34 | class NormalizedText(BaseModel): 35 | """Schema for normalized text output.""" 36 | 37 | text: str 38 | char_count: int 39 | word_count: int 40 | 41 | 42 | class TextInput(BaseModel): 43 | """Schema for text input.""" 44 | 45 | text: str 46 | 47 | 48 | class TextMetrics(BaseModel): 49 | """Schema for text metrics output.""" 50 | 51 | sentence_count: int 52 | avg_sentence_length: float 53 | key_phrases: List[str] 54 | 55 | 56 | class SummaryInput(BaseModel): 57 | """Schema for summary input.""" 58 | 59 | text: str 60 | 61 | 62 | @block(input_schema=TextInput, output_schema=NormalizedText) 63 | def normalize_text(input_data: TextInput) -> Dict: 64 | """Block that normalizes text by cleaning whitespace and counting stats.""" 65 | text = input_data.text 66 | print("\nNormalize Block Input:", text[:100], "...") 67 | 68 | # Remove extra whitespace and normalize line endings 69 | cleaned = " ".join(text.split()) 70 | 71 | result = { 72 | "text": cleaned, 73 | "char_count": len(cleaned), 74 | "word_count": len(cleaned.split()) 75 | } 76 | print("Normalize Block Output:", result) 77 | return result 78 | 79 | 80 | @agent( 81 | model="openai:gpt-4o-mini", 82 | temperature=0.0, 83 | max_tokens=1000, 84 | ) 85 | class Summarizer: 86 | """Text summarization expert creating clear, concise summaries. 87 | 88 | Focus on extracting the most important information and presenting it in a 89 | well-structured format. 90 | 91 | Output should be a concise summary that: 92 | 1. Captures the main ideas 93 | 2. Preserves key terminology 94 | 3. Uses clear, direct language 95 | 4. Is roughly 1/3 the length of the input 96 | """ 97 | 98 | pass 99 | 100 | 101 | @block(input_schema=SummaryInput, output_schema=TextMetrics) 102 | def extract_metrics(input_data: SummaryInput) -> Dict: 103 | """Block that extracts key metrics from text.""" 104 | text = input_data.text 105 | print("\nMetrics Block Input:", text[:100], "...") 106 | 107 | sentences = [s.strip() for s in text.split(".") if s.strip()] 108 | words = text.split() 109 | 110 | # Extract key phrases (simple implementation) 111 | key_phrases = [] 112 | for i in range(len(words) - 2): 113 | phrase = " ".join(words[i:i + 3]) 114 | if any(w[0].isupper() for w in words[i:i + 3]): 115 | key_phrases.append(phrase) 116 | 117 | result = { 118 | "sentence_count": len(sentences), 119 | "avg_sentence_length": len(words) / len(sentences) if sentences else 0, 120 | "key_phrases": key_phrases[:5] # Top 5 phrases 121 | } 122 | print("Metrics Block Output:", result) 123 | return result 124 | 125 | 126 | @chain 127 | class TextProcessor: 128 | """Chain that processes text through normalization, summarization, and metrics.""" 129 | 130 | # Define chain members in processing order 131 | members = [ 132 | normalize_text, # First normalize the text 133 | Summarizer, # Then create a summary 134 | extract_metrics # Finally extract metrics from the summary 135 | ] 136 | 137 | 138 | async def main(): 139 | # Create chain instance 140 | processor = TextProcessor(verbose=True) 141 | 142 | # Sample text to process 143 | text = """ 144 | Artificial Intelligence (AI) has transformed many industries in recent years. 145 | Machine learning models can now perform tasks that were once thought impossible. 146 | Natural Language Processing has enabled computers to understand and generate human-like text. 147 | Deep Learning architectures have revolutionized computer vision and speech recognition. 148 | The future of AI looks promising, with new breakthroughs happening regularly. 149 | """ 150 | 151 | # Process the text through the chain 152 | # Wrap the text in a JSON object to match the input schema 153 | input_json = json.dumps({"text": text}) 154 | result = await processor.aprocess(input_json) 155 | metrics = json.loads(result.content) 156 | 157 | print("\nFinal Output:") 158 | print("Key Phrases:", metrics["key_phrases"]) 159 | print(f"Sentence Count: {metrics['sentence_count']}") 160 | print(f"Average Sentence Length: {metrics['avg_sentence_length']:.1f} words") 161 | 162 | 163 | if __name__ == "__main__": 164 | asyncio.run(main()) 165 | -------------------------------------------------------------------------------- /examples/graph/basic_graph.py: -------------------------------------------------------------------------------- 1 | """Basic Graph Example 2 | 3 | This example demonstrates how to create a simple graph using Legion's decorator syntax. 4 | It shows how to add nodes, connect them with edges, and execute the graph. 5 | This example uses a sequential execution mode. 6 | """ 7 | 8 | import asyncio 9 | from typing import Annotated, List # noqa: F401 10 | 11 | from dotenv import load_dotenv 12 | from pydantic import BaseModel, Field 13 | 14 | from legion.agents.decorators import agent 15 | from legion.blocks.decorators import block 16 | from legion.graph.channels import LastValue 17 | from legion.graph.decorators import graph 18 | from legion.graph.edges.base import EdgeBase 19 | from legion.graph.nodes.decorators import node # noqa: F401 20 | from legion.interface.decorators import tool 21 | 22 | load_dotenv() 23 | 24 | 25 | # Define a simple data model for type safety 26 | class TextData(BaseModel): 27 | text: str = Field(description="Input text to process") 28 | 29 | 30 | # Define a simple block 31 | @block( 32 | input_schema=TextData, 33 | output_schema=TextData, 34 | tags=["text", "preprocessing"] 35 | ) 36 | def normalize_text(input_data: TextData) -> TextData: 37 | """Normalize text by removing extra whitespace.""" 38 | text = " ".join(input_data.text.split()) 39 | return TextData(text=text) 40 | 41 | 42 | # Define a simple agent 43 | @agent(model="openai:gpt-4o-mini", temperature=0.2) 44 | class Summarizer: 45 | """An agent that summarizes text.""" 46 | 47 | @tool 48 | def count_words( 49 | self, 50 | text: Annotated[str, Field(description="Text to count words in")] 51 | ) -> int: 52 | """Count the number of words in a text.""" 53 | return len(text.split()) 54 | 55 | 56 | # Define a simple edge 57 | class TextEdge(EdgeBase): 58 | """Edge for connecting text processing nodes.""" 59 | 60 | pass 61 | 62 | 63 | # Define the graph using decorator syntax 64 | @graph(name="basic_text_processing", description="A simple graph for processing text") 65 | class TextProcessingGraph: 66 | """A graph that first normalizes text and then summarizes it. 67 | This demonstrates a basic sequential workflow. 68 | """ 69 | 70 | # Define nodes 71 | normalizer = normalize_text 72 | summarizer = Summarizer() 73 | 74 | # Define edges - these will be processed by the graph decorator 75 | edges = [ 76 | { 77 | "edge_type": TextEdge, 78 | "source_node": "normalizer", 79 | "target_node": "summarizer", 80 | "source_channel": "output", 81 | "target_channel": "input" 82 | } 83 | ] 84 | 85 | # Define input and output channels 86 | input_channel = LastValue(type_hint=str) 87 | output_channel = LastValue(type_hint=str) 88 | 89 | async def process(self, input_text: str) -> str: 90 | """Process text through the graph.""" 91 | # Set input value 92 | self.input_channel.set(input_text) 93 | 94 | # Execute the graph 95 | await self.graph.execute() 96 | 97 | # Get output value 98 | return self.output_channel.get() 99 | 100 | 101 | async def main(): 102 | # Create an instance of the graph 103 | text_graph = TextProcessingGraph() 104 | 105 | # Process some text 106 | input_text = " This is a test with extra spaces. " 107 | output_text = await text_graph.process(input_text) 108 | 109 | print(f"Original Text: '{input_text}'") 110 | print(f"Processed Text: '{output_text}'") 111 | 112 | 113 | if __name__ == "__main__": 114 | asyncio.run(main()) 115 | -------------------------------------------------------------------------------- /examples/teams/basic_team.py: -------------------------------------------------------------------------------- 1 | """Basic Team Example 2 | 3 | This example demonstrates how to create a team of agents using Legion's decorator syntax. 4 | The team consists of: 5 | 1. A research leader that coordinates tasks 6 | 2. A data analyst that processes numbers 7 | 3. A writer that creates reports 8 | """ 9 | 10 | from typing import Annotated, Dict, List 11 | 12 | from dotenv import load_dotenv 13 | from pydantic import Field 14 | 15 | from legion import agent, leader, team, tool 16 | from legion.memory.providers.memory import InMemoryProvider 17 | 18 | load_dotenv() 19 | 20 | 21 | # Create specialized tools for team members 22 | @tool 23 | def analyze_numbers( 24 | numbers: Annotated[List[float], Field(description="List of numbers to analyze")] 25 | ) -> Dict[str, float]: 26 | """Analyze a list of numbers and return basic statistics.""" 27 | if not numbers: 28 | return {"mean": 0, "min": 0, "max": 0} 29 | return { 30 | "mean": sum(numbers) / len(numbers), 31 | "min": min(numbers), 32 | "max": max(numbers) 33 | } 34 | 35 | 36 | @tool 37 | def format_report( 38 | title: Annotated[str, Field(description="Report title")], 39 | sections: Annotated[List[str], Field(description="List of report sections")], 40 | summary: Annotated[str, Field(description="Executive summary")] = None 41 | ) -> str: 42 | """Format a professional report with sections.""" 43 | parts = [f"# {title}\n"] 44 | if summary: 45 | parts.extend(["\n## Executive Summary", summary]) 46 | for i, section in enumerate(sections, 1): 47 | parts.extend([f"\n## Section {i}", section]) 48 | return "\n".join(parts) 49 | 50 | 51 | # Define the research team using decorator syntax 52 | @team 53 | class ResearchTeam: 54 | """A team that collaborates on research tasks.""" 55 | 56 | # Shared memory provider for the team 57 | memory = InMemoryProvider() 58 | 59 | @leader( 60 | model="openai:gpt-4o-mini", 61 | temperature=0.2 62 | ) 63 | class Leader: 64 | """Research team coordinator who delegates tasks and synthesizes results.""" 65 | 66 | pass 67 | 68 | @agent( 69 | model="openai:gpt-4o-mini", 70 | temperature=0.1, 71 | tools=[analyze_numbers] 72 | ) 73 | class Analyst: 74 | """Data analyst who processes numerical data and provides statistical insights.""" 75 | 76 | @tool 77 | def interpret_stats( 78 | self, 79 | stats: Annotated[Dict[str, float], Field( 80 | description="Statistics to interpret" 81 | )] 82 | ) -> str: 83 | """Interpret statistical results in plain language.""" 84 | return ( 85 | f"The data shows an average of {stats['mean']:.2f}, " 86 | f"ranging from {stats['min']:.2f} to {stats['max']:.2f}." 87 | ) 88 | 89 | @agent( 90 | model="openai:gpt-4o-mini", 91 | temperature=0.7, 92 | tools=[format_report] 93 | ) 94 | class Writer: 95 | """Technical writer who creates clear and professional reports.""" 96 | 97 | @tool 98 | def create_report( 99 | self, 100 | content: Annotated[str, Field( 101 | description="The content to include in the report" 102 | )], 103 | report_type: Annotated[str, Field( 104 | description="Type of report (e.g., analysis, summary, technical)" 105 | )] = "analysis" 106 | ) -> str: 107 | """Create a professional report from the given content.""" 108 | # Generate a title based on the report type 109 | title = f"{report_type.title()} Report" 110 | 111 | # Split content into sections 112 | sections = [ 113 | "Introduction", 114 | "Analysis", 115 | content, 116 | "Conclusion" 117 | ] 118 | 119 | # Format the report using the format_report tool 120 | return format_report( 121 | title=title, 122 | sections=sections, 123 | summary=f"This report provides a {report_type} of the given data." 124 | ) 125 | 126 | 127 | async def main(): 128 | # Create an instance of our research team 129 | team = ResearchTeam() 130 | 131 | # Example 1: Basic research task 132 | print("Example 1: Basic Research Task") 133 | print("=" * 50) 134 | 135 | response = await team.aprocess( 136 | "We need to analyze these numbers: [10.5, 20.5, 15.0, 30.0, 25.5] " 137 | "and create a report for stakeholders." 138 | ) 139 | print(response.content) 140 | print("\n" + "=" * 50 + "\n") 141 | 142 | # Example 2: Complex research project 143 | print("Example 2: Complex Research Project") 144 | print("=" * 50) 145 | 146 | response = await team.aprocess( 147 | "Let's conduct a research project on quarterly sales data:\n" 148 | "Q1: [100.0, 120.0, 95.0]\n" 149 | "Q2: [115.0, 125.0, 105.0]\n" 150 | "Q3: [130.0, 140.0, 125.0]\n" 151 | "Q4: [150.0, 160.0, 145.0]\n\n" 152 | "Analyze the trends and prepare a detailed report." 153 | ) 154 | print(response.content) 155 | 156 | 157 | if __name__ == "__main__": 158 | import asyncio 159 | asyncio.run(main()) 160 | -------------------------------------------------------------------------------- /examples/tools/basic_tools.py: -------------------------------------------------------------------------------- 1 | """Basic Tools Example 2 | 3 | This example demonstrates how to create and use tools with a Legion agent. 4 | It shows both standalone tools and agent-specific tools. 5 | """ 6 | 7 | from datetime import datetime 8 | from typing import Annotated, Any, Dict, List # noqa: F401 9 | 10 | from dotenv import load_dotenv 11 | from pydantic import Field 12 | 13 | from legion import agent, tool 14 | 15 | load_dotenv() 16 | 17 | 18 | # Standalone tool that can be shared between agents 19 | @tool 20 | def get_current_time( 21 | format: Annotated[str, Field(description="Format string for the datetime")] = ( 22 | "%Y-%m-%d %H:%M:%S" 23 | ) 24 | ) -> str: 25 | """Get the current time in a specified format.""" 26 | return datetime.now().strftime(format) 27 | 28 | 29 | @tool 30 | def list_formatter( 31 | items: Annotated[List[str], Field(description="List of items to format")], 32 | prefix: Annotated[str, Field(description="Prefix for each item")] = "- " 33 | ) -> str: 34 | """Format a list of items with a prefix.""" 35 | return "\n".join(f"{prefix}{item}" for item in items) 36 | 37 | 38 | @agent( 39 | model="openai:gpt-4o-mini", 40 | temperature=0.2, 41 | tools=[get_current_time, list_formatter] # Bind external tools 42 | ) 43 | class NoteTaker: 44 | """An agent that helps with taking and formatting notes. 45 | 46 | I can create timestamps, format lists, and organize information. 47 | """ 48 | 49 | # Tools can also be defined within the agent class, and as such, 50 | # are only available to that agent. 51 | # This makes it easy when reading Legion code to know which tools 52 | # are specific to an agent. 53 | @tool 54 | def create_note( 55 | self, 56 | title: Annotated[str, Field(description="Title of the note")], 57 | content: Annotated[str, Field(description="Content of the note")], 58 | add_timestamp: Annotated[bool, Field( 59 | description="Whether to add a timestamp" 60 | )] = True 61 | ) -> str: 62 | """Create a formatted note with an optional timestamp.""" 63 | timestamp = ( 64 | f"\nCreated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}" 65 | if add_timestamp else "" 66 | ) 67 | return f"# {title}\n\n{content}{timestamp}" 68 | 69 | 70 | async def main(): 71 | # Create an instance of our agent 72 | agent = NoteTaker() 73 | 74 | # Example 1: Create a simple note with current time 75 | response = await agent.aprocess( 76 | "Create a note titled 'Meeting Summary' with the content being a list of: " 77 | "'Discussed project timeline', 'Assigned tasks', 'Set next meeting date'. " 78 | "Make sure to format it as a nice list and include the timestamp." 79 | ) 80 | print("Example 1 Response:") 81 | print(response.content) 82 | print("\n" + "=" * 50 + "\n") 83 | 84 | # Example 2: Get current time in a specific format 85 | response = await agent.aprocess( 86 | "What's the current time in the format: Month Day, Year (HH:MM)?" 87 | ) 88 | print("Example 2 Response:") 89 | print(response.content) 90 | print("\n" + "=" * 50 + "\n") 91 | 92 | # Example 3: Complex note with multiple tool usage 93 | response = await agent.aprocess( 94 | "Create a note titled 'Daily Tasks' with today's date. Include a list of: " 95 | "'Check emails', 'Team standup', 'Code review'. Format it nicely with bullets " 96 | "and add the current time at the end." 97 | ) 98 | print("Example 3 Response:") 99 | print(response.content) 100 | 101 | 102 | if __name__ == "__main__": 103 | import asyncio 104 | asyncio.run(main()) 105 | -------------------------------------------------------------------------------- /legion/__init__.py: -------------------------------------------------------------------------------- 1 | """Legion: A provider-agnostic framework for building AI agent systems""" 2 | 3 | __version__ = "0.1.5" 4 | 5 | # Core interfaces 6 | from legion.interface.decorators import tool, param, schema, output_schema, system_prompt 7 | 8 | # Core agent system 9 | from legion.agents.decorators import agent 10 | 11 | # Groups 12 | from legion.groups.decorators import chain, team, leader 13 | 14 | # Blocks 15 | from legion.blocks.decorators import block 16 | 17 | # Provider management 18 | 19 | # Error types 20 | -------------------------------------------------------------------------------- /legion/agents/__init__.py: -------------------------------------------------------------------------------- 1 | """Agent module for Legion framework.""" 2 | from .base import Agent 3 | from .decorators import agent 4 | 5 | __all__ = ["Agent", "agent"] 6 | -------------------------------------------------------------------------------- /legion/blocks/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BlockError, BlockMetadata, FunctionalBlock, ValidationError 2 | from .decorators import block 3 | 4 | __all__ = [ 5 | "FunctionalBlock", 6 | "BlockMetadata", 7 | "BlockError", 8 | "ValidationError", 9 | "block" 10 | ] 11 | -------------------------------------------------------------------------------- /legion/blocks/decorators.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import logging 3 | from typing import List, Optional, Type 4 | 5 | from pydantic import BaseModel 6 | from rich import print as rprint 7 | from rich.console import Console 8 | 9 | from .base import BlockMetadata, FunctionalBlock 10 | 11 | # Set up rich console and logging 12 | console = Console() 13 | logger = logging.getLogger(__name__) 14 | 15 | def block( 16 | name: Optional[str] = None, 17 | description: Optional[str] = None, 18 | input_schema: Optional[Type[BaseModel]] = None, 19 | output_schema: Optional[Type[BaseModel]] = None, 20 | version: str = "1.0", 21 | tags: Optional[List[str]] = None, 22 | validate: bool = True, 23 | debug: Optional[bool] = False 24 | ): 25 | """Decorator to create a functional block 26 | 27 | Example: 28 | ------- 29 | @block( 30 | input_schema=InputModel, 31 | output_schema=OutputModel, 32 | tags=['preprocessing'] 33 | ) 34 | async def process_data(data: InputModel) -> OutputModel: 35 | ... 36 | 37 | """ 38 | 39 | def _log_message(message: str, color: str = None) -> None: 40 | """Internal method for consistent logging""" 41 | if debug: 42 | if color: 43 | rprint(f"\n[{color}]{message}[/{color}]") 44 | else: 45 | rprint(f"\n{message}") 46 | 47 | def decorator(func): 48 | 49 | _log_message(f"Decorating function {func.__name__}", color="bold blue") 50 | 51 | # Extract function signature 52 | inspect.signature(func) 53 | 54 | # Get description from docstring if not provided 55 | block_description = description 56 | if not block_description and func.__doc__: 57 | block_description = func.__doc__.split("\n")[0].strip() 58 | _log_message("Block description not provided, using function docstring instead\n") 59 | block_description = block_description or f"Block: {func.__name__}" 60 | 61 | # Only create schemas if explicitly requested or if schemas are provided 62 | block_input_schema = input_schema 63 | block_output_schema = output_schema 64 | 65 | # Create metadata 66 | metadata = BlockMetadata( 67 | name=name or func.__name__, 68 | description=block_description, 69 | input_schema=block_input_schema, 70 | output_schema=block_output_schema, 71 | version=version, 72 | tags=tags or [] 73 | ) 74 | 75 | # Create and return block 76 | return FunctionalBlock( 77 | func=func, 78 | metadata=metadata, 79 | validate=validate 80 | ) 81 | 82 | return decorator 83 | -------------------------------------------------------------------------------- /legion/errors/__init__.py: -------------------------------------------------------------------------------- 1 | # Errors __init__.py 2 | """Custom exceptions for legion""" 3 | 4 | from .exceptions import AgentError, LegionError, ProviderError 5 | 6 | 7 | class LegionError(Exception): 8 | """Base exception class for Legion""" 9 | 10 | pass 11 | 12 | class AgentError(LegionError): 13 | """Exception raised for errors in Agent operations""" 14 | 15 | pass 16 | 17 | class ProviderError(LegionError): 18 | """Exception raised for errors in Provider operations""" 19 | 20 | pass 21 | 22 | class ConfigError(LegionError): 23 | """Exception raised for configuration errors""" 24 | 25 | pass 26 | 27 | class ValidationError(LegionError): 28 | """Exception raised for validation errors""" 29 | 30 | pass 31 | 32 | class ToolError(LegionError): 33 | """Error raised when a tool fails to execute.""" 34 | 35 | def __init__(self, message: str, tool_name: str = None): 36 | self.tool_name = tool_name 37 | if "Invalid parameters for tool" in message and not tool_name: 38 | # Already formatted message from tools.py 39 | super().__init__(f"Tool error: {message}") 40 | else: 41 | # Message needs formatting with tool name 42 | super().__init__(f"Tool error{f' in {tool_name}' if tool_name else ''}: {message}") 43 | 44 | class ConfigurationError(LegionError): 45 | """Error raised when configuration is invalid.""" 46 | 47 | pass 48 | 49 | class NodeError(LegionError): 50 | """Error raised when a node fails.""" 51 | 52 | def __init__(self, message: str, node_id: str = None): 53 | self.node_id = node_id 54 | super().__init__(f"Node error{f' in {node_id}' if node_id else ''}: {message}") 55 | 56 | class StateError(LegionError): 57 | """Error raised when state operations fail.""" 58 | 59 | pass 60 | 61 | class ResourceError(LegionError): 62 | """Error raised when resource limits are exceeded.""" 63 | 64 | pass 65 | 66 | class NonRetryableError(LegionError): 67 | """Error that should not be retried.""" 68 | 69 | pass 70 | 71 | class FatalError(NonRetryableError): 72 | """Error that indicates a fatal condition.""" 73 | 74 | pass 75 | -------------------------------------------------------------------------------- /legion/errors/exceptions.py: -------------------------------------------------------------------------------- 1 | # errors/exceptions.py 2 | """Custom exception classes for legion""" 3 | 4 | class LegionError(Exception): 5 | """Base exception for all legion errors""" 6 | 7 | pass 8 | 9 | class ProviderError(LegionError): 10 | """Errors related to LLM provider operations""" 11 | 12 | pass 13 | 14 | class ToolError(LegionError): 15 | """Errors related to tool execution""" 16 | 17 | pass 18 | 19 | class JSONFormatError(LegionError): 20 | """Errors related to JSON formatting""" 21 | 22 | pass 23 | 24 | class InvalidSchemaError(LegionError): 25 | """Errors related to invalid schemas""" 26 | 27 | pass 28 | 29 | class AgentError(LegionError): 30 | """Error in agent operations""" 31 | 32 | pass 33 | -------------------------------------------------------------------------------- /legion/exceptions.py: -------------------------------------------------------------------------------- 1 | """Legion exceptions module.""" 2 | 3 | class LegionError(Exception): 4 | """Base exception for all Legion errors.""" 5 | 6 | pass 7 | 8 | class ExecutionError(LegionError): 9 | """Raised when there is an error during graph execution.""" 10 | 11 | pass 12 | 13 | class RetryableError(ExecutionError): 14 | """Base class for errors that can be retried.""" 15 | 16 | def __init__(self, message: str, retry_count: int = 0, max_retries: int = 0): 17 | super().__init__(message) 18 | self.retry_count = retry_count 19 | self.max_retries = max_retries 20 | 21 | @property 22 | def can_retry(self) -> bool: 23 | """Check if error can be retried""" 24 | return self.retry_count < self.max_retries 25 | 26 | class NonRetryableError(ExecutionError): 27 | """Base class for errors that cannot be retried.""" 28 | 29 | pass 30 | 31 | class RoutingError(LegionError): 32 | """Raised when there is an error in graph routing.""" 33 | 34 | pass 35 | 36 | class MetadataError(LegionError): 37 | """Raised when there is an error in metadata handling.""" 38 | 39 | pass 40 | 41 | class ValidationError(LegionError): 42 | """Raised when there is a validation error.""" 43 | 44 | pass 45 | 46 | class ConfigurationError(LegionError): 47 | """Raised when there is a configuration error.""" 48 | 49 | pass 50 | 51 | class GraphError(LegionError): 52 | """Raised when there is an error in graph structure or operation.""" 53 | 54 | pass 55 | 56 | class ResourceError(RetryableError): 57 | """Raised when resource limits are exceeded.""" 58 | 59 | pass 60 | 61 | class StateError(RetryableError): 62 | """Raised when there is an error in state management.""" 63 | 64 | pass 65 | 66 | class NodeError(RetryableError): 67 | """Raised when there is an error in node execution.""" 68 | 69 | def __init__(self, message: str, node_id: str, retry_count: int = 0, max_retries: int = 0): 70 | super().__init__(message, retry_count, max_retries) 71 | self.node_id = node_id 72 | 73 | class FatalError(NonRetryableError): 74 | """Raised when there is an unrecoverable error.""" 75 | 76 | pass 77 | 78 | class TimeoutError(RetryableError): 79 | """Raised when an operation times out.""" 80 | 81 | pass 82 | -------------------------------------------------------------------------------- /legion/graph/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/legion/graph/__init__.py -------------------------------------------------------------------------------- /legion/graph/decorators.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from .graph import Graph, GraphConfig 4 | 5 | 6 | class GraphDecorator: 7 | """Decorator for creating graph classes""" 8 | 9 | def __init__( 10 | self, 11 | name: Optional[str] = None, 12 | config: Optional[GraphConfig] = None, 13 | **kwargs 14 | ): 15 | self.name = name 16 | self.config = config or GraphConfig(**kwargs) 17 | 18 | def __call__(self, cls): 19 | # Extract name and description from class 20 | graph_name = self.name or cls.__name__ 21 | graph_description = cls.__doc__ or f"Graph: {graph_name}" 22 | 23 | # Create graph instance 24 | graph = Graph( 25 | name=graph_name, 26 | description=graph_description, 27 | config=self.config 28 | ) 29 | 30 | # Store original class attributes 31 | original_attrs = { 32 | name: value for name, value in cls.__dict__.items() 33 | if not name.startswith("__") 34 | } 35 | 36 | # Create wrapper class 37 | class GraphClass(cls): 38 | """Graph wrapper class""" 39 | 40 | def __init__(self, *args, **kwargs): 41 | # Initialize base class 42 | super().__init__(*args, **kwargs) 43 | 44 | # Store graph instance 45 | self.__graph = graph 46 | self.__original_attrs = original_attrs 47 | 48 | # Process class attributes 49 | self._process_attributes() 50 | 51 | def _process_attributes(self): 52 | """Process class attributes to set up graph structure""" 53 | # First, process parent class attributes if any 54 | parent_attrs = {} 55 | for base in reversed(self.__class__.__bases__): 56 | if hasattr(base, "__dict__"): 57 | parent_attrs.update({ 58 | name: value for name, value in base.__dict__.items() 59 | if not name.startswith("__") 60 | }) 61 | 62 | # Process parent attributes first 63 | for name, value in parent_attrs.items(): 64 | if isinstance(value, GraphConfig): 65 | config_dict = value.model_dump() 66 | for key, val in config_dict.items(): 67 | setattr(self.__graph._config, key, val) 68 | 69 | # Then process class attributes, which can override parent attributes 70 | for name, value in self.__original_attrs.items(): 71 | # Skip already processed attributes 72 | if hasattr(self, f"_{name}_processed"): 73 | continue 74 | 75 | # Handle different attribute types 76 | if hasattr(value, "__node_decorator__"): 77 | # Node definition - will be processed by node decorator 78 | pass 79 | elif hasattr(value, "__edge_decorator__"): 80 | # Edge definition - will be processed by edge decorator 81 | pass 82 | elif hasattr(value, "__channel_decorator__"): 83 | # Channel definition - will be processed by channel decorator 84 | pass 85 | elif isinstance(value, GraphConfig): 86 | # Update graph configuration 87 | config_dict = value.model_dump() 88 | for key, val in config_dict.items(): 89 | setattr(self.__graph._config, key, val) 90 | 91 | # Mark attribute as processed 92 | setattr(self, f"_{name}_processed", True) 93 | 94 | @property 95 | def graph(self) -> Graph: 96 | """Get the underlying graph instance""" 97 | return self.__graph 98 | 99 | # Copy class attributes 100 | for attr in ["__module__", "__name__", "__qualname__", "__doc__", "__annotations__"]: 101 | try: 102 | setattr(GraphClass, attr, getattr(cls, attr)) 103 | except AttributeError: 104 | pass 105 | 106 | return GraphClass 107 | 108 | # Create decorator function 109 | def graph( 110 | name: Optional[str] = None, 111 | config: Optional[GraphConfig] = None, 112 | **kwargs 113 | ): 114 | """Decorator for creating graph classes 115 | 116 | Args: 117 | ---- 118 | name: Optional graph name 119 | config: Optional graph configuration 120 | **kwargs: Additional configuration parameters 121 | 122 | Example: 123 | ------- 124 | @graph(name="analysis_workflow") 125 | class AnalysisWorkflow: 126 | '''Multi-stage analysis workflow.''' 127 | 128 | config = GraphConfig( 129 | execution_mode=ExecutionMode.PARALLEL, 130 | debug_mode=True 131 | ) 132 | 133 | """ 134 | if isinstance(name, type): 135 | # @graph used without parameters 136 | return GraphDecorator()(name) 137 | # @graph() used with parameters 138 | return GraphDecorator(name=name, config=config, **kwargs) 139 | -------------------------------------------------------------------------------- /legion/graph/edges/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import EdgeBase, EdgeMetadata 2 | from .registry import EdgeRegistry, EdgeRegistryMetadata 3 | from .validator import EdgeValidator, ValidationResult 4 | 5 | __all__ = [ 6 | "EdgeBase", 7 | "EdgeMetadata", 8 | "EdgeValidator", 9 | "ValidationResult", 10 | "EdgeRegistry", 11 | "EdgeRegistryMetadata" 12 | ] 13 | -------------------------------------------------------------------------------- /legion/graph/edges/base.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from typing import Any, Dict, Optional, TypeVar 3 | from uuid import uuid4 4 | 5 | from pydantic import BaseModel, Field 6 | 7 | from ..nodes.base import NodeBase 8 | from ..state import GraphState 9 | 10 | T = TypeVar("T") 11 | 12 | class EdgeMetadata(BaseModel): 13 | """Edge metadata""" 14 | 15 | created_at: datetime = Field(default_factory=datetime.now) 16 | updated_at: datetime = Field(default_factory=datetime.now) 17 | version: int = 0 18 | edge_id: str = Field(default_factory=lambda: str(uuid4())) 19 | edge_type: str = Field(default="") 20 | error: Optional[str] = None 21 | 22 | def model_dump(self, **kwargs) -> Dict[str, Any]: 23 | """Override model_dump to handle datetime serialization""" 24 | data = super().model_dump(**kwargs) 25 | data["created_at"] = data["created_at"].isoformat() 26 | data["updated_at"] = data["updated_at"].isoformat() 27 | return data 28 | 29 | class EdgeBase: 30 | """Base class for all graph edges""" 31 | 32 | def __init__( 33 | self, 34 | graph_state: GraphState, 35 | source_node: NodeBase, 36 | target_node: NodeBase, 37 | source_channel: str, 38 | target_channel: str 39 | ): 40 | """Initialize edge 41 | 42 | Args: 43 | ---- 44 | graph_state: Graph state manager 45 | source_node: Source node 46 | target_node: Target node 47 | source_channel: Name of source output channel 48 | target_channel: Name of target input channel 49 | 50 | """ 51 | self._metadata = EdgeMetadata( 52 | edge_type=self.__class__.__name__ 53 | ) 54 | self._graph_state = graph_state 55 | self._source_node = source_node 56 | self._target_node = target_node 57 | self._source_channel = source_channel 58 | self._target_channel = target_channel 59 | 60 | # Validate channel existence and compatibility 61 | self._validate_channels() 62 | 63 | @property 64 | def metadata(self) -> EdgeMetadata: 65 | """Get edge metadata""" 66 | return self._metadata 67 | 68 | @property 69 | def edge_id(self) -> str: 70 | """Get edge ID""" 71 | return self._metadata.edge_id 72 | 73 | @property 74 | def source_node(self) -> NodeBase: 75 | """Get source node""" 76 | return self._source_node 77 | 78 | @property 79 | def target_node(self) -> NodeBase: 80 | """Get target node""" 81 | return self._target_node 82 | 83 | @property 84 | def source_channel(self) -> str: 85 | """Get source channel name""" 86 | return self._source_channel 87 | 88 | @property 89 | def target_channel(self) -> str: 90 | """Get target channel name""" 91 | return self._target_channel 92 | 93 | def _update_metadata(self) -> None: 94 | """Update metadata after state change""" 95 | self._metadata.updated_at = datetime.now() 96 | self._metadata.version += 1 97 | 98 | def _validate_channels(self) -> None: 99 | """Validate channel existence and compatibility""" 100 | source_channel = self._source_node.get_output_channel(self._source_channel) 101 | if not source_channel: 102 | raise ValueError( 103 | f"Source channel '{self._source_channel}' not found in node {self._source_node.node_id}" 104 | ) 105 | 106 | target_channel = self._target_node.get_input_channel(self._target_channel) 107 | if not target_channel: 108 | raise ValueError( 109 | f"Target channel '{self._target_channel}' not found in node {self._target_node.node_id}" 110 | ) 111 | 112 | # Check channel type compatibility 113 | if not isinstance(source_channel, type(target_channel)): 114 | raise TypeError( 115 | f"Channel type mismatch: {type(source_channel)} -> {type(target_channel)}" 116 | ) 117 | 118 | # Check value type compatibility if available 119 | source_type = getattr(source_channel, "type_hint", None) 120 | target_type = getattr(target_channel, "type_hint", None) 121 | 122 | if source_type and target_type and source_type != target_type: 123 | raise TypeError( 124 | f"Channel value type mismatch: {source_type} -> {target_type}" 125 | ) 126 | 127 | def checkpoint(self) -> Dict[str, Any]: 128 | """Create a checkpoint of current state""" 129 | return { 130 | "metadata": self._metadata.model_dump(), 131 | "source_node": self._source_node.node_id, 132 | "target_node": self._target_node.node_id, 133 | "source_channel": self._source_channel, 134 | "target_channel": self._target_channel 135 | } 136 | 137 | def restore(self, checkpoint: Dict[str, Any]) -> None: 138 | """Restore from checkpoint""" 139 | self._metadata = EdgeMetadata(**checkpoint["metadata"]) 140 | # Note: Nodes and channels are restored through registry 141 | -------------------------------------------------------------------------------- /legion/graph/edges/conditional.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any, Dict, List, Optional 3 | 4 | from ..nodes.base import NodeBase 5 | from ..state import GraphState 6 | from .base import EdgeBase 7 | from .routing import RoutingCondition 8 | 9 | 10 | @dataclass 11 | class Target: 12 | """Target node and channel with optional condition""" 13 | 14 | node: NodeBase 15 | channel: str 16 | condition: Optional[RoutingCondition] = None 17 | priority: int = 0 18 | 19 | class ConditionalEdge(EdgeBase): 20 | """Edge with conditional routing support""" 21 | 22 | def __init__( 23 | self, 24 | graph_state: GraphState, 25 | source_node: NodeBase, 26 | source_channel: str, 27 | default_target: Target, 28 | conditional_targets: Optional[List[Target]] = None 29 | ): 30 | """Initialize conditional edge 31 | 32 | Args: 33 | ---- 34 | graph_state: Graph state manager 35 | source_node: Source node 36 | source_channel: Name of source output channel 37 | default_target: Default target if no conditions match 38 | conditional_targets: Optional list of conditional targets 39 | 40 | """ 41 | super().__init__( 42 | graph_state, 43 | source_node, 44 | default_target.node, 45 | source_channel, 46 | default_target.channel 47 | ) 48 | self._default_target = default_target 49 | self._conditional_targets = sorted( 50 | conditional_targets or [], 51 | key=lambda t: t.priority, 52 | reverse=True # Higher priority first 53 | ) 54 | 55 | # Validate all targets 56 | self._validate_target(default_target) 57 | for target in self._conditional_targets: 58 | self._validate_target(target) 59 | 60 | def _validate_target(self, target: Target) -> None: 61 | """Validate a target's channel compatibility""" 62 | source_channel = self._source_node.get_output_channel(self._source_channel) 63 | target_channel = target.node.get_input_channel(target.channel) 64 | 65 | if not source_channel or not target_channel: 66 | raise ValueError("Invalid channel configuration") 67 | 68 | # Check channel type compatibility 69 | if not isinstance(source_channel, type(target_channel)): 70 | raise TypeError( 71 | f"Channel type mismatch: {type(source_channel)} -> {type(target_channel)}" 72 | ) 73 | 74 | # Check value type compatibility if available 75 | source_type = getattr(source_channel, "type_hint", None) 76 | target_type = getattr(target_channel, "type_hint", None) 77 | 78 | if source_type and target_type and source_type != target_type: 79 | raise TypeError( 80 | f"Channel value type mismatch: {source_type} -> {target_type}" 81 | ) 82 | 83 | async def get_active_target(self) -> Target: 84 | """Get the currently active target based on conditions 85 | 86 | Returns 87 | ------- 88 | Target that should receive the next value 89 | 90 | """ 91 | # Check conditional targets in priority order 92 | for target in self._conditional_targets: 93 | if target.condition and await target.condition.evaluate(self._source_node): 94 | return target 95 | 96 | # Fall back to default target 97 | return self._default_target 98 | 99 | def checkpoint(self) -> Dict[str, Any]: 100 | """Create a checkpoint of current state""" 101 | checkpoint = super().checkpoint() 102 | checkpoint.update({ 103 | "default_target": { 104 | "node": self._default_target.node.node_id, 105 | "channel": self._default_target.channel, 106 | "priority": self._default_target.priority 107 | }, 108 | "conditional_targets": [ 109 | { 110 | "node": target.node.node_id, 111 | "channel": target.channel, 112 | "priority": target.priority, 113 | "condition": target.condition.checkpoint() if target.condition else None 114 | } 115 | for target in self._conditional_targets 116 | ] 117 | }) 118 | return checkpoint 119 | 120 | def restore(self, checkpoint: Dict[str, Any]) -> None: 121 | """Restore from checkpoint""" 122 | super().restore(checkpoint) 123 | # Note: Full restoration happens through registry to reconnect nodes 124 | -------------------------------------------------------------------------------- /legion/graph/nodes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/legion/graph/nodes/__init__.py -------------------------------------------------------------------------------- /legion/graph/state.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from typing import Any, Dict, List, Optional, Type, TypeVar 3 | from uuid import uuid4 4 | 5 | from pydantic import BaseModel, ConfigDict, Field 6 | 7 | from .channels import Channel, ChannelMetadata, LastValue, SharedState, ValueSequence 8 | 9 | T = TypeVar("T") 10 | 11 | class GraphStateMetadata(BaseModel): 12 | """Metadata for graph state""" 13 | 14 | created_at: datetime = Field(default_factory=datetime.now) 15 | updated_at: datetime = Field(default_factory=datetime.now) 16 | version: int = 0 17 | graph_id: str = Field(default_factory=lambda: str(uuid4())) 18 | 19 | model_config = ConfigDict( 20 | json_encoders={ 21 | datetime: lambda v: v.isoformat() 22 | } 23 | ) 24 | 25 | class GraphState: 26 | """Manages state for a graph instance""" 27 | 28 | def __init__(self): 29 | self._metadata = GraphStateMetadata() 30 | self._channels: Dict[str, Channel] = {} 31 | self._global_state = SharedState() 32 | 33 | @property 34 | def metadata(self) -> GraphStateMetadata: 35 | """Get graph state metadata""" 36 | return self._metadata 37 | 38 | @property 39 | def graph_id(self) -> str: 40 | """Get graph ID""" 41 | return self._metadata.graph_id 42 | 43 | def _update_metadata(self) -> None: 44 | """Update metadata after state change""" 45 | self._metadata.updated_at = datetime.now() 46 | self._metadata.version += 1 47 | 48 | def create_channel( 49 | self, 50 | channel_type: Type[Channel[T]], 51 | name: str, 52 | type_hint: Optional[Type[T]] = None, 53 | **kwargs 54 | ) -> Channel[T]: 55 | """Create a new channel""" 56 | if name in self._channels: 57 | raise ValueError(f"Channel '{name}' already exists") 58 | 59 | channel = channel_type(type_hint=type_hint, **kwargs) 60 | self._channels[name] = channel 61 | self._update_metadata() 62 | return channel 63 | 64 | def get_channel(self, name: str) -> Optional[Channel]: 65 | """Get channel by name""" 66 | return self._channels.get(name) 67 | 68 | def delete_channel(self, name: str) -> None: 69 | """Delete channel by name""" 70 | if name in self._channels: 71 | del self._channels[name] 72 | self._update_metadata() 73 | 74 | def list_channels(self) -> List[str]: 75 | """List all channel names""" 76 | return list(self._channels.keys()) 77 | 78 | def get_global_state(self) -> Dict[str, Any]: 79 | """Get global state""" 80 | return self._global_state.get() 81 | 82 | def update_global_state(self, updates: Dict[str, Any]) -> None: 83 | """Update global state""" 84 | self._global_state.update(updates) 85 | self._update_metadata() 86 | 87 | def set_global_state(self, state: Dict[str, Any]) -> None: 88 | """Set global state""" 89 | self._global_state.set(state) 90 | self._update_metadata() 91 | 92 | def checkpoint(self) -> Dict[str, Any]: 93 | """Create a checkpoint of current state""" 94 | channels_checkpoint = { 95 | name: channel.checkpoint() 96 | for name, channel in self._channels.items() 97 | } 98 | 99 | return { 100 | "metadata": self._metadata.model_dump(), 101 | "channels": channels_checkpoint, 102 | "global_state": self._global_state.checkpoint() 103 | } 104 | 105 | def restore(self, checkpoint: Dict[str, Any]) -> None: 106 | """Restore from checkpoint""" 107 | self._metadata = GraphStateMetadata(**checkpoint["metadata"]) 108 | 109 | # Restore channels 110 | self._channels.clear() 111 | for name, channel_checkpoint in checkpoint["channels"].items(): 112 | # Determine channel type from checkpoint 113 | ChannelMetadata(**channel_checkpoint["metadata"]) 114 | if "values" in channel_checkpoint: 115 | channel = ValueSequence() 116 | elif "value" in channel_checkpoint: 117 | channel = LastValue() 118 | else: 119 | channel = SharedState() 120 | 121 | channel.restore(channel_checkpoint) 122 | self._channels[name] = channel 123 | 124 | # Restore global state 125 | self._global_state.restore(checkpoint["global_state"]) 126 | 127 | def clear(self) -> None: 128 | """Clear all state""" 129 | self._channels.clear() 130 | self._global_state = SharedState() 131 | self._metadata = GraphStateMetadata() 132 | 133 | def merge(self, other: "GraphState") -> None: 134 | """Merge another graph state into this one""" 135 | # Merge channels 136 | for name, channel in other._channels.items(): 137 | if name not in self._channels: 138 | self._channels[name] = channel 139 | else: 140 | # For now, skip conflicting channels 141 | continue 142 | 143 | # Merge global state 144 | other_state = other.get_global_state() 145 | current_state = self.get_global_state() 146 | merged_state = {**current_state, **other_state} 147 | self.set_global_state(merged_state) 148 | 149 | self._update_metadata() 150 | -------------------------------------------------------------------------------- /legion/groups/__init__.py: -------------------------------------------------------------------------------- 1 | """Group implementations for multi-agent systems""" 2 | 3 | from .base import BaseGroup 4 | from .chain import Chain 5 | from .team import Team 6 | from .types import AgentOrGroup, MemberDict, MemberList 7 | 8 | __all__ = [ 9 | "BaseGroup", 10 | "Chain", 11 | "Team", 12 | "AgentOrGroup", 13 | "MemberDict", 14 | "MemberList" 15 | ] 16 | -------------------------------------------------------------------------------- /legion/groups/types.py: -------------------------------------------------------------------------------- 1 | """Type definitions for group implementations""" 2 | from typing import TYPE_CHECKING, Dict, List, TypeVar, Union 3 | 4 | from ..agents.base import Agent 5 | from ..blocks.base import FunctionalBlock 6 | from .base import BaseGroup 7 | 8 | if TYPE_CHECKING: 9 | from .chain import Chain 10 | from .team import Team 11 | 12 | # Type definitions 13 | ChainMember = Union[Agent, BaseGroup, FunctionalBlock] 14 | AgentOrGroup = Union[Agent, BaseGroup] 15 | MemberDict = Dict[str, AgentOrGroup] 16 | MemberList = List[ChainMember] 17 | 18 | # Type variable for group classes 19 | GroupT = TypeVar("GroupT", bound=BaseGroup) 20 | 21 | # Group type hints 22 | ChainType = TypeVar("ChainType", bound="Chain") 23 | TeamType = TypeVar("TeamType", bound="Team") 24 | -------------------------------------------------------------------------------- /legion/interface/__init__.py: -------------------------------------------------------------------------------- 1 | from .decorators import output_schema, param, schema, tool 2 | 3 | __all__ = [ 4 | "tool", 5 | "schema", 6 | "output_schema", 7 | "param" 8 | ] 9 | -------------------------------------------------------------------------------- /legion/interface/tools.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from abc import ABC, abstractmethod 4 | from typing import Any, Dict, Optional, Set, Type 5 | 6 | from pydantic import BaseModel 7 | 8 | from ..errors import ToolError 9 | 10 | # Set up logging 11 | logger = logging.getLogger(__name__) 12 | 13 | class BaseTool(ABC): 14 | """Base class for all tools""" 15 | 16 | def __init__( 17 | self, 18 | name: str, 19 | description: str, 20 | parameters: Type[BaseModel], 21 | injected_params: Optional[Set[str]] = None, 22 | injected_values: Optional[Dict[str, Any]] = None 23 | ): 24 | """Initialize tool""" 25 | self.name = name 26 | self.description = description 27 | self.parameters = parameters 28 | self.injected_params = injected_params or set() 29 | self._is_async = hasattr(self, "arun") 30 | self._injected_values = dict(injected_values or {}) # Make a copy 31 | logger.debug(f"BaseTool initialized with injected_values: {self._injected_values}") 32 | 33 | def get_schema(self) -> Dict[str, Any]: 34 | """Get OpenAI-compatible function schema""" 35 | # TODO: Ensure this is compatible with all providers 36 | schema = self.parameters.model_json_schema() 37 | 38 | # Filter out injected parameters from schema 39 | filtered_properties = { 40 | k: v for k, v in schema.get("properties", {}).items() 41 | if k not in self.injected_params 42 | } 43 | 44 | # Remove injected params from required list 45 | required = [ 46 | param for param in schema.get("required", []) 47 | if param not in self.injected_params 48 | ] 49 | 50 | return { 51 | "type": "function", 52 | "function": { 53 | "name": self.name, 54 | "description": self.description, 55 | "parameters": { 56 | "type": "object", 57 | "properties": filtered_properties, 58 | "required": required, 59 | } 60 | } 61 | } 62 | 63 | def inject(self, **kwargs) -> "BaseTool": 64 | """Inject static parameter values""" 65 | # Only allow injecting declared injectable parameters 66 | for key in kwargs: 67 | if key not in self.injected_params: 68 | raise ValueError(f"Parameter '{key}' is not injectable") 69 | self._injected_values.update(kwargs) 70 | return self 71 | 72 | async def __call__(self, **kwargs) -> Any: 73 | """Make tool callable, validating parameters""" 74 | try: 75 | # Start with injected values 76 | all_kwargs = dict(self._injected_values) 77 | logger.debug(f"Starting with injected values: {all_kwargs}") 78 | 79 | # Update with provided kwargs, but don't override injected values 80 | for key, value in kwargs.items(): 81 | if key not in self.injected_params or key not in all_kwargs: 82 | all_kwargs[key] = value 83 | logger.debug(f"After merging with provided kwargs: {all_kwargs}") 84 | 85 | # Validate all parameters together 86 | try: 87 | validated = self.parameters(**all_kwargs) 88 | validated_dict = validated.model_dump() 89 | logger.debug("Validation successful") 90 | except Exception as e: 91 | logger.error(f"Validation failed: {str(e)}") 92 | raise ToolError(f"Invalid parameters for tool {self.name}: {str(e)}") 93 | 94 | if self._is_async: 95 | return await self.arun(**validated_dict) 96 | else: 97 | return await asyncio.get_event_loop().run_in_executor( 98 | None, lambda: self.run(**validated_dict) 99 | ) 100 | except Exception as e: 101 | if isinstance(e, (ValueError, ToolError)): 102 | raise 103 | raise ToolError(f"Error executing tool {self.name}: {str(e)}") 104 | 105 | @abstractmethod 106 | def run(self, **kwargs) -> Any: 107 | """Execute the tool with validated parameters (sync)""" 108 | pass 109 | 110 | async def arun(self, **kwargs) -> Any: 111 | """Execute the tool with validated parameters (async)""" 112 | # Default async implementation calls sync version 113 | return await asyncio.get_event_loop().run_in_executor( 114 | None, lambda: self.run(**kwargs) 115 | ) 116 | 117 | def model_dump(self) -> Dict[str, Any]: 118 | """Serialize tool for provider APIs 119 | 120 | Returns format expected by OpenAI/compatible APIs: 121 | { 122 | "type": "function", 123 | "function": { 124 | "name": str, 125 | "description": str, 126 | "parameters": { 127 | "type": "object", 128 | "properties": {...}, 129 | "required": [...] 130 | } 131 | } 132 | } 133 | """ 134 | return self.get_schema() 135 | -------------------------------------------------------------------------------- /legion/monitoring/__init__.py: -------------------------------------------------------------------------------- 1 | """Legion monitoring system for observability and telemetry""" 2 | 3 | from .events.base import Event, EventCategory, EventEmitter, EventSeverity, EventType 4 | 5 | __all__ = [ 6 | "Event", 7 | "EventEmitter", 8 | "EventType", 9 | "EventCategory", 10 | "EventSeverity" 11 | ] 12 | -------------------------------------------------------------------------------- /legion/monitoring/events/__init__.py: -------------------------------------------------------------------------------- 1 | """Event types for Legion monitoring system""" 2 | 3 | from .agent import ( 4 | AgentDecisionEvent, 5 | AgentErrorEvent, 6 | AgentEvent, 7 | AgentMemoryEvent, 8 | AgentProcessingEvent, 9 | AgentResponseEvent, 10 | AgentStartEvent, 11 | AgentStateChangeEvent, 12 | AgentToolUseEvent, 13 | ) 14 | from .base import Event, EventCategory, EventEmitter, EventSeverity, EventType 15 | from .chain import ( 16 | ChainBottleneckEvent, 17 | ChainCompletionEvent, 18 | ChainErrorEvent, 19 | ChainEvent, 20 | ChainStartEvent, 21 | ChainStateChangeEvent, 22 | ChainStepEvent, 23 | ChainTransformEvent, 24 | ) 25 | from .team import ( 26 | TeamCommunicationEvent, 27 | TeamCompletionEvent, 28 | TeamDelegationEvent, 29 | TeamErrorEvent, 30 | TeamEvent, 31 | TeamFormationEvent, 32 | TeamLeadershipEvent, 33 | TeamPerformanceEvent, 34 | TeamStateChangeEvent, 35 | ) 36 | 37 | __all__ = [ 38 | # Base types 39 | "Event", 40 | "EventType", 41 | "EventCategory", 42 | "EventSeverity", 43 | "EventEmitter", 44 | 45 | # Agent events 46 | "AgentEvent", 47 | "AgentStartEvent", 48 | "AgentProcessingEvent", 49 | "AgentDecisionEvent", 50 | "AgentToolUseEvent", 51 | "AgentMemoryEvent", 52 | "AgentResponseEvent", 53 | "AgentErrorEvent", 54 | "AgentStateChangeEvent", 55 | 56 | # Team events 57 | "TeamEvent", 58 | "TeamFormationEvent", 59 | "TeamDelegationEvent", 60 | "TeamLeadershipEvent", 61 | "TeamCommunicationEvent", 62 | "TeamCompletionEvent", 63 | "TeamPerformanceEvent", 64 | "TeamStateChangeEvent", 65 | "TeamErrorEvent", 66 | 67 | # Chain events 68 | "ChainEvent", 69 | "ChainStartEvent", 70 | "ChainStepEvent", 71 | "ChainTransformEvent", 72 | "ChainCompletionEvent", 73 | "ChainErrorEvent", 74 | "ChainStateChangeEvent", 75 | "ChainBottleneckEvent" 76 | ] 77 | -------------------------------------------------------------------------------- /legion/monitoring/metrics.py: -------------------------------------------------------------------------------- 1 | """System metrics collection utilities for Legion monitoring""" 2 | 3 | import os 4 | import platform 5 | import sys 6 | import threading 7 | import time 8 | from typing import TYPE_CHECKING, Any, Dict, Optional 9 | 10 | import psutil 11 | 12 | if TYPE_CHECKING: 13 | from .events.base import Event 14 | 15 | class SystemMetricsCollector: 16 | """Collects system metrics for event context enrichment""" 17 | 18 | def __init__(self): 19 | """Initialize the metrics collector""" 20 | self._process = psutil.Process() 21 | self._initial_net_io = psutil.net_io_counters() 22 | self._initial_disk_io = psutil.disk_io_counters() 23 | self._start_time = time.time() 24 | self._initial_cpu_times = self._process.cpu_times() 25 | 26 | def get_execution_context(self) -> Dict[str, Any]: 27 | """Get current execution context 28 | 29 | Returns 30 | ------- 31 | Dict containing execution context information 32 | 33 | """ 34 | return { 35 | "thread_id": threading.get_ident(), 36 | "process_id": os.getpid(), 37 | "host_name": platform.node(), 38 | "python_version": sys.version 39 | } 40 | 41 | def get_system_metrics(self) -> Dict[str, Any]: 42 | """Get current system metrics 43 | 44 | Returns 45 | ------- 46 | Dict containing system metrics 47 | 48 | """ 49 | # Get current measurements 50 | current_net_io = psutil.net_io_counters() 51 | current_disk_io = psutil.disk_io_counters() 52 | 53 | # Calculate network IO 54 | net_bytes_sent = current_net_io.bytes_sent - self._initial_net_io.bytes_sent 55 | net_bytes_recv = current_net_io.bytes_recv - self._initial_net_io.bytes_recv 56 | 57 | # Calculate disk IO 58 | disk_bytes_read = current_disk_io.read_bytes - self._initial_disk_io.read_bytes 59 | disk_bytes_written = current_disk_io.write_bytes - self._initial_disk_io.write_bytes 60 | 61 | return { 62 | "system_cpu_percent": psutil.cpu_percent(), 63 | "system_memory_percent": psutil.virtual_memory().percent, 64 | "system_disk_usage_bytes": disk_bytes_read + disk_bytes_written, 65 | "system_network_bytes_sent": net_bytes_sent, 66 | "system_network_bytes_received": net_bytes_recv 67 | } 68 | 69 | def get_process_metrics(self) -> Dict[str, Any]: 70 | """Get current process metrics 71 | 72 | Returns 73 | ------- 74 | Dict containing process metrics 75 | 76 | """ 77 | return { 78 | "memory_usage_bytes": self._process.memory_info().rss, 79 | "cpu_usage_percent": self._process.cpu_percent() 80 | } 81 | 82 | class MetricsContext: 83 | """Context manager for collecting metrics during an operation""" 84 | 85 | def __init__(self, event: Optional["Event"] = None): 86 | """Initialize metrics context 87 | 88 | Args: 89 | ---- 90 | event: Optional event to enrich with metrics 91 | 92 | """ 93 | self.event = event 94 | self.collector = SystemMetricsCollector() 95 | self._start_time = None 96 | 97 | def __enter__(self): 98 | """Enter the metrics collection context""" 99 | self._start_time = time.time() 100 | return self 101 | 102 | def __exit__(self, exc_type, exc_val, exc_tb): 103 | """Exit the metrics collection context and update event""" 104 | if not self.event: 105 | return 106 | 107 | # Calculate duration 108 | duration_ms = (time.time() - self._start_time) * 1000 109 | self.event.duration_ms = duration_ms 110 | 111 | # Get metrics 112 | execution_context = self.collector.get_execution_context() 113 | system_metrics = self.collector.get_system_metrics() 114 | process_metrics = self.collector.get_process_metrics() 115 | 116 | # Update event 117 | for key, value in execution_context.items(): 118 | setattr(self.event, key, value) 119 | 120 | for key, value in system_metrics.items(): 121 | setattr(self.event, key, value) 122 | 123 | for key, value in process_metrics.items(): 124 | setattr(self.event, key, value) 125 | -------------------------------------------------------------------------------- /legion/monitoring/monitors.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from typing import Any, Dict, Optional, Pattern, Set 3 | 4 | from .events.base import Event, EventCategory, EventSeverity, EventType 5 | 6 | 7 | @dataclass 8 | class MonitorConfig: 9 | """Configuration for a monitor instance""" 10 | 11 | # Event type filtering 12 | event_types: Set[EventType] = field(default_factory=lambda: {e for e in EventType}) 13 | categories: Set[EventCategory] = field(default_factory=lambda: {c for c in EventCategory}) 14 | min_severity: EventSeverity = EventSeverity.DEBUG 15 | 16 | # Component filtering 17 | component_patterns: Set[Pattern] = field(default_factory=set) 18 | excluded_component_patterns: Set[Pattern] = field(default_factory=set) 19 | 20 | # Sampling 21 | sample_rate: float = 1.0 # 1.0 = 100% sampling 22 | 23 | # Resource limits 24 | max_events_per_second: Optional[int] = None 25 | max_memory_mb: Optional[int] = None 26 | 27 | class Monitor: 28 | """Base class for all monitors in Legion""" 29 | 30 | def __init__(self, config: Optional[MonitorConfig] = None): 31 | """Initialize a new monitor 32 | 33 | Args: 34 | ---- 35 | config: Optional monitor configuration. If not provided, uses defaults 36 | 37 | """ 38 | self.config = config or MonitorConfig() 39 | self.is_active = False 40 | self._event_count = 0 41 | self._error_count = 0 42 | 43 | def should_process_event(self, event: Event) -> bool: 44 | """Determine if an event should be processed based on configuration 45 | 46 | Args: 47 | ---- 48 | event: The event to check 49 | 50 | Returns: 51 | ------- 52 | bool: True if the event should be processed 53 | 54 | """ 55 | # Check event type and category 56 | if event.event_type not in self.config.event_types: 57 | return False 58 | if event.category not in self.config.categories: 59 | return False 60 | 61 | # Check severity - compare enum values 62 | severity_values = { 63 | EventSeverity.DEBUG: 0, 64 | EventSeverity.INFO: 1, 65 | EventSeverity.WARNING: 2, 66 | EventSeverity.ERROR: 3 67 | } 68 | if severity_values[event.severity] < severity_values[self.config.min_severity]: 69 | return False 70 | 71 | # Check component patterns 72 | if self.config.component_patterns: 73 | if not any(p.match(event.component_id) for p in self.config.component_patterns): 74 | return False 75 | 76 | if any(p.match(event.component_id) for p in self.config.excluded_component_patterns): 77 | return False 78 | 79 | return True 80 | 81 | def process_event(self, event: Event): 82 | """Process an event 83 | 84 | Args: 85 | ---- 86 | event: Event to process 87 | 88 | """ 89 | if not self.is_active: 90 | return 91 | 92 | if not self.should_process_event(event): 93 | return 94 | 95 | try: 96 | self._process_event_impl(event) 97 | self._event_count += 1 98 | except Exception: 99 | self._error_count += 1 100 | raise 101 | 102 | def _process_event_impl(self, event: Event): 103 | """Implementation of event processing 104 | 105 | Args: 106 | ---- 107 | event: Event to process 108 | 109 | This should be overridden by subclasses 110 | 111 | """ 112 | raise NotImplementedError() 113 | 114 | def start(self): 115 | """Start the monitor""" 116 | self.is_active = True 117 | 118 | def stop(self): 119 | """Stop the monitor""" 120 | self.is_active = False 121 | 122 | @property 123 | def stats(self) -> Dict[str, Any]: 124 | """Get monitor statistics 125 | 126 | Returns 127 | ------- 128 | Dict containing monitor stats 129 | 130 | """ 131 | return { 132 | "is_active": self.is_active, 133 | "event_count": self._event_count, 134 | "error_count": self._error_count 135 | } 136 | -------------------------------------------------------------------------------- /legion/monitoring/storage/__init__.py: -------------------------------------------------------------------------------- 1 | """Storage backend API for Legion monitoring""" 2 | 3 | from .base import StorageBackend 4 | from .config import StorageConfig 5 | from .factory import StorageFactory, StorageType 6 | from .memory import MemoryStorageBackend 7 | from .sqlite import SQLiteStorageBackend 8 | 9 | __all__ = [ 10 | "StorageBackend", 11 | "MemoryStorageBackend", 12 | "SQLiteStorageBackend", 13 | "StorageConfig", 14 | "StorageFactory", 15 | "StorageType" 16 | ] 17 | -------------------------------------------------------------------------------- /legion/monitoring/storage/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from datetime import datetime 3 | from typing import List, Optional, Type 4 | 5 | from ..events.base import Event 6 | 7 | 8 | class StorageBackend(ABC): 9 | """Abstract base class for event storage backends""" 10 | 11 | @abstractmethod 12 | def store_event(self, event: Event) -> None: 13 | """Store an event in the backend 14 | 15 | Args: 16 | ---- 17 | event: The event to store 18 | 19 | """ 20 | pass 21 | 22 | @abstractmethod 23 | def get_events(self, 24 | event_types: Optional[List[Type[Event]]] = None, 25 | start_time: Optional[datetime] = None, 26 | end_time: Optional[datetime] = None) -> List[Event]: 27 | """Retrieve events matching the specified criteria 28 | 29 | Args: 30 | ---- 31 | event_types: Optional list of event types to filter by 32 | start_time: Optional start time to filter by 33 | end_time: Optional end time to filter by 34 | 35 | Returns: 36 | ------- 37 | List of matching events 38 | 39 | """ 40 | pass 41 | 42 | @abstractmethod 43 | def clear(self) -> None: 44 | """Clear all events from storage""" 45 | pass 46 | 47 | @abstractmethod 48 | def cleanup(self, retention_days: int) -> None: 49 | """Clean up old events based on retention policy 50 | 51 | Args: 52 | ---- 53 | retention_days: Number of days to retain events for 54 | 55 | """ 56 | pass 57 | -------------------------------------------------------------------------------- /legion/monitoring/storage/config.py: -------------------------------------------------------------------------------- 1 | """Configuration for storage backends""" 2 | 3 | from typing import Optional 4 | 5 | from pydantic import BaseModel, Field 6 | 7 | 8 | class StorageConfig(BaseModel): 9 | """Configuration for storage backends 10 | 11 | Attributes 12 | ---------- 13 | retention_days: Number of days to retain events for 14 | cleanup_interval: Number of minutes between cleanup runs 15 | max_events: Maximum number of events to store (None for unlimited) 16 | 17 | """ 18 | 19 | retention_days: int = Field( 20 | default=30, 21 | description="Number of days to retain events for", 22 | ge=1 23 | ) 24 | 25 | cleanup_interval: float = Field( 26 | default=60.0, 27 | description="Number of minutes between cleanup runs", 28 | gt=0 29 | ) 30 | 31 | max_events: Optional[int] = Field( 32 | default=None, 33 | description="Maximum number of events to store (None for unlimited)", 34 | ge=1 35 | ) 36 | -------------------------------------------------------------------------------- /legion/monitoring/storage/factory.py: -------------------------------------------------------------------------------- 1 | """Factory for creating storage backends""" 2 | 3 | from enum import Enum 4 | from typing import Dict, Optional, Type, Union 5 | 6 | from .base import StorageBackend 7 | from .config import StorageConfig 8 | from .memory import MemoryStorageBackend 9 | from .sqlite import SQLiteStorageBackend 10 | 11 | 12 | class StorageType(Enum): 13 | """Available storage backend types""" 14 | 15 | MEMORY = "memory" 16 | SQLITE = "sqlite" 17 | 18 | class StorageFactory: 19 | """Factory for creating storage backends 20 | 21 | This class manages the creation of storage backends and ensures 22 | consistent configuration across the application. 23 | """ 24 | 25 | _backends: Dict[str, Type[StorageBackend]] = { 26 | StorageType.MEMORY.value: MemoryStorageBackend, 27 | StorageType.SQLITE.value: SQLiteStorageBackend 28 | } 29 | 30 | @classmethod 31 | def create(cls, 32 | storage_type: Union[StorageType, str] = StorageType.MEMORY, 33 | config: Optional[StorageConfig] = None, 34 | **kwargs) -> StorageBackend: 35 | """Create a new storage backend 36 | 37 | Args: 38 | ---- 39 | storage_type: Type of storage backend to create 40 | config: Optional storage configuration 41 | **kwargs: Additional arguments passed to the backend constructor 42 | 43 | Returns: 44 | ------- 45 | The created storage backend 46 | 47 | Raises: 48 | ------ 49 | ValueError: If the storage type is not supported 50 | 51 | """ 52 | if isinstance(storage_type, StorageType): 53 | storage_type = storage_type.value 54 | 55 | if storage_type not in cls._backends: 56 | raise ValueError(f"Unsupported storage type: {storage_type}") 57 | 58 | backend_cls = cls._backends[storage_type] 59 | return backend_cls(config=config, **kwargs) 60 | 61 | @classmethod 62 | def register_backend(cls, storage_type: str, backend_cls: Type[StorageBackend]): 63 | """Register a new storage backend type 64 | 65 | Args: 66 | ---- 67 | storage_type: Unique identifier for the backend type 68 | backend_cls: Backend class to register 69 | 70 | Raises: 71 | ------ 72 | ValueError: If the storage type is already registered 73 | 74 | """ 75 | if storage_type in cls._backends: 76 | raise ValueError(f"Storage type already registered: {storage_type}") 77 | 78 | cls._backends[storage_type] = backend_cls 79 | -------------------------------------------------------------------------------- /legion/monitoring/storage/memory.py: -------------------------------------------------------------------------------- 1 | """Memory storage backend implementation""" 2 | 3 | import logging 4 | import threading 5 | from datetime import datetime, timedelta, timezone 6 | from typing import List, Optional 7 | 8 | from ..events.base import Event 9 | from .base import StorageBackend 10 | from .config import StorageConfig 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | def _ensure_timezone(dt: datetime) -> datetime: 15 | """Ensure a datetime has a timezone 16 | 17 | Args: 18 | ---- 19 | dt: The datetime to check 20 | 21 | Returns: 22 | ------- 23 | The datetime with UTC timezone if it was naive 24 | 25 | """ 26 | if dt.tzinfo is None: 27 | return dt.replace(tzinfo=timezone.utc) 28 | return dt 29 | 30 | class MemoryStorageBackend(StorageBackend): 31 | """In-memory implementation of event storage""" 32 | 33 | def __init__(self, config: Optional[StorageConfig] = None): 34 | """Initialize the storage backend 35 | 36 | Args: 37 | ---- 38 | config: Optional storage configuration 39 | 40 | """ 41 | self._events: List[Event] = [] 42 | self._lock = threading.Lock() 43 | self._config = config or StorageConfig() 44 | self._cleanup_thread = None 45 | self._stop_cleanup = threading.Event() 46 | self._start_cleanup_task() 47 | 48 | def _start_cleanup_task(self): 49 | """Start the background cleanup task""" 50 | def cleanup_loop(): 51 | while not self._stop_cleanup.is_set(): 52 | try: 53 | # Wait first to allow initial events to be stored 54 | self._stop_cleanup.wait(self._config.cleanup_interval * 60) 55 | if self._stop_cleanup.is_set(): 56 | break 57 | 58 | # Perform cleanup 59 | self.cleanup(self._config.retention_days) 60 | 61 | # Check max events 62 | if self._config.max_events: 63 | with self._lock: 64 | if len(self._events) > self._config.max_events: 65 | self._events = self._events[-self._config.max_events:] 66 | except Exception as e: 67 | logger.error(f"Error in cleanup task: {e}") 68 | 69 | self._cleanup_thread = threading.Thread(target=cleanup_loop, daemon=True) 70 | self._cleanup_thread.start() 71 | 72 | def store_event(self, event: Event) -> None: 73 | """Store an event in memory 74 | 75 | Args: 76 | ---- 77 | event: The event to store 78 | 79 | """ 80 | with self._lock: 81 | self._events.append(event) 82 | 83 | # Check max events limit 84 | if self._config.max_events and len(self._events) > self._config.max_events: 85 | self._events.pop(0) # Remove oldest event 86 | 87 | def get_events(self, 88 | event_types: Optional[List[Event]] = None, 89 | start_time: Optional[datetime] = None, 90 | end_time: Optional[datetime] = None) -> List[Event]: 91 | """Get events matching the specified criteria 92 | 93 | Args: 94 | ---- 95 | event_types: Optional list of events to filter by type 96 | start_time: Optional start time to filter by 97 | end_time: Optional end time to filter by 98 | 99 | Returns: 100 | ------- 101 | List of matching events 102 | 103 | """ 104 | with self._lock: 105 | events = self._events.copy() 106 | 107 | if start_time: 108 | start_time = _ensure_timezone(start_time) 109 | events = [e for e in events if _ensure_timezone(e.timestamp) >= start_time] 110 | 111 | if end_time: 112 | end_time = _ensure_timezone(end_time) 113 | events = [e for e in events if _ensure_timezone(e.timestamp) <= end_time] 114 | 115 | if event_types: 116 | # Filter by event type value 117 | type_values = [e.event_type.value for e in event_types] 118 | events = [e for e in events if e.event_type.value in type_values] 119 | 120 | return events 121 | 122 | def clear(self) -> None: 123 | """Clear all events from memory""" 124 | with self._lock: 125 | self._events.clear() 126 | 127 | def cleanup(self, retention_days: Optional[int] = None) -> None: 128 | """Remove events older than the retention period 129 | 130 | Args: 131 | ---- 132 | retention_days: Number of days to retain events for. If not specified, 133 | uses the configured value. 134 | 135 | """ 136 | retention_days = retention_days or self._config.retention_days 137 | cutoff = datetime.now(timezone.utc) - timedelta(days=retention_days) 138 | 139 | with self._lock: 140 | self._events = [e for e in self._events if _ensure_timezone(e.timestamp) >= cutoff] 141 | 142 | def __del__(self): 143 | """Cleanup when the backend is destroyed""" 144 | if self._cleanup_thread: 145 | self._stop_cleanup.set() 146 | self._cleanup_thread.join(timeout=1) 147 | -------------------------------------------------------------------------------- /legion/providers/__init__.py: -------------------------------------------------------------------------------- 1 | """Provider registry and factory functions""" 2 | 3 | from typing import Dict, List, Optional, Type 4 | 5 | from ..interface.base import LLMInterface 6 | from ..interface.schemas import ProviderConfig 7 | from .anthropic import AnthropicFactory 8 | from .factory import ProviderFactory 9 | from .gemini import GeminiFactory 10 | from .groq import GroqFactory 11 | from .ollama import OllamaFactory 12 | 13 | # Import provider implementations 14 | from .openai import OpenAIFactory 15 | 16 | # Registry of provider factories 17 | PROVIDER_FACTORIES: Dict[str, Type[ProviderFactory]] = { 18 | "openai": OpenAIFactory, 19 | "anthropic": AnthropicFactory, 20 | "groq": GroqFactory, 21 | "ollama": OllamaFactory, 22 | "gemini": GeminiFactory 23 | } 24 | 25 | def get_provider( 26 | name: str, 27 | config: Optional[ProviderConfig] = None, 28 | **kwargs 29 | ) -> LLMInterface: 30 | """Get an instance of a provider by name using the factory pattern 31 | 32 | Args: 33 | ---- 34 | name: Name of the provider 35 | config: Provider configuration 36 | **kwargs: Additional provider-specific arguments 37 | 38 | Returns: 39 | ------- 40 | Configured provider instance 41 | 42 | Raises: 43 | ------ 44 | ValueError: If provider name is not recognized 45 | 46 | """ 47 | if name not in PROVIDER_FACTORIES: 48 | raise ValueError( 49 | f"Unknown provider '{name}'. Available providers: {list(PROVIDER_FACTORIES.keys())}" 50 | ) 51 | 52 | factory = PROVIDER_FACTORIES[name]() 53 | return factory.create_provider(config=config, **kwargs) 54 | 55 | def available_providers() -> List[str]: 56 | """Get list of available provider names""" 57 | return list(PROVIDER_FACTORIES.keys()) 58 | -------------------------------------------------------------------------------- /legion/providers/factory.py: -------------------------------------------------------------------------------- 1 | """Provider factory implementation""" 2 | 3 | from abc import ABC, abstractmethod 4 | from typing import Optional 5 | 6 | from ..interface.base import LLMInterface 7 | from ..interface.schemas import ProviderConfig 8 | 9 | 10 | class ProviderFactory(ABC): 11 | """Abstract factory for creating LLM providers""" 12 | 13 | @abstractmethod 14 | def create_provider(self, config: Optional[ProviderConfig] = None, **kwargs) -> LLMInterface: 15 | """Create a new provider instance""" 16 | pass 17 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Legion Documentation 2 | theme: 3 | name: material 4 | features: 5 | - navigation.sections 6 | - navigation.expand 7 | - search.suggest 8 | - search.highlight 9 | - content.code.copy 10 | palette: 11 | # Palette toggle for dark mode 12 | - media: "(prefers-color-scheme: dark)" 13 | scheme: slate 14 | primary: custom 15 | accent: custom 16 | toggle: 17 | icon: material/brightness-4 18 | name: Switch to light mode 19 | # Palette toggle for light mode 20 | - media: "(prefers-color-scheme: light)" 21 | scheme: default 22 | primary: custom 23 | accent: custom 24 | toggle: 25 | icon: material/brightness-7 26 | name: Switch to dark mode 27 | font: 28 | text: Roboto 29 | code: Roboto Mono 30 | icon: 31 | repo: fontawesome/brands/github 32 | 33 | site_url: https://LLMP-io.github.io/Legion/ 34 | repo_url: https://github.com/LLMP-io/Legion 35 | repo_name: LLMP-io/Legion 36 | 37 | extra: 38 | generator: false 39 | social: 40 | - icon: fontawesome/brands/github 41 | link: https://github.com/LLMP-io/Legion 42 | name: Legion on GitHub 43 | 44 | extra_css: 45 | - stylesheets/footer.css 46 | - stylesheets/content.css 47 | - stylesheets/navbar.css 48 | - stylesheets/sidebar.css 49 | - stylesheets/theme.css 50 | 51 | markdown_extensions: 52 | - pymdownx.highlight: 53 | anchor_linenums: true 54 | - pymdownx.inlinehilite 55 | - pymdownx.snippets 56 | - pymdownx.superfences 57 | - admonition 58 | - pymdownx.details 59 | - attr_list 60 | - md_in_html 61 | 62 | nav: 63 | - Home: index.md 64 | - Getting Started: 65 | - Installation: getting-started/installation.md 66 | - Quick Start: getting-started/quick-start.md 67 | - Basic Concepts: getting-started/basic-concepts.md 68 | - First Agent: getting-started/first-agent.md 69 | 70 | - Core Concepts: 71 | - Agents: 72 | - Agent Definition: core-concepts/agents/agent-definition.md 73 | - System Prompts: core-concepts/agents/system-prompts.md 74 | - Agent State: core-concepts/agents/agent-state.md 75 | - Agent Communication: core-concepts/agents/agent-communication.md 76 | - Tools: 77 | - Tool Decorators: core-concepts/tools/tool-decorators.md 78 | - Parameter Injection: core-concepts/tools/parameter-injection.md 79 | - Tool Development: core-concepts/tools/tool-development.md 80 | - Built-in Tools: core-concepts/tools/built-in-tools.md 81 | - Providers: 82 | - Supported LLM Providers: core-concepts/providers/supported-providers.md 83 | - Provider Configuration: core-concepts/providers/provider-configuration.md 84 | - Examples & Tutorials: 85 | - Basic Examples: examples/basic-examples.md 86 | - Build a chatbot: examples/chatbot.md 87 | - Contributing: 88 | - Contribution Guide: contributing/index.md 89 | - Project Structure: contributing/project-structure.md 90 | - Architecture Overview: contributing/architecture.md 91 | - Development Setup: contributing/setup-development.md 92 | - Troubleshooting: contributing/troubleshooting.md 93 | - Documentation Guide: DOCUMENTATION_GUIDE.md 94 | - Release Notes: 95 | - Version History: release-notes/version-history.md 96 | - Roadmap: release-notes/roadmap.md 97 | 98 | copyright: © 2025 Legion 99 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | python_version = 3.11 3 | warn_return_any = True 4 | warn_unused_configs = True 5 | disallow_untyped_defs = True 6 | disallow_incomplete_defs = True 7 | check_untyped_defs = True 8 | disallow_untyped_decorators = False 9 | no_implicit_optional = True 10 | warn_redundant_casts = True 11 | warn_unused_ignores = True 12 | warn_no_return = True 13 | warn_unreachable = True 14 | strict_equality = True 15 | 16 | # Allow untyped defs in tests for flexibility 17 | [mypy-tests.*] 18 | disallow_untyped_defs = False 19 | disallow_incomplete_defs = False 20 | check_untyped_defs = True 21 | 22 | # Pydantic settings 23 | [mypy.plugins.pydantic.*] 24 | init_forbid_extra = True 25 | init_typed = True 26 | warn_required_dynamic_aliases = True 27 | warn_untyped_fields = True 28 | 29 | # Third-party package settings 30 | [mypy.plugins.*] 31 | ignore_missing_imports = True 32 | 33 | # Specific package overrides 34 | [mypy-pytest.*] 35 | ignore_missing_imports = True 36 | 37 | [mypy-rich.*] 38 | ignore_missing_imports = True 39 | 40 | [mypy-dotenv.*] 41 | ignore_missing_imports = True 42 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "legion-ai" 3 | version = "0.1.5" 4 | description = "Legion is a flexible and provider-agnostic framework designed to simplify the creation of sophisticated multi-agent systems" 5 | authors = [ 6 | "Zain Imdad, Hayden Smith ", 7 | ] 8 | license = "MIT" # Change from LICENSE to MIT 9 | readme = "README.md" 10 | homepage = "https://github.com/LLMP-io/Legion" 11 | repository = "https://github.com/LLMP-io/Legion" 12 | keywords = ["ai", "agents", "multi-agent", "llm"] 13 | classifiers = [ 14 | "Programming Language :: Python :: 3", 15 | "License :: OSI Approved :: MIT License", 16 | "Operating System :: OS Independent", 17 | "Development Status :: 4 - Beta", 18 | "Intended Audience :: Developers", 19 | "Topic :: Software Development :: Libraries :: Python Modules", 20 | "Topic :: Scientific/Engineering :: Artificial Intelligence" 21 | ] 22 | 23 | packages = [{ include = "legion" }] 24 | 25 | [tool.poe.tasks] 26 | test = "pytest -v" 27 | typecheck = "python scripts/typecheck.py" 28 | security = "python scripts/security.py" 29 | 30 | [tool.poetry.dependencies] 31 | python = "^3.11" 32 | annotated-types = "0.6.0" 33 | anthropic = "0.30.1" 34 | asyncio = "3.4.3" 35 | bandit = "1.7.8" 36 | colorama = "0.4.6" 37 | flake8 = "7.0.0" 38 | flake8-docstrings = "1.7.0" 39 | flake8-import-order = "0.18.2" 40 | flake8-quotes = "3.4.0" 41 | google-genai = "0.3.0" 42 | google-generativeai = "0.8.2" 43 | groq = "0.4.2" 44 | huggingface-hub = "0.26.3" 45 | mypy = "1.9.0" 46 | ollama = "0.4.2" 47 | openai = "1.55.0" 48 | psutil = "5.9.8" 49 | pydantic = "2.10.2" 50 | python-dotenv = "1.0.1" 51 | ruff = "0.3.0" 52 | safety = "2.3.5" 53 | types-setuptools = "69.2.0.20240317" 54 | toml = "^0.10.2" 55 | 56 | [tool.poetry.group.dev.dependencies] 57 | pre-commit = "4.0.1" 58 | pytest = "8.2.2" 59 | pytest-asyncio = "0.23.6" 60 | # poethepoet = "^0.32.1" # Uncomment if you want to use the `poethepoet` tool 61 | 62 | [build-system] 63 | requires = ["poetry-core"] 64 | build-backend = "poetry.core.masonry.api" 65 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests 3 | python_files = test_*.py 4 | python_classes = Test* 5 | python_functions = test_* 6 | addopts = -v 7 | markers = 8 | asyncio: mark test as async/asyncio test 9 | asyncio_mode = strict 10 | asyncio_default_fixture_loop_scope = function 11 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | annotated-types==0.6.0 2 | anthropic==0.30.1 3 | boto3==1.36.3 4 | asyncio==3.4.3 5 | bandit==1.7.8 6 | colorama==0.4.6 7 | flake8==7.0.0 8 | flake8-docstrings==1.7.0 9 | flake8-import-order==0.18.2 10 | flake8-quotes==3.4.0 11 | google-genai==0.3.0 12 | google-generativeai==0.8.2 13 | groq==0.4.2 14 | huggingface-hub==0.26.3 15 | mypy==1.9.0 16 | ollama==0.4.2 17 | openai==1.55.0 18 | pre-commit==4.0.1 19 | psutil==5.9.8 20 | pydantic==2.10.2 21 | pytest==8.2.2 22 | pytest-asyncio==0.23.6 23 | python-dotenv==1.0.1 24 | ruff==0.3.0 25 | safety==2.3.5 26 | types-setuptools==69.2.0.20240317 27 | -------------------------------------------------------------------------------- /scripts/setup_env.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to set up development environment.""" 3 | 4 | import os 5 | import platform 6 | import shutil 7 | import subprocess 8 | import sys 9 | import venv 10 | from pathlib import Path 11 | 12 | 13 | def run_command(cmd: list[str], check: bool = True) -> subprocess.CompletedProcess: 14 | """Run a command and return the result.""" 15 | return subprocess.run(cmd, check=check, capture_output=True, text=True) 16 | 17 | 18 | def setup_environment() -> int: 19 | """Set up the development environment.""" 20 | try: 21 | project_root = Path(__file__).parent.parent 22 | venv_path = project_root / "venv" 23 | 24 | print("Setting up development environment...") 25 | 26 | # Deactivate current virtual environment if active 27 | if "VIRTUAL_ENV" in os.environ: 28 | print("\n1. Deactivating current virtual environment...") 29 | # Clear the VIRTUAL_ENV variable 30 | del os.environ["VIRTUAL_ENV"] 31 | 32 | # Remove existing virtual environment if it exists 33 | if venv_path.exists(): 34 | print("\n2. Removing existing virtual environment...") 35 | shutil.rmtree(venv_path) 36 | 37 | # Create virtual environment 38 | print("\n3. Creating virtual environment...") 39 | venv.create(venv_path, with_pip=True) 40 | 41 | # Determine the Python executable path 42 | if platform.system() == "Windows": 43 | python_path = venv_path / "Scripts" / "python.exe" 44 | activate_path = venv_path / "Scripts" / "activate" 45 | else: 46 | python_path = venv_path / "bin" / "python" 47 | activate_path = venv_path / "bin" / "activate" 48 | 49 | # Upgrade pip 50 | print("\n4. Upgrading pip...") 51 | run_command([str(python_path), "-m", "pip", "install", "--upgrade", "pip"]) 52 | 53 | # Install dependencies 54 | print("\n5. Installing dependencies...") 55 | run_command([str(python_path), "-m", "pip", "install", "-r", str(project_root / "requirements.txt")]) 56 | 57 | # Install Legion package in editable mode 58 | print("\n6. Installing Legion package in development mode...") 59 | run_command([str(python_path), "-m", "pip", "install", "-e", str(project_root)]) 60 | 61 | # Install pre-commit 62 | print("\n7. Installing pre-commit...") 63 | run_command([str(python_path), "-m", "pip", "install", "pre-commit"]) 64 | 65 | # Clear pre-commit cache 66 | print("\n8. Clearing pre-commit cache...") 67 | run_command([str(python_path), "-m", "pre_commit", "clean"]) 68 | 69 | # Install pre-commit hooks 70 | print("\n9. Installing pre-commit hooks...") 71 | run_command([str(python_path), "-m", "pre_commit", "install"]) 72 | 73 | print("\nEnvironment setup complete! 🎉") 74 | print("\nTo activate the virtual environment:") 75 | if platform.system() == "Windows": 76 | print(f" {activate_path}") 77 | else: 78 | print(f" source {activate_path}") 79 | 80 | return 0 81 | 82 | except subprocess.CalledProcessError as e: 83 | print(f"\nError during setup: {e}", file=sys.stderr) 84 | print(f"Command output: {e.output}", file=sys.stderr) 85 | return 1 86 | except Exception as e: 87 | print(f"\nUnexpected error: {e}", file=sys.stderr) 88 | return 1 89 | 90 | 91 | if __name__ == "__main__": 92 | sys.exit(setup_environment()) 93 | -------------------------------------------------------------------------------- /scripts/setup_hooks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to set up git hooks for development.""" 3 | 4 | import subprocess 5 | import sys 6 | from pathlib import Path 7 | 8 | 9 | def install_pre_commit() -> int: 10 | """Install pre-commit and configure hooks.""" 11 | try: 12 | # Install pre-commit if not already installed 13 | subprocess.run( 14 | [sys.executable, "-m", "pip", "install", "pre-commit"], 15 | check=True 16 | ) 17 | 18 | # Install the git hooks 19 | subprocess.run( 20 | ["pre-commit", "install"], 21 | check=True 22 | ) 23 | 24 | print("\nPre-commit hooks installed successfully!") 25 | print("\nThe following checks will run before each commit:") 26 | print("1. Non-integration tests") 27 | print("2. Security scanning") 28 | print("\nTo bypass hooks temporarily, use: git commit --no-verify") 29 | 30 | return 0 31 | 32 | except subprocess.CalledProcessError as e: 33 | print(f"Error setting up pre-commit hooks: {e}", file=sys.stderr) 34 | return 1 35 | 36 | 37 | if __name__ == "__main__": 38 | sys.exit(install_pre_commit()) 39 | -------------------------------------------------------------------------------- /scripts/sync_requirements.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Sync dependencies between pyproject.toml, setup.py, and requirements.txt.""" 3 | import toml 4 | import re 5 | from pathlib import Path 6 | 7 | def read_pyproject_deps(): 8 | """Read dependencies from pyproject.toml.""" 9 | with open('pyproject.toml') as f: 10 | pyproject = toml.load(f) 11 | 12 | deps = pyproject['tool']['poetry']['dependencies'].items() 13 | main_deps = {k: v for k, v in deps if k != 'python'} 14 | 15 | dev_deps = pyproject['tool']['poetry']['group']['dev']['dependencies'] 16 | 17 | return main_deps, dev_deps 18 | 19 | def update_requirements_txt(main_deps, dev_deps): 20 | """Update requirements.txt with all dependencies.""" 21 | all_deps = [] 22 | 23 | for package, version in main_deps.items(): 24 | version = version.replace('^', '') # Remove poetry's ^ operator 25 | all_deps.append(f"{package}=={version}") 26 | 27 | for package, version in dev_deps.items(): 28 | version = version.replace('^', '') 29 | all_deps.append(f"{package}=={version}") 30 | 31 | all_deps.sort() 32 | 33 | with open('requirements.txt', 'w') as f: 34 | f.write('\n'.join(all_deps) + '\n') 35 | 36 | def update_setup_py(main_deps, dev_deps): 37 | """Update setup.py with dependencies.""" 38 | setup_path = Path('setup.py') 39 | setup_content = setup_path.read_text() 40 | 41 | main_deps_formatted = [f' "{package}=={version}",' for package, version in main_deps.items()] 42 | main_deps_str = '\n'.join(main_deps_formatted) 43 | 44 | dev_deps_formatted = [f' "{package}={version}",' for package, version in dev_deps.items()] 45 | dev_deps_str = '\n'.join(dev_deps_formatted) 46 | 47 | setup_content = re.sub( 48 | r'install_requires=\[(.*?)\]', 49 | f'install_requires=[\n{main_deps_str}\n ]', 50 | setup_content, 51 | flags=re.DOTALL 52 | ) 53 | 54 | setup_content = re.sub( 55 | r'"dev":\s*\[(.*?)\]', 56 | f'"dev": [\n{dev_deps_str}\n ]', 57 | setup_content, 58 | flags=re.DOTALL 59 | ) 60 | 61 | setup_path.write_text(setup_content) 62 | 63 | def main(): 64 | """Main function to sync all dependency files.""" 65 | try: 66 | # Read dependencies from pyproject.toml 67 | main_deps, dev_deps = read_pyproject_deps() 68 | 69 | # Update requirements.txt 70 | update_requirements_txt(main_deps, dev_deps) 71 | 72 | # Update setup.py 73 | update_setup_py(main_deps, dev_deps) 74 | 75 | print("✅ Successfully synced dependencies!") 76 | return 0 77 | except Exception as e: 78 | print(f"❌ Error syncing dependencies: {str(e)}") 79 | return 1 80 | 81 | if __name__ == '__main__': 82 | exit(main()) 83 | -------------------------------------------------------------------------------- /scripts/typecheck.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script to run static type checking with mypy.""" 3 | 4 | import subprocess 5 | import sys 6 | from pathlib import Path 7 | from typing import List, Optional, Sequence 8 | 9 | 10 | def run_mypy(paths: Optional[Sequence[str]] = None) -> int: 11 | """Run mypy type checker on specified paths. 12 | 13 | Args: 14 | paths: List of paths to check. If None, checks default paths. 15 | 16 | Returns: 17 | Exit code from mypy (0 for success, non-zero for errors) 18 | """ 19 | project_root = Path(__file__).parent.parent 20 | 21 | if paths is None: 22 | paths = ['legion'] 23 | 24 | cmd = [ 25 | sys.executable, 26 | '-m', 'mypy', 27 | '--config-file', str(project_root / 'mypy.ini'), 28 | '--show-error-codes', 29 | '--pretty', 30 | '--show-column-numbers', 31 | '--show-error-context', 32 | '--no-error-summary', 33 | '--hide-error-codes', 34 | *paths 35 | ] 36 | 37 | print("\n" + "=" * 80) 38 | print("Running type checking...") 39 | print(f"Command: {' '.join(cmd)}") 40 | print("=" * 80 + "\n") 41 | 42 | try: 43 | # Run mypy 44 | result = subprocess.run( 45 | cmd, 46 | cwd=project_root, 47 | capture_output=True, 48 | text=True, 49 | check=False 50 | ) 51 | 52 | # Always print command output for visibility 53 | if result.stdout: 54 | print("Output:") 55 | print("-" * 40) 56 | print(result.stdout.strip()) 57 | 58 | if result.stderr: 59 | print("\nErrors:") 60 | print("-" * 40) 61 | print(result.stderr.strip(), file=sys.stderr) 62 | 63 | # Count actual errors (ignore notes and other info) 64 | error_count = len([ 65 | line for line in result.stdout.split('\n') 66 | if line.strip() and ': error:' in line 67 | ]) 68 | 69 | print("\n" + "=" * 40) 70 | if error_count > 0: 71 | print(f"Found {error_count} type issues") 72 | else: 73 | print("No type issues found!") 74 | print("=" * 40 + "\n") 75 | 76 | if result.returncode != 0: 77 | print(f"Type checking failed with exit code: {result.returncode}", file=sys.stderr) 78 | 79 | return result.returncode 80 | 81 | except subprocess.CalledProcessError as e: 82 | print(f"Error running mypy: {str(e)}", file=sys.stderr) 83 | if e.stdout: 84 | print("\nOutput:", file=sys.stderr) 85 | print(e.stdout.decode().strip(), file=sys.stderr) 86 | if e.stderr: 87 | print("\nErrors:", file=sys.stderr) 88 | print(e.stderr.decode().strip(), file=sys.stderr) 89 | return 1 90 | except Exception as e: 91 | print(f"Unexpected error during type checking: {str(e)}", file=sys.stderr) 92 | return 1 93 | 94 | 95 | def main() -> int: 96 | """Run the type checking process.""" 97 | try: 98 | return run_mypy() 99 | except Exception as e: 100 | print(f"Fatal error in type checking: {str(e)}", file=sys.stderr) 101 | return 1 102 | 103 | 104 | if __name__ == '__main__': 105 | sys.exit(main()) 106 | -------------------------------------------------------------------------------- /setup.py.bak: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="legion-ai", 5 | version="0.1.4", 6 | description="A flexible and provider-agnostic framework for building AI agent systems.", 7 | long_description=open("README.md").read(), 8 | long_description_content_type="text/markdown", 9 | url="https://github.com/LLMP-io/Legion", 10 | author="Zain Imdad, Hayden Smith", 11 | author_email="zain@llmp.io, hayden@llmp.io", 12 | maintainer="Zain Imdad, Hayden Smith", 13 | maintainer_email="zain@llmp.io, hayden@llmp.io", 14 | license="MIT", 15 | project_urls={ 16 | "Homepage": "https://github.com/LLMP-io/Legion", 17 | "Bug Tracker": "https://github.com/LLMP-io/Legion/issues", 18 | }, 19 | packages=find_packages(include=["legion", "legion.*"]), 20 | classifiers=[ 21 | "Programming Language :: Python :: 3", 22 | "License :: OSI Approved :: MIT License", 23 | "Operating System :: OS Independent", 24 | "Development Status :: 4 - Beta", 25 | "Intended Audience :: Developers", 26 | "Topic :: Software Development :: Libraries :: Python Modules", 27 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 28 | ], 29 | python_requires=">=3.11", 30 | install_requires=[ 31 | "annotated-types==0.6.0", 32 | "anthropic==0.30.1", 33 | "asyncio==3.4.3", 34 | "bandit==1.7.8", 35 | "colorama==0.4.6", 36 | "flake8==7.0.0", 37 | "flake8-docstrings==1.7.0", 38 | "flake8-import-order==0.18.2", 39 | "flake8-quotes==3.4.0", 40 | "google-genai==0.3.0", 41 | "google-generativeai==0.8.2", 42 | "groq==0.4.2", 43 | "huggingface-hub==0.26.3", 44 | "mypy==1.9.0", 45 | "ollama==0.4.2", 46 | "openai==1.55.0", 47 | "psutil==5.9.8", 48 | "pydantic==2.10.2", 49 | "python-dotenv==1.0.1", 50 | "ruff==0.3.0", 51 | "safety==2.3.5", 52 | "types-setuptools==69.2.0.20240317", 53 | ], 54 | extras_require={ 55 | "dev": [ 56 | "pre-commit=4.0.1", 57 | "pytest=8.2.2", 58 | "pytest-asyncio=0.23.6", 59 | # "poethepoet=^0.32.1", # Uncomment if you want to use the `poethepoet` tool 60 | ], 61 | }, 62 | entry_points={ 63 | "console_scripts": [ 64 | # 'legion-cli=legion.cli:main', # Maybe add a CLI entry point 65 | ], 66 | }, 67 | include_package_data=True, 68 | ) 69 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/__init__.py -------------------------------------------------------------------------------- /tests/agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/agents/__init__.py -------------------------------------------------------------------------------- /tests/agents/run_tests.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | from dotenv import load_dotenv 5 | 6 | # Load environment variables 7 | load_dotenv() 8 | 9 | if __name__ == "__main__": 10 | # Run all tests in the directory 11 | pytest.main([ 12 | os.path.dirname(__file__), # Run tests in current directory 13 | "-v", # Verbose output 14 | "--asyncio-mode=auto" # Enable async test support 15 | ]) 16 | -------------------------------------------------------------------------------- /tests/blocks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/blocks/__init__.py -------------------------------------------------------------------------------- /tests/blocks/run_tests.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | from dotenv import load_dotenv 5 | 6 | # Load environment variables 7 | load_dotenv() 8 | 9 | if __name__ == "__main__": 10 | # Run all tests in the directory 11 | pytest.main([ 12 | os.path.dirname(__file__), # Run tests in current directory 13 | "-v", # Verbose output 14 | "--asyncio-mode=auto" # Enable async test support 15 | ]) 16 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from dotenv import load_dotenv 5 | 6 | 7 | def pytest_configure(config): 8 | """Called before test collection, ensures environment is properly configured""" 9 | # Get the project root directory (where .env file is located) 10 | root_dir = Path(__file__).parent.parent 11 | 12 | # Load environment variables from .env file 13 | env_file = root_dir / ".env" 14 | if env_file.exists(): 15 | load_dotenv(env_file) 16 | else: 17 | raise RuntimeError(f"No .env file found at {env_file}") 18 | 19 | # Verify required environment variables 20 | required_vars = ["OPENAI_API_KEY"] 21 | missing_vars = [var for var in required_vars if not os.getenv(var)] 22 | 23 | if missing_vars: 24 | raise RuntimeError(f"Missing required environment variables: {', '.join(missing_vars)}") 25 | 26 | # Register custom markers 27 | config.addinivalue_line( 28 | "markers", "integration: mark test as requiring external API access" 29 | ) 30 | -------------------------------------------------------------------------------- /tests/graph/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/graph/__init__.py -------------------------------------------------------------------------------- /tests/graph/nodes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/graph/nodes/__init__.py -------------------------------------------------------------------------------- /tests/graph/run_tests.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | 7 | def run_graph_tests(): 8 | """Run all graph system tests""" 9 | test_dir = Path(__file__).parent 10 | 11 | # Run tests with pytest 12 | args = [ 13 | str(test_dir), # Test directory 14 | "-v", # Verbose output 15 | "--tb=short", # Shorter traceback format 16 | "-p", "no:warnings", # Disable warning capture plugin 17 | "-p", "asyncio" # Enable asyncio plugin 18 | ] 19 | 20 | # Run pytest with our arguments 21 | return pytest.main(args) 22 | 23 | if __name__ == "__main__": 24 | sys.exit(run_graph_tests()) 25 | -------------------------------------------------------------------------------- /tests/graph/test_decorators.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from legion.graph.decorators import graph 4 | from legion.graph.graph import ExecutionMode, Graph, GraphConfig, LogLevel 5 | 6 | 7 | def test_basic_graph_creation(): 8 | """Test basic graph creation with minimal configuration""" 9 | 10 | @graph 11 | class SimpleGraph: 12 | """A simple test graph.""" 13 | 14 | pass 15 | 16 | instance = SimpleGraph() 17 | assert isinstance(instance.graph, Graph) 18 | assert instance.graph.metadata.name == "SimpleGraph" 19 | assert "simple test graph" in instance.graph.metadata.description.lower() 20 | 21 | def test_graph_with_config(): 22 | """Test graph creation with configuration""" 23 | 24 | @graph( 25 | name="custom_graph", 26 | debug_mode=True, 27 | log_level=LogLevel.DEBUG 28 | ) 29 | class ConfiguredGraph: 30 | """A configured test graph.""" 31 | 32 | pass 33 | 34 | instance = ConfiguredGraph() 35 | assert instance.graph.metadata.name == "custom_graph" 36 | assert instance.graph._config.debug_mode is True 37 | assert instance.graph._config.log_level == LogLevel.DEBUG 38 | 39 | def test_graph_with_class_config(): 40 | """Test graph creation with class-level configuration""" 41 | 42 | @graph 43 | class ConfigGraph: 44 | """A graph with class configuration.""" 45 | 46 | config = GraphConfig( 47 | execution_mode=ExecutionMode.PARALLEL, 48 | debug_mode=True, 49 | log_level=LogLevel.DEBUG 50 | ) 51 | 52 | instance = ConfigGraph() 53 | assert instance.graph._config.execution_mode == ExecutionMode.PARALLEL 54 | assert instance.graph._config.debug_mode is True 55 | assert instance.graph._config.log_level == LogLevel.DEBUG 56 | 57 | def test_graph_inheritance(): 58 | """Test graph inheritance""" 59 | 60 | @graph 61 | class BaseGraph: 62 | """Base graph class.""" 63 | 64 | config = GraphConfig(debug_mode=True) 65 | 66 | @graph 67 | class DerivedGraph(BaseGraph): 68 | """Derived graph class.""" 69 | 70 | config = GraphConfig(log_level=LogLevel.DEBUG) 71 | 72 | base = BaseGraph() 73 | derived = DerivedGraph() 74 | 75 | assert base.graph._config.debug_mode is True 76 | assert derived.graph._config.debug_mode is False # Not inherited 77 | assert derived.graph._config.log_level == LogLevel.DEBUG 78 | 79 | if __name__ == "__main__": 80 | pytest.main(["-v", __file__]) 81 | -------------------------------------------------------------------------------- /tests/graph/test_routing.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Optional 2 | 3 | import pytest 4 | 5 | from legion.graph.channels import LastValue 6 | from legion.graph.edges.routing import ( 7 | ChannelCondition, 8 | CustomCondition, 9 | StateCondition, 10 | ) 11 | from legion.graph.nodes.base import NodeBase 12 | from legion.graph.state import GraphState 13 | 14 | 15 | class TestNode(NodeBase): 16 | """Test node implementation""" 17 | 18 | def __init__(self, graph_state: GraphState): 19 | super().__init__(graph_state) 20 | self.create_output_channel("test", LastValue, type_hint=str) 21 | 22 | async def _execute(self, **kwargs) -> Optional[Dict[str, Any]]: 23 | """Execute the node""" 24 | return {"result": "test"} 25 | 26 | @pytest.fixture 27 | def graph_state(): 28 | """Create graph state fixture""" 29 | return GraphState() 30 | 31 | @pytest.fixture 32 | def test_node(graph_state): 33 | """Create test node fixture""" 34 | return TestNode(graph_state) 35 | 36 | @pytest.mark.asyncio 37 | async def test_state_condition(graph_state, test_node): 38 | """Test state-based routing condition""" 39 | # Set up condition 40 | condition = StateCondition( 41 | graph_state, 42 | "test_key", 43 | lambda x: x == "test_value" 44 | ) 45 | 46 | # Test false case 47 | graph_state.update_global_state({"test_key": "wrong_value"}) 48 | assert not await condition.evaluate(test_node) 49 | 50 | # Test true case 51 | graph_state.update_global_state({"test_key": "test_value"}) 52 | assert await condition.evaluate(test_node) 53 | 54 | # Test missing key 55 | graph_state.clear() 56 | assert not await condition.evaluate(test_node) 57 | 58 | @pytest.mark.asyncio 59 | async def test_channel_condition(graph_state, test_node): 60 | """Test channel-based routing condition""" 61 | # Set up condition 62 | condition = ChannelCondition( 63 | graph_state, 64 | "test", 65 | lambda x: x == "test_value" 66 | ) 67 | 68 | # Test false case 69 | test_node.get_output_channel("test").set("wrong_value") 70 | assert not await condition.evaluate(test_node) 71 | 72 | # Test true case 73 | test_node.get_output_channel("test").set("test_value") 74 | assert await condition.evaluate(test_node) 75 | 76 | # Test missing channel 77 | condition = ChannelCondition( 78 | graph_state, 79 | "non_existent", 80 | lambda x: True 81 | ) 82 | assert not await condition.evaluate(test_node) 83 | 84 | @pytest.mark.asyncio 85 | async def test_custom_condition(graph_state, test_node): 86 | """Test custom routing condition""" 87 | # Set up condition 88 | def custom_evaluator(node: NodeBase, state: GraphState, kwargs: Dict[str, Any]) -> bool: 89 | state_data = state.get_global_state() 90 | return ( 91 | state_data.get("test_key") == "test_value" and 92 | kwargs.get("extra") == "test_extra" 93 | ) 94 | 95 | condition = CustomCondition(graph_state, custom_evaluator) 96 | 97 | # Test false cases 98 | graph_state.update_global_state({"test_key": "wrong_value"}) 99 | assert not await condition.evaluate(test_node, extra="test_extra") 100 | 101 | graph_state.update_global_state({"test_key": "test_value"}) 102 | assert not await condition.evaluate(test_node, extra="wrong_extra") 103 | 104 | # Test true case 105 | assert await condition.evaluate(test_node, extra="test_extra") 106 | 107 | @pytest.mark.asyncio 108 | async def test_condition_checkpoint_restore(graph_state, test_node): 109 | """Test condition checkpoint and restore""" 110 | # Create and configure condition 111 | condition = StateCondition( 112 | graph_state, 113 | "test_key", 114 | lambda x: x == "test_value" 115 | ) 116 | 117 | # Create checkpoint 118 | checkpoint = condition.checkpoint() 119 | assert checkpoint["metadata"]["condition_type"] == "StateCondition" 120 | assert checkpoint["state_key"] == "test_key" 121 | 122 | # Create new condition and restore 123 | new_condition = StateCondition( 124 | graph_state, 125 | "temp_key", # Different key 126 | lambda x: x == "test_value" 127 | ) 128 | new_condition.restore(checkpoint) 129 | 130 | # Verify metadata restored 131 | assert new_condition.metadata.condition_type == "StateCondition" 132 | 133 | # Test condition still works 134 | graph_state.update_global_state({"test_key": "test_value"}) 135 | assert await new_condition.evaluate(test_node) 136 | -------------------------------------------------------------------------------- /tests/graph/test_state.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | import pytest 4 | 5 | from legion.graph.channels import LastValue, SharedState, ValueSequence 6 | from legion.graph.state import GraphState, GraphStateMetadata 7 | 8 | 9 | def test_graph_state_metadata(): 10 | """Test graph state metadata functionality""" 11 | state = GraphState() 12 | metadata = state.metadata 13 | 14 | assert isinstance(metadata, GraphStateMetadata) 15 | assert isinstance(metadata.created_at, datetime) 16 | assert metadata.version == 0 17 | assert isinstance(metadata.graph_id, str) 18 | 19 | 20 | def test_channel_creation(): 21 | """Test channel creation and management""" 22 | state = GraphState() 23 | 24 | # Create different types of channels 25 | last_value = state.create_channel(LastValue, "last_value", type_hint=str) 26 | value_sequence = state.create_channel(ValueSequence, "value_sequence", type_hint=int, max_size=3) 27 | shared_state = state.create_channel(SharedState, "shared_state") 28 | 29 | # Verify channels were created 30 | assert state.get_channel("last_value") == last_value 31 | assert state.get_channel("value_sequence") == value_sequence 32 | assert state.get_channel("shared_state") == shared_state 33 | 34 | # Verify channel listing 35 | channels = state.list_channels() 36 | assert set(channels) == {"last_value", "value_sequence", "shared_state"} 37 | 38 | # Test duplicate channel creation 39 | with pytest.raises(ValueError): 40 | state.create_channel(LastValue, "last_value") 41 | 42 | # Test channel deletion 43 | state.delete_channel("last_value") 44 | assert state.get_channel("last_value") is None 45 | assert len(state.list_channels()) == 2 46 | 47 | 48 | def test_global_state(): 49 | """Test global state management""" 50 | state = GraphState() 51 | 52 | # Test initial state 53 | assert state.get_global_state() == {} 54 | 55 | # Test state setting 56 | initial_state = {"key": "value"} 57 | state.set_global_state(initial_state) 58 | assert state.get_global_state() == initial_state 59 | 60 | # Test state update 61 | state.update_global_state({"new_key": "new_value"}) 62 | assert state.get_global_state() == {"key": "value", "new_key": "new_value"} 63 | 64 | 65 | def test_checkpointing(): 66 | """Test state checkpointing and restoration""" 67 | state = GraphState() 68 | 69 | # Setup some state 70 | state.create_channel(LastValue, "last_value", type_hint=str).set("test") 71 | state.create_channel(ValueSequence, "value_sequence", type_hint=int).append(1) 72 | state.set_global_state({"key": "value"}) 73 | 74 | # Create checkpoint 75 | checkpoint = state.checkpoint() 76 | 77 | # Create new state and restore 78 | new_state = GraphState() 79 | new_state.restore(checkpoint) 80 | 81 | # Verify channels were restored 82 | assert new_state.get_channel("last_value").get() == "test" 83 | assert new_state.get_channel("value_sequence").get() == [1] 84 | assert new_state.get_global_state() == {"key": "value"} 85 | 86 | # Verify metadata was restored 87 | assert new_state.metadata.version == state.metadata.version 88 | assert new_state.metadata.graph_id == state.metadata.graph_id 89 | 90 | 91 | def test_state_clearing(): 92 | """Test state clearing functionality""" 93 | state = GraphState() 94 | 95 | # Setup some state 96 | state.create_channel(LastValue, "test") 97 | state.set_global_state({"key": "value"}) 98 | 99 | # Clear state 100 | state.clear() 101 | 102 | # Verify state was cleared 103 | assert len(state.list_channels()) == 0 104 | assert state.get_global_state() == {} 105 | assert state.metadata.version == 0 106 | 107 | 108 | def test_state_merging(): 109 | """Test state merging functionality""" 110 | state1 = GraphState() 111 | state2 = GraphState() 112 | 113 | # Setup state1 114 | state1.create_channel(LastValue, "unique1", type_hint=str).set("test1") 115 | state1.set_global_state({"key1": "value1"}) 116 | 117 | # Setup state2 118 | state2.create_channel(LastValue, "unique2", type_hint=str).set("test2") 119 | state2.set_global_state({"key2": "value2"}) 120 | 121 | # Merge states 122 | state1.merge(state2) 123 | 124 | # Verify merged channels 125 | assert state1.get_channel("unique1").get() == "test1" 126 | assert state1.get_channel("unique2").get() == "test2" 127 | 128 | # Verify merged global state 129 | global_state = state1.get_global_state() 130 | assert global_state["key1"] == "value1" 131 | assert global_state["key2"] == "value2" 132 | 133 | 134 | def test_version_tracking(): 135 | """Test version tracking in metadata""" 136 | state = GraphState() 137 | initial_version = state.metadata.version 138 | 139 | # Version should increment on channel creation 140 | state.create_channel(LastValue, "test") 141 | assert state.metadata.version == initial_version + 1 142 | 143 | # Version should increment on channel deletion 144 | state.delete_channel("test") 145 | assert state.metadata.version == initial_version + 2 146 | 147 | # Version should increment on global state changes 148 | state.set_global_state({"key": "value"}) 149 | assert state.metadata.version == initial_version + 3 150 | 151 | state.update_global_state({"new_key": "new_value"}) 152 | assert state.metadata.version == initial_version + 4 153 | -------------------------------------------------------------------------------- /tests/groups/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/groups/__init__.py -------------------------------------------------------------------------------- /tests/groups/run_tests.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | from dotenv import load_dotenv 5 | 6 | # Load environment variables 7 | load_dotenv() 8 | 9 | if __name__ == "__main__": 10 | # Run all tests in the directory 11 | pytest.main([ 12 | os.path.dirname(__file__), # Run tests in current directory 13 | "-v", # Verbose output 14 | "--asyncio-mode=auto" # Enable async test support 15 | ]) 16 | -------------------------------------------------------------------------------- /tests/groups/test_chain_decorators.py: -------------------------------------------------------------------------------- 1 | """Tests for chain decorators""" 2 | 3 | import asyncio 4 | import os 5 | from typing import Any, Dict, List, Optional, Type, Union 6 | 7 | import pytest 8 | from dotenv import load_dotenv 9 | from pydantic import BaseModel 10 | 11 | # Load environment variables 12 | load_dotenv() 13 | 14 | # Check for OpenAI API key 15 | if not os.getenv("OPENAI_API_KEY"): 16 | print("\nError: OpenAI API key not found!") 17 | print("Please set your API key in the environment:") 18 | print(" export OPENAI_API_KEY=your_api_key_here") 19 | exit(1) 20 | 21 | from legion import agent, block, chain 22 | from legion.interface.schemas import Message, ModelResponse, Role, SystemPrompt, SystemPromptSection 23 | 24 | 25 | # Test schemas 26 | class InputData(BaseModel): 27 | text: str 28 | metadata: Dict[str, Any] 29 | 30 | class OutputData(BaseModel): 31 | summary: str 32 | word_count: int 33 | 34 | # Test blocks and agents 35 | @block(output_schema=InputData) 36 | def preprocess_data(text: str) -> Dict: 37 | """Preprocess input data""" 38 | return { 39 | "text": text.strip(), 40 | "metadata": {"length": len(text)} 41 | } 42 | 43 | @agent( 44 | model="openai:gpt-4o-mini", 45 | system_prompt=SystemPrompt(sections=[ 46 | SystemPromptSection( 47 | content="You are a test agent that processes data." 48 | ) 49 | ]) 50 | ) 51 | class TestAgent: 52 | """Test agent that processes data""" 53 | 54 | pass 55 | 56 | @block(output_schema=OutputData) 57 | def postprocess_data(text: str) -> Dict: 58 | """Postprocess output data""" 59 | return { 60 | "summary": text[:100], 61 | "word_count": len(text.split()) 62 | } 63 | 64 | # Test chain with error 65 | @agent( 66 | model="openai:gpt-4o-mini", 67 | system_prompt=SystemPrompt(sections=[ 68 | SystemPromptSection( 69 | content="You are a test agent that raises errors." 70 | ) 71 | ]) 72 | ) 73 | class ErrorAgent: 74 | """Test agent that raises errors""" 75 | 76 | async def _aprocess( 77 | self, 78 | message: Union[str, Dict[str, Any], Message], 79 | response_schema: Optional[Type[BaseModel]] = None, 80 | dynamic_values: Optional[Dict[str, str]] = None, 81 | injected_parameters: Optional[List[Dict[str, Any]]] = None, 82 | verbose: bool = False 83 | ) -> ModelResponse: 84 | """Process message and raise error""" 85 | # Create error response before raising 86 | response = ModelResponse( 87 | content="Error will be raised", 88 | raw_response={}, 89 | usage=None, 90 | role=Role.ASSISTANT 91 | ) 92 | # Add to memory if needed 93 | if hasattr(self, "memory"): 94 | self.memory.add_message(Message( 95 | role=Role.ASSISTANT, 96 | content=response.content 97 | )) 98 | raise ValueError("Test error") 99 | 100 | # Test chains 101 | @chain(name="BasicChain") 102 | class BasicChain: 103 | """Basic test chain""" 104 | 105 | members = [ 106 | preprocess_data, 107 | TestAgent, 108 | postprocess_data 109 | ] 110 | 111 | @chain(name="ErrorChain") 112 | class ErrorChain: 113 | """Chain that raises an error""" 114 | 115 | members = [ 116 | preprocess_data, 117 | ErrorAgent, 118 | postprocess_data 119 | ] 120 | 121 | def test_chain_error_handling(): 122 | """Test that chain properly handles errors""" 123 | chain = ErrorChain() 124 | 125 | with pytest.raises(ValueError) as exc_info: 126 | asyncio.run(chain.process("test input")) 127 | assert str(exc_info.value) == "Test error" 128 | 129 | async def test_chain_error_handling_async(): 130 | """Test that chain properly handles errors in async mode""" 131 | chain = ErrorChain() 132 | 133 | with pytest.raises(ValueError) as exc_info: 134 | await chain.process("test input") 135 | assert str(exc_info.value) == "Test error" 136 | 137 | def test_chain_processing(): 138 | """Test basic chain processing""" 139 | chain = BasicChain() 140 | result = asyncio.run(chain.process("test input")) 141 | assert isinstance(result.content, str) 142 | 143 | async def test_chain_processing_async(): 144 | """Test basic chain processing in async mode""" 145 | chain = BasicChain() 146 | result = await chain.process("test input") 147 | assert isinstance(result.content, str) 148 | 149 | if __name__ == "__main__": 150 | # Run sync tests 151 | test_chain_error_handling() 152 | test_chain_processing() 153 | 154 | # Run async tests 155 | asyncio.run(test_chain_error_handling_async()) 156 | asyncio.run(test_chain_processing_async()) 157 | -------------------------------------------------------------------------------- /tests/interface/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/interface/__init__.py -------------------------------------------------------------------------------- /tests/interface/run_tests.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | from dotenv import load_dotenv 5 | 6 | # Load environment variables 7 | load_dotenv() 8 | 9 | if __name__ == "__main__": 10 | # Run all tests in the directory 11 | pytest.main([ 12 | os.path.dirname(__file__), # Run tests in current directory 13 | "-v", # Verbose output 14 | "--asyncio-mode=auto" # Enable async test support 15 | ]) 16 | -------------------------------------------------------------------------------- /tests/monitoring/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/monitoring/__init__.py -------------------------------------------------------------------------------- /tests/monitoring/events/run_tests.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | from dotenv import load_dotenv 5 | 6 | # Load environment variables 7 | load_dotenv() 8 | 9 | if __name__ == "__main__": 10 | # Run all tests in the directory 11 | pytest.main([ 12 | os.path.dirname(__file__), # Run tests in current directory 13 | "-v", # Verbose output 14 | "--asyncio-mode=auto" # Enable async test support 15 | ]) 16 | -------------------------------------------------------------------------------- /tests/monitoring/run_tests.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | import pytest 5 | 6 | # Add the root directory to Python path 7 | root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | sys.path.insert(0, root_dir) 9 | 10 | def run_tests(): 11 | """Run all monitoring tests""" 12 | pytest.main(["-v", os.path.dirname(__file__)]) 13 | 14 | if __name__ == "__main__": 15 | run_tests() 16 | -------------------------------------------------------------------------------- /tests/monitoring/storage/test_base.py: -------------------------------------------------------------------------------- 1 | """Tests for storage backend base class""" 2 | 3 | 4 | import pytest 5 | 6 | from legion.monitoring.events.base import Event, EventCategory, EventType 7 | from legion.monitoring.storage.base import StorageBackend 8 | 9 | 10 | def test_storage_backend_is_abstract(): 11 | """Test that StorageBackend cannot be instantiated directly""" 12 | with pytest.raises(TypeError): 13 | StorageBackend() 14 | 15 | def test_concrete_implementation_required(): 16 | """Test that concrete implementations must implement all abstract methods""" 17 | 18 | class IncompleteStorage(StorageBackend): 19 | """Storage implementation missing required methods""" 20 | 21 | pass 22 | 23 | with pytest.raises(TypeError): 24 | IncompleteStorage() 25 | 26 | def test_minimal_implementation(): 27 | """Test that a complete implementation can be instantiated""" 28 | 29 | class MinimalStorage(StorageBackend): 30 | """Minimal storage implementation""" 31 | 32 | def store_event(self, event): 33 | pass 34 | 35 | def get_events(self, event_types=None, start_time=None, end_time=None): 36 | return [] 37 | 38 | def clear(self): 39 | pass 40 | 41 | def cleanup(self, retention_days): 42 | pass 43 | 44 | # Should not raise 45 | storage = MinimalStorage() 46 | 47 | # Test method signatures 48 | event = Event( 49 | event_type=EventType.AGENT, 50 | component_id="test", 51 | category=EventCategory.EXECUTION 52 | ) 53 | 54 | storage.store_event(event) 55 | assert storage.get_events() == [] 56 | storage.clear() 57 | storage.cleanup(7) 58 | -------------------------------------------------------------------------------- /tests/monitoring/storage/test_config.py: -------------------------------------------------------------------------------- 1 | """Tests for storage configuration""" 2 | 3 | import pytest 4 | from pydantic import ValidationError 5 | 6 | from legion.monitoring.storage.config import StorageConfig 7 | 8 | 9 | def test_default_config(): 10 | """Test default configuration values""" 11 | config = StorageConfig() 12 | assert config.retention_days == 30 13 | assert config.cleanup_interval == 60 14 | assert config.max_events is None 15 | 16 | def test_custom_config(): 17 | """Test custom configuration values""" 18 | config = StorageConfig( 19 | retention_days=7, 20 | cleanup_interval=30, 21 | max_events=1000 22 | ) 23 | assert config.retention_days == 7 24 | assert config.cleanup_interval == 30 25 | assert config.max_events == 1000 26 | 27 | def test_validation(): 28 | """Test configuration validation""" 29 | # Test invalid retention days 30 | with pytest.raises(ValidationError): 31 | StorageConfig(retention_days=0) 32 | 33 | with pytest.raises(ValidationError): 34 | StorageConfig(retention_days=-1) 35 | 36 | # Test invalid cleanup interval 37 | with pytest.raises(ValidationError): 38 | StorageConfig(cleanup_interval=0) 39 | 40 | with pytest.raises(ValidationError): 41 | StorageConfig(cleanup_interval=-1) 42 | 43 | # Test invalid max events 44 | with pytest.raises(ValidationError): 45 | StorageConfig(max_events=0) 46 | 47 | with pytest.raises(ValidationError): 48 | StorageConfig(max_events=-1) 49 | 50 | def test_model_dump(): 51 | """Test configuration serialization""" 52 | config = StorageConfig( 53 | retention_days=7, 54 | cleanup_interval=30, 55 | max_events=1000 56 | ) 57 | 58 | data = config.model_dump() 59 | assert data == { 60 | "retention_days": 7, 61 | "cleanup_interval": 30, 62 | "max_events": 1000 63 | } 64 | -------------------------------------------------------------------------------- /tests/monitoring/storage/test_factory.py: -------------------------------------------------------------------------------- 1 | """Tests for storage factory""" 2 | 3 | from typing import Optional 4 | 5 | import pytest 6 | 7 | from legion.monitoring.storage.base import StorageBackend 8 | from legion.monitoring.storage.config import StorageConfig 9 | from legion.monitoring.storage.factory import StorageFactory, StorageType 10 | from legion.monitoring.storage.memory import MemoryStorageBackend 11 | from legion.monitoring.storage.sqlite import SQLiteStorageBackend 12 | 13 | 14 | def test_create_memory_backend(): 15 | """Test creating memory backend""" 16 | backend = StorageFactory.create(StorageType.MEMORY) 17 | assert isinstance(backend, MemoryStorageBackend) 18 | 19 | def test_create_sqlite_backend(tmp_path): 20 | """Test creating SQLite backend""" 21 | backend = StorageFactory.create( 22 | StorageType.SQLITE, 23 | db_path=str(tmp_path / "test.db") 24 | ) 25 | assert isinstance(backend, SQLiteStorageBackend) 26 | 27 | def test_create_with_config(): 28 | """Test creating backend with configuration""" 29 | config = StorageConfig(retention_days=7) 30 | backend = StorageFactory.create(StorageType.MEMORY, config=config) 31 | assert backend._config == config 32 | 33 | def test_create_invalid_type(): 34 | """Test creating backend with invalid type""" 35 | with pytest.raises(ValueError): 36 | StorageFactory.create("invalid") 37 | 38 | def test_register_custom_backend(): 39 | """Test registering custom backend""" 40 | class CustomBackend(StorageBackend): 41 | def __init__(self, config: Optional[StorageConfig] = None): 42 | pass 43 | 44 | def store_event(self, event): 45 | pass 46 | 47 | def get_events(self, event_types=None, start_time=None, end_time=None): 48 | return [] 49 | 50 | def clear(self): 51 | pass 52 | 53 | def cleanup(self, retention_days=None): 54 | pass 55 | 56 | StorageFactory.register_backend("custom", CustomBackend) 57 | 58 | # Create instance of custom backend 59 | backend = StorageFactory.create("custom") 60 | assert isinstance(backend, CustomBackend) 61 | 62 | def test_register_duplicate_backend(): 63 | """Test registering duplicate backend type""" 64 | class DuplicateBackend(StorageBackend): 65 | def __init__(self, config: Optional[StorageConfig] = None): 66 | pass 67 | 68 | def store_event(self, event): 69 | pass 70 | 71 | def get_events(self, event_types=None, start_time=None, end_time=None): 72 | return [] 73 | 74 | def clear(self): 75 | pass 76 | 77 | def cleanup(self, retention_days=None): 78 | pass 79 | 80 | with pytest.raises(ValueError): 81 | StorageFactory.register_backend(StorageType.MEMORY.value, DuplicateBackend) 82 | 83 | def test_backend_kwargs(): 84 | """Test passing additional arguments to backend""" 85 | class CustomBackend(StorageBackend): 86 | def __init__(self, config: Optional[StorageConfig] = None, custom_arg: str = None): 87 | self.custom_arg = custom_arg 88 | 89 | def store_event(self, event): 90 | pass 91 | 92 | def get_events(self, event_types=None, start_time=None, end_time=None): 93 | return [] 94 | 95 | def clear(self): 96 | pass 97 | 98 | def cleanup(self, retention_days=None): 99 | pass 100 | 101 | StorageFactory.register_backend("custom_with_args", CustomBackend) 102 | 103 | backend = StorageFactory.create( 104 | "custom_with_args", 105 | custom_arg="test" 106 | ) 107 | assert backend.custom_arg == "test" 108 | -------------------------------------------------------------------------------- /tests/monitoring/test_metrics.py: -------------------------------------------------------------------------------- 1 | """Tests for system metrics collection""" 2 | 3 | import os 4 | import platform 5 | import sys 6 | import threading 7 | import time 8 | 9 | import pytest 10 | 11 | from legion.monitoring.events.base import Event, EventCategory, EventType 12 | from legion.monitoring.metrics import MetricsContext, SystemMetricsCollector 13 | 14 | 15 | def test_execution_context(): 16 | """Test execution context collection""" 17 | collector = SystemMetricsCollector() 18 | context = collector.get_execution_context() 19 | 20 | assert context["thread_id"] == threading.get_ident() 21 | assert context["process_id"] == os.getpid() 22 | assert context["host_name"] == platform.node() 23 | assert context["python_version"] == sys.version 24 | 25 | def test_system_metrics(): 26 | """Test system metrics collection""" 27 | collector = SystemMetricsCollector() 28 | 29 | # Sleep to ensure some system activity 30 | time.sleep(0.1) 31 | 32 | metrics = collector.get_system_metrics() 33 | 34 | assert "system_cpu_percent" in metrics 35 | assert "system_memory_percent" in metrics 36 | assert "system_disk_usage_bytes" in metrics 37 | assert "system_network_bytes_sent" in metrics 38 | assert "system_network_bytes_received" in metrics 39 | 40 | assert isinstance(metrics["system_cpu_percent"], float) 41 | assert isinstance(metrics["system_memory_percent"], float) 42 | assert isinstance(metrics["system_disk_usage_bytes"], int) 43 | assert isinstance(metrics["system_network_bytes_sent"], int) 44 | assert isinstance(metrics["system_network_bytes_received"], int) 45 | 46 | def test_process_metrics(): 47 | """Test process metrics collection""" 48 | collector = SystemMetricsCollector() 49 | metrics = collector.get_process_metrics() 50 | 51 | assert "memory_usage_bytes" in metrics 52 | assert "cpu_usage_percent" in metrics 53 | 54 | assert isinstance(metrics["memory_usage_bytes"], int) 55 | assert isinstance(metrics["cpu_usage_percent"], float) 56 | 57 | def test_metrics_context(): 58 | """Test metrics context manager""" 59 | event = Event( 60 | event_type=EventType.AGENT, 61 | component_id="test_agent", 62 | category=EventCategory.EXECUTION 63 | ) 64 | 65 | with MetricsContext(event): 66 | # Simulate some work 67 | time.sleep(0.1) 68 | 69 | # Check that metrics were collected 70 | assert event.duration_ms >= 100 # At least 100ms 71 | assert event.thread_id == threading.get_ident() 72 | assert event.process_id == os.getpid() 73 | assert event.host_name == platform.node() 74 | assert event.python_version == sys.version 75 | assert isinstance(event.system_cpu_percent, float) 76 | assert isinstance(event.system_memory_percent, float) 77 | assert isinstance(event.system_disk_usage_bytes, int) 78 | assert isinstance(event.system_network_bytes_sent, int) 79 | assert isinstance(event.system_network_bytes_received, int) 80 | assert isinstance(event.memory_usage_bytes, int) 81 | assert isinstance(event.cpu_usage_percent, float) 82 | 83 | def test_metrics_context_no_event(): 84 | """Test metrics context manager with no event""" 85 | # Should not raise any exceptions 86 | with MetricsContext(): 87 | time.sleep(0.1) 88 | 89 | def test_metrics_context_exception(): 90 | """Test metrics context manager with exception""" 91 | event = Event( 92 | event_type=EventType.AGENT, 93 | component_id="test_agent", 94 | category=EventCategory.EXECUTION 95 | ) 96 | 97 | with pytest.raises(ValueError): 98 | with MetricsContext(event): 99 | raise ValueError("Test error") 100 | 101 | # Metrics should still be collected 102 | assert event.duration_ms is not None 103 | assert event.thread_id is not None 104 | assert event.process_id is not None 105 | assert event.host_name is not None 106 | assert event.python_version is not None 107 | assert event.system_cpu_percent is not None 108 | assert event.system_memory_percent is not None 109 | assert event.system_disk_usage_bytes is not None 110 | assert event.system_network_bytes_sent is not None 111 | assert event.system_network_bytes_received is not None 112 | assert event.memory_usage_bytes is not None 113 | assert event.cpu_usage_percent is not None 114 | 115 | if __name__ == "__main__": 116 | pytest.main([__file__, "-v"]) 117 | -------------------------------------------------------------------------------- /tests/providers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LLMP-io/Legion/ba7764e32f6e9ff9b93439ee62df5da5d26608f9/tests/providers/__init__.py -------------------------------------------------------------------------------- /tests/providers/run_tests.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | from dotenv import load_dotenv 5 | 6 | # Load environment variables 7 | load_dotenv() 8 | 9 | if __name__ == "__main__": 10 | # Run all tests in the directory 11 | pytest.main([ 12 | os.path.dirname(__file__), # Run tests in current directory 13 | "-v", # Verbose output 14 | "--asyncio-mode=auto" # Enable async test support 15 | ]) 16 | -------------------------------------------------------------------------------- /tests/providers/test_model_colon.py: -------------------------------------------------------------------------------- 1 | """Test Model Colon Parsing 2 | 3 | This test verifies the fix for models with colons in their names, 4 | such as 'codellama:70b'. 5 | """ 6 | 7 | import os 8 | import pytest 9 | from typing import Annotated, Tuple 10 | 11 | from dotenv import load_dotenv 12 | from pydantic import Field 13 | 14 | from legion import agent, tool 15 | from legion.interface.schemas import ModelResponse 16 | 17 | # Load environment variables 18 | load_dotenv() 19 | 20 | # Check for API keys 21 | MOCK_MODE = not bool(os.getenv("OPENAI_API_KEY")) 22 | 23 | 24 | # Test cases for model parsing 25 | MODEL_TEST_CASES = [ 26 | ("openai:gpt-4", "openai", "gpt-4"), 27 | ("anthropic:claude-3-opus", "anthropic", "claude-3-opus"), 28 | ("ollama:codellama:70b", "ollama", "codellama:70b"), 29 | ("groq:mixtral-8x7b", "groq", "mixtral-8x7b"), 30 | ] 31 | 32 | 33 | # Define a mock agent that simulates the behavior without making actual API calls 34 | @agent( 35 | model="openai:gpt-4", # Using OpenAI since it's fully implemented 36 | temperature=0.7 37 | ) 38 | class TestAgent: 39 | """I am an agent that tests model names with colons.""" 40 | 41 | @tool 42 | def get_model_info(self) -> str: 43 | """Return information about the model being used.""" 44 | if MOCK_MODE: 45 | return "Mock mode: Using openai:gpt-4" 46 | else: 47 | return "Using openai:gpt-4" 48 | 49 | 50 | @pytest.mark.parametrize("model_str,expected_provider,expected_model", MODEL_TEST_CASES) 51 | def test_model_parsing(model_str: str, expected_provider: str, expected_model: str): 52 | """Test the model parsing function directly.""" 53 | from legion.agents.base import Agent 54 | 55 | provider, model = Agent._parse_model_string(model_str) 56 | 57 | assert provider == expected_provider 58 | assert model == expected_model 59 | 60 | 61 | @pytest.mark.asyncio 62 | async def test_agent_with_colon_model(): 63 | """Test that an agent can be created with a model name containing colons.""" 64 | # This test just verifies that the agent can be instantiated without errors 65 | agent = TestAgent() 66 | assert agent is not None 67 | 68 | # If not in mock mode, test actual processing 69 | if not MOCK_MODE: 70 | response = await agent.aprocess("What model are you using?") 71 | assert isinstance(response, ModelResponse) 72 | assert response.content is not None -------------------------------------------------------------------------------- /tests/run_tests.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | 7 | def run_tests(): 8 | """Run all tests""" 9 | test_dir = Path(__file__).parent 10 | 11 | print("\n" + "=" * 80) 12 | print("Running test suite...") 13 | print("=" * 80 + "\n") 14 | 15 | # Run tests with pytest 16 | args = [ 17 | str(test_dir), # Test directory 18 | "-v", # Verbose output 19 | "--tb=short", # Shorter traceback format 20 | "-p", "no:warnings", # Disable warning capture plugin 21 | "-p", "asyncio", # Enable asyncio plugin 22 | "--no-header", # Skip pytest header 23 | "--capture=no", # Don't capture stdout/stderr 24 | ] 25 | 26 | try: 27 | print(f"Test command: pytest {' '.join(args)}\n") 28 | result = pytest.main(args) 29 | 30 | if result != 0: 31 | print(f"\nTests failed with exit code: {result}", file=sys.stderr) 32 | if result == 5: 33 | print("No tests were collected. Check your test discovery settings.", file=sys.stderr) 34 | 35 | return result 36 | 37 | except Exception as e: 38 | print(f"\nFatal error running tests: {str(e)}", file=sys.stderr) 39 | return 1 40 | 41 | 42 | if __name__ == "__main__": 43 | sys.exit(run_tests()) 44 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import AsyncMock, MagicMock 2 | 3 | from legion.interface.schemas import ModelResponse, Role, TokenUsage 4 | 5 | 6 | class MockOpenAIProvider: 7 | """Mock OpenAI provider for testing""" 8 | 9 | def __init__(self, *args, **kwargs): 10 | self.agenerate = AsyncMock(return_value=ModelResponse( 11 | content="Mock response", 12 | role=Role.ASSISTANT, 13 | raw_response={"content": "Mock response"}, 14 | usage=TokenUsage(prompt_tokens=10, completion_tokens=10, total_tokens=20) 15 | )) 16 | self.generate = MagicMock(return_value=ModelResponse( 17 | content="Mock response", 18 | role=Role.ASSISTANT, 19 | raw_response={"content": "Mock response"}, 20 | usage=TokenUsage(prompt_tokens=10, completion_tokens=10, total_tokens=20) 21 | )) 22 | -------------------------------------------------------------------------------- /utils/scan_codebase.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from pathlib import Path 4 | 5 | def scan_python_files(root_dir="."): 6 | """ 7 | Recursively scan directory for Python files and create a dictionary with 8 | file paths as keys and file contents as values. 9 | 10 | Args: 11 | root_dir (str): Root directory to start scanning from 12 | 13 | Returns: 14 | dict: Dictionary with file paths as keys and file contents as values 15 | """ 16 | # Get the absolute path of the current script 17 | current_script = os.path.abspath(__file__) 18 | 19 | # Initialize dictionary to store results 20 | codebase = {} 21 | 22 | # Walk through directory tree 23 | for root, _, files in os.walk(root_dir): 24 | # Skip tests directory 25 | if "tests" in Path(root).parts: 26 | continue 27 | 28 | for file in files: 29 | # Get full file path 30 | file_path = os.path.join(root, file) 31 | abs_file_path = os.path.abspath(file_path) 32 | 33 | # Skip if this is the current script 34 | if abs_file_path == current_script: 35 | continue 36 | 37 | # Skip if not a Python file 38 | if not file.endswith('.py'): 39 | continue 40 | 41 | try: 42 | # Get relative path from root directory 43 | rel_path = os.path.relpath(file_path, root_dir) 44 | 45 | # Read file contents 46 | with open(file_path, 'r', encoding='utf-8') as f: 47 | content = f.read() 48 | 49 | # Store in dictionary using relative path as key 50 | codebase[rel_path] = content 51 | 52 | except Exception as e: 53 | print(f"Error reading file {file_path}: {str(e)}") 54 | continue 55 | 56 | return codebase 57 | 58 | def save_codebase_json(codebase, output_file="codebase.json"): 59 | """ 60 | Save the codebase dictionary to a JSON file. 61 | 62 | Args: 63 | codebase (dict): Dictionary containing the codebase 64 | output_file (str): Name of the output JSON file 65 | """ 66 | try: 67 | with open(output_file, 'w', encoding='utf-8') as f: 68 | json.dump(codebase, f, indent=2, ensure_ascii=False) 69 | print(f"Successfully saved codebase to {output_file}") 70 | except Exception as e: 71 | print(f"Error saving JSON file: {str(e)}") 72 | 73 | def main(): 74 | # Get codebase dictionary 75 | codebase = scan_python_files() 76 | 77 | # Save to JSON file 78 | save_codebase_json(codebase) 79 | 80 | # Print summary 81 | print(f"Scanned {len(codebase)} Python files") 82 | 83 | if __name__ == "__main__": 84 | main() 85 | --------------------------------------------------------------------------------