├── .github ├── CODEOWNERS └── workflows │ ├── cicd.yml │ └── reusable-docs.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTORS.md ├── LICENSE.md ├── README.md ├── Taskfile.yml ├── docs ├── _static │ ├── ABA.drawio.png │ ├── agent.png │ ├── agent_badge.png │ ├── application_extension_usage.drawio │ ├── application_extension_usage.png │ ├── css │ │ └── theme.custom.css │ ├── data_extension_usage.drawio │ ├── data_extension_usage.png │ ├── directory.png │ ├── espresso.svg │ ├── explore.png │ ├── img │ │ └── logo.png │ ├── ioa_arch.png │ ├── ioa_stack.png │ ├── login.png │ ├── logo.svg │ ├── neural-network.svg │ ├── service_composition.png │ └── service_compositon.drawio ├── build-docs.sh ├── conf.py ├── index.rst ├── pages │ ├── abstract.md │ ├── agent_directory.md │ ├── agws │ │ ├── manifest.md │ │ ├── workflow_server.rst │ │ └── workflow_server_manager.md │ ├── csit.md │ ├── data-model-guide.md │ ├── dir-howto.md │ ├── dir.md │ ├── how-to-guides │ │ ├── agents │ │ │ ├── interrupts.md │ │ │ └── thread.rst │ │ └── mas-creation-tutorial │ │ │ ├── _static │ │ │ ├── marketing_campaign_final.png │ │ │ └── marketing_campaign_skeleton.png │ │ │ └── mas-tutorial.md │ ├── identity │ │ └── identity.md │ ├── introduction.md │ ├── messaging_sdk │ │ ├── slim-control-plane.md │ │ ├── slim-core.md │ │ ├── slim-data-plane.md │ │ ├── slim-howto.md │ │ ├── slim-index.rst │ │ ├── slim-mcp.md │ │ ├── slim-security-layer.md │ │ └── slim-session-layer.md │ ├── oasf-data-model.proto.md │ ├── oasf-taxonomy.md │ ├── oasf-workflow.md │ ├── oasf.md │ ├── semantic_sdk │ │ ├── io_mapper.rst │ │ └── semantic_router.md │ └── syntactic_sdk │ │ ├── agntcy_acp_sdk.md │ │ ├── api_bridge_agent.md │ │ ├── connect.md │ │ ├── hil.md │ │ └── sample_acp_descriptors │ │ └── mailcomposer.json └── requirements.txt └── schema └── oasf-data-model.proto /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | **/ @agntcy/docs-maintainers 2 | 
-------------------------------------------------------------------------------- /.github/workflows/cicd.yml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2025 Cisco and/or its affiliates. 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | name: CI/CD Pipeline 5 | 6 | on: 7 | push: 8 | tags: 9 | - 'v*.*.*' 10 | 11 | pull_request: 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | prepare: 19 | name: Prepare 20 | outputs: 21 | release_tag: ${{ steps.vars.outputs.release_tag }} 22 | runs-on: ubuntu-latest 23 | steps: 24 | - name: Resolve required vars 25 | id: vars 26 | run: | 27 | echo "release_tag=${GITHUB_REF#refs/tags/}" >> "$GITHUB_OUTPUT" 28 | 29 | docs: 30 | name: Docs 31 | needs: 32 | - prepare 33 | uses: ./.github/workflows/reusable-docs.yml 34 | permissions: 35 | contents: read 36 | pages: write 37 | id-token: write 38 | with: 39 | deploy: ${{ startsWith(github.ref, 'refs/tags/') }} 40 | version: ${{ needs.prepare.outputs.release_tag }} 41 | 42 | success: 43 | name: Success 44 | if: ${{ !cancelled() && !contains(needs.*.result, 'cancelled') && !contains(needs.*.result, 'failure') }} 45 | needs: 46 | - prepare 47 | - docs 48 | runs-on: ubuntu-latest 49 | steps: 50 | - name: Echo Success 51 | run: echo "::notice Success!" 52 | -------------------------------------------------------------------------------- /.github/workflows/reusable-docs.yml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2025 Cisco and/or its affiliates. 
2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | name: Documentation 5 | 6 | on: 7 | workflow_call: 8 | inputs: 9 | deploy: 10 | description: 'Deploy documentation artifacts' 11 | required: true 12 | type: boolean 13 | default: false 14 | version: 15 | description: 'Version to use for documentation artifacts' 16 | required: true 17 | type: string 18 | default: dev 19 | 20 | workflow_dispatch: 21 | inputs: 22 | deploy: 23 | description: 'Deploy documentation artifacts' 24 | required: true 25 | type: boolean 26 | default: false 27 | version: 28 | description: 'Version to use for documentation artifacts' 29 | required: true 30 | type: string 31 | default: dev 32 | 33 | permissions: 34 | contents: read 35 | pages: write 36 | id-token: write 37 | 38 | jobs: 39 | build: 40 | name: Build artifacts 41 | runs-on: ubuntu-latest 42 | steps: 43 | - name: Checkout code 44 | uses: actions/checkout@v4 45 | with: 46 | fetch-depth: 0 47 | submodules: 'recursive' 48 | 49 | - name: Setup Pages 50 | id: pages 51 | uses: actions/configure-pages@v5 52 | 53 | - name: Setup Python 54 | uses: actions/setup-python@v5 55 | with: 56 | python-version: "3.12" 57 | 58 | - name: Setup Golang 59 | uses: actions/setup-go@v5 60 | with: 61 | go-version: '1.23.1' 62 | 63 | - name: Setup Taskfile 64 | shell: bash 65 | run: | 66 | sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin 67 | 68 | - name: Update GITHUB_PATH 69 | shell: bash 70 | run: echo "$HOME/.local/bin" >> $GITHUB_PATH 71 | 72 | - name: Build docs 73 | shell: bash 74 | env: 75 | VERSION: ${{ inputs.version }} 76 | run: | 77 | task build 78 | 79 | - name: Upload artifact 80 | uses: actions/upload-pages-artifact@v3 81 | with: 82 | name: docs-website 83 | path: ./.build/docs/html 84 | 85 | deploy: 86 | name: Deploy artifacts 87 | if: ${{ inputs.deploy == true || inputs.deploy == 'true' }} 88 | needs: 89 | - build 90 | environment: 91 | name: github-pages 92 | url: ${{ steps.deployment.outputs.page_url }} 93 | 
runs-on: ubuntu-latest 94 | steps: 95 | - name: Deploy to GitHub Pages 96 | id: deployment 97 | uses: actions/deploy-pages@v4 98 | with: 99 | artifact_name: docs-website 100 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Build 2 | .build/ 3 | venv/ 4 | 5 | # Dependencies 6 | .dep/ 7 | 8 | # Secrets 9 | .env 10 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official email address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | [moderation@agntcy.org](mailto:moderation@agntcy.org). All complaints will be 64 | reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series of 86 | actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or permanent 93 | ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within the 113 | community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.1, available at 119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder][Mozilla CoC]. 123 | 124 | For answers to common questions about this code of conduct, see the FAQ at 125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 126 | [https://www.contributor-covenant.org/translations][translations]. 
127 | 128 | [homepage]: https://www.contributor-covenant.org 129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 130 | [Mozilla CoC]: https://github.com/mozilla/diversity 131 | [FAQ]: https://www.contributor-covenant.org/faq 132 | [translations]: https://www.contributor-covenant.org/translations 133 | -------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors to Agent Gateway Protocol 2 | 3 | CONTRIBUTOR file should only contain list of copyright holder (i.e. employers of 4 | maintainers). All files that support comments should include standard header for 5 | the project. AGNTCY uses the following file header: 6 | 7 | Copyright AGNTCY Contributors (https://github.com/agntcy) 8 | SPDX-License-Identifier: CC-BY-4.0 9 | 10 | 1. Cisco Systems Inc. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Documentation Repository Internet of Agent 2 | 3 | This repository contains the documentation for the project, built using Sphinx 4 | with the Read the Docs template. The documentation sources are written in 5 | Markdown. 6 | 7 | ## Table of Contents 8 | 9 | - [Documentation Repository Internet of Agent](#documentation-repository-internet-of-agent) 10 | - [Table of Contents](#table-of-contents) 11 | - [Installation](#installation) 12 | - [macOS](#macos) 13 | - [Linux](#linux) 14 | - [Windows](#windows) 15 | - [Building the Documentation](#building-the-documentation) 16 | - [Contributing](#contributing) 17 | - [Copyright Notice](#copyright-notice) 18 | 19 | ## Installation 20 | 21 | To build the documentation locally, you need to install required dependencies. 
22 | 23 | **Prerequisites** 24 | 25 | - [Taskfile](https://taskfile.dev/) 26 | - [Python](https://www.python.org/downloads/) 27 | - [Golang](https://go.dev/doc/devel/release#go1.24.0) 28 | 29 | ### macOS 30 | 31 | - Install Taskfile using Homebrew: 32 | 33 | ```sh 34 | brew install go-task/tap/go-task 35 | 36 | ### Linux 37 | 38 | - Install Taskfile using bash: 39 | 40 | ```sh 41 | sh -c '$(curl -fsSL https://taskfile.dev/install.sh)' 42 | 43 | ### Windows 44 | 45 | - Install Taskfile using scoop: 46 | 47 | ```sh 48 | scoop install go-task 49 | 50 | ## Building the Documentation 51 | 52 | - To build the documentation, run the following command: 53 | 54 | ```sh 55 | task build 56 | 57 | - This will generate the HTML documentation in the .build/docs/html directory. 58 | 59 | ## Contributing 60 | 61 | Contributions are welcome! Please follow these steps to contribute: 62 | 63 | Fork the repository. Create a new branch for your feature or bugfix. Make your 64 | changes. Submit a pull request. 
65 | 66 | # Copyright Notice 67 | 68 | [Copyright Notice and License](./LICENSE.md) 69 | 70 | Copyright AGNTCY Contributors (https://github.com/agentcy) -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | # Copyright AGNTCY Contributors (https://github.com/agntcy) 2 | # SPDX-License-Identifier: CC-BY-4.0 3 | 4 | version: "3" 5 | 6 | interval: "500ms" 7 | 8 | dotenv: [".env"] 9 | 10 | env: 11 | BUILD_DIR: "{{.ROOT_DIR}}/.build" 12 | DEPS_DIR: "{{.ROOT_DIR}}/.dep" 13 | VENV_DIR: "{{.ROOT_DIR}}/.dep/.venv" 14 | # Taskfile env does not override the actual shell env, see: 15 | # https://github.com/go-task/task/issues/202 16 | PATH: "{{.DEPS_DIR}}:{{.VENV_DIR}}/bin:$PATH" 17 | 18 | tasks: 19 | default: 20 | cmds: 21 | - task -l 22 | 23 | ## 24 | ## Website 25 | ## 26 | build: 27 | desc: Build documentation website 28 | deps: 29 | - deps/venv 30 | - deps/proto-diagrams 31 | preconditions: 32 | - which go 33 | - which python3 34 | env: 35 | SOURCE_DIR: "{{.ROOT_DIR}}/docs" 36 | SCHEMA_DIR: "{{.ROOT_DIR}}/schema" 37 | BUILD_DOCS_DIR: "{{.BUILD_DIR}}/docs" 38 | vars: 39 | BUILD_SITE_DIR: "{{.BUILD_DIR}}/docs/html" 40 | cmds: 41 | - PATH={{.PATH}} ./docs/build-docs.sh 42 | - | 43 | echo "Docs available at: file://{{.BUILD_SITE_DIR}}/index.html" 44 | 45 | # TODO(ramizpolic): This usually works well to regenerate context, but it's not a real hot-reload. 46 | # Note that sometimes it crashes and goes into reload loop. Investigate or find alternative. 
47 | run: 48 | desc: Run documentation website in live editing mode 49 | preconditions: 50 | - which go 51 | - which python3 52 | watch: true 53 | sources: 54 | - "docs/**/*" 55 | - "schema/**/*" 56 | vars: 57 | BUILD_SITE_DIR: "{{.BUILD_DIR}}/docs/html" 58 | cmds: 59 | - task: build 60 | - | 61 | echo "Docs available at: file://{{.BUILD_SITE_DIR}}/index.html" 62 | 63 | ## 64 | ## Dependencies 65 | ## 66 | deps/venv: 67 | internal: true 68 | cmds: 69 | - python3 -m venv {{.VENV_DIR}} 70 | status: 71 | - test -f {{.VENV_DIR}} 72 | 73 | deps/proto-diagrams: 74 | internal: true 75 | vars: 76 | GIT: "https://github.com/GoogleCloudPlatform/proto-gen-md-diagrams" 77 | DIR: "{{.DEPS_DIR}}/git-proto-diagrams" 78 | BIN: "{{.DEPS_DIR}}/proto-gen-md-diagrams" 79 | cmds: 80 | - defer: rm -rf {{.DIR}} 81 | - | 82 | mkdir -p {{.DIR}} 83 | git clone {{.GIT}} {{.DIR}} 84 | cd {{.DIR}} 85 | go build -o {{.BIN}} 86 | status: 87 | - test -f {{.BIN}} 88 | -------------------------------------------------------------------------------- /docs/_static/ABA.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/ABA.drawio.png -------------------------------------------------------------------------------- /docs/_static/agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/agent.png -------------------------------------------------------------------------------- /docs/_static/agent_badge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/agent_badge.png -------------------------------------------------------------------------------- /docs/_static/application_extension_usage.drawio: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /docs/_static/application_extension_usage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/application_extension_usage.png -------------------------------------------------------------------------------- /docs/_static/css/theme.custom.css: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css2?family=Roboto:ital,wdth,wght@0,75..100,100..900;1,75..100,100..900&display=swap'); 2 | 3 | .rst-content .toctree-wrapper > p.caption, h1, h2, h3, h4, h5, h6, legend { 4 | font-family: Roboto, ff-tisa-web-pro, Georgia, Arial, sans-serif !important; 5 | } 6 | 7 | .wy-nav-content { 8 | background: #fff; 9 | max-width: 900px; 10 | } 11 | 12 | .wy-nav-content-wrap { 13 | background: #fff; 14 | } 15 | 16 | .wy-side-nav-search { 17 | background-color: #EFF3FC; 18 | } 19 | 20 | .wy-nav-side { 21 | background: #EFF3FC; 22 | } 23 | 24 | .wy-menu-vertical a { 25 | color: #333333; 26 | } 27 | 28 | .wy-menu-vertical header, .wy-menu-vertical p.caption { 29 | color: #187ADC; 30 | } 31 | 32 | .wy-side-nav-search > div.version { 33 | margin-top: -.4045em; 34 | margin-bottom: .809em; 35 | font-weight: 400; 36 | font-size: 16px; 37 | color: #7b7b7b; 38 | } 39 | 40 | a { 41 | color: #187ADC; 42 | } 43 | 44 | a:visited { 45 | color: #00142B; 46 | } 47 | 48 | .wy-menu-vertical li.toctree-l1.current>a { 49 | border-bottom: 0; 50 | border-top: 0; 51 | } 52 | 53 | .wy-menu-vertical li ul { 54 | background: #eff3fc00; 55 | } 56 | 57 | 58 | .wy-menu-vertical 
li.toctree-l1.current > a { 59 | background: #e5ecfb; 60 | } 61 | 62 | .wy-menu-vertical li.toctree-l2 > a, 63 | .wy-menu-vertical li.toctree-l2 li.toctree-l3>a { 64 | background: #e5ecfb; 65 | } 66 | 67 | .wy-menu-vertical li.toctree-l2.current > a, 68 | .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a { 69 | background: #dbe3f8; 70 | } 71 | 72 | .wy-menu-vertical li.toctree-l3.current>a, 73 | .wy-menu-vertical li.toctree-l3.current li.toctree-l4>a { 74 | background: #d1dbf6; 75 | } 76 | 77 | .wy-menu-vertical li.toctree-l3 > a, 78 | .wy-menu-vertical li.toctree-l3 li.toctree-l3>a { 79 | background: #d1dbf6; 80 | } 81 | 82 | .wy-menu.wy-menu-vertical li a:hover, 83 | .wy-menu-vertical li.current a:hover, 84 | .wy-menu-vertical li.toctree-l2.current > a:hover, 85 | .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a:hover, 86 | .wy-menu-vertical li.toctree-l3.current > a:hover, 87 | .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a:hover { 88 | background: #c5d3f7; 89 | } 90 | -------------------------------------------------------------------------------- /docs/_static/data_extension_usage.drawio: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/_static/data_extension_usage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/data_extension_usage.png -------------------------------------------------------------------------------- /docs/_static/directory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/directory.png -------------------------------------------------------------------------------- /docs/_static/espresso.svg: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 7 | 11 | 19 | 20 | 30 | 36 | 48 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | -------------------------------------------------------------------------------- /docs/_static/explore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/explore.png -------------------------------------------------------------------------------- /docs/_static/img/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/img/logo.png -------------------------------------------------------------------------------- /docs/_static/ioa_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/ioa_arch.png -------------------------------------------------------------------------------- /docs/_static/ioa_stack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/ioa_stack.png -------------------------------------------------------------------------------- /docs/_static/login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/login.png -------------------------------------------------------------------------------- /docs/_static/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 
3 | 4 | network 5 | 6 | -------------------------------------------------------------------------------- /docs/_static/neural-network.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /docs/_static/service_composition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/_static/service_composition.png -------------------------------------------------------------------------------- /docs/_static/service_compositon.drawio: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/build-docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright AGNTCY Contributors (https://github.com/agntcy) 3 | # SPDX-License-Identifier: CC-BY-4.0 4 | 5 | set -e 6 | 7 | ######################################## 8 | # Sphinx documentation builder script. 9 | # It should be called with Taskfile. 
10 | ######################################## 11 | 12 | ## Input config (absolute paths only) 13 | SOURCE_DIR="${SOURCE_DIR:-}" 14 | SCHEMA_DIR="${SCHEMA_DIR:-}" 15 | BUILD_DOCS_DIR="${BUILD_DOCS_DIR:-}" 16 | 17 | ## Cleanup previous builds 18 | rm -rf $BUILD_DOCS_DIR 19 | 20 | ## Configure source 21 | cd $SOURCE_DIR 22 | SOURCE_REQ_FILE="requirements.txt" 23 | SOURCE_PAGES_DIR="pages/" 24 | 25 | ## Install packages 26 | pip3 install -r $SOURCE_REQ_FILE 27 | 28 | ## Generate diagrams 29 | proto-gen-md-diagrams -d $SCHEMA_DIR 30 | mv ./*.proto.md $SOURCE_PAGES_DIR 31 | 32 | ## Build using sphinx 33 | sphinx-build -M clean "$SOURCE_DIR" "$BUILD_DOCS_DIR" 34 | sphinx-build -M html "$SOURCE_DIR" "$BUILD_DOCS_DIR" 35 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
14 | # 15 | import os 16 | # import sys 17 | # sys.path.insert(0, os.path.abspath('.')) 18 | 19 | 20 | # -- Project information ----------------------------------------------------- 21 | 22 | project = "AGNTCY Collective" 23 | copyright = "AGNTCY Contributors" 24 | author = "AGNTCY Contributors" 25 | 26 | # The short X.Y version 27 | version = "Version: " + os.environ.get("VERSION", "v0.1.0") 28 | 29 | # The full version, including alpha/beta/rc tags 30 | release = os.environ.get("VERSION", "v0.1.0") 31 | 32 | 33 | # -- General configuration --------------------------------------------------- 34 | 35 | # If your documentation needs a minimal Sphinx version, state it here. 36 | # 37 | # needs_sphinx = '1.0' 38 | 39 | # Add any Sphinx extension module names here, as strings. They can be 40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 | # ones. 42 | extensions = [ 43 | "sphinx_markdown_tables", 44 | "myst_parser", 45 | "sphinxcontrib.mermaid", 46 | "sphinx_inline_tabs", 47 | "sphinxemoji.sphinxemoji", 48 | "sphinx_copybutton", 49 | ] 50 | 51 | # Add any paths that contain templates here, relative to this directory. 52 | # templates_path = ['_templates'] 53 | 54 | # The suffix(es) of source filenames. 55 | # You can specify multiple suffix as a list of string: 56 | # 57 | # source_suffix = ['.rst', '.md'] 58 | source_suffix = ".rst" 59 | 60 | # The master toctree document. 61 | master_doc = "index" 62 | 63 | # The language for content autogenerated by Sphinx. Refer to documentation 64 | # for a list of supported languages. 65 | # 66 | # This is also used if you do content translation via gettext catalogs. 67 | # Usually you set "language" from the command line for these cases. 68 | language = "en" 69 | 70 | # List of patterns, relative to source directory, that match files and 71 | # directories to ignore when looking for source files. 72 | # This pattern also affects html_static_path and html_extra_path. 
73 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 74 | 75 | # The name of the Pygments (syntax highlighting) style to use. 76 | pygments_style = None 77 | html_logo = "_static/img/logo.png" 78 | 79 | # -- Options for HTML output ------------------------------------------------- 80 | 81 | # The theme to use for HTML and HTML Help pages. See the documentation for 82 | # a list of builtin themes. 83 | # 84 | html_theme = "sphinx_rtd_theme" 85 | 86 | # Theme options are theme-specific and customize the look and feel of a theme 87 | # further. For a list of options available for each theme, see the 88 | # documentation. 89 | # 90 | html_theme_options = { 91 | 'logo_only': True, 92 | 'style_external_links': True, 93 | } 94 | 95 | # Add any paths that contain custom static files (such as style sheets) here, 96 | # relative to this directory. They are copied after the builtin static files, 97 | # so a file named "default.css" will overwrite the builtin "default.css". 98 | html_static_path = ["_static"] 99 | 100 | # Custom sidebar templates, must be a dictionary that maps document names 101 | # to template names. 102 | # 103 | # The default sidebars (for documents that don't match any pattern) are 104 | # defined by theme itself. Builtin themes are using these templates by 105 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 106 | # 'searchbox.html']``. 107 | # 108 | # html_sidebars = {} 109 | 110 | 111 | # -- Options for HTMLHelp output --------------------------------------------- 112 | 113 | # Output file base name for HTML help builder. 114 | htmlhelp_basename = "agntcy-doc" 115 | 116 | 117 | # -- Options for LaTeX output ------------------------------------------------ 118 | 119 | latex_elements = { 120 | # The paper size ('letterpaper' or 'a4paper'). 121 | # 122 | 'papersize': 'letterpaper', 123 | # The font size ('10pt', '11pt' or '12pt'). 124 | # 125 | 'pointsize': '12pt', 126 | # Additional stuff for the LaTeX preamble. 
127 | # 128 | # 'preamble': '', 129 | # Latex figure (float) alignment 130 | # 131 | 'figure_align': 'htbp', 132 | } 133 | 134 | # Grouping the document tree into LaTeX files. List of tuples 135 | # (source start file, target name, title, 136 | # author, documentclass [howto, manual, or own class]). 137 | latex_documents = [ 138 | ( 139 | master_doc, 140 | "agntcy-doc.tex", 141 | "Documentation", 142 | "AGNTCY Contributors", 143 | "manual", 144 | ), 145 | ] 146 | 147 | 148 | # -- Options for manual page output ------------------------------------------ 149 | 150 | # One entry per manual page. List of tuples 151 | # (source start file, name, description, authors, manual section). 152 | man_pages = [(master_doc, "agntcy-doc", "Documentation", [author], 1)] 153 | 154 | 155 | # -- Options for Texinfo output ---------------------------------------------- 156 | 157 | # Grouping the document tree into Texinfo files. List of tuples 158 | # (source start file, target name, title, author, 159 | # dir menu entry, description, category) 160 | texinfo_documents = [ 161 | ( 162 | master_doc, 163 | "agntcy-doc", 164 | "Documentation", 165 | author, 166 | "agncty-doc", 167 | "One line description of project.", 168 | "Miscellaneous", 169 | ), 170 | ] 171 | 172 | 173 | # -- Options for Epub output ------------------------------------------------- 174 | 175 | # Bibliographic Dublin Core info. 176 | epub_title = project 177 | 178 | # The unique identifier of the text. This can be a ISBN number 179 | # or the project homepage. 180 | # 181 | # epub_identifier = '' 182 | 183 | # A unique identification for the text. 184 | # 185 | # epub_uid = '' 186 | 187 | # A list of files that should not be packed into the epub file. 
188 | epub_exclude_files = ["search.html"] 189 | 190 | # Enable cross-support for mermaid renderable tags, check: 191 | # https://github.com/mgaitan/sphinxcontrib-mermaid 192 | myst_fence_as_directive = ["mermaid"] 193 | suppress_warnings = ["myst.xref_missing", "myst.iref_ambiguous", "misc.highlighting_failure"] 194 | 195 | # CSS styles 196 | html_css_files = [ 197 | 'css/theme.custom.css', 198 | ] 199 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Internet of Agents Components 2 | ============================= 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | :caption: Contents: 7 | 8 | .. toctree:: 9 | :caption: Abstract 10 | :maxdepth: 1 11 | 12 | pages/abstract 13 | 14 | .. toctree:: 15 | :caption: Introduction 16 | :maxdepth: 1 17 | 18 | pages/introduction 19 | 20 | .. toctree:: 21 | :caption: OASF 22 | :maxdepth: 1 23 | 24 | pages/oasf 25 | pages/oasf-data-model.proto 26 | pages/oasf-taxonomy 27 | pages/oasf-workflow 28 | pages/data-model-guide 29 | 30 | .. toctree:: 31 | :caption: Agent Directory Service 32 | :maxdepth: 1 33 | 34 | pages/dir 35 | pages/dir-howto 36 | pages/agent_directory 37 | 38 | .. toctree:: 39 | :caption: Agent Manifest 40 | :maxdepth: 1 41 | 42 | pages/agws/manifest 43 | 44 | .. toctree:: 45 | :caption: Identity 46 | :maxdepth: 1 47 | 48 | pages/identity/identity 49 | 50 | .. toctree:: 51 | :caption: Semantic SDK 52 | :maxdepth: 1 53 | 54 | pages/semantic_sdk/io_mapper 55 | pages/semantic_sdk/semantic_router 56 | 57 | .. toctree:: 58 | :caption: Syntactic SDK 59 | :maxdepth: 1 60 | 61 | pages/syntactic_sdk/connect 62 | pages/syntactic_sdk/agntcy_acp_sdk 63 | pages/syntactic_sdk/api_bridge_agent 64 | pages/syntactic_sdk/hil 65 | 66 | 67 | .. toctree:: 68 | :caption: Messaging SDK 69 | :maxdepth: 1 70 | 71 | pages/messaging_sdk/slim-index.rst 72 | 73 | 74 | .. 
toctree:: 75 | :caption: Agent Workflow Server 76 | :maxdepth: 1 77 | 78 | pages/agws/workflow_server 79 | pages/agws/workflow_server_manager 80 | 81 | .. toctree:: 82 | :caption: CSIT 83 | :maxdepth: 1 84 | 85 | pages/csit 86 | 87 | .. toctree:: 88 | :caption: How-to Guides 89 | :maxdepth: 1 90 | 91 | pages/how-to-guides/mas-creation-tutorial/mas-tutorial 92 | pages/how-to-guides/agents/thread.rst 93 | pages/how-to-guides/agents/interrupts.md 94 | -------------------------------------------------------------------------------- /docs/pages/abstract.md: -------------------------------------------------------------------------------- 1 | # Abstract 2 | 3 | The Internet of Agents (IoA) represents a transformative initiative in the field 4 | of artificial intelligence designed to create a standardized ecosystem for AI 5 | agent discovery, interconnection, and collaboration. This comprehensive 6 | framework addresses the growing need for interoperability and secure 7 | communication between AI agents developed across different frameworks and 8 | platforms. 9 | -------------------------------------------------------------------------------- /docs/pages/agent_directory.md: -------------------------------------------------------------------------------- 1 | # Hosted AGNTCY Agent Directory 2 | 3 | A public hosted instance of the Agent Directory is available at 4 | [https://hub.agntcy.org/](https://hub.agntcy.org/). In 5 | this section we describe the main features of this instance which is provided __AS 6 | IS__ to the community to help users familiarize themselves with the Agent 7 | Directory. 8 | 9 | AGNTCY Agent Directory is designed to provide a robust multi-organization platform for hosting and 10 | managing Agent Directory Records, which we will refer to as simply "records" or 11 | "agent records." AGNTCY Agent Directory acts as a centralized 12 | point for organizing and accessing agent records. 
This hosted service is enhanced by a 13 | gRPC API that supports efficient service communication and integration, ensuring 14 | seamless interaction between components. 15 | 16 | AGNTCY Hub serves as a central platform for hosting and managing various 17 | agent-related services. The main purpose of the Agent Directory Service 18 | component of Hub is to provide a comprehensive solution 19 | for developers and IT admins to register, discover, and manage records in an 20 | organized manner. By offering a secure environment for authentication and user 21 | management, it ensures that organizations can confidently manage their agent 22 | directories and related services. 23 | 24 | ## Core Concepts 25 | 26 | The AGNTCY Agent Directory is organized around a few basic concepts: 27 | 28 | * Users - A user is the basic unit of authentication and authorization in the 29 | Hub, usually corresponding to a human or service account. 30 | * Organization - An organization provides a way to group users for sharing agents 31 | and handling administrative tasks. A user can belong to many organizations, but 32 | organizations are flat and cannot belong to one another. 33 | * Agent Records - An Agent Record is a collection of data and metadata about a 34 | particular agentic application or service. The schema of the Record is defined 35 | in [OASF](oasf.md) and contains, for example, a 36 | [collection of skills](oasf-taxonomy.md). 37 | * Repositories - A agent repository collects agent records that describe 38 | different versions of the same agent into one location to provide an overview of 39 | its history and current status. A Record can belong to only one repo, while a 40 | user or organization may access many different repos and by extension their 41 | agent records. 
42 | 43 | The [Agent Directory Service (ADS)](dir.md) provides storage for agent records 44 | while the frontend hosted AGNTCY Agent Directory provides access control with 45 | Users and their Organizations and management of agent records in their Repos. 46 | 47 | ## Features 48 | 49 | AGNTCY Agent Directory enables users to: 50 | 51 | * View and search for public agent records. 52 | * View your organization's public and private agent records. 53 | * Publish agent records to an agent repository. 54 | * Access multiple separate organizations. 55 | * Invite other users to your organizations. 56 | 57 | ## Using the Hub 58 | 59 | ### Signing up for the hosted AGNTCY Agent Directory and Logging in 60 | 61 | To get started with the hosted AGNTCY Agent Directory, sign up for free at the [AGNTCY Agent Directory 62 | homepage](https://hub.agntcy.org/). You can sign up with your GitHub account or 63 | by providing an email and password. Once your account is created, simply log in. 64 | When first logging in, you are prompted to create a name for your default 65 | organization. This organization is a personal space where all repositories 66 | belong to you. 67 | 68 | ![Logging in](../_static/login.png) 69 | 70 | ### View and Search for Agents 71 | 72 | The Explore page allows users to browse and search through available agent repositories. 73 | 74 | ![The Explore Page](../_static/explore.png) 75 | 76 | You can refine the results using predefined filters and open search: 77 | 78 | * Use the **Search** bar to search for a text string in a repository name. To 79 | clear the search bar, click the **×**. 80 | * Use the drop-down **Filters** list to narrow the results by Agent Skill. 81 | * Use the drop-down **Sort by** list to sort the displayed items by Most Recent 82 | or Oldest. 83 | 84 | You can change organizations by clicking the Org drop-down list and selecting 85 | another organization.
86 | 87 | ### Manage Agents associated with Your Organization 88 | 89 | The Agent Directory Page allows you to view, edit, and create agent repositories 90 | in the AGNTCY Agent Directory. Here the records are displayed in a table with customizable columns. 91 | 92 | You can select which columns are displayed, and in which order, by clicking the 93 | **Arrange Columns** button (**▥**). 94 | 95 | You can reload the listed items by clicking the **Reload** button (**⟳**). 96 | 97 | You can refine the results using predefined filters and open search: 98 | 99 | * Use the **Search** bar to search for a text string in an agent repository 100 | name. To clear the search, click the **×**. 101 | * Use the drop-down **Filters** list to narrow the results by Agent Skill. 102 | * Use the drop-down **Sort by** list to sort the displayed items by Most Recent 103 | or Oldest. 104 | 105 | ![The Agent Directory Page](../_static/directory.png) 106 | 107 | #### Agent Actions 108 | 109 | Clicking the three dots (**⁝**) at the end of any row in the Agent Directory 110 | table opens a drop-down list of actions you can perform on that agent 111 | repository. 112 | 113 | * Click **Open Details** to [view the agent details](#agent-details). 114 | * Click **Edit** to edit the agent. 115 | * Click **Delete** to remove the agent repo from the directory, including all 116 | of its agent records. 117 | 118 | #### Agent Details 119 | 120 | Clicking on an agent repository opens the Agent Details page with further 121 | information on the agent repository. 122 | 123 | ![The Agent Details Page](../_static/agent.png) 124 | 125 | The **General** tab lists the following information from the agent record: 126 | 127 | * A description of the agent. 128 | * The skills associated with the agent. 129 | * The version number and date of publishing. 130 | * The CLI command to push a new version of the agent. 131 | 132 | The **Versions** tab lists the published versions of the agent. 
133 | 134 | The **Settings** tab allows the owner to change the 135 | visibility of the agent. 136 | 137 | ## Add an Agent Directory Record to the AGNTCY Agent Directory 138 | Agent directory records are associated with a repository. A repository must 139 | exist first for an agent record to be added to it. 140 | 141 | ### Create a new agent repository 142 | To add an agent repository in the AGNTCY Agent Directory: 143 | 144 | 1. Click the **+ New Repository** button. 145 | 1. Enter the repository name and description. 146 | 1. Select the visibility for your agent repository. 147 | * Public agent repositories appear in search results. 148 | * Private agent repositories are only visible in your organization. 149 | 1. Click **Publish**. 150 | 1. You can also publish the agent repository using the generated CLI command. 151 | 1. Click **Finish**. 152 | 153 | At this point, you have an empty repository ready for agent records. 154 | 155 | ### Adding an Agent Directory Record to a Repository 156 | Adding an Agent Directory Record has these prerequisites: 157 | 1. You need to install the AGNTCY Agent Directory command line tool, `dirctl`. 158 | 1. You need an agent record that conforms to AGNTCY Agent Directory requirements. 159 | 1. You need to sign your agent record. 160 | 161 | #### Pre-req 1: Install `dirctl` 162 | Binary packages and installation of the AGNTCY Agent Directory `dirctl` 163 | command line tool are available in multiple forms on GitHub: 164 | * [container image](https://github.com/agntcy/dir/pkgs/container/dir-ctl) 165 | * [homebrew](https://github.com/agntcy/dir/tree/main/HomebrewFormula) 166 | * [binary](https://github.com/agntcy/dir/releases) 167 | 168 | After installation, use the `dirctl` and `dirctl hub` commands to list the 169 | available commands. 170 | 171 | #### Pre-req 2: Create a Conforming Agent Directory Record 172 | An Agent Directory record is stored in JSON format. The record is specific 173 | to one entry in the Agent Directory. 
The structure of each AD record is 174 | defined by the 175 | [Open Agentic Schema Framework](https://schema.oasf.agntcy.org/objects/agent) 176 | starting at the root with an [Agent object](https://schema.oasf.agntcy.org/objects/agent). 177 | 178 | To be useful, an agent record should include at least the following: 179 | * Name of the agent (the name MUST match the organization and repository name in the AGNTCY Agent Directory), 180 | * Version of the agent (use semantic convention) 181 | * Description (something to help any viewer understand what your agent does, what is the use case it is applicable to, expected inputs and outputs, LLM used, runtime, etc) 182 | * Locator, per [OASF locator objects](https://schema.oasf.agntcy.org/objects/locator?extensions=) 183 | * type(s) (source code, agent as a service, docker image, etc) matching the supported types in the OASF locator objects 184 | * url (corresponding address to find the agent) 185 | * Skills - MUST follow the [OASF skills schema](https://schema.oasf.agntcy.org/skills?extensions=) 186 | 187 | And it will look like this 188 | ``` 189 | { 190 | "name": "organization/my-agent", 191 | "version": "2.0", 192 | "description": "This agent takes any text input and condenses it down to 3 bullets of less than 100 characters each using any LLM.", 193 | "locators": \[ 194 | { 195 | "url": "https://github.com/example/agent_A", 196 | "type": "package-source-code" 197 | } 198 | \], 199 | "skills": \[ 200 | { 201 | "class_uid": 10201 202 | } 203 | \] 204 | } 205 | ``` 206 | 207 | 208 | #### Pre-req 3: Signing Agent Directory Records using `dirctl` 209 | You must sign the record before pushing it to the AGNTCY Agent Directory. Unsigned records are 210 | rejected by the API. 
211 | 212 | To sign an agent record in the file `agent.json` using the default provider [sigstore](https://www.sigstore.dev/), run: 213 | 214 | ```shell 215 | dirctl sign agent.json > agent.signed.json 216 | ``` 217 | 218 | The signing service login page opens in your browser. Use your credentials to log in. The 219 | agent record will be augmented with a generated signature and will be output 220 | in JSON format. The new signed agent record can be pushed to the Hub. 221 | 222 | For further details on signing, please see 223 | [the Agent Directory HOWTO](dir-howto.md#signing-and-verification). 224 | 225 | #### Pushing Agent Directory Records using `dirctl` 226 | Once all pre-requisites are complete, you are ready to push an agent record to an agent repository 227 | that you have write access to. 228 | 229 | Pushing and pulling agent directory records is done using the `dirctl` tool. 230 | 231 | From your terminal window: 232 | 1. Login to your AGNTCY Agent Directory account 233 | 234 | ```dirctl hub login``` 235 | 236 | The login page opens in your browser. Use your credentials to log in. 237 | 2. Verify your AGNTCY Agent Directory organizations and which one you are currently logged into: 238 | 239 | ```dirctl hub orgs``` 240 | 241 | Switch organizations as needed to the organization that you want to push your agent record to: 242 | 243 | ```dirctl hub orgs switch``` 244 | 245 | 3. Push your signed, conforming agent record to the desired organization/repository: 246 | 247 | ```dirctl hub push <path/to/agent.json>``` 248 | 249 | 4. When you're done, logout of your hub account 250 | 251 | ```dirctl hub logout``` 252 | 253 | #### Pulling Agent Directory Records using `dirctl` 254 | 255 | You can also pull an agent directory record via `dirctl` using the command listed on the agent details page. 256 | 257 | 258 | #### Verifying an Agent Directory Record Signature 259 | 260 | The verification process allows validation of the agent record signature 261 | against a specific identity.
262 | 263 | To verify that an agent record is properly signed, you can run `dirctl 264 | verify agent.json`. 265 | 266 | To verify the signature against a specific identity, for example to check if an 267 | agent record originates from GitHub Agntcy users, run: 268 | 269 | ```bash 270 | dirctl verify agent.json \ 271 | --oidc-issuer "(.*)github.com(.*)" \ 272 | --oidc-identity "(.*)@agntcy.com" 273 | ``` 274 | 275 | For further details on verification, please see 276 | [the Agent Directory HOWTO](dir-howto.md#signing-and-verification). 277 | 278 | ## Managing Organizations and Users 279 | 280 | ### Settings 281 | 282 | The settings page allows you to manage your organizations and users. 283 | 284 | #### Organizations 285 | 286 | Organizations represent groups of users within the Hub, each with its own 287 | repositories. Users can be members of many organizations. The organizations 288 | available to you are listed under the **Organizations** tab. 289 | 290 | Clicking the three dots (**⁝**) at the end of any row in the table opens a 291 | drop-down list of actions you can perform on that organization. 292 | 293 | * Click **Switch** to switch to the organization. 294 | 295 | You can reload the listed items by clicking the **Reload** button (**⟳**). 296 | 297 | #### Users 298 | 299 | The users in an organization are listed under the **Users** tab. 300 | 301 | You can invite other users to the organization by clicking the **+ Invite User** 302 | button. 303 | 304 | > Note: You cannot invite other users to your personal organization created 305 | > during signing up. To collaborate with others, create a new organization and 306 | > invite them to it. 307 | 308 | Clicking the three dots (**⁝**) at the end of any row in the table opens a 309 | drop-down list of actions you can perform on that user. 310 | 311 | * Click **Edit** to edit the user's role. 312 | * Click **Delete** to delete the user.
313 | 314 | You can reload the listed items by clicking the **Reload** button (**⟳**). 315 | 316 | ## Troubleshooting pushing agents to the AGNTCY Agent Directory 317 | `Error: failed to validate access token: invalid session token` 318 | You forgot to login to your AGNTCY Agent Directory account 319 | 320 | `Error: failed to push agent: could not receive response: rpc error: code = InvalidArgument desc = agent: invalid value in agent name` 321 | The “agent name” attribute in the json file does not match the organization/repository in the Hub. 322 | 323 | `Error: failed to push agent: could not receive response: rpc error: code = AlreadyExists desc = agent: data model with same version already exists` 324 | You are trying to upload a new agent record with the same name and version as one that exists already. Update the version number in the json file. 325 | 326 | Details on other uses of the `dirctl` command to interact with the 327 | Agent Directory are 328 | [available in the documentation](https://github.com/agntcy/dir/pkgs/container/dir-ctl). 329 | After installation, use the `dirctl hub` command to list the available commands. 330 | -------------------------------------------------------------------------------- /docs/pages/agws/manifest.md: -------------------------------------------------------------------------------- 1 | # Agent Manifest 2 | 3 | ## Introduction 4 | 5 | An Agent Manifest is a document that describes in detail the following: 6 | * What the agent is capable of. 7 | * How the agent can be consumed if provided as-a-service. 8 | * How the agent can be deployed if provided as a deployable artifact. 9 | * What are the dependencies of the agent, that is, which other agents it relies on. 10 | 11 | The manifest is designed to be used by [Agent Connect Protocol](../syntactic_sdk/connect.md) and the Workflow Server and stored in the Agent Directory with the corresponding OASF extensions. 
12 | 13 | This document describes the principles of the Agent Manifest definition. Manifest definition can be found [here](https://github.com/agntcy/workflow-srv-mgr/blob/main/wfsm/spec/manifest.yaml) 14 | 15 | Sample manifests can be found [here](https://github.com/agntcy/workflow-srv-mgr/tree/main/wfsm/spec/examples). 16 | 17 | ## Agent Manifest Structure 18 | 19 | Agent Manifest includes the following sections: 20 | * [Agent Identification and Metadata](#agent-identification-and-metadata) 21 | * [Agent Interface Data Structure Specification](#agent-interface-data-structure-specification) 22 | * [Agent Deployment and Consumption](#agent-deployment-and-consumption) 23 | * [Agent Dependencies](#agent-dependencies) 24 | 25 | ### Agent Identification and Metadata 26 | 27 | Agent Manifest must uniquely identify an agent within the namespace it is part of. This is done through a unique name and a version. 28 | 29 | Agent Manifest must include a natural language description that describes what the agent is capable of doing. This allows user and potentially other agents to select the agent that best fits a given task. 30 | 31 | Agent Manifest can include metadata that provides additional information about the agent, such as ownership, timestamps, tags, and so on. 32 | 33 | 34 |
35 | Sample descriptor metadata section for the mailcomposer agent 36 | 37 | ```json 38 | { 39 | "metadata": { 40 | "ref": { 41 | "name": "org.agntcy.mailcomposer", 42 | "version": "0.0.1", 43 | "url": "https://github.com/agntcy/acp-spec/blob/main/docs/sample_acp_descriptors/mailcomposer.json" 44 | }, 45 | "description": "This agent is able to collect user intent through a chat interface and compose wonderful emails based on that." 46 | } 47 | ... 48 | } 49 | ``` 50 | 51 | Metadata for a mail composer agent named `org.agntcy.mailcomposer` version `0.0.1`. 52 | 53 |
54 | 55 | 56 | 57 | ### Agent Interface Data Structure Specification 58 | Agents willing to interoperate with other agents expose an interface that allow for invocation and configuration. 59 | 60 | Agent Connect Protocol specifies a standard for this interface. However, it specifies methods to configure and invoke agents, but it does not specify the format of the data structures that an agent expects and produces for such configurations and invocations. 61 | 62 | The specification of these data structures is included in what we call the Agent ACP descriptor, which can be provided by ACP itself, but it is also defined as part of the Agent Manifest. 63 | 64 | Agent `specs` section includes ACP invocation capabilities, e.g. `streaming`, `callbacks`, `interrupts` etc., and the JSON schema definitions for ACP interactions: 65 | * Agent Configuration. 66 | * Run Input. 67 | * Run Output. 68 | * Interrupt and Resume Payloads. 69 | * Thread State. 70 | 71 |
72 | Sample specs section for the mailcomposer agent 73 | 74 | ```json 75 | { 76 | ... 77 | "specs": { 78 | "capabilities": { 79 | "threads": true, 80 | "interrupts": true, 81 | "callbacks": true 82 | }, 83 | "input": { 84 | "type": "object", 85 | "description": "Agent Input", 86 | "properties": { 87 | "message": { 88 | "type": "string", 89 | "description": "Last message of the chat from the user" 90 | } 91 | } 92 | }, 93 | "thread_state": { 94 | "type": "object", 95 | "description": "The state of the agent", 96 | "properties": { 97 | "messages": { 98 | "type": "array", 99 | "description": "Full chat history", 100 | "items": { 101 | "type": "string", 102 | "description": "A message in the chat" 103 | } 104 | } 105 | } 106 | }, 107 | "output": { 108 | "type": "object", 109 | "description": "Agent Input", 110 | "properties": { 111 | "message": { 112 | "type": "string", 113 | "description": "Last message of the chat from the user" 114 | } 115 | } 116 | }, 117 | "config": { 118 | "type": "object", 119 | "description": "The configuration of the agent", 120 | "properties": { 121 | "style": { 122 | "type": "string", 123 | "enum": ["formal", "friendly"] 124 | } 125 | } 126 | }, 127 | "interrupts": [ 128 | { 129 | "interrupt_type": "mail_send_approval", 130 | "interrupt_payload": { 131 | "type": "object", 132 | "title": "Mail Approval Payload", 133 | "description": "Description of the email", 134 | "properties": { 135 | "subject": { 136 | "title": "Mail Subject", 137 | "description": "Subject of the email that is about to be sent", 138 | "type": "string" 139 | }, 140 | "body": { 141 | "title": "Mail Body", 142 | "description": "Body of the email that is about to be sent", 143 | "type": "string" 144 | }, 145 | "recipients": { 146 | "title": "Mail recipients", 147 | "description": "List of recipients of the email", 148 | "type": "array", 149 | "items": { 150 | "type": "string", 151 | "format": "email" 152 | } 153 | } 154 | }, 155 | "required": [ 156 | "subject", 157 | "body", 
158 | "recipients" 159 | ] 160 | }, 161 | "resume_payload": { 162 | "type": "object", 163 | "title": "Email Approval Input", 164 | "description": "User Approval for this email", 165 | "properties": { 166 | "reason": { 167 | "title": "Approval Reason", 168 | "description": "Reason to approve or decline", 169 | "type": "string" 170 | }, 171 | "approved": { 172 | "title": "Approval Decision", 173 | "description": "True if approved, False if declined", 174 | "type": "boolean" 175 | } 176 | }, 177 | "required": [ 178 | "approved" 179 | ] 180 | } 181 | } 182 | ] 183 | } 184 | ... 185 | } 186 | ``` 187 | The agent supports threads, interrupts, and callback. 188 | 189 | It declares schemas for input, output, and config: 190 | * As input, it expects the next message of the chat from the user. 191 | * As output, it produces the next message of the chat from the agent. 192 | * As config it expects the style of the email to be written. 193 | 194 | It supports one kind of interrupt, which is used to ask user for approval before sending the email. It provides subject, body, and recipients of the email as interrupt payload and expects approval as input to resume. 195 | 196 | It supports a thread state which holds the chat history. 197 | 198 |
199 | 200 | 201 | ### Agent Deployment and Consumption 202 | 203 | Agents can be provided in two different forms, which we call deployment options: 204 | 205 | * **As a service**: a network endpoint that exposes an interface to the agent (for example, Agent Connect Protocol). 206 | * **As a deployable artifact**, for example: 207 | * A docker image, which once deployed exposes an interface to the agent (for example, Agent Connect Protocol). 208 | * A source code bundle, which can be executed within the specific runtime and framework it is built on. 209 | 210 | The same agent can support one or more deployment options. 211 | 212 | Agent Manifest currently supports three deployment otions: 213 | * Source Code Deployment: In this case the agent can be deployed starting from its code. For this deployment mode, the manifest provides: 214 | * The location where the code is available 215 | * The framework used for this agent 216 | * The framework specific configuration needed to run the agent. 217 | * Remote Service Deployment: In this case, the agent does not come as a deployable artefact, but it's already deployed and available as a service. For this deployment mode, the manifest provides: 218 | * The network endpoint where the agent is available through the ACP 219 | * The authentication used by ACP for this agent 220 | * Docker Deployment: In this case the agent can be deployed starting from a docker image. It is assumed that once running the docker container expose the agent through ACP. For this deployment mode, the manifest provides: 221 | * The agent container image 222 | * The authentication used by ACP for this agent 223 | 224 |
225 | Sample manifest dependency section for the mailcomposer agent 226 | 227 | ```json 228 | { 229 | ... 230 | "deployments": [ 231 | { 232 | "type": "source_code", 233 | "name": "src", 234 | "url": "git@github.com:agntcy/mailcomposer.git", 235 | "framework_config": { 236 | "framework_type": "langgraph", 237 | "graph": "mailcomposer" 238 | } 239 | } 240 | ] 241 | ... 242 | } 243 | ``` 244 | 245 | Mailcomposer agent in the example above comes as code written for LangGraph and available on Github. 246 | 247 | 248 |
249 | 250 | ### Agent Dependencies 251 | 252 | An agent may depend on other agents, which means that at some point of its execution it needs to invoke them to accomplish its tasks. We refer to these other agents as **sub-agents**. A user who wants to use the agent, needs to know this information and check that the dependencies are satisfied, that is, make sure that the sub-agents are available. 253 | This may imply simply checking that sub-agents are reachable or deploying them, according to the deployment modes they support. 254 | 255 | The Agent Manifest must include a list of all sub-agents in the form of a list of references to their manifests. 256 | 257 | Note the recursive nature of Agent Manifests that can point in turn to other Agent Manifests as dependencies. 258 | 259 |
260 | Sample manifest dependency section for the mailcomposer agent 261 | 262 | ```json 263 | { 264 | ... 265 | "dependencies": [ 266 | { 267 | "name": "org.agntcy.sample-agent-2", 268 | "version": "0.0.1" 269 | }, 270 | { 271 | "name": "org.agntcy.sample-agent-3", 272 | "version": "0.0.1" 273 | } 274 | ] 275 | ... 276 | } 277 | ``` 278 | 279 | Mailcomposer agent in the example above depends on `sample-agent-2` and `sample-agent-3`. 280 | 281 |
282 | 283 | 284 | 285 | 286 | -------------------------------------------------------------------------------- /docs/pages/agws/workflow_server.rst: -------------------------------------------------------------------------------- 1 | Agent Workflow Server 2 | ===================== 3 | 4 | The `Agent Workflow Server `_ enables participation in the Internet of Agents. It accommodates AI Agents from diverse frameworks and exposes them through Agent Connect Protocol (`ACP <../syntactic_sdk/agntcy_acp_sdk.html>`_), regardless of their underlying implementation. 5 | 6 | .. note:: 7 | 8 | If you wish to quickly deploy and run your Agent, please check out the user-facing `Workflow Server Manager `_ instead. 9 | 10 | Getting Started 11 | --------------- 12 | 13 | Prerequisites 14 | ~~~~~~~~~~~~~ 15 | 16 | You need to have installed the following software to run the Agent Workflow Server: 17 | 18 | - Python 3.12 (or above) 19 | - Poetry 2.0 (or above) 20 | 21 | Local development 22 | ~~~~~~~~~~~~~~~~~ 23 | 24 | 1. Clone Agent Workflow Server repository: ``git clone https://github.com/agntcy/workflow-srv.git`` 25 | 26 | 2. Copy example env file and adapt if necessary: ``cp .env.example .env`` 27 | 28 | 3. Create a virtual environment and install the server dependencies: ``poetry install`` 29 | 30 | 4. Install an agent (`See examples `_). E.g.: ``pip install agntcy/acp-sdk/examples/mailcomposer`` 31 | 32 | 5. Start the server: ``poetry run server`` 33 | 34 | Generating API 35 | ~~~~~~~~~~~~~~ 36 | 37 | 1. If it's the first time you're cloning this repo, initialize submodule: ``git submodule update --init --recursive`` 38 | 39 | 2. Run ``make generate-api`` 40 | 41 | Generated code (API routes template and models) is under ``src/agent_workflow_server/generated``. 
42 | 43 | - If needed, API routes template could be manually copied and implemented under ``src/agent_workflow_server/apis`` 44 | - Models should not be copied over different places nor modified, but referenced as they are 45 | 46 | Authentication 47 | --------------- 48 | 49 | The Agent Workflow Server, and the underlying Agent, could be optionally authenticated via a pre-defined API Key: 50 | 51 | - Set ``API_KEY`` environment variable with a pre-defined value to enable authentication 52 | - Include the same value in requests from clients via ``x-api-key`` header 53 | 54 | API Documentation 55 | ----------------- 56 | 57 | Once the Agent Workflow Server is running, interactive API docs are available under ``/docs`` endpoint, redoc documentation under ``/redoc`` endpoint 58 | 59 | 60 | Current Support 61 | --------------- 62 | 63 | .. list-table:: Supported frameworks and features 64 | :widths: 15 20 10 10 10 10 20 65 | :header-rows: 1 66 | 67 | * - Framework 68 | - Supported versions 69 | - Invoke 70 | - Streaming 71 | - Threads 72 | - Callbacks 73 | - Interrupts (Human-in-the-loop) 74 | * - LangGraph 75 | - >=0.2.60,<0.4.0 76 | - |:white_check_mark:| 77 | - |:white_check_mark:| 78 | - |:white_check_mark:| 79 | - |:construction:| 80 | - |:white_check_mark:| 81 | * - LlamaIndex 82 | - >=0.12.30,<0.13.0 83 | - |:white_check_mark:| 84 | - |:white_check_mark:| 85 | - |:white_check_mark:| 86 | - |:construction:| 87 | - |:white_check_mark:| 88 | 89 | Contributing 90 | ------------ 91 | 92 | ACP API Contribution 93 | ~~~~~~~~~~~~~~~~~~~~ 94 | 95 | Agent Workflow Server implements ACP specification to expose Agents functionalities. To contribute to the ACP API, check out `Agent Connect Protocol Specification `_. 96 | 97 | Adapters SDK Contribution 98 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 99 | 100 | Agent Workflow Server supports different agentic frameworks via ``Adapters``. 
101 | 102 | The process of implementing support of a new framework is pretty straightforward, as the server dynamically loads ``Adapters`` at runtime. 103 | 104 | ``Adapters`` are placed under ``src/agent_workflow_server/agents/adapters`` and must implement ``BaseAdapter`` class. 105 | 106 | To support a new framework, or extend functionality, one must implement the ``load_agent`` method. To invoke that agent, one must implement the ``astream`` method. 107 | 108 | See example below, supposing support to a new framework ``MyFramework`` should be added. 109 | 110 | .. code-block:: python 111 | 112 | # src/agent_workflow_server/agents/adapters/myframework.py 113 | 114 | class MyAgent(BaseAgent): 115 | def __init__(self, agent: object): 116 | self.agent = agent 117 | 118 | async def astream(self, input: dict, config: dict): 119 | # Call your agent here (and stream events) 120 | # e.g.: 121 | async for event in self.agent.astream( 122 | input=input, config=config 123 | ): 124 | yield event 125 | 126 | 127 | class MyAdapter(BaseAdapter): 128 | def load_agent(self, agent: object): 129 | # Check if `agent` is supported by MyFramework and if so return it 130 | # e.g.: 131 | if isinstance(agent, MyAgentType): 132 | return MyAgent(agent) 133 | # Optionally add support to other Agent Types: 134 | # e.g.: 135 | # if isinstance(agent, MyOtherAgentType): 136 | # return MyAgent(MyAgentTypeConv(agent)) -------------------------------------------------------------------------------- /docs/pages/agws/workflow_server_manager.md: -------------------------------------------------------------------------------- 1 | # Workflow Server Manager 2 | 3 | The Workflow Server Manager (WFSM) is a command line tool that streamlines the process of wrapping an agent into a container image, starting the container, and exposing the agent functionality through the Agent Connect Protocol (ACP). 
4 | 5 | The WFSM tool takes an [Agent Manifest](manifest.md) as input and based on it spins up a web server container exposing the agent through ACP through REST api. 6 | 7 | ## Getting Started 8 | 9 | ## Installation 10 | 11 | Download and unpack the executable binary from the [releases page](https://github.com/agntcy/workflow-srv-mgr/releases) 12 | 13 | Alternatively you can execute the installer script by running the following command: 14 | ```bash 15 | curl -L https://raw.githubusercontent.com/agntcy/workflow-srv-mgr/refs/heads/main/install.sh | bash 16 | ``` 17 | The installer script will download the latest release and unpack it into the `bin` folder in the current directory. 18 | The output of the execution looks like this: 19 | 20 | ```bash 21 | curl -L https://raw.githubusercontent.com/agntcy/workflow-srv-mgr/refs/heads/install/install.sh | bash [16:05:58] 22 | % Total % Received % Xferd Average Speed Time Time Time Current 23 | Dload Upload Total Spent Left Speed 24 | 100 1034 100 1034 0 0 2597 0 --:--:-- --:--:-- --:--:-- 2597 25 | Installing the Workflow Server Manager tool: 26 | 27 | OS: darwin 28 | ARCH: arm64 29 | AG: 0.0.1-dev.23 30 | TARGET: /Users/johndoe/.wfsm/bin 31 | ARCHIVE_URL: https://github.com/agntcy/workflow-srv-mgr/releases/download/v0.0.1-dev.23/wfsm0.0.1-dev.23_darwin_arm64.tar.gz 32 | 33 | 34 | Installation complete. The 'wfsm' binary is located at /Users/johndoe/.wfsm/bin/wfsm 35 | ``` 36 | 37 | Listed variables can be overridden by providing the values as variables to the script 38 | 39 | ### Prerequisites 40 | 41 | The utility requires Docker engine, `docker`, and `docker-compose` to be present on the host. 42 | 43 | To make sure the docker setup is correct on the host, execute the following: 44 | 45 | ```bash 46 | wfsm check 47 | ``` 48 | 49 | In case the command signals error you can pass the `-v` flag to display verbose information about the failure. 
50 | 51 | 52 | ## Run deploy 53 | 54 | Using `wfsm deploy` command agents can be deployed on two platforms: `docker` & `k8s` using `--platform` option. 55 | (Default platform is `docker`). 56 | By default this command runs with `--dryRun` and only generates deployment artifacts (docker-compose.yaml or helm chart) which will be saved in `~/.wfsm` folder. 57 | Running with `dryRun=false` will result in deploying either a Docker compose file or a Helm chart. (See details in the sections below.) 58 | The only mandatory parameter is `--manifestPath` which describes agent specification and should contain a valid deployment option. See [Manifest Spec](https://github.com/agntcy/workflow-srv-mgr/blob/main/wfsm/spec/manifest.yaml), example manifests can be found in the [WFSM Tool](https://github.com/agntcy/workflow-srv-mgr/tree/main/examples) repository. 59 | In case of `source_code` deployment options, a Docker image is first built in the local repository, with a checksum tag generated from the source code. 60 | 61 | Should there be multiple deployment options, users have to set the selected one with `--deploymentOption`. 62 | 63 | ```bash 64 | wfsm deploy -m examples/langgraph_manifest.json --deploymentOption=src 65 | ``` 66 | 67 | Should an agent have dependencies on other agents, they are handled the same way as the main agent: images are built and then deployed with the same Docker compose or Helm chart, so that they can see each other. 68 | Agent IDs, API keys, and agent endpoints are automatically injected into the calling agent so that it can reach dependent agents using ACP-SDK. 69 | 70 | ## Configuration 71 | 72 | Environment variables for an agent(s) can be provided in the: 73 | 74 | - agent manifest *dependencies* definition (see agent manifest) 75 | - local environment from where `wfsm` is launched 76 | - env file provided as an option: `wfsm deploy ... --envFilePath=my_env_file` 77 | - in `wfsm` specific config yaml: `wfsm deploy ... 
--configPath=my_config.yaml` 77 | 78 | 79 | Env vars are applied in the below order, meaning that entries lower in the list override those above: 80 | 81 | - all env vars from agents manifest(s) `dependencies` section 82 | - all *declared* env vars from local env 83 | - all *prefixed* env vars from local env 84 | - all *declared* env vars from env file (--envFilePath) 85 | - all *prefixed* env vars from env file (--envFilePath) 86 | - env vars from config.yaml (--configPath) 87 | - defaults defined in manifest `env_vars` section in case the value is empty 88 | 89 | *declared env vars* - env vars defined in manifest `env_vars` section: 90 | 91 | ```yaml 92 | "env_vars": [ 93 | { 94 | "desc": "Environment variable for agentA", 95 | "name": "AZURE_OPENAI_MODEL", 96 | "required": true, 97 | "defaultValue": "gpt-4o-mini" 98 | }, 99 | ... 100 | ] 101 | ``` 102 | 103 | *prefixed env vars* - env vars prefixed with the agent name (or agent deployment name), where the prefix is created according to the following rules: 104 | 105 | - all letters converted to capitals 106 | - all special characters replaced with '_' 107 | 108 | 109 | ### Example of configuring environment variables for agents 110 | 111 | Let's say you want to set `AZURE_OPENAI_API_KEY` for `mailcomposer` and its dependency `email_reviewer_1` (deployment name). 112 | Declare `AZURE_OPENAI_API_KEY` env var both for `mailcomposer` and `email_reviewer_1` in their manifest: 113 | 114 | ```yaml 115 | "env_vars": [ 116 | { 117 | "desc": "Environment variable for agentA", 118 | "name": "AZURE_OPENAI_MODEL", 119 | "required": true, 120 | "defaultValue": "gpt-4o-mini" 121 | }, 122 | ... 123 | ] 124 | ``` 125 | 126 | Example manifests can be found here: [mailcomposer](https://github.com/agntcy/workflow-srv-mgr/blob/main/examples/manifest_with_deps.json) manifest and [email_reviewer_1](https://github.com/agntcy/workflow-srv-mgr/blob/main/examples/llama_manifest.json) manifest. 
127 | 128 | You can run `export AZURE_OPENAI_API_KEY=xxx` in your local shell from where you launch `wfsm` and the env var will be set for both agents. 129 | 130 | If you want to set a different key for `email_reviewer_1` then you have to prefix your env var: `AGENT_B_AZURE_OPENAI_API_KEY`. 131 | 132 | Same rule applies for env vars declared in a usual env file which you can provide with `--envFilePath=your_env_file` option. 133 | 134 | ```yaml 135 | # applies to all agents declaring these env vars in their manifest 136 | AZURE_OPENAI_API_KEY="xxxxxxx" 137 | 138 | # only applies to mailcomposer agent 139 | MAILCOMPOSER_AZURE_OPENAI_API_KEY="xxxxxxx" 140 | 141 | # only applies to email_reviewer_1 agent 142 | EMAIL_REVIEWER_1_AZURE_OPENAI_API_KEY="xxxxxxx" 143 | ``` 144 | 145 | These above will override local env values. 146 | 147 | Finally you can declare env vars in `config.yaml` (eg. --configPath=config.yaml) like below that will override previous settings: 148 | 149 | ```yaml 150 | config: 151 | agent_A: 152 | envVars: 153 | "AZURE_OPENAI_API_KEY": "from_config" 154 | agent_B: 155 | envVars: 156 | "AZURE_OPENAI_API_KEY": "from_config" 157 | ``` 158 | 159 | ### Configuration of agent ID, API key, external port 160 | 161 | Agent ID's, API keys and external port for main agent are generated/set by `wfsm`. 162 | You can use `--showConfig` option to display the default configuration generated by the tool: 163 | 164 | ```sh 165 | wfsm deploy --manifestPath example_manifest.yaml --showConfig 166 | ``` 167 | 168 | You can use the default config as a base for your own config, or you can just add additional things you want to override. 169 | 170 | ```yaml 171 | cat > config.yaml <` folder. 208 | Using `-v` / `--verbose` option you can actually see the generated values file. 209 | For each agent, a `ConfigMap`, a `Secret` (containing env var configs), a `Service`, and a `StatefulSet` will be deployed. 
210 | 211 | Additional k8s specific configs - beside id's & api keys, env vars - can be specified under `k8s` field in the config file. 212 | A full example of config possibilities can be found [here](https://github.com/agntcy/workflow-srv-mgr/blob/main/examples/k8s_example_config.yaml). 213 | 214 | ```sh 215 | wfsm deploy -m manifest_with_deps.json --configPath=k8s_example_config.yaml -p k8s --showConfig 216 | ``` 217 | 218 | In case you run with `--dryRun=false` and you have an active K8s context configured (you can also provide you kube config file directly in `KUBECONFIG` env var) the tool will try to deploy the helm chart to that cluster. 219 | 220 | By default, the main agent service type is configured to `NodePort` so that the main agent will be reachable on a local cluster. 221 | 222 | You can specify a different namespace using `WFSM_K8S_NAMESPACE` env var. 223 | 224 | ## Test the Results 225 | 226 | The exposed REST endpoints can be accessed with regular tools (for example, Curl or Postman). 227 | 228 | ## Examples 229 | 230 | Example manifests can be found in the [WFSM Tool](https://github.com/agntcy/workflow-srv-mgr/tree/main/examples) repository. 231 | 232 | ### Expose the [Mail Composer](https://github.com/agntcy/agentic-apps/tree/main/mailcomposer) LangGraph agent through ACP workflow server 233 | 234 | ```bash 235 | wfsm deploy -m examples/langgraph_manifest.json -e examples/env_vars 236 | ``` 237 | 238 | ### Expose the [Email Reviewer](https://github.com/agntcy/agentic-apps/tree/main/email_reviewer) llama deploy workflow agent through ACP workflow server 239 | 240 | ```bash 241 | wfsm deploy -m examples/llama_manifest.json -e examples/env_vars 242 | ``` 243 | 244 | ### Expose an agent with dependencies through the ACP workflow server 245 | 246 | ```bash 247 | wfsm deploy -m examples/manifest_with_deps.json -e examples/env_vars_with_deps 248 | ``` 249 | 250 | ### Run agent from docker image 251 | 252 | Run deploy to build images. 
253 | 254 | ```bash 255 | wfsm deploy -m examples/langgraph_manifest.json -e examples/env_vars 256 | ``` 257 | 258 | Get the image tag from the console output and set it in the manifest. 259 | 260 | ``` 261 | "deployment_options": [ 262 | 263 | { 264 | "type": "docker", 265 | "name": "docker", 266 | "image": "agntcy/wfsm-mailcomposer:" 267 | } 268 | ... 269 | ] 270 | ``` 271 | 272 | Run `wfsm` again now with `--deploymentOption=docker --dryRun=false`: 273 | 274 | 275 | ```bash 276 | wfsm deploy -m examples/langgraph_manifest.json -e examples/env_vars --deploymentOption=docker --dryRun=false 277 | ``` -------------------------------------------------------------------------------- /docs/pages/csit.md: -------------------------------------------------------------------------------- 1 | # Continuous System Integration Testing 2 | 3 | The Agntcy Continuous System Integration Testing (CSIT) system design needs to 4 | meet the continuously expanding requirements of Agntcy projects including Agent 5 | Gateway Protocol, Agent Directory, and others. 6 | 7 | Tests can be run locally using taskfile or in GitHub Actions. 
8 | 9 | The directory structure of the CSIT is the following: 10 | 11 | ``` 12 | csit 13 | ├── benchmarks # Benchmark tests 14 | │   ├── agntcy-agp # Benchmark tests for AGP 15 | │   │   ├── Taskfile.yml # Tasks for AGP benchmark tests 16 | │   │   └── tests 17 | │   ├── agntcy-dir # Benchmark tests for ADS 18 | │   │   ├── Taskfile.yml # Tasks for ADS benchmark tests 19 | │   │   └── tests 20 | │   ├── go.mod 21 | │   ├── go.sum 22 | │   └── Taskfile.yml 23 | ├── integrations # Integration tests 24 | │   ├── agntcy-agp # Integration tests for [agntcy/agp](https://github.com/agntcy/agp) 25 | │   │   ├── agentic-apps 26 | │   │   ├── Taskfile.yml # Tasks for AGP integration tests 27 | │   │   └── tests 28 | │   ├── agntcy-apps # Integration tests for [agntcy/agentic-apps](https://github.com/agntcy/agentic-apps) 29 | │   │   ├── agentic-apps 30 | │   │   ├── Taskfile.yml # Tasks for agentic-apps integration tests 31 | │   │   └── tools 32 | │   ├── agntcy-dir # Integration tests for [agntcy/dir](https://github.com/agntcy/dir) 33 | │   │   ├── components 34 | │   │   ├── examples 35 | │   │   ├── manifests 36 | │   │   ├── Taskfile.yml # Tasks for ADS integration tests 37 | │   │   └── tests 38 | │   ├── environment # Test environment helpers 39 | │   │   └── kind 40 | │   ├── Taskfile.yml # Tasks for integration tests 41 | │   └── testutils # Go test utils 42 | ├── samples # Sample applications for testing 43 | │   ├── crewai 44 | │   │   └── simple_crew # Agentic application example 45 | │   │   ├── agent.base.json # Required agent base model 46 | │   │   ├── build.config.yml # Required build configuration file 47 | │   ├── model.json # Required model file 48 | │   ├── langgraph 49 | │   └── research # Agentic application example 50 | │   │   ├── agent.base.json # Required agent base model 51 | │ │   ├── build.config.yml # Required build configuration file 52 | │ │   ├── model.json # Required model file 53 | │ │   ├── Taskfile.yml # Tasks for samples tests 54 | │ │  
 └── tests 55 | │ ├── llama-index 56 | │ │   └── research # Agentic application example 57 | │ │   ├── agent.base.json # Required agent base model 58 | │ │   ├── build.config.yml # Required build configuration file 59 | │ │   ├── model.json # Required model file 60 | │ │   ├── Taskfile.yml # Tasks for samples tests 61 | │ │   └── tests 62 | ├── .... 63 | ├── .... # Tasks for Samples 64 | └── Taskfile.yml # Repository level task definitions 65 | ``` 66 | 67 | In the Taskfiles, all required tasks and steps are defined in a structured manner. Each CSIT component contains its necessary tasks within dedicated Taskfiles, with higher-level Taskfiles incorporating lower-level ones to efficiently leverage their defined tasks. 68 | 69 | ## Tasks 70 | 71 | You can list all the tasks defined in the Taskfiles using the `task -l` or simply run `task`. 72 | The following tasks are defined: 73 | 74 | ```bash 75 | task: Available tasks for this project: 76 | * benchmarks:directory:test: All ADS benchmark test 77 | * benchmarks:gateway:test: All AGP benchmark test 78 | * integrations:apps:download:wfsm-bin: Get wfsm binary from GitHub 79 | * integrations:apps:get-marketing-campaign-cfgs: Populate marketing campaign config file 80 | * integrations:apps:init-submodules: Initialize submodules 81 | * integrations:apps:run-marketing-campaign: Run marketing campaign 82 | * integrations:directory:download:dirctl-bin: Get dirctl binary from GitHub 83 | * integrations:directory:test: All directory test 84 | * integrations:directory:test-env:bootstrap:deploy: Deploy Directory network peers 85 | * integrations:directory:test-env:cleanup: Remove agntcy directory test env 86 | * integrations:directory:test-env:deploy: Deploy Agntcy directory test env 87 | * integrations:directory:test-env:network:cleanup: Remove Directory network peers 88 | * integrations:directory:test-env:network:deploy: Deploy Directory network peers 89 | * integrations:directory:test:compile:samples: Agntcy compiler test in 
samples 90 | * integrations:directory:test:compiler: Agntcy compiler test 91 | * integrations:directory:test:delete: Directory agent delete test 92 | * integrations:directory:test:list: Directory agent list test 93 | * integrations:directory:test:networking: Directory agent networking test 94 | * integrations:directory:test:push: Directory agent push test 95 | * integrations:gateway:build:agentic-apps: Build agentic containers 96 | * integrations:gateway:test-env:cleanup: Remove agent gateway test env 97 | * integrations:gateway:test-env:deploy: Deploy agntcy gateway test env 98 | * integrations:gateway:test:mcp-server: Test MCP over AGP 99 | * integrations:gateway:test:mcp-server:agp-native: Test AGP native MCP server 100 | * integrations:gateway:test:mcp-server:mcp-proxy: Test MCP server via MCP proxy 101 | * integrations:gateway:test:sanity: Sanity gateway test 102 | * integrations:kind:create: Create kind cluster 103 | * integrations:kind:destroy: Destroy kind cluster 104 | * integrations:version: Get version 105 | * samples:agents:run:test: Run test 106 | * samples:autogen:kind: Run app in kind 107 | * samples:autogen:lint: Run lint with black 108 | * samples:autogen:lint-fix: Run lint and autofix with black 109 | * samples:autogen:run:test: Run tests 110 | * samples:crewai:run:crew: Run crew 111 | * samples:crewai:run:test: Run crew 112 | * samples:evaluation:run:crew: Run application main 113 | * samples:langgraph:run:test: Run tests 114 | * samples:llama-deploy:run:app: Run application main 115 | * samples:llama-deploy:run:test: Run tests 116 | * samples:llama-index:run:test: Run tests 117 | ``` 118 | 119 | ## Integration Tests 120 | 121 | The integration tests are testing interactions between integrated components. 122 | 123 | ### Directory structure 124 | 125 | The CSIT integrations directory contains the tasks that create the test 126 | environment, deploy the components to be tested, and run the tests. 
127 | 128 | ### Running Integration Tests Locally 129 | 130 | For running tests locally, we need to create a test cluster and deploy the test environment on it before running the tests. 131 | Make sure the following tools are installed: 132 | - [Taskfile](https://taskfile.dev/installation/) 133 | - [Go](https://go.dev/doc/install) 134 | - [Docker](https://docs.docker.com/get-started/get-docker/) 135 | - [Kind](https://kind.sigs.k8s.io/docs/user/quick-start#installation) 136 | - [Kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) 137 | - [Helm](https://helm.sh/docs/intro/install/) 138 | 139 | To run tests locally: 140 | 141 | 1. Create the cluster and deploy the environment: 142 | 143 | ```bash 144 | task integrations:kind:create 145 | task integrations:directory:test-env:deploy 146 | # Or change dir to integratons directory: 147 | cd integrations 148 | task kind:create 149 | task directory:test-env:deploy 150 | ``` 151 | 152 | 1. Run the tests: 153 | 154 | ```bash 155 | task integrations:directory:test 156 | # Or change dir to integratons directory: 157 | cd integrations 158 | task directory:test 159 | ``` 160 | 161 | 1. When finished, the test cluster can be cleared: 162 | 163 | ```bash 164 | task integrations:kind:destroy 165 | # Or change dir to integratons directory: 166 | cd integrations 167 | task kind:destroy 168 | ``` 169 | 170 | ### Contributing Tests 171 | 172 | Contributing your own tests to the project is a great way to improve the 173 | robustness and coverage of the testing suite. 174 | 175 | To add your tests: 176 | 177 | 1. Fork and Clone the Repository 178 | 179 | Fork the repository to your GitHub account. Clone your fork to your local machine. 180 | 181 | ```bash 182 | git clone https://github.com/your-username/repository.git 183 | cd repository 184 | ``` 185 | 186 | 1. Create a new branch 187 | 188 | Create a new branch for your additions to keep your changes organized and separate from the main codebase. 
189 | 190 | ```bash 191 | git checkout -b add-new-test 192 | ``` 193 | 194 | 1. Navigate to the Integrations directory 195 | 196 | Locate the integrations directory where the test components are organized. 197 | 198 | ```bash 199 | cd integrations 200 | ``` 201 | 202 | 1. Add your test 203 | 204 | Following the existing structure, create a new sub-directory for your test 205 | if necessary. For example, `integrations/new-component`. Add all necessary 206 | test files, such as scripts, manifests, and configuration files. 207 | 208 | 1. Update Taskfile 209 | 210 | Modify the Taskfile.yaml to include tasks for deploying and running your new 211 | test. 212 | 213 | ```yaml 214 | tasks: 215 | new-component:test-env:deploy: 216 | desc: Description of deploying new component elements 217 | cmds: 218 | - # Command for deploying your components if needed 219 | 220 | new-component:test-env:cleanup: 221 | desc: Description of cleaning up component elements 222 | cmds: 223 | - # Command for cleaning up your components if needed 224 | 225 | new-component:test: 226 | desc: Description of the test 227 | cmds: 228 | - # Commands to set up and run your test 229 | ``` 230 | 231 | 1. Test locally 232 | 233 | Before pushing your changes, test them locally to ensure everything works as 234 | expected. 235 | 236 | ```bash 237 | task integrations:kind:create 238 | task integrations:new-component:test-env:deploy 239 | task integrations:new-component:test 240 | task integrations:new-component:test-env:cleanup 241 | task integrations:kind:destroy 242 | ``` 243 | 244 | 1. Document your test 245 | 246 | Update the documentation in the docs folder to include details on the new 247 | test. Explain the purpose of the test, any special setup instructions, and 248 | how it fits into the overall testing strategy. 249 | 250 | 1. Commit and push your changes 251 | 252 | Commit your changes with a descriptive message and push them to your fork. 253 | 254 | ```bash 255 | git add . 
256 | git commit -m "feat: add new test for component X" 257 | git push origin add-new-test 258 | ``` 259 | 260 | 1. Submit a pull request 261 | 262 | Go to the original repository on GitHub and submit a pull request from your 263 | branch. Provide a detailed description of what your test covers and any 264 | additional context needed for reviewers. 265 | 266 | ## Samples 267 | 268 | The samples directory in the CSIT repository serves two primary purposes related 269 | to the testing of agentic applications. 270 | 271 | ### Running Samples Tests Locally 272 | 273 | For running tests locally, we need the following tools to build the sample applications: 274 | - [Taskfile](https://taskfile.dev/installation/) 275 | - [Python 3.12.X](https://www.python.org/downloads/) 276 | - [Poetry](https://python-poetry.org/docs/#installation) 277 | - [Docker](https://docs.docker.com/get-started/get-docker/) 278 | - [Kind](https://kind.sigs.k8s.io/docs/user/quick-start#installation) 279 | - [Kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) 280 | 281 | Run the test: 282 | 283 | ```bash 284 | task samples::run:test 285 | # Or change dir to integratons directory: 286 | cd samples/ 287 | task run:test 288 | ``` 289 | 290 | ### Compilation and Execution Verification 291 | 292 | The agentic applications stored within the `samples` directory are subjected to 293 | sample tests. These tests are designed to run whenever changes are made to the 294 | agentic apps to ensure they compile correctly and are able to execute as 295 | expected. 296 | 297 | ### Base for Agent Directory Integration Test 298 | 299 | The agentic applications in the `samples` directory also serve as the foundation 300 | for the agent model build and push test. This specific test checks for the 301 | presence of two required files: `model.json` and `build.config.yaml`. If these 302 | files are present within an agentic application, the integration agent model 303 | build and push tests are triggered. 
This test is crucial for validating the 304 | construction and verification of the agent model, ensuring that all necessary 305 | components are correctly configured and operational. 306 | -------------------------------------------------------------------------------- /docs/pages/data-model-guide.md: -------------------------------------------------------------------------------- 1 | # Creating an Agent Data Model 2 | 3 | Follow the steps below to ensure that your data model is complete and compliant. 4 | The model provides a structured way to describe your agent's features, capabilities, and dependencies. 5 | You can find the template for the data model [here](https://github.com/agntcy/csit/blob/main/samples/crewai/simple_crew/model.json). 6 | 7 | ## Basic Information 8 | 9 | Start by filling out the basic metadata of your agent: 10 | 11 | * `name`: Provide a descriptive name for your agent. 12 | * `version`: Use semantic versioning to indicate the current version of your agent. 13 | * `authors`: List the authors in the `Name ` format. Replace `Your Name` and `you@example.com` with the appropriate details. 14 | * `created_at`: Use ISO 8601 format to specify when the agent was created. 15 | 16 | ## Define Skills 17 | 18 | The skills section outlines your agent’s capabilities. Retrieve skills definitions from the [OASF schema catalog](https://schema.oasf.agntcy.org). Each skill must include the following: 19 | 20 | * `category_name`: The category which the skill belongs to (for example, Natural Language Processing). 21 | * `category_uid`: The unique identifier for the category. 22 | * `class_name`: The specific skill or capability (for example, Text Completion or Problem Solving). 23 | * `class_uid`: The unique identifier for the class. 24 | 25 | You can add multiple skills to your agent. 26 | 27 | ## Add Locators 28 | 29 | The locators section provides references to the agent's source code or other resources. 
If the agent is packaged as a Docker container, provide the corresponding image or registry URL. 30 | 31 | You can also provide the source code: 32 | 33 | ```json 34 | { 35 | "type": "source-code", 36 | "url": "https://github.com/agntcy/csit/tree/main/samples/crewai/simple_crew" 37 | } 38 | ``` 39 | 40 | ## Specify Extensions 41 | 42 | The extensions section is critical for describing the features and operational parameters of your agent. To ensure compatibility, you must select extensions from the [OASF main features catalog](https://schema.oasf.agntcy.org/main_features?extensions). Examples of common extensions are the following: 43 | 44 | 1. Runtime Framework 45 | Defines the runtime environment for the agent. 46 | 47 | 1. Observability (Logging and Metrics) 48 | Tracks logs and performance metrics. 49 | 50 | 1. Memory and Orchestration 51 | Configure the agent's memory and orchestration behavior. 52 | 53 | 1. Language Runtime 54 | Define the programming language and version used by the agent: 55 | 56 | 1. I/O Mapper 57 | Describe the input/output structure of the agent: 58 | 59 | 1. LLM Configuration 60 | If your agent uses a language model, provide the model details and endpoint. 61 | 62 | 1. Evaluation 63 | Indicate the evaluation mechanism for your agent. 64 | 65 | 66 | ## Validate and Finalize 67 | 68 | Double-check that all fields are filled out accurately. 69 | Ensure that extensions and skills are selected from the OASF schema to maintain compatibility. 70 | Test the agent's configuration to verify that it works as expected. 71 | -------------------------------------------------------------------------------- /docs/pages/dir-howto.md: -------------------------------------------------------------------------------- 1 | # Getting started 2 | 3 | The Agent Directory (dir) allows publication, exchange and discovery of information about AI agents over a distributed peer-to-peer network. 
4 | It leverages [OASF](https://github.com/agntcy/oasf) to describe agents and provides a set of APIs and tools to build, store, publish and discover agents across the network by their attributes and constraints. 5 | Directory also leverages [CSIT](https://github.com/agntcy/csit) for continuous system integration and testing across different versions, environments, and features. 6 | 7 | ## Features 8 | 9 | - **Data Models** - Defines a standard schema for data representation and exchange. 10 | - **Dev Kit** - Provides CLI tooling to simplify development workflows and facilitate API interactions. 11 | - **Plugins** - Pluggable components to extend the build process of agent data models for custom use-cases. 12 | - **Announce** - Allows publication of agent data models to the network. 13 | - **Discover** - Listen, search, and retrieve agents across the network by their attributes and constraints. 14 | - **Security** - Relies on well-known security principles to provide data provenance, integrity and ownership. 15 | 16 | ## Prerequisites 17 | 18 | To build the project and work with the code, you will need the following installed in your system 19 | 20 | - [Taskfile](https://taskfile.dev/) 21 | - [Docker](https://www.docker.com/) 22 | - [Golang](https://go.dev/doc/devel/release#go1.24.0) 23 | 24 | Make sure Docker is installed with Buildx. 25 | 26 | ## Development 27 | 28 | Use `Taskfile` for all related development operations such as testing, validating, deploying, and working with the project. 29 | 30 | ### Clone the repository 31 | 32 | ```bash 33 | git clone https://github.com/agntcy/dir 34 | cd dir 35 | ``` 36 | 37 | ### Initialize the project 38 | 39 | This step will fetch all project dependencies and prepare the environment for development. 40 | 41 | ```bash 42 | task deps 43 | ``` 44 | 45 | ### Make changes 46 | 47 | Make the changes to the source code and rebuild for later testing. 
48 | 49 | ```bash 50 | task build 51 | ``` 52 | 53 | ### Test changes 54 | 55 | The local testing pipeline relies on Golang to perform unit tests, and 56 | Docker to perform E2E tests in an isolated Kubernetes environment using Kind. 57 | 58 | ```bash 59 | task test:unit 60 | task test:e2e 61 | ``` 62 | 63 | ## Artifacts distribution 64 | 65 | All artifacts are tagged using the [Semantic Versioning](https://semver.org/) and follow the checked out source code tags. 66 | It is not advised to use artifacts with mismatching versions. 67 | 68 | ### Container images 69 | 70 | All container images are distributed via [GitHub Packages](https://github.com/orgs/agntcy/packages?repo_name=dir). 71 | 72 | ```bash 73 | docker pull ghcr.io/agntcy/dir-ctl:v0.2.0 74 | docker pull ghcr.io/agntcy/dir-apiserver:v0.2.0 75 | ``` 76 | 77 | ### Helm charts 78 | 79 | All helm charts are distributed as OCI artifacts via [GitHub Packages](https://github.com/agntcy/dir/pkgs/container/dir%2Fhelm-charts%2Fdir). 80 | 81 | ```bash 82 | helm pull oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.2.0 83 | ``` 84 | 85 | ### Binaries 86 | 87 | All release binaries are distributed via [GitHub Releases](https://github.com/agntcy/dir/releases). 88 | 89 | ### SDKs 90 | 91 | - **Golang** - [github.com/agntcy/dir/api](https://pkg.go.dev/github.com/agntcy/dir/api), [github.com/agntcy/dir/cli](https://pkg.go.dev/github.com/agntcy/dir/cli), [github.com/agntcy/dir/server](https://pkg.go.dev/github.com/agntcy/dir/server) 92 | 93 | ## Deployment 94 | 95 | Directory API services can be deployed either using the `Taskfile` or directly via released Helm chart. 96 | 97 | ### Using Taskfile 98 | 99 | This will start the necessary components such as storage and API services. 100 | 101 | ```bash 102 | task server:start 103 | ``` 104 | 105 | ### Using Helm chart 106 | 107 | This will deploy Directory services into an existing Kubernetes cluster. 
108 | 109 | ```bash 110 | helm pull oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.2.0 111 | helm upgrade --install dir oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.2.0 112 | ``` 113 | 114 | ## Usage 115 | 116 | This document defines a basic overview of main Directory features, components, and usage scenarios. 117 | 118 | > Although the following example is shown for CLI-based usage scenario, 119 | there is an effort on exposing the same functionality via SDKs. 120 | 121 | ### Requirements 122 | 123 | - Directory CLI client, distributed via [GitHub Releases](https://github.com/agntcy/dir/releases) 124 | - Directory API server, outlined in the [Deployment](#deployment) section. 125 | 126 | ### Build 127 | 128 | This example demonstrates the examples of a data model and how to build such models using provided tooling to prepare for publication. 129 | 130 | Generate an example agent that matches the data model schema defined in [Agent Data Model](api/core/v1alpha1/agent.proto) specification. 131 | 132 | ```bash 133 | cat << EOF > model.json 134 | { 135 | "name": "my-agent", 136 | "skills": [ 137 | {"category_name": "Text Generation"}, 138 | {"category_name": "Fact Extraction"} 139 | ] 140 | } 141 | EOF 142 | ``` 143 | 144 | Alternatively, build the same agent data model using the CLI client. 145 | The build process allows additional operations to be performed, 146 | which is useful for agent model enrichment and other custom use-cases. 147 | 148 | ```bash 149 | # Define the build config 150 | cat << EOF > build.config.yml 151 | builder: 152 | # Base agent model path 153 | base-model: "model.json" 154 | 155 | # Disable the LLMAnalyzer plugin 156 | llmanalyzer: false 157 | 158 | # Disable the runtime plugin 159 | runtime: false 160 | EOF 161 | 162 | # Build the agent 163 | dirctl build . 
> built.model.json 164 | 165 | # Override above example 166 | mv built.model.json model.json 167 | ``` 168 | 169 | ### Signing and Verification 170 | 171 | This process relies on attaching a signature to the agent data model using an identity-based OIDC signing flow which can be verified by other clients. 172 | The signing process opens a browser window to authenticate the user 173 | with an OIDC identity provider. 174 | The verification process validates the agent signature against the identity provider and signature transparency services. 175 | These operations are implemented using [Sigstore](https://www.sigstore.dev/). 176 | 177 | ```bash 178 | ## Sign the agent data model 179 | cat model.json | dirctl sign --stdin > signed.model.json 180 | 181 | ## Verify agent data models 182 | cat model.json | dirctl verify --stdin 183 | cat signed.model.json | dirctl verify --stdin 184 | 185 | ## Verify signature using custom parameters: 186 | # 1. Only trust users with "cisco.com" addresses 187 | # 2. Only trust issuers from "github.com" 188 | dirctl verify signed.model.json \ 189 | --oidc-identity "(.*)@cisco.com" \ 190 | --oidc-issuer "(.*)github.com(.*)" 191 | 192 | ## Replace the base agent model with a signed one 193 | rm -rf model.json 194 | mv signed.model.json model.json 195 | ``` 196 | 197 | ### Store 198 | 199 | This example demonstrates the interaction with the local storage layer. 200 | It is used as a content-addressable object store for directory-specific models and serves both the local and network-based operations (if enabled). 201 | 202 | ```bash 203 | # push and store content digest 204 | dirctl push model.json > model.digest 205 | DIGEST=$(cat model.digest) 206 | 207 | # pull 208 | dirctl pull $DIGEST 209 | 210 | # lookup 211 | dirctl info $DIGEST 212 | ``` 213 | 214 | ### Announce 215 | 216 | This example demonstrates how to publish the data to allow content discovery. 
217 | To avoid stale data, it is recommended to republish the data periodically 218 | as the data across the network has TTL. 219 | 220 | Note that this operation only works for the objects already pushed to the local storage layer, i.e. 221 | you must first push the data before being able to perform publication. 222 | 223 | ```bash 224 | # Publish the data to your local data store. 225 | dirctl publish $DIGEST 226 | 227 | # Publish the data across the network. 228 | dirctl publish $DIGEST --network 229 | ``` 230 | 231 | If the data is not published to the network, it cannot be discovered by other peers. 232 | For published data, peers may try to reach out over the network 233 | and request specific objects for verification and replication. 234 | Network publication may fail if you are not connected to any peers. 235 | 236 | ### Discover 237 | 238 | This example demonstrates how to discover published data locally or across the network. 239 | This API supports both unicast mode for routing to specific objects, 240 | and multicast mode for attribute-based matching and routing. 241 | 242 | There are two modes of operation, a) local mode where the data is queried from the local data store, and b) network mode where the data is queried across the network. 243 | 244 | Discovery is performed using full-set label matching, i.e. the results always fully match the requested query. 245 | Note that it is not guaranteed that the data is available, valid, or up to date as results. 246 | 247 | ```bash 248 | # Get a list of peers holding a specific agent data model 249 | dirctl list --digest $DIGEST 250 | 251 | # Discover the agent data models in your local data store that can fully satisfy your search query. 252 | dirctl list "/skills/Text Generation" 253 | dirctl list "/skills/Text Generation" "/skills/Fact Extraction" 254 | 255 | # Discover the agent data models across the network that can fully satisfy your search query. 
256 | dirctl list "/skills/Text Generation" --network 257 | dirctl list "/skills/Text Generation" "/skills/Fact Extraction" --network 258 | ``` 259 | 260 | It is also possible to get an aggregated summary about the data held in your local data store or across the network. 261 | This is used for routing decisions when traversing the network. 262 | Note that for network search, you will not query your own data, but only the data of other peers. 263 | 264 | ```bash 265 | # Get a list of labels and basic summary details about the data you currently have in your local data store. 266 | dirctl list info 267 | 268 | # Get a list of labels and basic summary details about the data available across the reachable network. 269 | dirctl list info --network 270 | ``` 271 | -------------------------------------------------------------------------------- /docs/pages/dir.md: -------------------------------------------------------------------------------- 1 | # Core Concepts 2 | 3 | The Agent Directory Service (ADS) is a distributed directory service designed to 4 | store metadata for AI agent applications. This metadata, stored as directory 5 | records, enables the discovery of agent applications with specific skills for 6 | solving various problems. 7 | The implementation features distributed directories that interconnect through a 8 | content-routing protocol. This protocol maps agent skills to directory record 9 | identifiers and maintains a list of directory servers currently hosting those 10 | records. 11 | Directory records are identified by globally unique names that are routable 12 | within a DHT (Distributed Hash Table) to locate peer directory servers. 13 | Similarly, the skill taxonomy is routable in the DHT to map skillsets to records 14 | that announce those skills. 15 | 16 | Each directory record must include skills from a defined taxonomy, as specified 17 | in the [Taxonomy of AI Agent Skills](oasf-taxonomy.md) from [OASF](oasf.md). 
18 | While all record data is modeled using [OASF](oasf.md), only skills are 19 | leveraged for content routing in the distributed network of directory servers. 20 | The ADS specification is under active development and is published as an 21 | Internet Draft at [ADS Spec](https://spec.dir.agntcy.org). The source code is 22 | available in the [ADS Spec sources](https://github.com/agntcy). 23 | The current reference implementation, written in Go, provides server and client 24 | nodes with gRPC and protocol buffer interfaces. The directory record storage is 25 | built on [ORAS](https://oras.land) (OCI Registry As Storage), while data 26 | distribution uses the [zot](https://zotregistry.dev) OCI server implementation. 27 | 28 | ## Naming 29 | 30 | In distributed systems, having a reliable and collision-resistant naming scheme 31 | is crucial. The agent directory uses cryptographic hashes to generate globally 32 | unique identifiers for data records. 33 | ADS leverages OCI as object storage and therefore identifiers are made available 34 | as described in [OCI digest](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). 35 | 36 | ## Content Routing 37 | 38 | ADS implements capability-based record discovery through a hierarchical skill 39 | taxonomy. This architecture enables: 40 | 41 | 1. Capability Announcement: 42 | 1. Multi-agent systems can publish their capabilities by encoding them as 43 | skill taxonomies. 44 | 2. Each record contains metadata describing the agent's functional abilities. 45 | 3. Skills are structured in a hierarchical format for efficient matching. 46 | 2. Discovery Process: The system performs a two-phase discovery operation: 47 | 1. Matches queried capabilities against the skill taxonomy to determine 48 | records by their identifier. 49 | 2. Identifies the server nodes storing relevant records. 50 | 3. Distributed Resolution: Local nodes execute targeted retrievals based on: 51 | 1. 
Skill matching results: Evaluates capability requirements. 52 | 2. Server location information: Determines optimal data sources. 53 | 54 | ADS uses libp2p [Kad-DHT](https://docs.libp2p.io/concepts/discovery-routing/kaddht/) 55 | for server and content discovery. 56 | 57 | ## Distributed Object Storage 58 | 59 | ADS differs from block storage systems like 60 | [IPFS](https://ipfs.tech/) in its approach to distributed object storage. 61 | The differences are described in the following sections. 62 | 63 | ### Simplified Content Retrieval 64 | 65 | 1. ADS directly stores complete records rather than splitting them into blocks. 66 | 2. No special optimizations needed for retrieving content from multiple sources. 67 | 3. Records are retrieved as complete units using standard OCI protocols. 68 | 69 | ### OCI Integration 70 | 71 | ADS leverages the OCI distribution specification for content storage and retrieval: 72 | 73 | 1. Records are stored and transferred using OCI artifacts. 74 | 2. Any OCI distribution-compliant server can participate in the network. 75 | 3. Servers retrieve records directly from each other using standard OCI protocols. 76 | 77 | While ADS uses zot as its reference OCI server implementation, the system works 78 | with any server that implements the OCI distribution specification. 79 | 80 | ## Flow Diagrams 81 | 82 | ```mermaid 83 | sequenceDiagram 84 | participant User 85 | participant DHT 86 | participant ServerA 87 | participant ServerB 88 | participant ServerC 89 | 90 | Note over ServerA,ServerC: Publication Phase 91 | ServerA->>ServerA: Generate record digest 92 | ServerA->>ServerA: Extract skills from record 93 | ServerA->>ServerA: Store record locally 94 | ServerA->>DHT: Announce digest + skills 95 | ServerB->>ServerB: Generate record digest 96 | ServerB->>ServerB: Extract skills from record 97 | ServerB->>ServerB: Store record locally 98 | ServerB->>DHT: Announce digest + skills 99 | DHT->>DHT: Update routing tables
(skills→digests→servers) 100 | 101 | Note over User,ServerC: Discovery Phase 102 | User->>DHT: Query by skills 103 | DHT->>DHT: Search routing tables 104 | DHT->>User: Return matching digests
+ server addresses 105 | User->>User: Select records 106 | User->>ServerA: Download record 1 107 | User->>ServerB: Download record 2 108 | ``` 109 | 110 | 111 | # Agent Directory Records Example 112 | 113 | ## Skill Tags (Taxonomy) 114 | ```yaml 115 | skills: 116 | language: 117 | - text-generation 118 | - text-completion 119 | - text-summarization 120 | - text-translation 121 | vision: 122 | - image-generation 123 | - image-classification 124 | - object-detection 125 | audio: 126 | - speech-to-text 127 | - text-to-speech 128 | reasoning: 129 | - task-planning 130 | - decision-making 131 | - problem-solving 132 | ``` 133 | 134 | ## Record Examples with Digests 135 | 136 | ### Text Generation Agent 137 | ```json 138 | { 139 | "digest": "sha256:4e8c72f126b2e4a318911ba11b39432978d0611a56d53a2cfb6fdb42853df0e2", 140 | "skills": [ 141 | "language/text-generation", 142 | "language/text-completion" 143 | ], 144 | "metadata": { 145 | "name": "gpt4-agent", 146 | "version": "1.0.0", 147 | "locator": { 148 | "type": "github", 149 | "url": "github.com/agntcy/agents/gpt4-agent" 150 | } 151 | } 152 | } 153 | ``` 154 | 155 | ### Vision Processing Agent 156 | ```json 157 | { 158 | "digest": "sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", 159 | "skills": [ 160 | "vision/image-generation", 161 | "vision/image-classification" 162 | ], 163 | "metadata": { 164 | "name": "dall-e-agent", 165 | "version": "2.0.0", 166 | "locator": { 167 | "type": "github", 168 | "url": "github.com/agntcy/agents/dalle-agent" 169 | } 170 | } 171 | } 172 | ``` 173 | 174 | ### Multi-Modal Agent 175 | ```json 176 | { 177 | "digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 178 | "skills": [ 179 | "language/text-generation", 180 | "vision/image-generation", 181 | "reasoning/task-planning" 182 | ], 183 | "metadata": { 184 | "name": "multi-modal-agent", 185 | "version": "1.0.0", 186 | "locator": { 187 | "type": "github", 188 | "url": 
"github.com/agntcy/agents/multimodal-agent" 189 | } 190 | } 191 | } 192 | ``` 193 | 194 | The digests are SHA-256 hashes of the record content, making them: 195 | - Globally unique 196 | - Content-addressable 197 | - Collision-resistant 198 | - Immutable -------------------------------------------------------------------------------- /docs/pages/how-to-guides/agents/thread.rst: -------------------------------------------------------------------------------- 1 | Building Applications with ACP Threads 2 | ===================================== 3 | 4 | ACP Node supports threads, where a thread contains the accumulated state of a sequence of runs. 5 | 6 | In this tutorial, we will explore how to create a LangGraph agent with threads, wrap it in an ACP node, and leverage the various functionalities that come with using threads. 7 | 8 | Learning Objectives 9 | ------------------- 10 | 11 | In this short tutorial you will learn: 12 | 13 | * How to create a manifest for a LangGraph agent from code 14 | * Deploy the agent with Workflow Server 15 | * Use thread endpoints effectively 16 | 17 | Prerequisites 18 | ------------------ 19 | 20 | * poetry 21 | * python version <4.0,>=3.9 22 | * `Workflow server manager `_ 23 | * An Editor of your choice 24 | 25 | 26 | Implementation Walkthrough 27 | ------------------- 28 | 29 | Together we will, create a LangGraph agent, deploy it on a Workflow Server, and utilize its threading capabilities. You can find the agent's source code here: `Mail Composer Agent Source Code `_. 30 | The agent we will work with is called **Mail Composer**, which specializes in composing emails for marketing campaigns. 31 | 32 | Setup 33 | ^^^^^ 34 | 35 | Create a new poetry project 36 | ++++++++++++++++++++++++++++ 37 | 38 | .. code-block:: console 39 | 40 | poetry new agent_with_thread 41 | 42 | 43 | .. note:: 44 | 45 | As of this writing, `angtcy_acp` only supports Python versions specified as `requires-python = "<4.0,>=3.10.0"`. 
Therefore, before proceeding to the next step, ensure you edit the `pyproject.toml` file and set the `requires-python` variable to: 46 | 47 | .. code-block:: console 48 | 49 | requires-python = "<4.0,>=3.10.0" 50 | 51 | Install dependencies 52 | ++++++++++++++++++++ 53 | 54 | .. code-block:: console 55 | 56 | poetry add langgraph langchain langchain-openai pydantic agntcy_acp 57 | 58 | Within the `src/agent_with_thread` directory, create two new files: the first named `agent.py` and the second named `state.py`. 59 | +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 60 | 61 | .. code-block:: console 62 | 63 | cd src/agent_with_thread && touch agent.py && touch state.py 64 | 65 | 66 | Copy and Paste `this code `_ in the agent.py file 67 | 68 | Change the following lines: 69 | 70 | From 71 | 72 | .. code:: python 73 | if is_stateless: 74 | print("mailcomposer - running in stateless mode") 75 | graph = graph_builder.compile() 76 | else: 77 | print("mailcomposer - running in stateful mode") 78 | checkpointer = InMemorySaver() 79 | graph = graph_builder.compile(checkpointer=checkpointer) 80 | 81 | To 82 | 83 | .. code:: python 84 | 85 | checkpointer = InMemorySaver() 86 | graph = graph_builder.compile(checkpointer=checkpointer) 87 | 88 | 89 | Copy and Paste `this code `_ in the state.py file 90 | 91 | .. note:: 92 | 93 | The creation of a LangGraph agent is outside the scope of this guide. If you're unfamiliar with how to create one, refer to this tutorial provided by the LangGraph team: `LangGraph Agent Tutorial `_. 94 | 95 | 96 | Define agent manifest 97 | ^^^^^^^^^^^^^^^^^^^^^ 98 | 99 | 1. At the same level as the `src` file, create a new directory named `deploy` and inside src/agent_with_thread create a new Python file called `generate_manifest.py`. 100 | 101 | .. code-block:: console 102 | 103 | mkdir ../../deploy && touch generate_manifest.py 104 | 105 | 106 | 2. 
In the `generate_manifest.py` file, import all the necessary libraries. 107 | 108 | .. code-block:: python 109 | 110 | from pathlib import Path 111 | from pydantic import AnyUrl 112 | from state import AgentState, OutputState, ConfigSchema 113 | from agntcy_acp.manifest import ( 114 | AgentManifest, 115 | AgentDeployment, 116 | DeploymentOptions, 117 | LangGraphConfig, 118 | EnvVar, 119 | AgentMetadata, 120 | AgentACPSpec, 121 | AgentRef, 122 | Capabilities, 123 | SourceCodeDeployment, 124 | ) 125 | 126 | 127 | 3. Define the agent manifest, in code. 128 | 129 | .. code-block:: python 130 | :emphasize-lines: 10,16 131 | 132 | manifest = AgentManifest( 133 | metadata=AgentMetadata( 134 | ref=AgentRef(name="org.agntcy.agent_with_thread", version="0.0.1", url=None), 135 | description="Offer a chat interface to compose an email for a marketing campaign. Final output is the email that could be used for the campaign"), 136 | specs=AgentACPSpec( 137 | input=AgentState.model_json_schema(), 138 | output=OutputState.model_json_schema(), 139 | config=ConfigSchema.model_json_schema(), 140 | capabilities=Capabilities( 141 | threads=True, 142 | callbacks=False, 143 | interrupts=False, 144 | streaming=None 145 | ), 146 | custom_streaming_update=None, 147 | thread_state=AgentState.model_json_schema(), 148 | interrupts=None 149 | ), 150 | deployment=AgentDeployment( 151 | deployment_options=[ 152 | DeploymentOptions( 153 | root = SourceCodeDeployment( 154 | type="source_code", 155 | name="source_code_local", 156 | url=AnyUrl("file://../"), 157 | framework_config=LangGraphConfig( 158 | framework_type="langgraph", # or "llamaindex" if yout agent is written with that particular framework, 159 | graph="agent_with_thread.agent:graph" # if a llamaindex agent than the key for the entrypoint is path 160 | ) 161 | ) 162 | ) 163 | ], 164 | env_vars=[ 165 | EnvVar(name="AZURE_OPENAI_API_KEY", desc="Azure key for the OpenAI service"), 166 | EnvVar(name="AZURE_OPENAI_ENDPOINT", desc="Azure 
endpoint for the OpenAI service") 167 | ], 168 | dependencies=[] 169 | ) 170 | ) 171 | 172 | #Write the result in a json file 173 | 174 | with open(f"{Path(__file__).parent}/../../deploy/manifest.json", "w") as f: 175 | f.write(manifest.model_dump_json( 176 | exclude_unset=True, 177 | exclude_none=True, 178 | indent=2 179 | )) 180 | 181 | .. note:: 182 | 183 | You might have some indentation problems if you copy and paste the above code, make sure to fix them before you proceed. 184 | 185 | With the above code we've defined the manifest for our agent and in it we set threads as one of its capabilities, and for that reason we also had to define the thread_state, so that the workflow server knows the model for the threads. For more details about the manifest, see `here `_. 186 | 187 | Now you should be able to generate the agent manifest by running 188 | 189 | .. code-block:: console 190 | 191 | poetry run python generate_manifest.py 192 | 193 | Confirm that there is a file called manifest.json inside the deploy folder. 194 | 195 | 196 | Run and test the Agent 197 | ^^^^^^^^^^^^^^^^^^^^^^ 198 | 199 | 1. Create the agent configuration file 200 | 201 | First you need to create a configuration file that will hold the environment variables needed by the agent. To know more about the structure of this file go `here `_. 202 | 203 | Go to the deploy folder previously created and create a file called config.yaml. 204 | 205 | .. code-block:: console 206 | 207 | cd ../../deploy && touch config.yaml 208 | 209 | Paste the code below inside config.yaml and replace the environment variables accordingly. 210 | 211 | .. code-block:: yaml 212 | 213 | config: 214 | org.agntcy.agent_with_thread: 215 | port: 52393 216 | apiKey: 799cccc7-49e4-420a-b0a8-e4de949ae673 217 | id: 45fb3f84-c0d7-41fb-bae3-363ca8f8092a 218 | envVars: 219 | AZURE_OPENAI_API_KEY: [YOUR AZURE OPEN API KEY] 220 | AZURE_OPENAI_ENDPOINT: https://[YOUR ENDPOINT].openai.azure.com 221 | 222 | 223 | 2. 
Deploy the agent using the Workflow Server (`Workflow Server Repository `_) and the Workflow Server Manager (`Workflow Server Manager Repository `_). 224 | 225 | From the root of this project run: 226 | 227 | .. code-block:: console 228 | 229 | wfsm deploy -m deploy/manifest.json -c deploy/config.yaml --dryRun=false 230 | 231 | 3. Test your Agent 232 | 233 | Create a new thread 234 | ++++++++++++++++++++ 235 | 236 | .. code-block:: console 237 | 238 | curl -X 'POST' \ 239 | 'http://127.0.0.1:52393/threads' \ 240 | -H 'accept: application/json' \ 241 | -H 'x-api-key: 799cccc7-49e4-420a-b0a8-e4de949ae673' \ 242 | -H 'Content-Type: application/json' \ 243 | -d '{ 244 | "thread_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6", 245 | "metadata": {}, 246 | "if_exists": "raise" 247 | }' 248 | 249 | 250 | Run the thread 251 | +++++++++++++++ 252 | 253 | .. code-block:: console 254 | 255 | curl -X 'POST' \ 256 | 'http://127.0.0.1:52393/threads/3fa85f64-5717-4562-b3fc-2c963f66afa6/runs/wait' \ 257 | -H 'accept: application/json' \ 258 | -H 'x-api-key: 799cccc7-49e4-420a-b0a8-e4de949ae673' \ 259 | -H 'Content-Type: application/json' \ 260 | -d '{ 261 | "agent_id": "45fb3f84-c0d7-41fb-bae3-363ca8f8092a", 262 | "input": { 263 | "is_completed": null, 264 | "messages": [{"type": "human", "content": "Email about wooden spoon be inventive on regarding email body"}] 265 | }, 266 | "metadata": {}, 267 | "config": { 268 | "tags": [ 269 | "string" 270 | ], 271 | "recursion_limit": 10, 272 | "configurable": { 273 | "test": true, 274 | "thread_id":"3fa85f64-5717-4562-b3fc-2c963f66afa6" 275 | } 276 | }, 277 | "stream_mode": null, 278 | "on_disconnect": "cancel", 279 | "multitask_strategy": "reject", 280 | "after_seconds": 0, 281 | "stream_subgraphs": false, 282 | "if_not_exists": "reject" 283 | }' 284 | 285 | 286 | Get the state 287 | ++++++++++++++ 288 | 289 | .. 
code-block:: console 290 | 291 | curl -X 'GET' \ 292 | 'http://127.0.0.1:52393/threads/3fa85f64-5717-4562-b3fc-2c963f66afa6' \ 293 | -H 'accept: application/json' \ 294 | -H 'x-api-key: 799cccc7-49e4-420a-b0a8-e4de949ae673' 295 | 296 | This will return the current state of the thread in the format specified in the manifest. 297 | 298 | Get the state history 299 | +++++++++++++++++++++ 300 | 301 | .. code-block:: console 302 | 303 | curl -X 'GET' \ 304 | 'http://127.0.0.1:52393/threads/3fa85f64-5717-4562-b3fc-2c963f66afa6' \ 305 | -H 'accept: application/json' \ 306 | -H 'x-api-key: 799cccc7-49e4-420a-b0a8-e4de949ae673' 307 | 308 | This will return the entire state for every run of the given thread_id. 309 | 310 | Final Words 311 | ++++++++++++ 312 | 313 | Do not stop here; check our OpenAPI documentation and try out more `endpoints `_. 314 | 315 | Thank you for reading. 316 | -------------------------------------------------------------------------------- /docs/pages/how-to-guides/mas-creation-tutorial/_static/marketing_campaign_final.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/pages/how-to-guides/mas-creation-tutorial/_static/marketing_campaign_final.png -------------------------------------------------------------------------------- /docs/pages/how-to-guides/mas-creation-tutorial/_static/marketing_campaign_skeleton.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/agntcy/docs/9d5bde72deaf5476e3a096c3198724ce234c6d78/docs/pages/how-to-guides/mas-creation-tutorial/_static/marketing_campaign_skeleton.png -------------------------------------------------------------------------------- /docs/pages/introduction.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | The AGNTCY is an open source collective 
building the infrastructure for The Internet of Agents: an open, interoperable internet for agent-to-agent collaboration. 4 | 5 | ## Vision 6 | 7 | Agentic AI will accelerate all of human work. Enterprises need to create agentic workflows and applications combining internal and third-party agents to fully leverage the power of AI, accelerate their business, and achieve significant productivity gains. 8 | We believe that an open, interoperable Internet of Agents is the key to enabling the best possible path forward to accelerate innovation and create the most value for all participants, from builders to operators, developers to consumers across all industries and businesses. 9 | 10 | ### Mission 11 | 12 | We are an open source collective committed to building the Internet of Agents to be accessible to all. Our mission is to build a diverse, collaborative space to innovate, develop, and maintain software components and services that solve key problems in the domain of agentic workflows and multi-agent applications. 13 | 14 | ### Capabilities 15 | 16 | Based on advanced protocols, frameworks, and components, the goal of IoA software infrastructure is to enable and simplify the creation of multi-agent applications through the following steps: 17 | 18 | 1. **DISCOVER**: Find and evaluate the best agents for the job. 19 | 1. **COMPOSE**: Connect agents into effective workflows across any framework or vendor. 20 | 1. **DEPLOY**: Run multi-agent systems at scale, securely. 21 | 1. **EVALUATE**: Monitor performance and improve efficiency and efficacy over time. 22 | 23 | ### Technical Objectives 24 | 25 | 1. **Interoperability**: Establish a common protocol that enables AI agents from different vendors and platforms to communicate and work together efficiently. 26 | 2. **Security**: Ensure secure interactions between agents through robust authentication, authorization, and encryption mechanisms. 27 | 3. 
**Scalability**: Design a scalable architecture that leverages the cloud-native stack optimally, supporting a growing number of agents and interactions without compromising performance. 28 | 4. **Standardization**: Develop standardized data models and schemas to ensure consistent data representation and validation across the ecosystem. 29 | 30 | ### Core Components 31 | 32 | The initial set of IoA components and architecture is outlined below. This is a starting point - as new members join and bring their contributions, the collective will continue to evolve and expand the IoA architecture, components, and interfaces. 33 | 34 | ```{image} ../_static/ioa_stack.png 35 | :alt: Simplified Internet of Agent Stack 36 | :width: 100% 37 | :align: center 38 | ``` 39 | 40 | 1. **Identity**: To ensure open, collision-free, secure agent authentication and metadata validation, fostering trust and reliability across Agents, MCP Servers, and Multi-Agent Systems. 41 | 1. **Open Agent Schema Framework (OASF)**: An OCI-based extensible data model that allows describing agents' attributes and ensuring unique identification of agents. The current OASF repo can be found [here](https://github.com/agntcy/oasf), and the OASF schema documentation can be found [here](https://schema.oasf.agntcy.org). 42 | 1. **Agent Directory**: Allows announcing and discovering agents or multi-agent applications. Any organization can run its directory and keep it in sync with others, forming the Internet of Agents inventory. 43 | 1. **Agent Manifest**: A standard format to describe agents, their capabilities, their dependencies, and how to deploy or consume them. The manifest is designed to be used by ACP and the Workflow Server and stored in the Agent Directory with the corresponding OASF extensions. 44 | 1. **Semantic SDK**: 45 | * **I/O Mapper Agent**: Handles semantic data adaptations between agents that need to communicate with each other. 46 | * **Semantic Router**: Directs workflows via semantic matches. 
(coming soon) 47 | 1. **Syntactic SDK**: 48 | * **Agent Connect Protocol (ACP)**: A standard interface to invoke agents (or agentic applications), provide input, retrieve output, retrieve supported schemas, graph topology and other useful information. Current ACP spec can be found [here](https://spec.acp.agntcy.org/). 49 | * **API-bridge Agent** to connect an Agent with any API end-point (tools or data sources) 50 | * **Human in the Loop Agent** to interface with human input/output seamlessly. (coming soon) 51 | 1. **Messaging SDK**: 52 | * **Agent Gateway Protocol (AGP)**: A protocol that defines the standards and guidelines for secure and efficient network-level communication between AI agents. AGP ensures interoperability and seamless data exchange by specifying message formats, transport mechanisms, and interaction patterns. 53 | * **Agent Gateway**: Offers handy secure (MLS and quantum safe) network-level communication services to a group of agents (typically those of a given multi-agent application) through SDK/Libraries. It extends gRPC to support  pub/sub interactions in addition to request/reply, streaming, fire & forget and more. 54 | 1. **Agent Workflow Server**: Deploys and supervises agent workflows written in various frameworks and makes them available through the Agent Connect Protocol. Such workflows could be multi-agent applications including a mix of toolkit agents, local and remote agents. 55 | 1. **Agentic Ensemble Observability & Evaluation**: Telemetry collectors, tools and services to enable multi-agent application observability and evaluation 56 | 1. **Agentic Ensemble Security**: Tools and services to trust and protect multi-agent applications. 57 | 58 | 59 | The following diagram shows a simplified architecture of the core components described above. 
60 | 61 | 62 | ```{image} ../_static/ioa_arch.png 63 | :alt: Simplified Internet of Agent Stack 64 | :width: 100% 65 | :align: center 66 | ``` 67 | 68 | ### Benefits 69 | 70 | * **Enhanced Collaboration**: By enabling seamless communication and data exchange, IoA fosters collaboration between AI agents, leading to more sophisticated and integrated solutions. 71 | * **Improved Efficiency**: Standardized protocols and frameworks reduce the complexity of integrating diverse AI agents, resulting in faster development and deployment of AI-driven applications. 72 | * **Increased Security**: Robust security mechanisms ensure that interactions between agents are secure, protecting sensitive data, and preventing unauthorized access. 73 | * **Future-Proof Architecture**: The scalable and flexible design of IoA ensures that the ecosystem can grow and adapt to future advancements in AI technology. 74 | -------------------------------------------------------------------------------- /docs/pages/messaging_sdk/slim-control-plane.md: -------------------------------------------------------------------------------- 1 | # Control Plane 2 | 3 | The Control Plane is a cloud-based controller that manages and orchestrates the configuration of SLIM nodes networks. It enables administrators to define, control, and interconnect limited domain networks while facilitating peering relationships with external networks. 
4 | 5 | ## Architecture 6 | 7 | ```mermaid 8 | graph TB 9 | subgraph Control Plane 10 | CP[Central Controller] 11 | PM[Policy Manager] 12 | TM[Topology Manager] 13 | PR[Peering Registry] 14 | end 15 | 16 | subgraph Domain Network 17 | SLIM1[SLIM 1] 18 | SLIM2[SLIM 2] 19 | SLIM3[SLIM 3] 20 | end 21 | 22 | subgraph External Networks 23 | EN1[Network 1] 24 | EN2[Network 2] 25 | end 26 | 27 | CP --> PM 28 | CP --> TM 29 | CP --> PR 30 | TM --> SLIM1 31 | TM --> SLIM2 32 | TM --> SLIM3 33 | PR --> EN1 34 | PR --> EN2 35 | ``` 36 | 37 | ## Key Components 38 | 39 | ### Central Controller 40 | 41 | - Provides centralized management interface 42 | - Handles network-wide configuration 43 | - Monitors SLIM nodes health and status 44 | - Implements control policies 45 | 46 | ### Policy Manager 47 | 48 | - Defines access control policies 49 | - Manages traffic routing rules 50 | - Sets security parameters 51 | - Controls resource allocation 52 | 53 | ### Topology Manager 54 | 55 | - Maintains network topology 56 | - Handles SLIM nodes discovery 57 | - Manages SLIM nodes connections 58 | - Optimizes routing paths 59 | 60 | ### Peering Registry 61 | 62 | - Manages peering relationships 63 | - Handles cross-network authentication 64 | - Controls inter-network routing 65 | - Maintains peering agreements 66 | 67 | ## Features 68 | 69 | 1. **Network Configuration** 70 | - SLIM nodes deployment and configuration 71 | - Network topology management 72 | - Policy distribution 73 | - Resource allocation 74 | 75 | 2. **Network Peering** 76 | - Automated peering negotiation 77 | - Cross-network routing 78 | - Federation management 79 | - Trust establishment 80 | 81 | 3. **Monitoring and Analytics** 82 | - Network health monitoring 83 | - Performance metrics 84 | - Usage analytics 85 | - Anomaly detection 86 | 87 | 4. 
**Security Management** 88 | - Access control 89 | - Network segmentation 90 | - Encryption requirements 91 | - Security policy enforcement 92 | 93 | ## Integration 94 | 95 | The control plane integrates with: 96 | 97 | - SLIM node management interfaces 98 | - Security services 99 | - Monitoring systems 100 | - External network controllers 101 | 102 | ## Deployment 103 | 104 | The control plane can be deployed as: 105 | 106 | - Managed cloud service 107 | - Private cloud installation 108 | - Hybrid deployment -------------------------------------------------------------------------------- /docs/pages/messaging_sdk/slim-core.md: -------------------------------------------------------------------------------- 1 | # Core Concepts 2 | 3 | Multi-agent software incorporates complex design patterns inherited from various 4 | established frameworks: 5 | 6 | - Machine learning 7 | - Cloud-native computing 8 | - Interactive real-time applications 9 | - Big data processing 10 | 11 | LLMs (Large Language Models) and agent frameworks serve as middleware that 12 | automates processes traditionally requiring human intervention. This integration 13 | layer connects diverse systems and enables new automation capabilities through the following: 14 | 15 | - Natural language processing 16 | - Contextual understanding 17 | - Task decomposition 18 | - Autonomous decision making 19 | 20 | By integrating these technologies, multi-agent systems can manage complex 21 | workflows while ensuring: 22 | 23 | - Cloud-native scalability 24 | - Real-time responsiveness 25 | - Large-scale data processing 26 | - Seamless ML model integration 27 | 28 | ## Main Components 29 | 30 | Interconnecting these systems at scale requires meeting strict latency and 31 | response time requirements. 
Secure Low-Latency Interactive Messaging (SLIM) aims to provide a secure, scalable, and 32 | user-friendly communication framework that unifies state-of-the-art capabilities 33 | from all mentioned frameworks into a single implementation. 34 | 35 | The main components of SLIM are: 36 | 37 | - [Security layer](./slim-security-layer) 38 | - [Data plane](./slim-data-plane) 39 | - [Session layer](./slim-session-layer) 40 | - [Control plane](./slim-control-plane) 41 | -------------------------------------------------------------------------------- /docs/pages/messaging_sdk/slim-data-plane.md: -------------------------------------------------------------------------------- 1 | # Data Plane 2 | 3 | The Secure Low-Latency Interactive Messaging (SLIM) data plane implements an efficient message routing and delivery system between agents. 4 | 5 | ## Message Format 6 | 7 | SLIM messages use a channel-based addressing scheme for content routing: 8 | 9 | ```protobuf 10 | message SLIMMessage { 11 | string channel_id = 1; 12 | string message_id = 2; 13 | bytes payload = 3; 14 | MessageMetadata metadata = 4; 15 | } 16 | ``` 17 | 18 | ## Connection Table 19 | 20 | The connection table maintains agent connectivity information by mapping channel IDs to connected agents and tracking connection state and capabilities. 21 | 22 | ## Forwarding Table 23 | 24 | The forwarding table implements intelligent message routing by implementing the following: 25 | 26 | - Maps message patterns to delivery strategies. 27 | - Supports content-based routing. 28 | - Maintains routing metrics and preferences. 29 | - Handles multicast and anycast delivery. 30 | 31 | ## Message Buffer 32 | 33 | The message buffer provides temporary storage by implementing the following: 34 | 35 | - Caches messages for reliable delivery. 36 | - Implements store-and-forward when needed. 37 | - Supports message deduplication. 38 | - Handles out-of-order delivery. 
39 | 40 | ## Data Plane Flow 41 | 42 | ```mermaid 43 | graph LR 44 | A([Input]) --> B[Buffer] 45 | B --> C{Forwarding} 46 | C --> D[Connection] 47 | D -->|Direct| E([Output]) 48 | D -->|Multicast| E 49 | D -->|Anycast| E 50 | 51 | style B fill:#ffffff,stroke:#000000,stroke-width:2px 52 | style C fill:#f0f0f0,stroke:#000000,stroke-width:2px 53 | style D fill:#e0e0e0,stroke:#000000,stroke-width:2px 54 | ``` 55 | 56 | The diagram shows the message flow through the SLIM data plane components: 57 | 58 | 1. Messages enter the system and are processed by the Message Buffer. 59 | 2. The Message Buffer handles deduplication and store-and-forward. 60 | 3. The Forwarding Table determines routing strategy. 61 | 4. The Connection Table manages delivery to connected agents. 62 | 5. Messages are delivered through direct, multicast, or anycast methods. -------------------------------------------------------------------------------- /docs/pages/messaging_sdk/slim-howto.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | ## Prerequisites 4 | 5 | To build the project and work with the code, you need the following 6 | installed components in your system: 7 | 8 | ### Taskfile 9 | 10 | Taskfile is required to run all the build operations. Follow the 11 | [installation instructions](https://taskfile.dev/installation/) in the Taskfile 12 | documentations to find the best installation method for your system. 13 | 14 |
15 | with brew 16 | 17 | ```bash 18 | brew install go-task 19 | ``` 20 |
21 |
22 | with curl 23 | 24 | ```bash 25 | sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin 26 | ``` 27 |
28 | 29 | For more information, see [Taskfile](https://taskfile.dev/). 30 | 31 | ### Rust 32 | 33 | The data-plane components are implemented in Rust. Install with rustup: 34 | 35 | ```bash 36 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 37 | ``` 38 | 39 | For more information, see [Rust](https://rustup.rs/). 40 | 41 | ### Golang 42 | 43 | The control-plane components are implemented in Golang. Follow the installation 44 | instructions on the Go website. 45 | 46 | ## Artifacts distribution 47 | 48 | ### Crates 49 | 50 | For more information, see [artifacts-agntcy on crates.io](https://crates.io/users/artifacts-agntcy). 51 | 52 | ```bash 53 | cargo install agntcy-slim 54 | ``` 55 | 56 | ### Container Images 57 | 58 | ```bash 59 | docker pull ghcr.io/agntcy/slim:latest 60 | ``` 61 | 62 | ### Helm Charts 63 | 64 | ```bash 65 | helm pull ghcr.io/agntcy/slim/helm/slim:latest 66 | ``` 67 | 68 | ### PyPI Packages 69 | 70 | ```bash 71 | pip install slim-bindings 72 | ``` -------------------------------------------------------------------------------- /docs/pages/messaging_sdk/slim-index.rst: -------------------------------------------------------------------------------- 1 | Secure Low-Latency Interactive Messaging 2 | ======================================== 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | 7 | slim-core 8 | slim-security-layer 9 | slim-data-plane 10 | slim-control-plane 11 | slim-session-layer 12 | slim-howto 13 | slim-mcp 14 | -------------------------------------------------------------------------------- /docs/pages/messaging_sdk/slim-security-layer.md: -------------------------------------------------------------------------------- 1 | # Security Layer 2 | 3 | The Security Layer implements secure group communications using the Message Layer Security (MLS) protocol. This layer ensures end-to-end encryption, authentication, and access control across agent groups within the SLIM ecosystem. 
4 | 5 | ## Overview 6 | 7 | The security layer leverages MLS to provide: 8 | 9 | - Quantum-safe end-to-end encryption (QSE2EE) for group communications 10 | - Dynamic group membership management 11 | - Forward secrecy and post-compromise security 12 | - Scalable key management for large agent groups 13 | 14 | ## Key Components 15 | 16 | ### 1. Group State Management 17 | 18 | - Maintains cryptographic group state 19 | - Handles member additions and removals 20 | - Manages group epoch changes 21 | - Processes key updates 22 | 23 | ### 2. Key Schedule 24 | 25 | - Derives encryption keys from group secrets 26 | - Implements MLS key schedule 27 | - Manages key rotations 28 | - Ensures forward secrecy 29 | 30 | ### 3. Message Protection 31 | 32 | - Encrypts group messages 33 | - Provides integrity protection 34 | - Implements sender authentication 35 | - Handles message sequencing 36 | 37 | ### 4. Authentication 38 | 39 | - Validates group members 40 | - Verifies message signatures 41 | - Manages identity credentials 42 | - Implements access control 43 | 44 | ## Security Properties 45 | 46 | - **Forward Secrecy**: Ensures past communications remain secure if keys are compromised 47 | - **Post-Compromise Security**: Provides security guarantees after member compromise 48 | - **Group Authentication**: Verifies message origin within the group 49 | - **Message Confidentiality**: Protects message content from unauthorized access 50 | 51 | ## Integration 52 | 53 | The security layer integrates with: 54 | 55 | - Transport layer for secure message delivery 56 | - Session layer for maintaining secure contexts 57 | - Identity services for credential management 58 | 59 | ```mermaid 60 | graph TB 61 | %% Services 62 | AS[Authentication Service] 63 | DS[Delivery Service] 64 | 65 | %% Service Connection 66 | AS <--> DS 67 | 68 | %% Clients and Members 69 | Client1[Client 1]:::client 70 | Client2[Client 2]:::client 71 | Client3[Client 3]:::client 72 | 73 | %% Group Definition 74 | 
subgraph MLSGroup[MLS Group] 75 | Member1[Member 1]:::member 76 | Member2[Member 2]:::member 77 | end 78 | 79 | %% Client Authentication 80 | Client1 --> AS 81 | Client2 --> AS 82 | Client3 --> AS 83 | 84 | %% Client-DS Connections 85 | Client1 -.-> DS 86 | Client2 --> DS 87 | Client3 --> DS 88 | 89 | %% Member Associations 90 | Client2 === Member1 91 | Client3 === Member2 92 | 93 | %% Styles 94 | classDef service fill:#ffffff,stroke:#000000,stroke-width:2px 95 | classDef client fill:#f0f0f0,stroke:#000000,stroke-width:2px 96 | classDef member fill:#e0e0e0,stroke:#000000,stroke-width:2px 97 | 98 | class AS,DS service 99 | class Client1,Client2,Client3 client 100 | class Member1,Member2 member 101 | ``` -------------------------------------------------------------------------------- /docs/pages/messaging_sdk/slim-session-layer.md: -------------------------------------------------------------------------------- 1 | # Session Layer 2 | 3 | The Secure Low-Latency Interactive Messaging (SLIM) Session Layer manages and maintains the communication state between agents and their respective SLIM nodes. It provides essential services for establishing, maintaining, and terminating sessions between communicating entities in the SLIM ecosystem. 
4 | 5 | ## Flow Diagram 6 | 7 | ```mermaid 8 | sequenceDiagram 9 | participant Agent 10 | participant SessionLayer 11 | participant SLIM 12 | 13 | Agent->>SessionLayer: Initialize Session 14 | SessionLayer->>SLIM: Session Request 15 | SLIM->>SessionLayer: Session Acknowledgment 16 | SessionLayer->>Agent: Session Established 17 | 18 | rect rgb(200, 200, 200) 19 | note right of Agent: Active Session 20 | Agent->>SessionLayer: Data Exchange 21 | SessionLayer->>SLIM: Session-managed Communication 22 | SLIM->>SessionLayer: Response 23 | SessionLayer->>Agent: Processed Response 24 | end 25 | 26 | Agent->>SessionLayer: Terminate Session 27 | SessionLayer->>SLIM: Session Closure 28 | SLIM->>SessionLayer: Closure Acknowledgment 29 | SessionLayer->>Agent: Session Terminated 30 | ``` 31 | 32 | ## Key Features 33 | 34 | - **Session Establishment**: Handles the initial handshake and connection setup. 35 | - **State Management**: Maintains session context and state information. 36 | - **Security**: Implements session-level security measures and token management. 37 | - **Error Recovery**: Provides mechanisms for handling session interruptions and failures. 38 | - **Session Termination**: Manages graceful session closure and cleanup. 39 | 40 | ## Architecture 41 | 42 | The session layer operates between the transport and presentation layers, providing a reliable communication framework for higher-level protocol operations. It ensures the following: 43 | 44 | * Secure session initialization. 45 | * Stateful communication. 46 | * Error handling and recovery. 47 | * Graceful session termination. -------------------------------------------------------------------------------- /docs/pages/oasf-data-model.proto.md: -------------------------------------------------------------------------------- 1 | # Package: schema.model 2 | 3 |

4 | 5 | ## Imports 6 | 7 | | Import | Description | 8 | |--------|-------------| 9 | 10 | 11 | 12 | ## Options 13 | 14 | | Name | Value | Description | 15 | |------|-------|-------------| 16 | 17 | 18 | 19 | 20 | ### Agent Diagram 21 | 22 | ```mermaid 23 | classDiagram 24 | direction LR 25 | 26 | %% The data model defines a schema for AI agent content representation. The schema provides a way to describe agent's features, constraints, artifact locators, versioning, ownership, or relevant details. 27 | 28 | class Agent { 29 | + string name 30 | + string version 31 | + List~string~ authors 32 | + string created_at 33 | + Map~string, string~ annotations 34 | + List~string~ skills 35 | + List~Locator~ locators 36 | + List~Extension~ extensions 37 | } 38 | Agent --> `Locator` 39 | Agent --> `Extension` 40 | Agent --o `Locator` 41 | 42 | %% Locators provide actual artifact locators of an agent. For example, this can reference sources such as helm charts, docker images, binaries, etc. 43 | 44 | class Locator { 45 | + string url 46 | + string type 47 | + Map~string, string~ annotations 48 | + Optional~uint64~ size 49 | + Optional~string~ digest 50 | } 51 | Agent --o `Extension` 52 | 53 | %% Extensions provide dynamic descriptors for an agent. For example, security and categorization features can be described using extensions. 54 | 55 | class Extension { 56 | + string name 57 | + string version 58 | + Map~string, string~ annotations 59 | + bytes specs 60 | } 61 | 62 | ``` 63 | 64 | ## Message: Agent 65 |
FQN: schema.model.Agent
66 | 67 |
The data model defines a schema for AI agent content representation. The schema provides a way to describe agent's features, constraints, artifact locators, versioning, ownership, or relevant details.
68 | 69 | | Field | Ordinal | Type | Label | Description | 70 | |-------------|---------|----------------|----------|------------------------------------------------------------------------------------------------------------| 71 | | name | 1 | string | | Name of the agent. | 72 | | version | 2 | string | | Version of the agent. | 73 | | authors | 3 | string | Repeated | List of agent’s authors in the form of `author-name `. | 74 | | created_at | 4 | string | | Creation timestamp of the agent in the RFC3339 format. Specs: https://www.rfc-editor.org/rfc/rfc3339.html | 75 | | annotations | 5 | string, string | Map | Additional metadata associated with this agent. | 76 | | skills | 6 | string | Repeated | List of skills that this agent is capable of performing. Specs: https://schema.oasf.agntcy.org/skills | 77 | | locators | 7 | Locator | Repeated | List of source locators where this agent can be found or used from. | 78 | | extensions | 8 | Extension | Repeated | List of extensions that describe this agent more in depth. | 79 | 80 | 81 | 82 | ### Locator Diagram 83 | 84 | ```mermaid 85 | classDiagram 86 | direction LR 87 | 88 | %% Locators provide actual artifact locators of an agent. For example, this can reference sources such as helm charts, docker images, binaries, etc. 89 | 90 | class Locator { 91 | + string url 92 | + string type 93 | + Map~string, string~ annotations 94 | + Optional~uint64~ size 95 | + Optional~string~ digest 96 | } 97 | 98 | ``` 99 | ### Extension Diagram 100 | 101 | ```mermaid 102 | classDiagram 103 | direction LR 104 | 105 | %% Extensions provide dynamic descriptors for an agent. For example, security and categorization features can be described using extensions. 106 | 107 | class Extension { 108 | + string name 109 | + string version 110 | + Map~string, string~ annotations 111 | + bytes specs 112 | } 113 | 114 | ``` 115 | 116 | ## Message: Locator 117 |
FQN: schema.model.Agent.Locator
118 | 119 |
Locators provide actual artifact locators of an agent. For example, this can reference sources such as helm charts, docker images, binaries, etc.
120 | 121 | | Field | Ordinal | Type | Label | Description | 122 | |-------------|---------|----------------|----------|------------------------------------------------------------------------------------| 123 | | url | 1 | string | | Location URI where this source locator can be found. | 124 | | type | 2 | string | | Type of the source locator, for example: "docker-image", "binary", "source-code". | 125 | | annotations | 3 | string, string | Map | Metadata associated with this source locator. | 126 | | size | 4 | uint64 | Optional | Size in bytes of the source locator pointed by the `url` property. | 127 | | digest | 5 | string | Optional | Digest of the source locator pointed by the `url` property. | 128 | 129 | 130 | 131 | 132 | ## Message: Extension 133 |
FQN: schema.model.Agent.Extension
134 | 135 |
Extensions provide dynamic descriptors for an agent. For example, security and categorization features can be described using extensions.
136 | 137 | | Field | Ordinal | Type | Label | Description | 138 | |-------------|---------|----------------|-------|-----------------------------------------------------------------------------------------------| 139 | | name | 1 | string | | Name of the extension. | 140 | | version | 2 | string | | Version of the extension. | 141 | | annotations | 3 | string, string | Map | Metadata associated with this extension. | 142 | | specs | 4 | bytes | | Value of the data, it is available directly or can be constructed by fetching from some URL. | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | -------------------------------------------------------------------------------- /docs/pages/oasf-taxonomy.md: -------------------------------------------------------------------------------- 1 | # Taxonomy of AI Agent Skills 2 | 3 | | Category | Skill | UID | Description | 4 | |-----------------------------------------------|-----------------------------------------------|-------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 5 | | Natural Language Understanding | Natural Language Understanding | 101 | Describes the ability to understand the meaning of a given situation or event. | 6 | | Natural Language Generation | Natural Language Generation | 102 | Describes the ability to generate human-like text from structured data or other inputs. | 7 | | Information Retrieval and Synthesis | Information Retrieval and Synthesis | 103 | Capabilities for retrieving relevant information from various sources and synthesizing it into coherent, contextually appropriate responses. This includes searching, extracting, combining, and presenting information in a meaningful way. 
| 8 | | Creative Content Generation | Creative Content Generation | 104 | Capabilities for generating various forms of creative content, including narratives, poetry, and other creative writing forms. | 9 | | Language Translation and Multilingual Support | Language Translation and Multilingual Support | 105 | Capabilities for handling multiple languages, including translation and multilingual text processing. | 10 | | Personalisation and Adaptation | Personalisation and Adaptation | 106 | Capabilities for adapting and personalizing content based on user context and preferences. | 11 | | Analytical and Logical Reasoning | Analytical and Logical Reasoning | 107 | Capabilities for performing logical analysis, inference, and problem-solving tasks. | 12 | | Ethical and Safe Interaction | Ethical and Safe Interaction | 108 | Capabilities for ensuring ethical, unbiased, and safe content generation and interaction. | 13 | | Text Classification | Text Classification | 109 | Capabilities for classifying and categorizing text into predefined categories or labels. | 14 | | Feature Extraction | Feature Extraction | 110 | Capabilities for extracting and representing textual features as vectors for downstream tasks. | 15 | | Token Classification | Token Classification | 111 | Capabilities for classifying individual tokens or words within text. | 16 | | Contextual Comprehension | Contextual Comprehension | 10101 | Describes the ability to understand the context of a given situation or event. | 17 | | Semantic Understanding | Semantic Understanding | 10102 | Describes the ability to understand the meaning of a given situation or event. | 18 | | Entity Recognition | Entity Recognition | 10103 | Describes the ability to identify and classify entities in a given situation or event. | 19 | | Text Completion | Text Completion | 10201 | Continuing a given text prompt in a coherent and contextually appropriate manner to generate fluent and contextually relevant content. 
| 20 | | Text Summarization | Text Summarization | 10202 | Condensing longer texts into concise summaries while preserving essential information and maintaining coherence. | 21 | | Text Paraphrasing | Text Paraphrasing | 10203 | Rewriting text to express the same ideas using different words and structures while maintaining the original meaning. | 22 | | Dialogue Generation | Dialogue Generation | 10204 | Producing conversational responses that are contextually relevant and engaging within a dialogue context. | 23 | | Question Generation | Question Generation | 10205 | Automatically generating relevant and meaningful questions from a given text or context. | 24 | | Text Style Transfer | Text Style Transfer | 10206 | Rewriting text to match the style of a given reference text while preserving the original content. | 25 | | Story Generation | Story Generation | 10207 | Generating a piece of text given a description or a first sentence to complete. | 26 | | Fact Extraction | Fact Extraction | 10301 | Capability to identify and extract factual information from text documents or knowledge bases, including entities, relationships, and key data points. | 27 | | Question Answering | Question Answering | 10302 | System capability to understand questions and provide accurate, relevant answers by analyzing available information sources. | 28 | | Knowledge Synthesis | Knowledge Synthesis | 10303 | Capability to aggregate and combine information from multiple sources, creating comprehensive and coherent responses while maintaining context and relevance. | 29 | | Sentence Similarity | Sentence Similarity | 10304 | Capability to analyze and determine the semantic similarity between sentences, supporting tasks like search, matching, and content comparison. | 30 | | Document and Passage Retrieval | Document and Passage Retrieval | 10305 | Capability to identify and retrieve relevant documents or text passages based on specific criteria or queries from a larger collection of texts. 
| 31 | | Search | Search | 10306 | Capability to perform efficient and accurate searches within large textual databases based on various criteria, including keywords, semantic meaning, or complex queries. | 32 | | Storytelling | Storytelling | 10401 | Creating narratives, stories, or fictional content with creativity and coherence. | 33 | | Poetry and Creative Writing | Poetry and Creative Writing | 10402 | Composing poems, prose, or other forms of creative literature. | 34 | | Translation | Translation | 10501 | Converting text from one language to another while maintaining meaning and context. | 35 | | Multilingual Understanding | Multilingual Understanding | 10502 | Recognising and processing text in multiple languages. | 36 | | User Adaptation | User Adaptation | 10601 | Tailoring responses based on user preferences, history, or context. | 37 | | Tone and Style Adjustment | Tone and Style Adjustment | 10602 | Modifying the tone or style of generated text to suit specific audiences or purposes. | 38 | | Inference and Deduction | Inference and Deduction | 10701 | Making logical inferences based on provided information. | 39 | | Problem Solving | Problem Solving | 10702 | Assisting with solving problems by generating potential solutions or strategies. | 40 | | Fact and Claim Verification | Fact and Claim Verification | 10703 | Verifying facts and claims given a reference text. | 41 | | Bias Mitigation | Bias Mitigation | 10801 | Reducing or eliminating biased language and ensuring fair and unbiased output. | 42 | | Content Moderation | Content Moderation | 10802 | Avoiding the generation of harmful, inappropriate, or sensitive content. | 43 | | Topic Labelling and Tagging | Topic Labelling and Tagging | 10901 | Classifying a text as belonging to one of several topics, which can be used to tag a text. | 44 | | Sentiment Analysis | Sentiment Analysis | 10902 | Classify the sentiment of a text, e.g., a positive movie review. 
| 45 | | Natural Language Inference | Natural Language Inference | 10903 | Classifying the relation between two texts, e.g., as a contradiction, entailment, etc. | 46 | | Model Feature Extraction | Model Feature Extraction | 11001 | Representing parts of text with vectors to be used as input to other tasks. | 47 | | Named Entity Recognition | Named Entity Recognition | 11101 | Task to recognise names as entities, e.g., people, locations, buildings, etc. | 48 | | Part-of-Speech Tagging | Part-of-Speech Tagging | 11102 | Tagging each part of a sentence as nouns, adjectives, verbs, etc. | -------------------------------------------------------------------------------- /docs/pages/oasf-workflow.md: -------------------------------------------------------------------------------- 1 | # OASF Contribution Guide 2 | 3 | This documentation presents guidelines and expected etiquette to successfully 4 | contribute to the development of OASF Schemas and the framework itself. 5 | 6 | * * * 7 | 8 | ## Terminology 9 | 10 | 1. **Field**: A field is a unique identifier name for a piece of data contained in OASF. Each field also designates a corresponding `data_type`. 11 | 2. **Object**: An object is a collection of contextually related fields and other objects. It is also a data_type in OASF. 12 | 3. **Attribute**: An attribute is the more generic name for both fields and objects in OASF. A field is a scalar attribute while an object is a complex attribute. 13 | 4. **Class**: A class is a particular set of attributes (including fields & objects) representing metadata associated to an autonomous agent. 14 | 5. **Category:** A Category organizes classes that represent a particular domain. 15 | 16 | ## How do I add a `class`? 17 | 18 | ### Overview 19 | 20 | 1. Determine all the `attributes` (including fields and objects) you want to add in the `class`. 21 | 2. 
Check the [dictionary](https://github.com/agntcy/oasf/blob/main/schema/dictionary.json) and the [/objects](https://github.com/agntcy/oasf/tree/main/schema/objects) folder, many of your desired attributes may already be present. 22 | 3. Define the missing attributes → [Adding/Modifying an `attribute`](#adding-modifying-an-attribute) 23 | 4. Determine which category you want to add your class in, note its `name`. 24 | 5. Create a new file → `<class_name>.json` inside the category specific subdirectory in the [/schema](https://github.com/agntcy/oasf/tree/main/schema) folder. Template available [here](https://github.com/agntcy/oasf/blob/main/schema/templates/class_name.json). 25 | 6. Define the `class` itself → [Adding/Modifying a `class`](#adding-modifying-a-class). 26 | 7. Verify the changes are working as expected in your local [oasf/server](https://github.com/agntcy/oasf/tree/main/server). 27 | 28 | * * * 29 | 30 | ### Adding/Modifying an `attribute` 31 | 32 | 1. All the available `attributes` - `fields` and `objects` in OASF must be defined in the attribute dictionary, the [dictionary.json](https://github.com/agntcy/oasf/blob/main/schema/dictionary.json) file and [/objects](https://github.com/agntcy/oasf/tree/main/schema/objects) folder if defining an object. 33 | 2. Determine if a new attribute is required for your change, it might already be defined in the attribute dictionary and/or the [/objects](https://github.com/agntcy/oasf/tree/main/schema/objects) folder. 34 | 3. Before adding a new attribute, review the following OASF attribute conventions: 35 | 36 | * Attribute names must be a valid UTF-8 sequence. 37 | * Attribute names must be all lower case. 38 | * Combine words using underscore. 39 | * No special characters except underscore. 40 | * Use present tense unless the attribute describes historical information. 41 | * Use singular and plural names properly to reflect the field content. 
42 | * When an attribute represents multiple entities, the attribute name should be pluralized and the value type should be an array. 43 | * Avoid word repetition. 44 | * Avoid abbreviations when possible. Some exceptions can be made for well-accepted abbreviations like well-known acronyms (for example, LLM or AI). 45 | 46 | #### How to define a `field` in the dictionary? 47 | 48 | To add a new field in OASF, you must define it in the [dictionary.json](https://github.com/agntcy/oasf/blob/main/schema/dictionary.json) file as described below. 49 | 50 | Sample entry in the dictionary: 51 | 52 | ``` 53 | "uid": 54 | { 55 | "caption": "Unique ID", // "previously name" 56 | "description": "The unique identifier. See specific usage.", 57 | "type": "string_t" 58 | } 59 | ``` 60 | 61 | Choose a **unique** field you want to add, `uid` in the example above and populate it as described below. 62 | 63 | 1. `caption`: A user-friendly name to the field. 64 | 2. `description`: A concise description to define the attributes. 65 | 1. Note that `field` descriptions can be overridden in the `class/object`, therefore if it’s a common field (like name, label, uid) feel free to add a generic description, specific descriptions can be added in the `class/object` definition. For example: 66 | 2. A generic definition of `uid` in the dictionary: 67 | 1. `uid` : `The unique identifier. See specific usage.` 68 | 3. Specific description of `uid` in the `agent` object: 69 | 1. `uid` : `Unique Identifier/s of the reported agent."` 70 | 3. `type`: Review OASF data_types and ensure you utilize appropriate types while defining new fields. 71 | 1. All the available data_types can be accessed [here](https://schema.oasf.agntcy.org/data_types). 72 | 2. They are also accessible in your local instance of the oasf server (http://localhost:8000/data_types). 73 | 4. `is_array`: This is a boolean key:value pair that you must add if the field you are defining is an array. 74 | 1. 
For example: `"is_array": true` 75 | 76 | #### How to define an `object`? 77 | 78 | 1. All the available `objects` must be defined as individual field entries in the dictionary, the [dictionary.json](https://github.com/agntcy/oasf/blob/main/schema/dictionary.json) file and as distinct .json files in the [/objects](https://github.com/agntcy/oasf/tree/main/schema/objects) folder. 79 | 2. Review existing Objects, determine if a modification of the existing object would be sufficient or if there’s a need for a completely new object. 80 | 3. Use the template available [here](https://github.com/agntcy/oasf/blob/main/schema/templates/object_name.json), to get started with .json file definition. 81 | 82 | An example `locator.json` object file: 83 | 84 | ``` 85 | { 86 | "caption": "Agent Locator", 87 | "description": "Locators provide actual artifact locators of an agent. For example, this can reference sources such as helm charts, docker images, binaries, etc.", 88 | "extends": "_entity", 89 | "name": "locator", 90 | "attributes": { 91 | "name": { 92 | "description": "The schema extension name. For example: dev.", 93 | "requirement": "required" 94 | }, 95 | "annotations": { 96 | "caption": "Annotations", 97 | "description": "Additional metadata associated with the extension.", 98 | "requirement": "optional" 99 | }, 100 | "type": { 101 | "caption": "Type", 102 | "description": "Describes the type of the release manifest pointed by its URI, e.g. oci-image, docker-image, py-package, binary. Allowed values MAY be defined for common manifest types.", 103 | "requirement": "required" 104 | }, 105 | "url": { 106 | "caption": "URL", 107 | "description": "Specifies an URI from which this object MAY be downloaded. Value MUST conform to RFC 3986. 
Value SHOULD use the http and https schemes, as defined in RFC 7230.", 108 | "requirement": "required" 109 | }, 110 | "size": { 111 | "caption": "Size", 112 | "description": "Specifies the size of the release manifest in bytes.", 113 | "requirement": "optional" 114 | }, 115 | "digest": { 116 | "caption": "Digest", 117 | "description": "Specifies the digest of the release manifest contents.", 118 | "requirement": "optional" 119 | } 120 | }, 121 | "constraints": {} 122 | } 123 | ``` 124 | 125 | 4. `caption`: A user-friendly name to the object. 126 | 5. `description`: A concise description to define the object. 127 | 6. `extends`: Ensure the value is `object` or an existing object, for example `skill` (all objects in OSAF must extend a base definition of `object` or another existing object). 128 | 7. `name`: Add a **unique** name of the object. `name` must match the filename of the actual `.json` file. 129 | 8. `attributes`: Add the attributes that you want to define in the object: 130 | 1. `requirement`: For each attribute ensure you add a requirement value. Valid values are `optional`, `required`, and `recommended` 131 | 2. `$include`: You can include attributes from other places; to do so, specify a virtual attribute called `$include` and give its value as the list of files (relative to the root of the schema repository) that should contribute their attributes to this object. For example: 132 | ``` 133 | "attributes": { 134 | "$include": [ 135 | "profiles/host.json" 136 | ], 137 | ... 138 | } 139 | ``` 140 | 141 | **Note:** If you want to create an object which would act only as a base for other objects, you must prefix the object `name` and the actual `json` filename with an `_`. The resultant object will not be visible in the [OASF Server.](https://schema.oasf.agntcy.org/objects). For example, take a look at the [entity](https://github.com/agntcy/oasf/blob/main/schema/objects/_entity.json) object. 
142 | 143 | Sample entry in the `dictionary.json`: 144 | 145 | ``` 146 | "skill": { 147 | "caption": "Skill", 148 | "description": "A skill that apply to an agent.", 149 | "type": "class_t", 150 | "class_path": "classes/base_skill", 151 | "class_name": "Skill" 152 | } 153 | ``` 154 | 155 | Choose a **unique** object you want to add, `skill` in the example above and populate it as described below. 156 | 157 | 1. `caption`: A user-friendly name to the object. 158 | 2. `description`: A concise description to define the object. 159 | 3. `type`: The type of the object you are defining. 160 | 4. `is_array`: This a boolean key:value pair that you must add if the object you are defining is an array. 161 | 1. for example: `"is_array": true`. 162 | 163 | ``` 164 | "skills": { 165 | "caption": "Skills", 166 | "description": "Skills that apply to an agent.", 167 | "type": "class_t", 168 | "class_path": "skills", 169 | "class_name": "Skill", 170 | "is_array": true 171 | } 172 | ``` 173 | 174 | * * * 175 | 176 | ### Adding/Modifying a `class` 177 | 178 | 1. All the available Classes are defined as .json files in the [/schema](https://github.com/agntcy/oasf/tree/main/schema) folder. 179 | 2. Review existing Classes, determine if a modification of the existing class would be sufficient or if there’s a need for a completely new class. 180 | 3. To define a new class, 181 | 1. Create a new file → `` inside the category specific subdirectory in the [/schema](https://github.com/agntcy/oasf/tree/main/schema) folder. 182 | 2. Use the template available [here](https://github.com/agntcy/oasf/tree/main/schema/templates/class_name.json) to get started with the .json definition. 183 | 3. `uid`: Select an integer in the range 0 - 99. Ensure the integer is **unique** within the category. 184 | * Note: Without `uid`, a class won’t be visible in the oasf server. 185 | 4. `caption`: Add a user-friendly name to the event_class. 186 | 5. 
`description`: Add a concise description to define the attributes. 187 | 6. `name`: Add a **unique** name of the class. Ensure it matches the file name to maintain consistency. 188 | 7. `extends`: Ensure the value container the parent class `class`. 189 | 8. `attributes`: Add the attributes that you want to define in the `event_class`: 190 | 1. `group`: For each attribute ensure you add a group value. Valid values are `classification`, `context`, `occurrence`, and `primary`. 191 | 2. `requirement`: For each attribute ensure you add a requirement value. Valid values are `optional`, `required`, and `recommended` 192 | 3. `$include`: As for objects, you can also include attributes from other places. To do so, specify the list of files (relative to the root of the schema repository) that should contribute their attributes to this object. For example: 193 | ``` 194 | "attributes": { 195 | "$include": [ 196 | "profiles/cloud.json" 197 | ], 198 | ... 199 | } 200 | ``` 201 | 202 | 9. `constraints`: For each class you can add constraints on the attribute requirements. Valid constraint types are `at_least_one` and `just_one`. For example: 203 | ``` 204 | "constraints": { 205 | "at_least_one": [ 206 | "uid", 207 | "name" 208 | ] 209 | } 210 | ``` 211 | 212 | _(A Constraint is a documented rule subject to validation that requires at least one of the specified recommended attributes of a class to be populated.)_ 213 | 214 | * * * 215 | 216 | ### Deprecating an attribute 217 | 218 | To deprecate an attribute (`field`, `object`) follow the steps below: 219 | 220 | 1. Create a GitHub issue, explaining why an attribute needs to be deprecated and what the alternate solution is. 221 | 2. Utilize the following flag to allow deprecation of attributes. This flag needs to be added a json property of the attribute that is the subject of deprecation. 
222 | ``` 223 | "@deprecated": { 224 | "message": "Use the ALTERNATE_ATTRIBUTE attribute instead.", 225 | "since": "semver" 226 | } 227 | ``` 228 | 3. Example of a deprecated field: 229 | ``` 230 | "packages": { 231 | "@deprecated": { 232 | "message": "Use the affected_packages attribute instead.", 233 | "since": "1.0.0" 234 | }, 235 | "caption": "Software Packages", 236 | "description": "List of vulnerable packages as identified by the security product", 237 | "is_array": true, 238 | "type": "package" 239 | } 240 | 4. Example of a deprecated object: 241 | ``` 242 | { 243 | "caption": "Finding", 244 | "description": "The Finding object describes metadata related to a security finding generated by a security tool or system.", 245 | "extends": "object", 246 | "name": "finding", 247 | "@deprecated": { 248 | "message": "Use the new finding_info object.", 249 | "since": "1.0.0" 250 | }, 251 | "attributes": {...} 252 | } 253 | *** 254 | 255 | ### Verifying the changes 256 | 257 | Contributors should verify the changes before they submit the PR, the best 258 | method to test and verify their changes is to run a local instance of the 259 | [oasf/server](https://github.com/agntcy/oasf/tree/main/server). 260 | 261 | If there are any problems with the newly made changes, the server will throw 262 | corresponding errors. Sample error messages: 263 | 264 | ``` 265 | [error] dictionary: missing attribute: 266 | ``` 267 | Address the errors before submitting the changes, your server run should be completely error free. 
268 | 269 | *** 270 | 271 | ### Developer's Certificate of Origin 1.1 272 | 273 | By making a contribution to this project, I certify that: 274 | 275 | (a) The contribution was created in whole or in part by me and I 276 | have the right to submit it under the open source license 277 | indicated in the file; or 278 | 279 | (b) The contribution is based upon previous work that, to the 280 | best of my knowledge, is covered under an appropriate open 281 | source license and I have the right under that license to 282 | submit that work with modifications, whether created in whole 283 | or in part by me, under the same open source license (unless 284 | I am permitted to submit under a different license), as 285 | indicated in the file; or 286 | 287 | (c) The contribution was provided directly to me by some other 288 | person who certified (a), (b) or (c) and I have not modified 289 | it. 290 | 291 | (d) I understand and agree that this project and the contribution 292 | are public and that a record of the contribution (including 293 | all personal information I submit with it, including my 294 | sign-off) is maintained indefinitely and may be redistributed 295 | consistent with this project or the open source license(s) 296 | involved. 297 | 298 | --- 299 | We require that every contribution to this repository is signed with a 300 | Developer Certificate of Origin. Additionally, please use your real name. 301 | We do not accept anonymous contributors nor those utilizing pseudonyms. 302 | 303 | Each commit must include a DCO which looks like this 304 | 305 | Signed-off-by: Jane Smith 306 | 307 | You may type this line on your own when writing your commit messages. 308 | However, if your user.name and user.email are set in your git configs, 309 | you can use -s or --signoff to add the Signed-off-by line to the end of the commit message. 
310 | 311 | * * * 312 | 313 | ## OASF Extensions 314 | 315 | The OASF Schema can be extended by adding an extension that defines additional 316 | attributes, objects, profiles, event classes and/or categories. 317 | Extensions allow one to create vendor/customer specific schemas or augment an 318 | existing schema to better suit their custom requirements. Extensions can also 319 | be used to factor out non-essential schema domains keeping the core schema 320 | succinct. Extensions use the framework in the same way as a new schema, 321 | optionally creating categories, profiles or event classes from the dictionary. 322 | 323 | As with categories and classes, extensions have unique IDs within the 324 | framework as well as their own versioning. The following sections provide 325 | guidelines to create extensions within OASF. 326 | 327 | ### Reserve a UID and Name for your extension 328 | 329 | In order to reserve an ID space, and make your extension public, add a unique 330 | identifier & a unique name for your extension in the OASF Extensions Registry 331 | [here](https://github.com/agntcy/oasf/blob/main/schema/extensions.md). 332 | This is done to avoid collisions with core or other extension schemas. 333 | For example, a new sample extension would have a row in the table as follows: 334 | 335 | | **Caption** | **Name** | **UID** | **Notes** | 336 | |---------------|----------|---------|-----------------------------------| 337 | | New Extension | new_ex | 123 | The development schema extensions | 338 | 339 | ### Create your Extension's subdirectory 340 | 341 | To extend the schema, create a new subdirectory in the `extensions` directory, 342 | and add a new `extension.json` file, which defines the extension's `name` 343 | and `uid`. 
For example: 344 | 345 | ``` 346 | { 347 | "caption": "New Extension", 348 | "name": "new_ex", 349 | "uid": 123, 350 | "version": "0.0.0" 351 | } 352 | ``` 353 | 354 | The extension's directory structure is the same as the top level schema directory, 355 | and it may contain the following files and subdirectories, depending on what type of extension is desired: 356 | -------------------------------------------------------------------------------- /docs/pages/oasf.md: -------------------------------------------------------------------------------- 1 | # Open Agentic Schema Framework 2 | 3 | The Open Agentic Schema Framework (OASF) is a standardized schema system for 4 | defining and managing AI agent capabilities, interactions, and metadata. It 5 | provides a structured way to describe agent attributes, capabilities, and 6 | relationships using attribute-based taxonomies. The framework includes 7 | development tools, schema validation, and hot-reload capabilities for rapid 8 | schema development, all managed through a Taskfile-based workflow and 9 | containerized development environment. The OASF serves as the foundation for 10 | interoperable AI agent systems, enabling consistent definition and discovery of 11 | agent capabilities across distributed systems. 12 | 13 | ## Features 14 | 15 | The OASF defines a set of standards for AI agent content representation that aims to: 16 | 17 | - Define common data structure to facilitate content standardisation, validation, and interoperability. 18 | - Ensure unique agent identification to address content discovery and consumption. 19 | - Provide extension capabilities to enable third-party features. 20 | 21 | A core component in OASF is to implement data types and core objects that define the skills of autonomous agents. This component helps in announcing and discovering agents with these skills across various data platforms. 
22 | 23 | The current skill set taxonomy is described in [Taxonomy of AI Agent Skills](oasf-taxonomy.md). 24 | 25 | The guidelines to upgrade and maintain OASF are outlined in the [OASF Contribution Guide](oasf-workflow.md). 26 | 27 | ### Open Agentic Schema Framework Server 28 | 29 | The server/directory contains the Open Agents Schema Framework (OASF) Schema Server source code. 30 | The schema server is an HTTP server that provides a convenient way to browse and use the OASF schema. 31 | The server provides also schema validation capabilities to be used during development. 32 | 33 | You can access the OASF schema server, which is running the latest released schema, at [schema.oasf.agntcy.org](https://schema.oasf.agntcy.org). 34 | 35 | The schema server can also be used locally. 36 | 37 | ## Prerequisites 38 | 39 | - [Taskfile](https://taskfile.dev/) 40 | - [Docker](https://www.docker.com/) 41 | 42 | Make sure Docker is installed with Buildx. 43 | 44 | ## Development 45 | 46 | Use `Taskfile` for all related development operations such as testing, validating, deploying, and working with the project. 47 | 48 | ### Clone the repository 49 | 50 | ```shell 51 | git clone https://github.com/agntcy/oasf.git 52 | ``` 53 | 54 | ### Build artifacts 55 | 56 | This step fetches all project dependencies and 57 | subsequently build all project artifacts such as 58 | Helm charts and Docker images. 59 | 60 | ```shell 61 | task deps 62 | task build 63 | ``` 64 | 65 | ### Deploy locally 66 | 67 | This step creates an ephemeral Kind cluster 68 | and deploy OASF services through Helm chart. 69 | It also sets up port forwarding 70 | so that the services can be accessed locally. 71 | 72 | ```shell 73 | task up 74 | ``` 75 | 76 | To access the schema server, open [`localhost:8080`](http://localhost:8080) in your browser. 77 | 78 | **Note**: Any changes made to the schema or server backend itself requires running `task up` again. 
79 | 80 | ### Hot reload 81 | 82 | In order to run the server in hot-reload mode, you must first deploy 83 | the services, and run another command to signal that the schema will be actively updated. 84 | 85 | This can be achieved by starting an interactive reload session through: 86 | 87 | ```shell 88 | task reload 89 | ``` 90 | 91 | Note that this only performs hot-reload for schema changes. 92 | Reloading backend changes still requires re-running `task build && task up`. 93 | 94 | ### Cleanup 95 | 96 | This step handles cleanup procedure by 97 | removing resources from previous steps, 98 | including ephemeral Kind clusters and Docker containers. 99 | 100 | ```shell 101 | task down 102 | ``` 103 | 104 | ## Artifacts distribution 105 | 106 | See https://github.com/orgs/agntcy/packages?repo_name=oasf. 107 | -------------------------------------------------------------------------------- /docs/pages/semantic_sdk/semantic_router.md: -------------------------------------------------------------------------------- 1 | # Semantic Router Agent (Coming soon) 2 | 3 | When defining a multi-agent application workflow, it is often necessary to make decisions based on the output of invoked agents. 4 | 5 | In a graph-based agentic applications (for example, LangGraph), this corresponds to decideing which node in the graph to execute based on the current state of the graph. 6 | 7 | There is a large set of common decisions that are based on semantic similarity, even if some of these decisions can be trivially implemented by a simple `if` condition and others can be so complex that they require a dedicated agent to be processed. 8 | 9 | This is where the Semantic Router Agent comes in. The Semantic Router Agent is a component, modelled as a node in the graph, that takes an input in the form of natural language and decides where to go next. Here next means following an edge in the graph that is associated to the semantically closest reference natural language text. 
In other words: the Semantic Router Agent chooses the next node based on a semantic routing table. 10 | 11 | **Example**: 12 | 13 | > An assistant agent receives a prompt from a user during a conversation and based on the content, it needs to perform different actions: 14 | > - If the user is posting a new request, start the request handling flow. 15 | > - If the user is satisfied with the conversation, terminate it and direct the flow to the auditing agent. 16 | > - If the user is not satisfied, involve a human. 17 | > 18 | > The above can be implemented with a semantic router agent with three possible routes, with each route associated with a text describing what is the expected content of the user prompt. 19 | -------------------------------------------------------------------------------- /docs/pages/syntactic_sdk/agntcy_acp_sdk.md: -------------------------------------------------------------------------------- 1 | # Agntcy ACP Client 2 | 3 | ## Introduction 4 | 5 | The Agent Connect Protocol SDK is an open-source library designed to 6 | facilitate the adoption of the Agent Connect Protocol. It offers tools 7 | for client implementations, enabling seamless integration, and communication 8 | between multi-agent systems. 9 | 10 | The SDK is current available in [Python](https://pypi.org/project/agntcy-acp/) [![PyPI version](https://img.shields.io/pypi/v/agntcy-acp.svg)](https://pypi.org/project/agntcy-acp/). 11 | 12 | ## Getting Started with the client 13 | 14 | To use the package, follow the steps below. 
15 | 16 | ### Requirements 17 | 18 | Python 3.9+ 19 | 20 | ### Installation 21 | 22 | Install the latest version from PyPi: 23 | ```shell 24 | pip install agntcy-acp 25 | ``` 26 | 27 | ### Usage 28 | 29 | ```{code-block} python 30 | from agntcy_acp import AsyncACPClient, AsyncApiClient, ApiException 31 | from agntcy_acp.models import RunCreate 32 | 33 | # Defining the host is optional and defaults to http://localhost 34 | config = ApiClientConfiguration( 35 | host="https://localhost:8081/", 36 | api_key={"x-api-key": os.environ["API_KEY"]}, 37 | retries=3 38 | ) 39 | 40 | # Enter a context with an instance of the API client 41 | async with AsyncApiClient(config) as api_client: 42 | agent_id = 'agent_id_example' # str | The ID of the agent. 43 | client = AsyncACPClient(api_client) 44 | 45 | try: 46 | api_response = client.create_run(RunCreate(agent_id="my-agent-id")) 47 | print(f"Run {api_response.run_id} is currently {api_response.status}") 48 | except ApiException as e: 49 | print("Exception when calling create_run: %s\n" % e) 50 | ``` 51 | 52 | ### Documentation for API Endpoints 53 | 54 | The complete documentation for all of the API Endpoints are 55 | available in the reference documentation for the API clients: 56 | 57 | * [ACPClient](https://agntcy.github.io/acp-sdk/html/agntcy_acp.html#agntcy_acp.ACPClient) 58 | * [AsyncACPClient](https://agntcy.github.io/acp-sdk/html/agntcy_acp.html#agntcy_acp.AsyncACPClient) 59 | 60 | ## Using ACP with LangGraph 61 | 62 | The SDK provides integration with LangGraph with the {py:obj}`agntcy_acp.langgraph.ACPNode` class 63 | that can be used as a graph node: 64 | 65 | ```python 66 | from enum import Enum 67 | from typing import List, Optional 68 | 69 | from langgraph.graph import END, START, StateGraph 70 | from pydantic import BaseModel, Field 71 | 72 | from agntcy_acp import ApiClientConfiguration 73 | from agntcy_acp.langgraph.acp_node import ACPNode 74 | 75 | 76 | class Type(Enum): 77 | human = 'human' 78 | assistant = 
'assistant' 79 | ai = 'ai' 80 | 81 | class Message(BaseModel): 82 | type: Type = Field( 83 | ..., 84 | description='indicates the originator of the message, a human or an assistant', 85 | ) 86 | content: str = Field(..., description='the content of the message', title='Content') 87 | 88 | class InputSchema(BaseModel): 89 | messages: Optional[List[Message]] = Field(None, title='Messages') 90 | is_completed: Optional[bool] = Field(None, title='Is Completed') 91 | 92 | class OutputSchema(BaseModel): 93 | messages: Optional[List[Message]] = Field(None, title='Messages') 94 | is_completed: Optional[bool] = Field(None, title='Is Completed') 95 | final_email: Optional[str] = Field( 96 | None, 97 | description='Final email produced by the mail composer', 98 | title='Final Email', 99 | ) 100 | 101 | class StateMeasures(BaseModel): 102 | input: InputSchema 103 | output: OutputSchema 104 | 105 | def main(): 106 | # Instantiate the local ACP node for the remote agent 107 | acp_node = ACPNode( 108 | name="mailcomposer", 109 | agent_id='50272dfd-4c77-4529-abbb-419bb1724230', 110 | client_config=ApiClientConfiguration.fromEnvPrefix("COMPOSER_"), 111 | input_path="input", 112 | input_type=InputSchema, 113 | output_path="output", 114 | output_type=OutputSchema, 115 | ) 116 | 117 | # Create the state graph 118 | sg = StateGraph(StateMeasures) 119 | 120 | # Add edges 121 | sg.add_edge(START, acp_node.get_name()) 122 | sg.add_edge(acp_node.get_name(), END) 123 | 124 | graph = sg.compile() 125 | output_state = graph.invoke({ 126 | "input": InputSchema(content=input), 127 | "output": OutputSchema(content="bad-output"), 128 | }) 129 | ``` 130 | 131 | ## Using the CLI to generate Agent-specific bindings 132 | 133 | The Client SDK includes a CLI tool to generate models or OpenAPI specs 134 | specific to an agent using the manifest descriptor. With these models 135 | the agent-specific data sent to ACP can be validated. 
By default, 136 | only the ACP parameters are validated by the SDK client. 137 | 138 | The CLI also provides validators for the ACP descriptor and manifest 139 | files. 140 | 141 | You can use the CLI easily: 142 | * using [poetry](https://python-poetry.org/): `poetry run acp --help` 143 | * with the package installed: `python3 -m agntcy_acp --help` 144 | 145 | Usage: `acp [OPTIONS] COMMAND [ARGS]...` 146 | 147 | Options: 148 | 149 | * `--help` Show this message and exit. 150 | 151 | Commands: 152 | 153 | * `generate-agent-models [OPTIONS] AGENT_DESCRIPTOR_PATH` 154 | 155 | Generate pydantic models from agent manifest or descriptor. 156 | 157 | Options: 158 | 159 | * `--output-dir TEXT` 160 | 161 | Pydantic models for specific agent based on provided 162 | agent descriptor or agent manifest [required] 163 | 164 | * `--model-file-name TEXT` 165 | 166 | Filename containing the pydantic model of the agent 167 | schemas 168 | 169 | * `generate-agent-oapi [OPTIONS] AGENT_DESCRIPTOR_PATH` 170 | 171 | Generate OpenAPI Spec from agent manifest or descriptor 172 | 173 | Options: 174 | 175 | * `--output TEXT` 176 | 177 | OpenAPI output file 178 | 179 | * `validate-acp-descriptor [OPTIONS] AGENT_DESCRIPTOR_PATH` 180 | 181 | Validate the Agent Descriptor contained in the file AGENT_DESCRIPTOR_PATH 182 | against the ACP specification 183 | 184 | * `validate-acp-manifest [OPTIONS] AGENT_MANIFEST_PATH` 185 | 186 | Validate the Agent Manifest contained in the file AGENT_MANIFEST_PATH 187 | against the Manifest specification 188 | 189 | 190 | ## Testing 191 | 192 | To run the various unit tests in the package, run `make test`. 193 | 194 | ## Roadmap 195 | 196 | See the [open issues](https://github.com/agntcy/acp-sdk/issues) for a list of proposed features and known issues. 
197 | 198 | ## Client Reference API 199 | 200 | For a detailed description of the classes and functions in the SDK, please see the 201 | [agntcy-acp Package Documentation](https://agntcy.github.io/acp-sdk/index.html) -------------------------------------------------------------------------------- /docs/pages/syntactic_sdk/hil.md: -------------------------------------------------------------------------------- 1 | # Human in the Loop Agent (Coming soon) 2 | 3 | In many cases, agentic applications require human input. 4 | 5 | Involving the human requires two things: 6 | 7 | * Interrupting a multi-application flow to wait for human input. This is provided by the different frameworks in different ways, but corresponds to pausing the application and resuming it when input is available. 8 | * Engaging with the human to collect the input. This can happen in many ways and each application will have its own preferences. 9 | 10 | The Human in the Loop (HIL) Agent is an agent that implements most common methods to engage with humans. 11 | 12 | Few examples below: 13 | 14 | * Webhook: the agent calls a provided webhook to request for input and receive it through OpenAPI or REST. 15 | * Email engagement: the agent sends an email and offers a web interface to provide input. 16 | * Webex, Slack, or other engagement: the agent uses a messaging paltform to request input. 17 | -------------------------------------------------------------------------------- /docs/pages/syntactic_sdk/sample_acp_descriptors/mailcomposer.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "ref": { 4 | "name": "org.agntcy.mailcomposer", 5 | "version": "0.0.1", 6 | "url": "https://github.com/agntcy/acp-spec/blob/main/docs/sample_acp_descriptors/mailcomposer.json" 7 | }, 8 | "description": "This agent is able to collect user intent through a chat interface and compose wonderful emails based on that." 
9 | }, 10 | "specs": { 11 | "capabilities": { 12 | "threads": true, 13 | "interrupts": true, 14 | "callbacks": true 15 | }, 16 | "input": { 17 | "type": "object", 18 | "description": "Agent Input", 19 | "properties": { 20 | "message": { 21 | "type": "string", 22 | "description": "Last message of the chat from the user" 23 | } 24 | } 25 | }, 26 | "thread_state": { 27 | "type": "object", 28 | "description": "The state of the agent", 29 | "properties": { 30 | "messages": { 31 | "type": "array", 32 | "description": "Full chat history", 33 | "items": { 34 | "type": "string", 35 | "description": "A message in the chat" 36 | } 37 | } 38 | } 39 | }, 40 | "output": { 41 | "type": "object", 42 | "description": "Agent Input", 43 | "properties": { 44 | "message": { 45 | "type": "string", 46 | "description": "Last message of the chat from the user" 47 | } 48 | } 49 | }, 50 | "config": { 51 | "type": "object", 52 | "description": "The configuration of the agent", 53 | "properties": { 54 | "style": { 55 | "type": "string", 56 | "enum": ["formal", "friendly"] 57 | } 58 | } 59 | }, 60 | "interrupts": [ 61 | { 62 | "interrupt_type": "mail_send_approval", 63 | "interrupt_payload": { 64 | "type": "object", 65 | "title": "Mail Approval Payload", 66 | "description": "Description of the email", 67 | "properties": { 68 | "subject": { 69 | "title": "Mail Subject", 70 | "description": "Subject of the email that is about to be sent", 71 | "type": "string" 72 | }, 73 | "body": { 74 | "title": "Mail Body", 75 | "description": "Body of the email that is about to be sent", 76 | "type": "string" 77 | }, 78 | "recipients": { 79 | "title": "Mail recipients", 80 | "description": "List of recipients of the email", 81 | "type": "array", 82 | "items": { 83 | "type": "string", 84 | "format": "email" 85 | } 86 | } 87 | }, 88 | "required": [ 89 | "subject", 90 | "body", 91 | "recipients" 92 | ] 93 | }, 94 | "resume_payload": { 95 | "type": "object", 96 | "title": "Email Approval Input", 97 | 
"description": "User Approval for this email", 98 | "properties": { 99 | "reason": { 100 | "title": "Approval Reason", 101 | "description": "Reason to approve or decline", 102 | "type": "string" 103 | }, 104 | "approved": { 105 | "title": "Approval Decision", 106 | "description": "True if approved, False if declined", 107 | "type": "boolean" 108 | } 109 | }, 110 | "required": [ 111 | "approved" 112 | ] 113 | } 114 | } 115 | ] 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | pip-tools==7.4.1 # BSD 2 | sphinx==8.2.3 # BSD 3 | sphinx-rtd-theme==3.0.2 # MIT 4 | sphinx-markdown-tables==0.0.17 # GPLv3 5 | sphinxcontrib.mermaid==1.0.0 # Apache 2.0 6 | recommonmark==0.7.1 # MIT 7 | myst-parser==4.0.1 # MIT 8 | sphinx-inline-tabs==2023.4.21 # MIT 9 | sphinxemoji==0.2.0 # BSD-3-Clause 10 | sphinx-copybutton==0.5.2 # MIT 11 | -------------------------------------------------------------------------------- /schema/oasf-data-model.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package schema.model; 4 | 5 | // The data model defines a schema for AI agent content representation. 6 | // The schema provides a way to describe agent's features, constraints, artifact 7 | // locators, versioning, ownership, or relevant details. 8 | message Agent { 9 | // Name of the agent. 10 | string name = 1; 11 | 12 | // Version of the agent. 13 | string version = 2; 14 | 15 | // List of agent’s authors in the form of `author-name `. 16 | repeated string authors = 3; 17 | 18 | // Creation timestamp of the agent in the RFC3339 format. 19 | // Specs: https://www.rfc-editor.org/rfc/rfc3339.html 20 | string created_at = 4; 21 | 22 | // Additional metadata associated with this agent. 
map<string, string> annotations = 5; 24 | 25 | // List of skills that this agent is capable of performing. 26 | // Specs: https://schema.oasf.agntcy.org/skills 27 | repeated string skills = 6; 28 | 29 | // Locators provide actual artifact locators of an agent. For example, 30 | // this can reference sources such as helm charts, docker images, binaries, etc. 31 | message Locator { 32 | // Location URI where this source locator can be found. 33 | string url = 1; 34 | 35 | // Type of the source locator, for example: "docker-image", "binary", "source-code". 36 | string type = 2; 37 | 38 | // Metadata associated with this source locator. 39 | map<string, string> annotations = 3; 40 | 41 | // Size in bytes of the source locator pointed by the `url` property. 42 | optional uint64 size = 4; 43 | 44 | // Digest of the source locator pointed by the `url` property. 45 | optional string digest = 5; 46 | } 47 | // List of source locators where this agent can be found or used from. 48 | repeated Locator locators = 7; 49 | 50 | // Extensions provide dynamic descriptors for an agent. For example, 51 | // security and categorization features can be described using extensions. 52 | message Extension { 53 | // Name of the extension. 54 | string name = 1; 55 | 56 | // Version of the extension. 57 | string version = 2; 58 | 59 | // Metadata associated with this extension. 60 | map<string, string> annotations = 3;