├── .python-version
├── src
└── maestro
│ ├── agents
│ ├── agent_store.json
│ ├── meta_agent
│ │ ├── doctor.sh
│ │ ├── workflow_workflow.yaml
│ │ ├── workflow_agent.yaml
│ │ └── test.sh
│ ├── prompt_agent.py
│ ├── custom_agent.py
│ ├── query_agent.py
│ └── remote_agent.py
│ ├── ui
│ ├── src
│ │ ├── vite-env.d.ts
│ │ ├── main.tsx
│ │ ├── index.css
│ │ └── api.ts
│ ├── tsconfig.json
│ ├── index.html
│ ├── vite.config.ts
│ ├── eslint.config.js
│ ├── Dockerfile
│ ├── tsconfig.node.json
│ ├── tsconfig.app.json
│ ├── package.json
│ └── README.md
│ ├── images
│ ├── maestro.png
│ ├── maestro-gemini.png
│ └── maestro-gemini2.png
│ ├── cli
│ └── __init__.py
│ ├── interface.py
│ ├── __init__.py
│ ├── logging_hooks.py
│ └── utils.py
├── MANIFEST.in
├── tests
├── workflow
│ ├── __init__.py
│ ├── pic.md
│ ├── workflow.yaml
│ ├── test_cron.py
│ ├── test_dspy_agent.py
│ ├── test_exception.py
│ ├── test_loop.py
│ └── test_parallel.py
├── agents
│ ├── beeai_agent
│ │ ├── __init__.py
│ │ ├── agents.yaml
│ │ ├── workflow.yaml
│ │ └── test_beeai.py
│ ├── crewai_agent
│ │ ├── __init__.py
│ │ ├── agents.yaml
│ │ ├── workflow.yaml
│ │ ├── crew_dummy.py
│ │ └── test_crewai.py
│ ├── openai_agent
│ │ ├── __init__.py
│ │ ├── agents.yaml
│ │ ├── agents_ollama.yaml
│ │ ├── agents_mcp.yaml
│ │ ├── agents_ollama_mcp.yaml
│ │ ├── workflow.yaml
│ │ ├── agents_search.yaml
│ │ ├── workflow_mcp.yaml
│ │ ├── workflow_search.yaml
│ │ ├── workflow_ollama.yaml
│ │ ├── workflow_params_creative.yaml
│ │ ├── workflow_ollama_mcp.yaml
│ │ ├── workflow_params_deterministic.yaml
│ │ ├── agents_params_creative.yaml
│ │ ├── agents_params_deterministic.yaml
│ │ └── test_openai.py
│ ├── meta_agents
│ │ ├── weather_prompt.txt
│ │ └── simple_prompt.txt
│ ├── scoring_agent
│ │ ├── agents.yaml
│ │ └── workflow.yaml
│ ├── test_utils.py
│ ├── test_prompt_agent.py
│ ├── test_agent.py
│ ├── test_agent_factory.py
│ └── test_scoring_agent.py
├── examples
│ ├── testrequirements.txt
│ ├── condition_workflow.yaml
│ ├── condition_agents.yaml
│ ├── test_condition.py
│ ├── test_parallel.py
│ ├── code_agent_with_dependencies_requirements.yaml
│ └── code_agent_with_dependencies.yaml
├── __init__.py
├── yamls
│ ├── workflowrun
│ │ └── simple_workflow_run.yaml
│ ├── agents
│ │ ├── slack_agent.yaml
│ │ ├── serve_test_agent.yaml
│ │ ├── openai_agent.yaml
│ │ ├── beeai_agent.yaml
│ │ ├── openai_mcp_agent.yaml
│ │ ├── beeai_mcp_agent.yaml
│ │ ├── prompt_agent.yaml
│ │ ├── dspy_agent.yaml
│ │ ├── query_agent.yaml
│ │ ├── funnier_local_agents.yaml
│ │ ├── funnier_agents.yaml
│ │ ├── simple_wow_agents.yaml
│ │ ├── simple_remote_agents.yaml
│ │ ├── code_exception_agent.yaml
│ │ ├── dry_run_loop_list_agent.yaml
│ │ ├── loop_agent.yaml
│ │ ├── evaluation_test_agent.yaml
│ │ ├── simple_containered_agent.yaml
│ │ ├── dry_run_loop_agent.yaml
│ │ ├── context_test_agent.yaml
│ │ ├── code_agent.yaml
│ │ ├── scoring_agent.yaml
│ │ ├── dry_run_inputs_agent.yaml
│ │ ├── multi_agents.yaml
│ │ ├── simple_local_agent.yaml
│ │ ├── dry_run_paralle_list_agent.yaml
│ │ ├── simple_agent.yaml
│ │ ├── dry_run_paralle_agent.yaml
│ │ ├── multi_agents_parallel.yaml
│ │ └── multi_agents_crew2.yaml
│ ├── tools
│ │ ├── test_remote_mcp_tool.yaml
│ │ ├── simple_tools.yaml
│ │ ├── maestro_knowlege_mcp.yaml
│ │ ├── mcp_tool.yaml
│ │ ├── duckduckgo_tools.yaml
│ │ ├── wikipedia_tools.yaml
│ │ └── openmeteo_tools.yaml
│ └── workflows
│ │ ├── dspy_workflow.yaml
│ │ ├── beeai_mcp_workflow.yaml
│ │ ├── openai_mcp_workflow.yaml
│ │ ├── slack_workflow.yaml
│ │ ├── code_exception_workflow.yaml
│ │ ├── code_workflow.yaml
│ │ ├── prompt_agent.yaml
│ │ ├── exception_no_exception_workflow.yaml
│ │ ├── loop_workflow.yaml
│ │ ├── openai_workflow.yaml
│ │ ├── multi_workflow_parallel.yaml
│ │ ├── simple_remote_workflow.yaml
│ │ ├── multi_workflow.yaml
│ │ ├── exception_workflow.yaml
│ │ ├── simple_workflow.yaml
│ │ ├── simple_wow_workflow.yaml
│ │ ├── dry_run_inputs_workflow.yaml
│ │ ├── parallel_workflow.yaml
│ │ ├── context_test_workflow.yaml
│ │ ├── input_workflow.yaml
│ │ ├── funnier_workflow.yaml
│ │ ├── scoring_workflow.yaml
│ │ ├── evaluation_test_workflow.yaml
│ │ ├── conditional_if_workflow.yaml
│ │ ├── simple_cron_workflow.yaml
│ │ ├── simple_cron_many_steps_workflow.yaml
│ │ ├── conditional_case_workflow.yaml
│ │ └── conditional_workflow.yaml
├── integration
│ ├── deploys
│ │ ├── kind-config.yaml
│ │ └── test_deploy.py
│ └── test_evaluation_mock.py
├── utils
│ ├── mcpclient.py
│ └── responses.yml
└── test_tool_utils.py
├── deployments
├── entrypoint_api.sh
├── service.yaml
├── deployment.yaml
├── maestro.sh
├── maestro.html
└── Dockerfile
├── operator
├── config
│ ├── prometheus
│ │ ├── kustomization.yaml
│ │ └── monitor.yaml
│ ├── network-policy
│ │ ├── kustomization.yaml
│ │ └── allow-metrics-traffic.yaml
│ ├── samples
│ │ ├── kustomization.yaml
│ │ └── maestro_v1alpha1_workflowrun.yaml
│ ├── scorecard
│ │ ├── bases
│ │ │ └── config.yaml
│ │ ├── patches
│ │ │ ├── basic.config.yaml
│ │ │ └── olm.config.yaml
│ │ └── kustomization.yaml
│ ├── manager
│ │ └── kustomization.yaml
│ ├── default
│ │ ├── manager_metrics_patch.yaml
│ │ └── metrics_service.yaml
│ ├── rbac
│ │ ├── metrics_reader_role.yaml
│ │ ├── service_account.yaml
│ │ ├── metrics_auth_role_binding.yaml
│ │ ├── metrics_auth_role.yaml
│ │ ├── role_binding.yaml
│ │ ├── leader_election_role_binding.yaml
│ │ ├── workflowrun_viewer_role.yaml
│ │ ├── workflowrun_editor_role.yaml
│ │ ├── leader_election_role.yaml
│ │ ├── role.yaml
│ │ └── kustomization.yaml
│ ├── crd
│ │ ├── kustomizeconfig.yaml
│ │ └── kustomization.yaml
│ └── manifests
│ │ └── kustomization.yaml
├── test
│ ├── config
│ │ ├── test-configmap.yaml
│ │ └── test-workflowrun.yaml
│ └── e2e
│ │ └── e2e_suite_test.go
├── entrypoint_api.sh
├── PROJECT
├── Dockerfile
├── api
│ └── v1alpha1
│ │ └── groupversion_info.go
└── internal
│ └── controller
│ └── workflowrun.go
├── .gitattributes
├── start_mcp.sh
├── Dockerfile-cli
├── tools
├── container-agent.sh
├── run-all.sh
├── package.json
├── buildimg.sh
├── scripts.py
├── get_release_name.py
├── run-meta-agent.sh
├── README.md
├── validate-mermaid.js
└── update_readmes.sh
├── .pylintrc
├── .github
├── CODEOWNERS
├── release.yml
└── workflows
│ ├── maestro_demo-tests.yaml
│ ├── maestro_check-mermaid.yaml
│ ├── maestro_check-schemas.yaml
│ ├── maestro_test-workflow.yaml
│ ├── stale.yml
│ ├── maestro_operator-build.yaml
│ └── maestro_run-tests.yaml
├── .pre-commit-config.yaml
├── mcp
├── examples
│ └── slack
│ │ ├── tools.yaml
│ │ ├── agents.yaml
│ │ ├── workflow.yaml
│ │ └── README.md
├── README.md
└── mcptools
│ └── slack_mcp.py
├── Dockerfile-agent
├── example.env
├── k8s
├── example
│ └── yamls
│ │ └── test_remotemcpserver.yaml
├── api
│ └── v1alpha1
│ │ └── remotemcpserver_types.go
└── config
│ └── crd
│ └── bases
│ └── maestro.ai4quantum.com_remotemcpservers.yaml
├── .release_names.md
├── SECURITY.md
├── Dockerfile
├── MAINTAINERS.md
└── pyproject.toml
/.python-version:
--------------------------------------------------------------------------------
1 | 3.12.8
2 |
--------------------------------------------------------------------------------
/src/maestro/agents/agent_store.json:
--------------------------------------------------------------------------------
1 | {}
2 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include build_backend.py
2 | include build.py
--------------------------------------------------------------------------------
/tests/workflow/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
--------------------------------------------------------------------------------
/deployments/entrypoint_api.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | python3.11 api.py
4 |
5 |
--------------------------------------------------------------------------------
/src/maestro/ui/src/vite-env.d.ts:
--------------------------------------------------------------------------------
1 | ///
2 |
--------------------------------------------------------------------------------
/tests/agents/beeai_agent/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
--------------------------------------------------------------------------------
/operator/config/prometheus/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - monitor.yaml
3 |
--------------------------------------------------------------------------------
/tests/agents/crewai_agent/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
--------------------------------------------------------------------------------
/tests/examples/testrequirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.31.0
2 | beautifulsoup4==4.12.2
3 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/operator/config/network-policy/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - allow-metrics-traffic.yaml
3 |
--------------------------------------------------------------------------------
/start_mcp.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | uv run python src/maestro/maestro_mcp/server.py --port ${1:-"8000"}
4 |
--------------------------------------------------------------------------------
/src/maestro/images/maestro.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI4quantum/maestro/HEAD/src/maestro/images/maestro.png
--------------------------------------------------------------------------------
/src/maestro/images/maestro-gemini.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI4quantum/maestro/HEAD/src/maestro/images/maestro-gemini.png
--------------------------------------------------------------------------------
/src/maestro/images/maestro-gemini2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI4quantum/maestro/HEAD/src/maestro/images/maestro-gemini2.png
--------------------------------------------------------------------------------
/Dockerfile-cli:
--------------------------------------------------------------------------------
1 | ARG MAESTRO_VERSION
2 | ARG GITHUB_ORG
3 | FROM ghcr.io/${GITHUB_ORG}/maestro:${MAESTRO_VERSION}
4 |
5 | ENTRYPOINT ["maestro"]
6 |
--------------------------------------------------------------------------------
/tools/container-agent.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export DRY_RUN=1
4 | maestro serve simple_agent.yaml --host 0.0.0.0 --port 30051 --agent-name test1
5 |
--------------------------------------------------------------------------------
/src/maestro/ui/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "files": [],
3 | "references": [
4 | { "path": "./tsconfig.app.json" },
5 | { "path": "./tsconfig.node.json" }
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/operator/config/samples/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ## Append samples of your project ##
2 | resources:
3 | - maestro_v1alpha1_workflowrun.yaml
4 | # +kubebuilder:scaffold:manifestskustomizesamples
5 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MASTER]
2 | jobs=4 #number of processes to use
3 | [BASIC]
4 | good-names=nameOfYourProject #names to be considered ok
5 | [pre-commit-hook]
6 | command=custom_pylint
7 | disable=E0401, C0301
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # This file defines the set of people responsible for different sections of the
2 | # Maestro code.
3 |
4 | # Global rule, unless specialized by a later one
5 | * @AI4quantum/maestro-team
--------------------------------------------------------------------------------
/operator/config/scorecard/bases/config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: scorecard.operatorframework.io/v1alpha3
2 | kind: Configuration
3 | metadata:
4 | name: config
5 | stages:
6 | - parallel: true
7 | tests: []
8 |
--------------------------------------------------------------------------------
/tests/agents/meta_agents/weather_prompt.txt:
--------------------------------------------------------------------------------
1 | I want to compare the current weather with the historical averages. To do this, I will need 2 agents, one to retrieve the weather and one to compare to the historical average.
--------------------------------------------------------------------------------
/operator/config/manager/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - manager.yaml
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | images:
6 | - name: controller
7 | newName: controller
8 | newTag: latest
9 |
--------------------------------------------------------------------------------
/operator/test/config/test-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: test-configmap
5 | data:
6 | DRY_RUN: "1"
7 | #BEE_API_KEY: sk-proj-testkey
8 | #BEE_API: "http://192.168.86.27:4000"
9 |
--------------------------------------------------------------------------------
/operator/config/default/manager_metrics_patch.yaml:
--------------------------------------------------------------------------------
1 | # This patch adds the args to allow exposing the metrics endpoint using HTTPS
2 | - op: add
3 | path: /spec/template/spec/containers/0/args/0
4 | value: --metrics-bind-address=:8443
5 |
--------------------------------------------------------------------------------
/operator/config/rbac/metrics_reader_role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: metrics-reader
5 | rules:
6 | - nonResourceURLs:
7 | - "/metrics"
8 | verbs:
9 | - get
10 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/astral-sh/ruff-pre-commit
3 | # Ruff version.
4 | rev: v0.12.0
5 | hooks:
6 | # Run the linter.
7 | - id: ruff-check
8 | # Run the formatter.
9 | - id: ruff-format
--------------------------------------------------------------------------------
/operator/config/rbac/service_account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: controller-manager
8 | namespace: system
9 |
--------------------------------------------------------------------------------
/tests/workflow/pic.md:
--------------------------------------------------------------------------------
1 | ```mermaid
2 | flowchart LR;
3 | In-->LLM_Call_1;
4 | LLM_Call_1-->|Output_1|Gate;
5 | Gate-->|Pass|LLM_Call_2;
6 | Gate-->|Fail|Exit;
7 | LLM_Call_2 -->|Output_2|LLM_Call_3;
8 | LLM_Call_3 --> Out;
--------------------------------------------------------------------------------
/mcp/examples/slack/tools.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: MCPTool
3 | metadata:
4 | name: slack
5 | namespace: default
6 | spec:
7 | url: http://127.0.0.1:30055/mcp
8 | transport: streamable-http
9 | name: slack
10 | description: slack
11 |
--------------------------------------------------------------------------------
/tests/agents/meta_agents/simple_prompt.txt:
--------------------------------------------------------------------------------
1 | number of agents: 2
2 | agent1: weather_fetcher – Retrieves weather data for a given location using OpenMeteo tool.
3 | agent2: temperature_comparator – Compares the retrieved temperature with historical averages using OpenMeteo tool.
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 | # Copyright © 2025 IBM
3 |
4 | import os
5 | import sys
6 |
7 | sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../src")
8 | sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../cli")
9 |
--------------------------------------------------------------------------------
/deployments/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: maestro
5 | spec:
6 | selector:
7 | app: maestro
8 | ports:
9 | - protocol: TCP
10 | port: 80
11 | targetPort: 5000
12 | nodePort: 30051
13 | type: NodePort
14 |
--------------------------------------------------------------------------------
/Dockerfile-agent:
--------------------------------------------------------------------------------
1 | ARG MAESTRO_VERSION
2 | ARG MAESTRO_REPOSITORY
3 | FROM ghcr.io/${MAESTRO_REPOSITORY}/maestro:${MAESTRO_VERSION}
4 |
5 | COPY tests/yamls/agents/simple_agent.yaml .
6 | COPY tools/container-agent.sh .
7 |
8 | EXPOSE 8000
9 | ENTRYPOINT ["./container-agent.sh"]
10 |
--------------------------------------------------------------------------------
/tools/run-all.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Runs all the tools and checks
4 |
5 | list=("./tools/check-schemas.sh" "./tools/check-mermaid.sh" "./tools/run-meta-agent.sh")
6 |
7 | for item in "${list[@]}"; do
8 | echo "Running 🏃🏽♀️➡️ $item"
9 | eval $item
10 | done
--------------------------------------------------------------------------------
/tests/agents/beeai_agent/agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: beetest
5 | labels:
6 | app: testapp
7 | spec:
8 | model: "llama3.1:latest"
9 | description: test
10 | instructions: input = 'Mock agent:'
11 | framework: beeai
12 |
--------------------------------------------------------------------------------
/tests/yamls/workflowrun/simple_workflow_run.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro.i-am-bee.com/v1alpha1
2 | kind: WorkflowRun
3 | metadata:
4 | name: simple-workflow-run
5 | labels:
6 | app: example2
7 | spec:
8 | agents:
9 | - simple-agents
10 | workflow: simple_workflow
11 |
12 |
--------------------------------------------------------------------------------
/src/maestro/ui/src/main.tsx:
--------------------------------------------------------------------------------
1 | import { StrictMode } from 'react'
2 | import { createRoot } from 'react-dom/client'
3 | import './index.css'
4 | import App from './App.tsx'
5 |
6 | createRoot(document.getElementById('root')!).render(
7 |
8 |
9 | ,
10 | )
11 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: openai_test
5 | labels:
6 | app: testapp
7 | spec:
8 | model: "gpt-4o-mini"
9 | description: test
10 | instructions: echo your prompt
11 | framework: openai
12 | mode: local
13 |
--------------------------------------------------------------------------------
/operator/config/scorecard/patches/basic.config.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: /stages/0/tests/-
3 | value:
4 | entrypoint:
5 | - scorecard-test
6 | - basic-check-spec
7 | image: quay.io/operator-framework/scorecard-test:v1.39.2
8 | labels:
9 | suite: basic
10 | test: basic-check-spec-test
11 |
--------------------------------------------------------------------------------
/operator/entrypoint_api.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | cp /etc/config/agents ./src/agents.yaml
4 | cp /etc/config/workflow ./src/workflow.yaml
5 | mkdir static
6 | cp /etc/config/workflow ./static/workflow.yaml
7 | cp /etc/config/agents ./static/agents.yaml
8 |
9 | export HOME="/usr/src/app"
10 | python3.11 src/api.py
11 |
12 |
--------------------------------------------------------------------------------
/example.env:
--------------------------------------------------------------------------------
1 | BEE_API=http://localhost:4000
2 | BEE_API_KEY=sk-proj-testkey
3 |
4 | CODE_INTERPRETER_URL=http://localhost:50081
5 | CODE_INTERPRETER_TMPDIR=../beeai-framework-py-starter/tmp/code_interpreter_target
6 |
7 | # This is required to prevent some pydantic serialization errors
8 | DEFER_PYDANTIC_BUILD=false
9 |
10 |
--------------------------------------------------------------------------------
/tests/yamls/agents/slack_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: slack
5 | labels:
6 | app: slack-example
7 | custom_agent: slack_agent
8 | spec:
9 | model: dummy
10 | framework: custom
11 | mode: remote
12 | description: slack agent
13 | instructions: post a message to slack
--------------------------------------------------------------------------------
/k8s/example/yamls/test_remotemcpserver.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro.ai4quantum.com/v1alpha1
2 | kind: RemoteMCPServer
3 | metadata:
4 | name: test-remotemcpserver
5 | namespace: default
6 | spec:
7 | url: https://api.githubcopilot.com/mcp
8 | transport: streamable-http
9 | name: remotemcpserver
10 | description: remote MCP server
11 |
12 |
--------------------------------------------------------------------------------
/tests/integration/deploys/kind-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kind.x-k8s.io/v1alpha4
2 | kind: Cluster
3 | nodes:
4 | - role: control-plane
5 | extraPortMappings:
6 | - containerPort: 30051
7 | hostPort: 30051
8 | listenAddress: "0.0.0.0" # Optional, defaults to "0.0.0.0"
9 | protocol: tcp # Optional, defaults to tcp
10 | - role: worker
11 |
--------------------------------------------------------------------------------
/tests/yamls/tools/test_remote_mcp_tool.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: MCPTool
3 | metadata:
4 | name: remote-github
5 | namespace: default
6 | spec:
7 | url: "https://api.githubcopilot.com/mcp"
8 | transport: streamable-http
9 | name: remotemcpserver
10 | description: remote MCP server
11 | secretName: githubtoken
12 |
13 |
--------------------------------------------------------------------------------
/mcp/examples/slack/agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: slack
5 | labels:
6 | app: slack
7 | spec:
8 | model: llama3.1:latest
9 | description: post slack message
10 | instructions: You are a helpful agent. You have a slack tool.
11 | framework: openai
12 | mode: local
13 | tools:
14 | - slack
15 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/agents_ollama.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: openai_test_ollama
5 | labels:
6 | app: testapp_ollama
7 | spec:
8 | model: "granite3.3:8b"
9 | description: test
10 | instructions: echo your prompt
11 | framework: openai
12 | mode: local
13 | tools:
14 | - web_search
15 |
--------------------------------------------------------------------------------
/src/maestro/ui/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Maestro
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/tests/yamls/agents/serve_test_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: serve-test-agent
5 | spec:
6 | framework: code
7 | description: testing the serve command
8 | instructions: Run the code in the `code` field.
9 | code: |
10 | print(f"You asked: {input}")
11 | output["response"] = "Hello from serve-test-agent!"
--------------------------------------------------------------------------------
/tests/yamls/agents/openai_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: my-agent
5 | labels:
6 | app: my-agent
7 | spec:
8 | model: llama3.1:latest
9 | description: my-agent
10 | instructions: You are a helpful agent. Respond to the user's question, making use of any required tools
11 | framework: openai
12 | mode: local
13 |
--------------------------------------------------------------------------------
/operator/config/rbac/metrics_auth_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: metrics-auth-rolebinding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: metrics-auth-role
9 | subjects:
10 | - kind: ServiceAccount
11 | name: controller-manager
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/tools/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "maestro-validation-tools",
3 | "version": "1.0.0",
4 | "description": "Validation tools for Maestro CI",
5 | "type": "module",
6 | "scripts": {
7 | "validate": "node validate-mermaid.js"
8 | },
9 | "dependencies": {
10 | "mermaid": "^11.4.1",
11 | "jsdom": "^25.0.1",
12 | "dompurify": "^3.2.2"
13 | }
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/tests/agents/beeai_agent/workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: beetest-deployment
5 | labels:
6 | app: testapp
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp
12 | agents:
13 | - beetest
14 | prompt: Welcome
15 | steps:
16 | - name: begin
17 | agent: beetest
18 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/agents_mcp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: openai_test_mcp
5 | labels:
6 | app: testapp_mcp
7 | spec:
8 | model: "gpt-4o-mini"
9 | description: test
10 | instructions: You are a helpful agent. Respond to the user's question, making use of any required tools
11 | framework: openai
12 | mode: local
13 |
14 |
--------------------------------------------------------------------------------
/tests/yamls/agents/beeai_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: beeai
10 | mode: local
11 | description: this is a test
12 | tools:
13 | - code_interpreter
14 | - test
15 | instructions: print("this is a test.")
16 |
--------------------------------------------------------------------------------
/tests/agents/crewai_agent/agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: DummyCrew
5 | labels:
6 | app: crewaitest
7 | module: tests.agents.crewai_agent.crew_dummy
8 | class: DummyCrew
9 | factory: dummy_crew
10 | spec:
11 | model: "llama3.1:latest"
12 | description: test
13 | instructions: input = 'Mock agent:'
14 | framework: crewai
15 |
--------------------------------------------------------------------------------
/operator/config/rbac/metrics_auth_role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: metrics-auth-role
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
18 |
--------------------------------------------------------------------------------
/tests/yamls/agents/openai_mcp_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: my-agent
5 | labels:
6 | app: my-agent
7 | spec:
8 | model: granite3.3:8b
9 | description: my-agent
10 | instructions: You are a helpful agent. Respond to the user's question, making use of any required tools
11 | framework: openai
12 | mode: local
13 | tools:
14 | - fetch
15 |
--------------------------------------------------------------------------------
/src/maestro/cli/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 | # Copyright © 2025 IBM
3 |
4 | """CLI module initialization and path configuration."""
5 |
6 | import os
7 | import sys
8 | from dotenv import load_dotenv
9 |
10 | load_dotenv()
11 |
12 | sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../src")
13 | sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../cli")
14 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/agents_ollama_mcp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: openai_test_mcp_ollama
5 | labels:
6 | app: testapp_mcp_ollama
7 | spec:
8 | model: "granite3.3:8b"
9 | description: test
10 | instructions: You are a helpful agent. Respond to the users question, making use of any required tools
11 | framework: openai
12 | mode: local
13 |
14 |
--------------------------------------------------------------------------------
/tests/yamls/agents/beeai_mcp_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test1
5 | labels:
6 | app: test-example
7 | spec:
8 | # model: meta-llama/llama-3-1-70b-instruct
9 | model: llama3.1
10 | framework: beeai
11 | mode: local
12 | description: this is a test
13 | tools:
14 | - code_interpreter
15 | - osv
16 | instructions: print("this is a test.")
17 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: openai_test
5 | labels:
6 | app: testapp
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp
12 | agents:
13 | - openai_test
14 | prompt: Welcome OpenAI to the AI Agent OSS party
15 | steps:
16 | - name: begin
17 | agent: openai_test
18 |
--------------------------------------------------------------------------------
/tests/yamls/agents/prompt_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test-prompt-agent
5 | labels:
6 | app: prompt-example
7 | custom_agent: prompt_agent
8 | spec:
9 | model: dummy
10 | framework: custom
11 | mode: remote
12 | description: prompt agent
13 | instructions: "This message should be used to test the prompt agent. The agent should return the message as is."
--------------------------------------------------------------------------------
/tests/agents/openai_agent/agents_search.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: openai_test_search
5 | labels:
6 | app: testapp_search
7 | spec:
8 | model: "gpt-4o-mini"
9 | description: test
10 | instructions: You are a helpful agent. Respond to the users question, making use of any required tools
11 | framework: openai
12 | mode: local
13 | tools:
14 | - web_search
15 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/workflow_mcp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: openai_test_mcp
5 | labels:
6 | app: testapp_mcp
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp_mcp
12 | agents:
13 |     - openai_test_mcp
14 | prompt: What tools do you have access to?
15 | steps:
16 | - name: begin
17 | agent: openai_test_mcp
--------------------------------------------------------------------------------
/.release_names.md:
--------------------------------------------------------------------------------
1 | **IMPORTANT MAESTROS**
2 |
3 | - ~~Arturo Toscanini~~
4 | - ~~Nadia Boulanger~~
5 | - ~~Claudio Abbado~~
6 | - ~~Marin Alsop~~
7 | - ~~Pierre Boulez~~
8 | - ~~Daniel Barenboim~~
9 | - ~~Leonard Bernstein~~
10 | - ~~Sir Colin Davis~~
11 | - JoAnn Falletta
12 | - Wilhelm Furtwängler
13 | - Carlos Kleiber
14 | - Herbert von Karajan
15 | - Zubin Mehta
16 | - Georg Solti
17 | - Simon Rattle
18 | - Seiji Ozawa
19 | - Xian Zhang
--------------------------------------------------------------------------------
/tests/yamls/agents/dspy_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: DSPyAgent
5 | labels:
6 | app: my-agent
7 | spec:
8 | model: ollama/granite3.2:latest
9 | url: "http://localhost:11434"
 10 |   description: DSPy agent that assists user
11 | instructions: You are a helpful agent. Respond to the users question, making use of any required tools
12 | framework: dspy
13 | mode: local
14 |
--------------------------------------------------------------------------------
/.github/release.yml:
--------------------------------------------------------------------------------
1 | changelog:
2 | categories:
3 | - title: 'Features and Enhancements'
4 | labels:
5 | - feature
6 | - enhancement
7 | - title: 'Bug Fixes'
8 | labels:
9 | - bug
10 | - title: 'Documentation'
11 | labels:
12 | - documentation
13 | - title: 'Maintenance'
14 | labels:
15 | - chore
16 | - title: 'Other Changes'
17 | labels:
18 | - '*'
--------------------------------------------------------------------------------
/tests/agents/crewai_agent/workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: maestro-deployment
5 | labels:
6 | app: crewaitest
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: crewaitest
12 | agents:
13 | - DummyCrew
14 | prompt: Show me some activities to do in London in the cold weather
15 | steps:
16 | - name: begin
17 | agent: DummyCrew
18 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/dspy_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: DSPy agent test workflow
5 | labels:
6 | app: test1
7 | spec:
8 | template:
9 | metadata:
10 | name: dspy_test
11 | labels:
12 | app: test
13 | agents:
14 | - DSPyAgent
15 | prompt: Where is the next Olympics game location?
16 | steps:
17 | - name: step1
18 | agent: DSPyAgent
19 |
20 |
--------------------------------------------------------------------------------
/operator/config/rbac/role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: manager-rolebinding
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: ClusterRole
11 | name: manager-role
12 | subjects:
13 | - kind: ServiceAccount
14 | name: controller-manager
15 | namespace: system
16 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/beeai_mcp_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: beeai workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - test1
16 | prompt: This is a test input
17 | steps:
18 | - name: step1
19 | agent: test1
20 |
21 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/openai_mcp_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: Openai agent test workflow
5 | labels:
6 | app: test1
7 | spec:
8 | template:
9 | metadata:
10 | name: openai_test
11 | labels:
12 | app: my-agent
13 | agents:
14 | - my-agent
15 | prompt: What are the available tools you have?
16 | steps:
17 | - name: step1
18 | agent: my-agent
19 |
20 |
--------------------------------------------------------------------------------
/operator/config/samples/maestro_v1alpha1_workflowrun.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro.ai4quantum.com/v1alpha1
2 | kind: WorkflowRun
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: workflowrun-sample
8 | spec:
9 | agents:
10 | - test1
11 | - test2
12 | workflow: sequence-test-deployment
13 | loglevel: DEBUG
14 | nodeport: 30051
15 | environments: myconfigmap
16 | secrets: mysecret
17 |
--------------------------------------------------------------------------------
/operator/test/config/test-workflowrun.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro.ai4quantum.com/v1alpha1
2 | kind: WorkflowRun
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: weather-checker-ai
8 | spec:
9 | agents:
10 | - temperature-agent
11 | - hot-or-not-agent
12 | workflow: maestro-deployment
13 | loglevel: DEBUG
14 | nodeport: 30051
15 | environments: test-configmap
16 | #secrets: mysecret
17 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/workflow_search.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: openai_test_search
5 | labels:
6 | app: testapp_search
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp_search
12 | agents:
13 |     - openai_test_search
14 | prompt: What are the top news headlines in the United Kingdom today
15 | steps:
16 | - name: begin
17 | agent: openai_test_search
18 |
--------------------------------------------------------------------------------
/operator/config/default/metrics_service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | control-plane: controller-manager
6 | app.kubernetes.io/name: operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: controller-manager-metrics-service
9 | namespace: system
10 | spec:
11 | ports:
12 | - name: https
13 | port: 8443
14 | protocol: TCP
15 | targetPort: 8443
16 | selector:
17 | control-plane: controller-manager
18 |
--------------------------------------------------------------------------------
/operator/config/rbac/leader_election_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: leader-election-rolebinding
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: Role
11 | name: leader-election-role
12 | subjects:
13 | - kind: ServiceAccount
14 | name: controller-manager
15 | namespace: system
16 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/workflow_ollama.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: openai_test_ollama
5 | labels:
6 | app: testapp_ollama
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp_ollama
12 | agents:
13 | - openai_test_ollama
14 | prompt: Welcome OpenAI to the AI Agent OSS party, now with Ollama
15 | steps:
16 | - name: begin
17 | agent: openai_test_ollama
18 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/workflow_params_creative.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: test_creative
5 | labels:
6 | app: testapp_model_params
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp_model_params
12 | agents:
13 | - test_creative
14 | prompt: Describe the color blue in three words
15 | steps:
16 | - name: test_creative
17 | agent: test_creative
18 |
19 |
--------------------------------------------------------------------------------
/mcp/examples/slack/workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: slack
5 | labels:
6 | app: slack
7 | spec:
8 | template:
9 | metadata:
10 | name: post slack message
11 | labels:
 12 |       app: slack
13 | agents:
14 | - slack
 15 |     prompt: Post "I am Maestro agent!! I can help you to do things done" message to slack channel "CTBJXJBJN"
16 | steps:
17 | - name: slack
18 | agent: slack
19 |
20 |
21 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/workflow_ollama_mcp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: openai_test_mcp_ollama
5 | labels:
6 | app: testapp_mcp_ollama
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp_mcp_ollama
12 | agents:
13 |     - openai_test_mcp_ollama
14 | prompt: What tools do you have access to? Include all kinds of different types of tools
15 | steps:
16 | - name: begin
17 | agent: openai_test_mcp_ollama
--------------------------------------------------------------------------------
/tests/workflow/workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: sequence-test-deployment
5 | labels:
6 | app: testapp
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp
12 | agents:
13 | - agent1
14 | - agent2
15 | prompt: Start of the workflow
16 | steps:
17 | - name: step1
18 | agent: agent1
19 | - name: step2
20 | agent: agent2
21 | - name: step3
22 | agent: agent1
23 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/slack_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: slack workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: slack agent
11 | labels:
12 | app: slack agent
13 | use-case: test
14 | agents:
15 | - slack
16 | prompt: Post a message Hi! to the slack channel set "SLACK_BOT_TOKEN" and "SLACK_TEAM_ID"
17 | steps:
18 | - name: slackstep
19 | agent: slack
20 |
21 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/workflow_params_deterministic.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: openai_test_deterministic
5 | labels:
6 | app: testapp_model_params
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp_model_params
12 | agents:
13 | - openai_test_deterministic
14 | prompt: Describe the color blue in three words
15 | steps:
16 | - name: test_deterministic
17 | agent: openai_test_deterministic
18 |
19 |
--------------------------------------------------------------------------------
/src/maestro/interface.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | from typing import Dict, Any, Optional
4 |
5 |
class Message:
    """Container for data exchanged with an agent.

    Args:
        messages: initial mapping of message data sent to an agent;
            a falsy value (``None`` or ``{}``) yields a fresh empty dict
            so instances never share mutable state.
    """

    def __init__(self, messages: Optional[Dict[str, Any]] = None):
        self.messages = messages or {}

    def add_message(self, messages: Optional[Dict[str, Any]] = None) -> None:
        """Merge additional message data into the stored messages.

        Args:
            messages: mapping to merge into ``self.messages``. ``None``
                (the default, matching the previous no-argument
                signature) leaves the stored messages untouched.
        """
        # Previously a stub with no parameters; the optional argument is
        # backward compatible because a no-arg call is still a no-op.
        if messages:
            self.messages.update(messages)

    def get_messages(self) -> Dict[str, Any]:
        """Return the stored message mapping.

        The previous stub implicitly returned ``None`` even though the
        class stores ``self.messages``; returning the dict makes the
        accessor usable while staying compatible with callers that
        ignored the result.
        """
        return self.messages
--------------------------------------------------------------------------------
/tests/yamls/tools/simple_tools.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Tool
3 | metadata:
4 | name: tool1
5 | labels:
6 | app: tool-example
7 | spec:
8 | description: tool definition template
9 | inputSchema:
10 | type: jsonSchema
11 | schema: |
12 | {
13 | "input 1": "string",
14 | "input 2": "string",
15 | }
16 |
17 | outputSchema:
18 | type: jsonSchema
19 | schema: |
20 | {
21 | "result 1": "string",
22 | "result 2": "string",
23 | }
24 |
25 |
--------------------------------------------------------------------------------
/deployments/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: maestro
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: maestro
10 | template:
11 | metadata:
12 | labels:
13 | app: maestro
14 | spec:
15 | containers:
16 | - name: maestro
17 | image: maestro:latest
18 | imagePullPolicy: Never
19 | ports:
20 | - containerPort: 5000
21 | env:
22 | - name: DUMMY
23 | value: dummyvalue
24 |
25 |
--------------------------------------------------------------------------------
/src/maestro/__init__.py:
--------------------------------------------------------------------------------
# SPDX-License-Identifier: Apache-2.0
# Copyright © 2025 IBM

"""Top-level maestro package: re-exports the public API."""

from .workflow import Workflow

from .agents.crewai_agent import CrewAIAgent
from .agents.openai_agent import OpenAIAgent
from .agents.remote_agent import RemoteAgent

from .deploy import Deploy

from dotenv import load_dotenv

# Load .env configuration once at package import time.
load_dotenv()

# Public API of the package. ("Deploy" was previously listed twice.)
__all__ = [
    "Workflow",
    "Deploy",
    "CrewAIAgent",
    "OpenAIAgent",
    "RemoteAgent",
]
--------------------------------------------------------------------------------
/src/maestro/ui/vite.config.ts:
--------------------------------------------------------------------------------
// Vite build/dev-server configuration for the Maestro UI.
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
import topLevelAwait from 'vite-plugin-top-level-await'
import wasm from 'vite-plugin-wasm'

export default defineConfig({
  // wasm() + topLevelAwait() enable loading WebAssembly modules that
  // rely on top-level await, alongside the standard React plugin.
  plugins: [react(), wasm(), topLevelAwait()],
  server: {
    port: 5173,
    host: true, // listen on all interfaces (e.g. when run in a container)
    // During development, forward backend routes to the API server.
    proxy: {
      '/chat': 'http://127.0.0.1:8000',
      '/health': 'http://127.0.0.1:8000',
      '/diagram': 'http://127.0.0.1:8000',
    },
  },
})
--------------------------------------------------------------------------------
/operator/config/scorecard/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - bases/config.yaml
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | patches:
6 | - path: patches/basic.config.yaml
7 | target:
8 | group: scorecard.operatorframework.io
9 | kind: Configuration
10 | name: config
11 | version: v1alpha3
12 | - path: patches/olm.config.yaml
13 | target:
14 | group: scorecard.operatorframework.io
15 | kind: Configuration
16 | name: config
17 | version: v1alpha3
18 | # +kubebuilder:scaffold:patches
19 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/code_exception_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: code workflow
5 | labels:
6 | app: code
7 | spec:
8 | template:
9 | metadata:
10 | name: code agent
11 | labels:
12 | app: code agent
13 | use-case: test
14 | agents:
15 | - code
16 | - test1
17 | prompt: Hello code! How are you?
18 | exception:
19 | name: step1
20 | agent: test1
21 | steps:
22 | - name: code_step
23 | agent: code
24 |
25 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/code_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: code workflow
5 | labels:
6 | app: code
7 | spec:
8 | template:
9 | metadata:
10 | name: code agent
11 | labels:
12 | app: code agent
13 | use-case: test
14 | agents:
15 | - code
16 | - github-lister
17 | prompt: Hello code! How are you?
18 | steps:
19 | - name: code_step
20 | agent: code
21 | - name: github_code_testing
22 | agent: github-lister
23 |
24 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/prompt_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: prompt workflow
5 | labels:
6 | app: example
7 | spec:
8 | template:
9 | metadata:
10 | name: prompt agent
11 | labels:
12 | app: test prompt agent
13 | use-case: test
14 | agents:
15 | - test-prompt-agent
16 | prompt: This is a dummy prompt, this should not be returned. The prompt in the agent definition instructions should be returned.
17 | steps:
18 | - name: step1
19 | agent: test-prompt-agent
20 |
--------------------------------------------------------------------------------
/tests/examples/condition_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: maestro-deployment
5 | labels:
6 | app: mas-example
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: mas-example
12 | agents:
13 | - expert
14 | - colleague
15 | prompt: Tell me a joke about IBM
16 | steps:
17 | - name: expert
18 | agent: expert
19 | - name: colleague
20 | agent: colleague
21 | condition:
22 | - if: (input.find('funnier') != -1)
23 | then: expert
24 |
--------------------------------------------------------------------------------
/operator/config/rbac/workflowrun_viewer_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to view workflowruns.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: workflowrun-viewer-role
9 | rules:
10 | - apiGroups:
11 | - maestro.ai4quantum.com
12 | resources:
13 | - workflowruns
14 | verbs:
15 | - get
16 | - list
17 | - watch
18 | - apiGroups:
19 | - maestro.ai4quantum.com
20 | resources:
21 | - workflowruns/status
22 | verbs:
23 | - get
24 |
--------------------------------------------------------------------------------
/tests/yamls/agents/query_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test-query-agent
5 | labels:
6 | app: query-example
7 | custom_agent: query_agent
8 | query_input:
9 | db_name: testDB
10 | collection_name: MaestroDoc
11 | limit: 10
12 | spec:
13 | framework: custom
14 | url: http://localhost:8030/mcp
15 | description: query agent
16 | output: |
17 | {{result}}
18 |
19 | ---
20 |
21 | Given the context above, your knowledge, answer the question as best as possible. Be concise.
22 |
23 | Question: {{prompt}}
24 |
--------------------------------------------------------------------------------
/operator/config/crd/kustomizeconfig.yaml:
--------------------------------------------------------------------------------
1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD
2 | nameReference:
3 | - kind: Service
4 | version: v1
5 | fieldSpecs:
6 | - kind: CustomResourceDefinition
7 | version: v1
8 | group: apiextensions.k8s.io
9 | path: spec/conversion/webhook/clientConfig/service/name
10 |
11 | namespace:
12 | - kind: CustomResourceDefinition
13 | version: v1
14 | group: apiextensions.k8s.io
15 | path: spec/conversion/webhook/clientConfig/service/namespace
16 | create: false
17 |
18 | varReference:
19 | - path: metadata/annotations
20 |
--------------------------------------------------------------------------------
/src/maestro/agents/meta_agent/doctor.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Environment "doctor" for the meta-agent: reports whether the maestro
# CLI is available and that the script's directory exists. Only the
# directory check aborts (exit 1); a missing CLI just prints a hint.

echo "🔍 Checking environment..."

# Check if maestro is installed
# A missing CLI does not abort the script -- it only suggests `uv sync`
# and continues on to the directory check.
if uv run which maestro &> /dev/null; then
    echo "✅ Maestro CLI is installed: $(uv run which maestro)"
else
    echo "❌ Maestro CLI is not installed. Please run:"
    echo "   uv sync"
fi

# Check meta-agent directory structure
# NOTE(review): `dirname "$0"` is the directory containing this script
# itself, so this test is effectively always true -- confirm whether a
# specific subdirectory was the intended target.
echo "📂 Checking meta-agent directory structure..."
if [[ -d "$(dirname "$0")" ]]; then
    echo "✅ Environment check passed!"
else
    echo "❌ Error: meta-agent directory not found"
    exit 1
fi
--------------------------------------------------------------------------------
/tests/yamls/workflows/exception_no_exception_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: simple workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - test1
16 | - test2
17 | - test3
18 | - test4
19 | prompt: This is a test input
20 | steps:
21 | - name: step1
22 | agent: test1
23 | - name: step2
24 | agent: test10
25 | - name: step3
26 | agent: test3
27 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Reporting a Vulnerability
4 |
5 | To report vulnerabilities, you can privately report a potential security issue
6 | via the GitHub security vulnerabilities feature. This can be done here:
7 |
8 | https://github.com/AI4quantum/maestro/security/advisories
9 |
10 | Please do **not** open a public issue about a potential security vulnerability.
11 |
12 | You can find more details on the security vulnerability feature in the GitHub
13 | documentation here:
14 |
15 | https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability
16 |
--------------------------------------------------------------------------------
/tests/yamls/agents/funnier_local_agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: colleague
5 | labels:
6 | app: funnier-example
7 | spec:
8 | model: llama3.1
9 | mode: local
10 | framework: beeai
11 | instructions: you are a joke telling agent
12 | description: this is a simple agent
13 |
14 | ---
15 | apiVersion: maestro/v1alpha1
16 | kind: Agent
17 | metadata:
18 | name: expert
19 | labels:
20 | app: funnier-example
21 | spec:
22 | model: llama3.1
23 | framework: beeai
24 | mode: local
25 | instructions: you are a joke telling agent
26 | description: this is a simple agent
27 |
--------------------------------------------------------------------------------
/operator/config/rbac/workflowrun_editor_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to edit workflowruns.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: workflowrun-editor-role
9 | rules:
10 | - apiGroups:
11 | - maestro.ai4quantum.com
12 | resources:
13 | - workflowruns
14 | verbs:
15 | - create
16 | - delete
17 | - get
18 | - list
19 | - patch
20 | - update
21 | - watch
22 | - apiGroups:
23 | - maestro.ai4quantum.com
24 | resources:
25 | - workflowruns/status
26 | verbs:
27 | - get
28 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/loop_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: loop workflow
5 | labels:
6 | app: example
7 | spec:
8 | strategy:
9 | type: sequence
10 | template:
11 | metadata:
12 | name: loop-workflow
13 | labels:
14 | app: example
15 | use-case: test
16 | agents:
17 | - generate1-10
18 | - countdown
19 | prompt: Generate a number
20 | steps:
21 | - name: step1
22 | agent: generate1-10
23 | - name: step2
24 | loop:
25 | agent: countdown
26 | until: (input.find("happy") != -1)
27 |
28 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/openai_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: Openai agent test workflow
5 | labels:
6 | app: test1
7 | spec:
8 | template:
9 | metadata:
10 | name: openai_test
11 | labels:
12 | app: my-agent
13 | agents:
14 | - my-agent
15 | prompt: Say a funny story about dogs and cats.
16 | context: "Dogs and cats are traditional pets that often have amusing interactions due to their different personalities - dogs being enthusiastic and social, cats being independent and aloof."
17 | steps:
18 | - name: step1
19 | agent: my-agent
20 |
21 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/agents_params_creative.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test_creative
5 | labels:
6 | app: testapp_model_params
7 | spec:
8 | model: "gpt-oss:latest"
9 | description: OpenAI agent with creative parameters (high temperature)
10 | instructions: |
11 | You are a creative assistant. Respond with varied and imaginative answers.
12 | Elaborate on the prompt with creative details.
13 | framework: openai
14 | mode: local
15 | model_parameters:
16 | max_tokens: 200
17 | temperature: 1.5
18 | top_p: 0.95
19 | frequency_penalty: 0.3
20 | presence_penalty: 0.6
21 |
22 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/agents_params_deterministic.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: openai_test_deterministic
5 | labels:
6 | app: testapp_model_params
7 | spec:
8 | model: "gpt-oss:latest"
9 | description: OpenAI agent with deterministic parameters (low temperature)
10 | instructions: |
11 | You are a helpful assistant. Respond concisely and consistently.
12 | Echo back the key points from the prompt.
13 | framework: openai
14 | mode: local
15 | model_parameters:
16 | max_tokens: 200
17 | temperature: 0.0
18 | top_p: 0.1
19 | frequency_penalty: 0.0
20 | presence_penalty: 0.0
21 |
22 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/multi_workflow_parallel.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: multi agent workflow
5 | labels:
6 | app: multi-agent
7 | spec:
8 | template:
9 | metadata:
10 | name: multi-agent-workflow
11 | labels:
12 | app: multi-agent
13 | use-case: test
14 | agents:
15 | - beeaiagent
16 | - Generic_Crew
17 | - JudgeAgent
18 | prompt: Pick a number between 1 and 10 for a guessing game
19 | steps:
20 | - name: guess
21 | parallel:
22 | - beeaiagent
23 | - Generic_Crew
24 | - name: JudgeAgent
25 | agent: JudgeAgent
26 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/simple_remote_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: remote workflow
5 | labels:
6 | app: example
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-remote
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - remote_test1
16 | - remote_test2
17 | workflows:
18 | - name: test_workflow1
19 | url: "http://127.0.0.1:8001"
20 | prompt: remote workflow prompt
21 | steps:
22 | - name: remote_step1
23 | agent: remote_test1
24 | - name: remote_step2
25 | agent: remote_test2
26 |
27 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/multi_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: multi agent workflow
5 | labels:
6 | app: multi-agent
7 | spec:
8 | template:
9 | metadata:
10 | name: multi-agent-workflow
11 | labels:
12 | app: multi-agent
13 | use-case: test
14 | agents:
15 | - beeaiagent
16 | - Generic_Crew
17 | - JudgeAgent
18 | prompt: Pick a number between 1 and 10 for a guessing game
19 | steps:
20 | - name: beeai
21 | agent: beeaiagent
22 | - name: crewai
23 | agent: Generic_Crew
24 | - name: JudgeAgent
25 | agent: JudgeAgent
26 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/exception_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: simple workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - test1
16 | - test2
17 | - test3
18 | - test4
19 | prompt: This is a test input
20 | exception:
21 | name: step4
22 | agent: test4
23 | steps:
24 | - name: step1
25 | agent: test1
26 | - name: step2
27 | agent: test10
28 | - name: step3
29 | agent: test3
30 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/simple_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: simple workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - test1
16 | - test2
17 | - test3
18 | - test4
19 | prompt: This is a test input
20 | exception:
21 | name: step4
22 | agent: test4
23 | steps:
24 | - name: step1
25 | agent: test1
26 | - name: step2
27 | agent: test2
28 | - name: step3
29 | agent: test3
30 |
--------------------------------------------------------------------------------
/tests/agents/crewai_agent/crew_dummy.py:
--------------------------------------------------------------------------------
1 | # Dummy class for testing crewai loader
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 |
class CrewOutput:
    """Minimal stand-in for crewai's CrewOutput: carries the raw reply text."""

    # Raw text produced by a Crew run (set by Crew.kickoff in this dummy).
    raw: str
7 |
8 |
class Crew:
    """Dummy Crew used to exercise the crewai loader without the real package."""

    # TODO: kickoff actually takes & returns a dict[str,str]
    # def kickoff(inputs: dict[str, str]) -> str:
    def kickoff(self, inputs: dict[str, str]) -> CrewOutput:
        """Log the call, then hand back a CrewOutput whose raw text is "OK"."""
        print("Running kickoff method")
        print(inputs)
        output = CrewOutput()
        output.raw = "OK"
        return output
18 |
19 |
class DummyCrew:
    """Factory wrapper whose dummy_crew() hands out a fresh Crew instance."""

    def dummy_crew(self) -> Crew:
        """Log and return a newly constructed Crew."""
        print("Getting a Crew to return")
        return Crew()
24 |
--------------------------------------------------------------------------------
/tests/yamls/agents/funnier_agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: colleague
5 | labels:
6 | app: funnier-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: beeai
10 | mode: local
11 | instructions: you are a joke telling agent
12 | description: this is a simple agent
13 |
14 | ---
15 | apiVersion: maestro/v1alpha1
16 | kind: Agent
17 | metadata:
18 | name: expert
19 | labels:
20 | app: funnier-example
21 | spec:
22 | model: meta-llama/llama-3-1-70b-instruct
23 | framework: beeai
24 | mode: local
25 | instructions: you are a joke telling agent
26 | description: this is a simple agent
27 |
--------------------------------------------------------------------------------
/tests/agents/scoring_agent/agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: simple_test
5 | labels:
6 | app: testapp
7 | spec:
8 | model: gpt-oss:latest
9 | description: Simple test agent
10 | instructions: Answer the question truthfully
11 | framework: openai
12 | mode: local
13 | ---
14 | apiVersion: maestro/v1alpha1
15 | kind: Agent
16 | metadata:
17 | name: score
18 | labels:
19 | app: test-example
20 | custom_agent: scoring_agent
21 | spec:
22 | model: qwen3
23 | framework: custom
24 | mode: remote
25 | description: gets score, relevance/hallucination using defined model
26 | instructions: evaluates the response using Opik
--------------------------------------------------------------------------------
/tests/agents/scoring_agent/workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: scoring_test
5 | labels:
6 | app: testapp
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: testapp
12 | agents:
13 | - simple_test
14 | - score
15 | prompt: Is the most well known "Paris" the city in the United States?
16 | steps:
17 | - name: answer
18 | agent: simple_test
19 | - name: scoring
20 | agent: score
21 | from: [prompt, answer] # → run's first arg = original prompt, second arg = answer's reply
22 | outputs:
23 | - answer # → re-emit the raw response downstream
--------------------------------------------------------------------------------
/tests/examples/condition_agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: expert
5 | labels:
6 | app: mas-example
7 | spec:
8 | model: "llama3.1:latest"
9 | description: expert
10 | instructions: Say a joke.
11 |
12 | ---
13 | apiVersion: maestro/v1alpha1
14 | kind: Agent
15 | metadata:
16 | name: colleague
17 | labels:
18 | app: mas-example
19 | spec:
20 | model: "llama3.1:latest"
21 | description: colleague
 22 | instructions: your colleague likes jokes. He says a joke. Put score for the joke between 1-10, 10 is the funniest. If the joke is not very funny, say the score and the joke otherwise say "Can you give me a funnier joke about the same subject".
23 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/simple_wow_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: wow workflow
5 | labels:
6 | app: example
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-wowt
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - wow_test1
16 | - wow_test2
17 | workflows:
18 | - name: test_workflow1
19 | url: "http://127.0.0.1:8003"
20 | prompt: WOW workflow prompt
21 | steps:
22 | - name: wow_step1
23 | agent: wow_test1
24 | - name: wow_step2
25 | workflow: test_workflow1
26 | - name: wow_step3
27 | agent: wow_test2
28 |
29 |
--------------------------------------------------------------------------------
/mcp/README.md:
--------------------------------------------------------------------------------
1 | # Quantum Function MCP
2 |
3 | This repository has MCP tools for the following Quantum Functions
4 |
5 | 1. Iskay Quantum Optimizer by Kipu Quantum
6 | 2. Optimization Solver by Q-CTRL
7 | 3. QURI Chemistry by QunaSys
8 | 4. Quantum portfolio Optimizer by Global Data Quantum
9 |
 10 | It is necessary to define the following environment variables to make the MCP tools execute the function.
 11 | These variables, defined in the local system, are transferred to the MCP tools.
12 |
 13 | 1. IQP_TOKEN: The token for the IQP account. If this is not defined, mock code is executed instead
14 | 2. IQP_CHANNEL: this is optional. default is "ibm_quantum"
15 | 3. IQP_INSTANCE: this is optional. default is "project-based/internal/functions"
16 |
17 |
--------------------------------------------------------------------------------
/tests/yamls/agents/simple_wow_agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: wow_test1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: beeai
10 | mode: local
11 | description: this is agent 1 in wow agent
12 | instructions: print("this is an agent 1 in wow agent")
13 |
14 | ---
15 | apiVersion: maestro/v1alpha1
16 | kind: Agent
17 | metadata:
18 | name: wow_test2
19 | labels:
20 | app: test-example
21 | spec:
22 | model: meta-llama/llama-3-1-70b-instruct
23 | framework: beeai
24 | mode: local
25 | description: this is an agent 2 in wow agent
26 | instructions: print("this is agent 2 in wow agent")
27 |
28 |
--------------------------------------------------------------------------------
/src/maestro/ui/eslint.config.js:
--------------------------------------------------------------------------------
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
import { globalIgnores } from 'eslint/config'

// Lint configuration for the UI: TypeScript + React hooks/refresh rules
// over all .ts/.tsx sources, with browser globals and build output ignored.
const tsSourcesConfig = {
  files: ['**/*.{ts,tsx}'],
  extends: [
    js.configs.recommended,
    tseslint.configs.recommended,
    reactHooks.configs['recommended-latest'],
    reactRefresh.configs.vite,
  ],
  languageOptions: {
    ecmaVersion: 2020,
    globals: globals.browser,
  },
}

export default tseslint.config([globalIgnores(['dist']), tsSourcesConfig])
24 |
--------------------------------------------------------------------------------
/src/maestro/ui/Dockerfile:
--------------------------------------------------------------------------------
# Multi-stage build: build static assets with Node, serve with NGINX

FROM node:20-alpine AS build
WORKDIR /app
COPY package*.json ./
RUN npm ci --no-audit --no-fund
COPY . .
RUN npm run build

FROM nginx:1.27-alpine AS runtime
COPY --from=build /app/dist /usr/share/nginx/html
# Basic security headers and SPA fallback.
# NOTE: $uri must be escaped as \$uri — the RUN shell form expands unescaped
# variables at build time, which would emit "try_files  /index.html;" and
# break the SPA fallback (nginx never sees the literal $uri).
RUN printf "server {\n listen 80;\n server_name _;\n add_header X-Content-Type-Options nosniff;\n add_header X-Frame-Options SAMEORIGIN;\n add_header X-XSS-Protection '1; mode=block';\n location / {\n try_files \$uri /index.html;\n }\n}\n" > /etc/nginx/conf.d/default.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
16 |
17 |
18 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/dry_run_inputs_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: simple workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - test1
16 | - test2
17 | - test3
18 | prompt: This is a test input
19 | steps:
20 | - name: step1
21 | agent: test1
22 | - name: step2
23 | from: instructions:step1 # For step2, pass in the instructions: field from the agent used in step1.
24 | agent: test2
25 | - name: step3
26 | agent: test3
27 |
--------------------------------------------------------------------------------
/tools/buildimg.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Build the maestro wheel and the maestro / maestro-cli container images.
# Override CONTAINER_CMD (default: docker) and GITHUB_ORG via environment.
set -euo pipefail

CONTAINER_CMD="${CONTAINER_CMD:=docker}"
GITHUB_ORG="${GITHUB_ORG:=ai4quantum}"

# extract version from pyproject.toml
PYPROJECT_TOML="pyproject.toml"
VERSION=$(grep -E '^(version|tool\.poetry\.version) *= *"[^"]+"' "$PYPROJECT_TOML" | head -n 1 | sed -E 's/.*"([^"]+)".*/\1/')
# Fail fast instead of tagging images as "maestro:" when no version is found.
if [ -z "$VERSION" ]; then
    echo "error: could not determine version from $PYPROJECT_TOML" >&2
    exit 1
fi
echo "$VERSION"

# build distribution
uv build

# build container images
$CONTAINER_CMD build -t "ghcr.io/$GITHUB_ORG/maestro:$VERSION" -f Dockerfile --build-arg MAESTRO_VERSION="$VERSION" --build-arg GITHUB_ORG="$GITHUB_ORG" .
$CONTAINER_CMD build -t "ghcr.io/$GITHUB_ORG/maestro-cli:$VERSION" -f Dockerfile-cli --build-arg MAESTRO_VERSION="$VERSION" --build-arg GITHUB_ORG="$GITHUB_ORG" .
17 |
--------------------------------------------------------------------------------
/tests/yamls/agents/simple_remote_agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: remote_test1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: beeai
10 | mode: local
11 | description: this is agent 1 in remote agent
12 | instructions: print("this is an agent 1 in remote agent")
13 |
14 | ---
15 | apiVersion: maestro/v1alpha1
16 | kind: Agent
17 | metadata:
18 | name: remote_test2
19 | labels:
20 | app: test-example
21 | spec:
22 | model: meta-llama/llama-3-1-70b-instruct
23 | framework: beeai
24 | mode: local
25 | description: this is an agent 2 in remote agent
26 | instructions: print("this is agent 2 in remote agent")
27 |
28 |
--------------------------------------------------------------------------------
/tests/yamls/agents/code_exception_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: code
5 | labels:
6 | app: slack-example
7 | spec:
8 | framework: code
9 | mode: local
 10 | description: code agent
11 | instructions: execute python code
12 | code: |
13 | print(input[0])
14 | input = "Hi!"
15 | oops
16 |
17 | ---
18 |
19 | apiVersion: maestro/v1alpha1
20 | kind: Agent
21 | metadata:
22 | name: test1
23 | labels:
24 | app: test-example
25 | spec:
26 | model: meta-llama/llama-3-1-70b-instruct
27 | framework: beeai
28 | mode: local
29 | description: this is a test
30 | tools:
31 | - code_interpreter
32 | - test
33 | instructions: print("this is a test.")
34 |
35 |
36 |
--------------------------------------------------------------------------------
/operator/PROJECT:
--------------------------------------------------------------------------------
1 | # Code generated by tool. DO NOT EDIT.
2 | # This file is used to track the info used to scaffold your project
3 | # and allow the plugins properly work.
4 | # More info: https://book.kubebuilder.io/reference/project-config.html
5 | domain: ai4quantum.com
6 | layout:
7 | - go.kubebuilder.io/v4
8 | plugins:
9 | manifests.sdk.operatorframework.io/v2: {}
10 | scorecard.sdk.operatorframework.io/v2: {}
11 | projectName: operator
12 | repo: github.com/ai4quantum/maestro
13 | resources:
14 | - api:
15 | crdVersion: v1
16 | namespaced: true
17 | controller: true
18 | domain: ai4quantum.com
19 | group: maestro
20 | kind: WorkflowRun
21 | path: github.com/ai4quantum/maestro/api/v1alpha1
22 | version: v1alpha1
23 | version: "3"
24 |
--------------------------------------------------------------------------------
/src/maestro/ui/tsconfig.node.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
4 | "target": "ES2023",
5 | "lib": ["ES2023"],
6 | "module": "ESNext",
7 | "skipLibCheck": true,
8 |
9 | /* Bundler mode */
10 | "moduleResolution": "bundler",
11 | "allowImportingTsExtensions": true,
12 | "verbatimModuleSyntax": true,
13 | "moduleDetection": "force",
14 | "noEmit": true,
15 |
16 | /* Linting */
17 | "strict": true,
18 | "noUnusedLocals": true,
19 | "noUnusedParameters": true,
20 | "erasableSyntaxOnly": true,
21 | "noFallthroughCasesInSwitch": true,
22 | "noUncheckedSideEffectImports": true
23 | },
24 | "include": ["vite.config.ts"]
25 | }
26 |
--------------------------------------------------------------------------------
/tests/yamls/agents/dry_run_loop_list_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: generate1-10
5 | labels:
6 | app: test-example
7 | spec:
8 | model: "llama3.1:latest"
9 | framework: bee
10 | description:
11 | tools:
12 | - code_interpreter
13 | - test
14 | instructions: |
15 | import random
16 | input = "[This,is,a,test,for,loop]"
17 |
18 | ---
19 |
20 | apiVersion: maestro/v1alpha1
21 | kind: Agent
22 | metadata:
23 | name: countdown
24 | labels:
25 | app: test-example
26 | spec:
27 | model: "llama3.1:latest"
28 | framework: bee
29 | description: this is a test
30 | tools:
31 | - code_interpreter
32 | - test
33 | instructions: |
34 | print(input)
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/.github/workflows/maestro_demo-tests.yaml:
--------------------------------------------------------------------------------
1 | name: Demo Tests
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | demo-tests:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | - name: Set up Python
15 | uses: actions/setup-python@v5
16 | with:
17 | python-version: '3.12'
18 | - name: Install uv and activate the environment
19 | uses: astral-sh/setup-uv@v6
20 | with:
21 | activate-environment: true
22 | - name: Install dependencies
23 | run: |
24 | uv sync
25 | - name: Run demo tests
26 | run: |
27 | export PYTHONPATH=$PYTHONPATH:$(pwd)/src
28 | uv run pytest tests/integration
29 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/parallel_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: parallel workflow
5 | labels:
6 | app: input
7 | spec:
8 | template:
9 | metadata:
10 | name: input_workflow
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - test1
16 | - test2
17 | - test3
18 | - test4
19 | - test5
20 | prompt: Select a number 1 through 6
21 | exception:
22 | name: step4
23 | agent: test4
24 | steps:
25 | - name: list
26 | agent: test1
27 | - name: parallel
28 | parallel:
29 | - test2
30 | - test3
31 | - test4
32 | - name: result
33 | agent: test5
34 |
--------------------------------------------------------------------------------
/.github/workflows/maestro_check-mermaid.yaml:
--------------------------------------------------------------------------------
1 | name: Check Mermaid
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | check-mermaid:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | - name: Set up Python
15 | uses: actions/setup-python@v5
16 | with:
17 | python-version: '3.12'
18 | - name: Install uv and activate the environment
19 | uses: astral-sh/setup-uv@v6
20 | with:
21 | activate-environment: true
22 | - name: Install dependencies
23 | run: |
24 | uv sync
25 | - name: Check mermaid
26 | run: |
27 | export PYTHONPATH=$PYTHONPATH:$(pwd)/src
28 | uv run tools/check-mermaid.sh
29 |
--------------------------------------------------------------------------------
/.github/workflows/maestro_check-schemas.yaml:
--------------------------------------------------------------------------------
1 | name: Check Schemas
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | check-schemas:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | - name: Set up Python
15 | uses: actions/setup-python@v5
16 | with:
17 | python-version: '3.12'
18 | - name: Install uv and activate the environment
19 | uses: astral-sh/setup-uv@v6
20 | with:
21 | activate-environment: true
22 | - name: Install dependencies
23 | run: |
24 | uv sync
25 | - name: Check schemas
26 | run: |
27 | export PYTHONPATH=$PYTHONPATH:$(pwd)/src
28 | uv run tools/check-schemas.sh
29 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # Use tools/buildimg.sh to build this image
3 | #
4 | FROM python:3.12-slim
5 | WORKDIR /usr/src/app
6 |
7 | ARG MAESTRO_VERSION="0.9.0"
8 |
9 | # set environment variables
10 | ENV PYTHONDONTWRITEBYTECODE=1
11 | ENV PYTHONUNBUFFERED=1
12 | ENV HOME=/usr/src/app
13 |
14 | # Install pip and dependencies
15 | RUN pip install --upgrade pip
16 |
17 | # Install dependencies
18 | COPY dist/maestro-${MAESTRO_VERSION}-py3-none-any.whl maestro-${MAESTRO_VERSION}-py3-none-any.whl
19 | RUN pip install maestro-${MAESTRO_VERSION}-py3-none-any.whl
20 | RUN rm maestro-${MAESTRO_VERSION}-py3-none-any.whl
21 |
22 | RUN chown -R 1000:100 /usr/src/app &&\
23 | mkdir -p /usr/src/app/src/media && chown 1000:100 /usr/src/app/src/media
24 |
25 | EXPOSE 5000
26 | USER 1000:100
27 |
--------------------------------------------------------------------------------
/tests/yamls/agents/loop_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: generate1-10
5 | labels:
6 | app: test-example
7 | spec:
8 | model: "llama3.1:latest"
9 | description:
10 | tools:
11 | - code_interpreter
12 | - test
 13 | instructions: generate a number between 1 and 10 and just output the number
14 |
15 | ---
16 |
17 | apiVersion: maestro/v1alpha1
18 | kind: Agent
19 | metadata:
20 | name: countdown
21 | labels:
22 | app: test-example
23 | spec:
24 | model: "llama3.1:latest"
25 | description: this is a test
26 | tools:
27 | - code_interpreter
28 | - test
 29 | instructions: you get a number. Decrease the number by 1 and if the number becomes 0, output "happy" otherwise output the new number.
30 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/context_test_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: context-routing-demo
5 | labels:
6 | app: context-routing
7 | use-case: demo
8 | spec:
9 | template:
10 | metadata:
11 | name: context routing demonstration
12 | labels:
13 | app: context routing
14 | agents:
15 | - Recipe Agent
16 | - Recipe Time Agent
17 | - Recipe Cost Agent
18 | prompt: "chicken, rice, tomato, onion, garlic, salt, pepper"
19 | steps:
20 | - name: get_recipe
21 | agent: Recipe Agent
22 | - name: get_recipe_time
23 | agent: Recipe Time Agent
24 | - name: get_recipe_cost
25 | agent: Recipe Cost Agent
26 | from: Recipe Agent
27 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/input_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: input workflow
5 | labels:
6 | app: input
7 | spec:
8 | template:
9 | metadata:
10 | name: input_workflow
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - test1
16 | - test2
17 | - test3
18 | prompt: Select a number 1 through 6
19 | exception:
20 | name: step4
21 | agent: test3
22 | steps:
23 | - name: select
24 | agent: test1
25 | - name: input
26 | input:
27 | prompt: "Guess the number 1 through 6: "
28 | template: Is the number {prompt} is same as {response}?
29 | - name: compare
30 | agent: test2
31 |
--------------------------------------------------------------------------------
/operator/config/rbac/leader_election_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions to do leader election.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: leader-election-role
9 | rules:
10 | - apiGroups:
11 | - ""
12 | resources:
13 | - configmaps
14 | verbs:
15 | - get
16 | - list
17 | - watch
18 | - create
19 | - update
20 | - patch
21 | - delete
22 | - apiGroups:
23 | - coordination.k8s.io
24 | resources:
25 | - leases
26 | verbs:
27 | - get
28 | - list
29 | - watch
30 | - create
31 | - update
32 | - patch
33 | - delete
34 | - apiGroups:
35 | - ""
36 | resources:
37 | - events
38 | verbs:
39 | - create
40 | - patch
41 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/funnier_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: funnier workflow
5 | labels:
6 | app: mas-example
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | app: mas-example
12 | agents:
13 | - expert
14 | - colleague
15 | prompt: Tell me a joke about IBM
16 | start: expert
17 | steps:
18 | - name: expert
19 | agent: expert
20 | condition:
21 | - case: (input.find('funnier') != -1)
22 | do: expert
23 | default: colleague
24 | - name: colleague
25 | agent: colleague
26 | condition:
27 | - if: (input.find('funnier') != -1)
28 | then: expert
29 | else: end
30 | - name: end
31 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/scoring_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: evaluate workflow
5 | labels:
6 | app: example
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | prompt: What is the capital of the United States?
12 | steps:
13 | - name: dummy
14 | agent: test1
15 | - name: dummy2
16 | agent: test2
17 | - name: scoring
18 | agent: evaluate
19 | from: [prompt, dummy2] # → run's first arg = original prompt, second arg = dummy's reply
20 | context:
21 | - "Washington, D.C. is the capital of the United States." # → context for evaluation
22 | outputs:
 23 | - answer # → re-emit the raw response downstream
24 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/evaluation_test_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: evaluation-test-workflow
5 | labels:
6 | app: evaluation-test
7 | spec:
8 | template:
9 | metadata:
10 | name: evaluation-middleware-test
11 | labels:
12 | app: evaluation-test
13 | use-case: test
14 | agents:
15 | - evaluation-test-agent
16 | prompt: "What is machine learning and how does it work?"
17 | context: "Machine learning is a subset of artificial intelligence that enables computers to learn and make decisions from data without being explicitly programmed. It uses algorithms to identify patterns in data and make predictions or classifications."
18 | steps:
19 | - name: test-evaluation
20 | agent: evaluation-test-agent
21 |
--------------------------------------------------------------------------------
/tests/utils/mcpclient.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from mcp import ClientSession
3 | from mcp.client.streamable_http import streamablehttp_client
4 |
5 |
async def main():
    """Connect to the local streamable-HTTP MCP server and print its tool names."""
    # Connect to a streamable HTTP server; the context manager yields the
    # read/write streams plus a third value we don't need here.
    async with streamablehttp_client("http://127.0.0.1:30051/mcp") as streams:
        reader, writer, _ = streams
        # Create a session over the client streams.
        async with ClientSession(reader, writer) as session:
            # Handshake must complete before any requests are issued.
            await session.initialize()
            # List available tools
            listing = await session.list_tools()
            print(f"Available tools: {[tool.name for tool in listing.tools]}")


if __name__ == "__main__":
    asyncio.run(main())
24 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/conditional_if_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: conditional if workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - agent1
16 | - agent2
17 | - agent3
18 | prompt: This is a prompt
19 | start: step1
20 | exception:
21 | name: step3
22 | agent: agent3
23 | steps:
24 | - name: step1
25 | agent: agent1
26 | condition:
27 | - if: (input.some_condition == True)
28 | then: step2
29 | else: step3
30 | - name: step2
31 | agent: agent2
32 | - name: step3
33 | agent: agent3
34 |
--------------------------------------------------------------------------------
/src/maestro/ui/tsconfig.app.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
4 | "target": "ES2022",
5 | "useDefineForClassFields": true,
6 | "lib": ["ES2022", "DOM", "DOM.Iterable"],
7 | "module": "ESNext",
8 | "skipLibCheck": true,
9 |
10 | /* Bundler mode */
11 | "moduleResolution": "bundler",
12 | "allowImportingTsExtensions": true,
13 | "verbatimModuleSyntax": true,
14 | "moduleDetection": "force",
15 | "noEmit": true,
16 | "jsx": "react-jsx",
17 |
18 | /* Linting */
19 | "strict": true,
20 | "noUnusedLocals": true,
21 | "noUnusedParameters": true,
22 | "erasableSyntaxOnly": true,
23 | "noFallthroughCasesInSwitch": true,
24 | "noUncheckedSideEffectImports": true
25 | },
26 | "include": ["src"]
27 | }
28 |
--------------------------------------------------------------------------------
/tests/yamls/agents/evaluation_test_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: evaluation-test-agent
5 | labels:
6 | app: evaluation-test
7 | spec:
8 | model: "test-model"
9 | framework: mock
10 | mode: local
11 | description: "Simple mock agent for testing automatic evaluation middleware"
12 | instructions: |
13 | input = "Machine learning is a branch of artificial intelligence that enables computers to learn patterns from data without explicit programming. It works by using algorithms to analyze large datasets, identify patterns, and make predictions or decisions. The key components include training data, algorithms like neural networks or decision trees, and model evaluation. Applications range from image recognition to natural language processing, making it a fundamental technology in modern AI systems."
14 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/simple_cron_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: simple cron workflow
5 | labels:
6 | app: cron-example
7 | spec:
8 | template:
9 | metadata:
10 | name: cron-example
11 | labels:
12 | app: cron
13 | use-case: test
14 | event:
15 | cron: "10 14 * * 1"
16 | name: cron event
17 | agent: test4
18 | exit: ( "test" in input )
19 | agents:
20 | - test1
21 | - test2
22 | - test3
23 | - test4
24 | prompt: This is a test input
25 | exception:
26 | name: step2
27 | agent: test2
28 | steps:
29 | - name: step1
30 | agent: test2
31 | - name: step2
32 | agent: test2
33 | - name: step3
34 | agent: test3
35 | - name: step4
36 | agent: test1
37 |
--------------------------------------------------------------------------------
/tests/yamls/agents/simple_containered_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: containeredagnet1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: container
10 | mode: remote
11 | image: "localhost/container-agent:latest"
12 | description: this is a test
13 | tools:
14 | - code_interpreter
15 | - test
16 | instructions: print("this is a test.")
17 |
18 | ---
19 |
20 | apiVersion: maestro/v1alpha1
21 | kind: Agent
22 | metadata:
23 | name: containeredagnet2
24 | labels:
25 | app: test-example
26 | spec:
27 | model: meta-llama/llama-3-1-70b-instruct
28 | framework: container
29 | mode: remote
30 | image: "localhost/container-agent:latest"
31 | description: this is a test
32 | tools:
33 | - code_interpreter
34 | - test
35 | instructions: print("this is a test.")
36 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/simple_cron_many_steps_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: simple cron workflow (many steps)
5 | labels:
6 | app: cron-example
7 | spec:
8 | template:
9 | metadata:
10 | name: cron-example
11 | labels:
12 | app: cron
13 | use-case: test
14 | event:
15 | cron: "* * * * *"
16 | name: multi step cron
17 | steps:
18 | - step2
19 | - step1
20 | exit: (input.get("final_prompt").find("This is a test input") != -1)
21 | agents:
22 | - test5
23 | - test2
24 | - test3
25 | prompt: This is a test input
26 | exception:
27 | name: step2
28 | agent: test2
29 | steps:
30 | - name: step2
31 | agent: test2
32 | - name: step1
33 | agent: test5
34 | - name: step3
35 | agent: test3
--------------------------------------------------------------------------------
/tests/yamls/workflows/conditional_case_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: conditional case workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - agent1
16 | - agent2
17 | - agent3
18 | prompt: This is a prompt
19 | start: step1
20 | exception:
21 | name: step3
22 | agent: agent3
23 | steps:
24 | - name: step1
25 | agent: agent1
26 | - name: step2
27 | agent: agent2
28 | - name: step3
29 | agent: agent3
30 | condition:
31 | - case: (input.some_condition == True)
32 | do: step2
33 | - case: (input.some_condition == False)
34 | do: step3
35 | - default: step3
--------------------------------------------------------------------------------
/tests/yamls/tools/maestro_knowlege_mcp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: MCPTool
3 | metadata:
4 | name: maestro-k
5 | namespace: default
6 | spec:
7 | image: ghcr.io/akihikokuroda/maestro-knowledge:latest
8 | transport: streamable-http
9 | targetPort: 8030
10 | permissionProfile:
11 | type: builtin
12 | name: network
13 | resources:
14 | limits:
15 | cpu: '100m'
16 | memory: '128Mi'
17 | requests:
18 | cpu: '50m'
19 | memory: '64Mi'
20 | podTemplateSpec:
21 | spec:
22 | volumes:
23 | - name: cache
24 | emptyDir: {}
25 | containers:
26 | - name: mcp
27 | volumeMounts:
28 | - mountPath: /app/cache
29 | name: cache
30 | securityContext:
31 | allowPrivilegeEscalation: true
32 | readOnlyRootFilesystem: false
33 | runAsNonRoot: true
34 |
35 |
--------------------------------------------------------------------------------
/.github/workflows/maestro_test-workflow.yaml:
--------------------------------------------------------------------------------
1 | name: Test Workflow with Pip Install
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | test:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | with:
15 | repository: AI4quantum/maestro-demos
16 | - name: Set up Python
17 | uses: actions/setup-python@v5
18 | with:
19 | python-version: "3.12"
20 | - name: Install Maestro
21 | run: pip install git+https://github.com/${{ github.repository }}.git@${{ github.ref }}
22 | - name: Create Agent
23 | run: maestro create workflows/simple/agents.yaml
24 | - name: Run Workflow
25 | run: maestro run workflows/simple/workflow.yaml
26 | env:
27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
28 | GITHUB_USER: ${{ github.actor }}
29 |
--------------------------------------------------------------------------------
/tests/yamls/agents/dry_run_loop_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: generate1-10
5 | labels:
6 | app: test-example
7 | spec:
8 | model: "llama3.1:latest"
9 | framework: beeai
10 | description:
11 | tools:
12 | - code_interpreter
13 | - test
14 | instructions: |
15 | import random
16 | input = random.randint(1, 10)
17 |
18 | ---
19 |
20 | apiVersion: maestro/v1alpha1
21 | kind: Agent
22 | metadata:
23 | name: countdown
24 | labels:
25 | app: test-example
26 | spec:
27 | model: "llama3.1:latest"
28 | framework: beeai
29 | description: this is a test
30 | tools:
31 | - code_interpreter
32 | - test
33 | instructions: |
34 | input = int(input)
35 | input = input-1
36 | if input == 0:
37 | input = "happy"
38 | else:
39 | input = str(input)
40 | print(type(input))
41 | print(input)
42 |
43 |
44 |
--------------------------------------------------------------------------------
/tests/yamls/tools/mcp_tool.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: MCPTool
3 | metadata:
4 | name: osv
5 | namespace: default
6 | spec:
7 | image: ghcr.io/stackloklabs/osv-mcp/server
8 | transport: sse
9 | port: 30051
10 | permissionProfile:
11 | type: builtin
12 | name: network
13 | resources:
14 | limits:
15 | cpu: '100m'
16 | memory: '128Mi'
17 | requests:
18 | cpu: '50m'
19 | memory: '64Mi'
20 |
21 | ---
22 |
23 | apiVersion: maestro/v1alpha1
24 | kind: MCPTool
25 | metadata:
26 | name: fetch
27 | namespace: default
28 | spec:
29 | image: ghcr.io/stackloklabs/gofetch/server:latest
30 | transport: streamable-http
31 | port: 30051
32 | permissionProfile:
33 | type: builtin
34 | name: network
35 | resources:
36 | limits:
37 | cpu: '100m'
38 | memory: '128Mi'
39 | requests:
40 | cpu: '50m'
41 | memory: '64Mi'
42 |
43 |
--------------------------------------------------------------------------------
/operator/config/network-policy/allow-metrics-traffic.yaml:
--------------------------------------------------------------------------------
1 | # This NetworkPolicy allows ingress traffic
2 | # from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those
3 | # namespaces are able to gather data from the metrics endpoint.
4 | apiVersion: networking.k8s.io/v1
5 | kind: NetworkPolicy
6 | metadata:
7 | labels:
8 | app.kubernetes.io/name: operator
9 | app.kubernetes.io/managed-by: kustomize
10 | name: allow-metrics-traffic
11 | namespace: system
12 | spec:
13 | podSelector:
14 | matchLabels:
15 | control-plane: controller-manager
16 | policyTypes:
17 | - Ingress
18 | ingress:
19 | # This allows ingress traffic from any namespace with the label metrics: enabled
20 | - from:
21 | - namespaceSelector:
22 | matchLabels:
23 | metrics: enabled # Only from namespaces with this label
24 | ports:
25 | - port: 8443
26 | protocol: TCP
27 |
--------------------------------------------------------------------------------
/tests/examples/test_condition.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 |
7 | import os
8 | import dotenv
9 | import yaml
10 | import asyncio
11 |
12 |
13 | from maestro.workflow import Workflow
14 |
15 | dotenv.load_dotenv()
16 |
17 |
def parse_yaml(file_path):
    """Read *file_path* and return all YAML documents it contains as a list."""
    with open(file_path, "r") as file:
        return list(yaml.safe_load_all(file))
22 |
23 |
if __name__ == "__main__":
    # Resolve the example YAML files relative to this script's directory.
    here = os.path.dirname(__file__)
    agents_yaml = parse_yaml(os.path.join(here, "condition_agents.yaml"))
    workflow_yaml = parse_yaml(os.path.join(here, "condition_workflow.yaml"))
    try:
        workflow = Workflow(agents_yaml, workflow_yaml[0])
    except Exception as excep:
        raise RuntimeError("Unable to create agents") from excep
    asyncio.run(workflow.run())
36 |
--------------------------------------------------------------------------------
/operator/test/e2e/e2e_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package e2e
18 |
19 | import (
20 | "fmt"
21 | "testing"
22 |
23 | . "github.com/onsi/ginkgo/v2"
24 | . "github.com/onsi/gomega"
25 | )
26 |
27 | // Run e2e tests using the Ginkgo runner.
28 | func TestE2E(t *testing.T) {
29 | RegisterFailHandler(Fail)
30 | _, _ = fmt.Fprintf(GinkgoWriter, "Starting operator suite\n")
31 | RunSpecs(t, "e2e suite")
32 | }
33 |
--------------------------------------------------------------------------------
/tests/examples/test_parallel.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import dotenv
8 | import yaml
9 | import asyncio
10 |
11 |
12 | from maestro.workflow import Workflow
13 |
14 | dotenv.load_dotenv()
15 |
16 |
def parse_yaml(file_path):
    """Load every YAML document found in *file_path* and return them as a list."""
    with open(file_path, "r") as stream:
        documents = list(yaml.safe_load_all(stream))
    return documents
21 |
22 |
if __name__ == "__main__":
    # Test fixtures live under tests/yamls, relative to this script.
    base_dir = os.path.dirname(__file__)
    agents_yaml = parse_yaml(
        os.path.join(base_dir, "../yamls/agents/simple_agent.yaml")
    )
    workflow_yaml = parse_yaml(
        os.path.join(base_dir, "../yamls/workflows/parallel_workflow.yaml")
    )
    try:
        workflow = Workflow(agents_yaml, workflow_yaml[0])
    except Exception as excep:
        raise RuntimeError("Unable to create agents") from excep
    asyncio.run(workflow.run())
37 |
--------------------------------------------------------------------------------
/src/maestro/ui/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "maestro-ui",
3 | "private": true,
4 | "version": "0.0.0",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "tsc -b && vite build",
9 | "lint": "eslint .",
10 | "preview": "vite preview"
11 | },
12 | "dependencies": {
13 | "event-source-polyfill": "^1.0.31",
14 | "mermaid": "^11.11.0",
15 | "react": "^19.1.1",
16 | "react-dom": "^19.1.1",
17 | "react-markdown": "^10.1.0",
18 | "vite-plugin-top-level-await": "^1.6.0",
19 | "vite-plugin-wasm": "^3.5.0"
20 | },
21 | "devDependencies": {
22 | "@eslint/js": "^9.33.0",
23 | "@types/react": "^19.1.10",
24 | "@types/react-dom": "^19.1.7",
25 | "@vitejs/plugin-react": "^5.0.0",
26 | "eslint": "^9.33.0",
27 | "eslint-plugin-react-hooks": "^5.2.0",
28 | "eslint-plugin-react-refresh": "^0.4.20",
29 | "globals": "^16.3.0",
30 | "typescript": "~5.8.3",
31 | "typescript-eslint": "^8.39.1",
32 | "vite": "^7.1.2"
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/tests/yamls/workflows/conditional_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1
2 | kind: Workflow
3 | metadata:
4 | name: conditional workflow
5 | labels:
6 | app: example2
7 | spec:
8 | template:
9 | metadata:
10 | name: maestro-deployment
11 | labels:
12 | app: example
13 | use-case: test
14 | agents:
15 | - agent1
16 | - agent2
17 | - agent3
18 | prompt: This is a prompt
19 | start: step1
20 | exception:
21 | name: step3
22 | agent: agent3
23 | steps:
24 | - name: step1
25 | agent: agent1
26 | condition:
27 | - if: (in.some_condition == True)
28 | then: step2
29 | else: step3
30 | - name: step2
31 | agent: agent2
32 | - name: step3
33 | agent: agent3
34 | condition:
35 | - case: (in.some_condition == True)
36 | do: step2
37 | - case: (in.some_condition == False)
38 | do: step3
39 | - default: step3
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | # Maintainers
2 |
3 | A repository maintainer is a committer with the additional privilege of merging pull requests into the main branch of this repository.
4 |
5 | ## Current Maintainers
6 |
7 | Maintainers are listed in alphabetical order by last name.
8 |
9 | | Name | GitHub Username |
10 | | ---- | ---- |
11 | | Va Barbosa | [vabarbosa](https://github.com/vabarbosa) |
12 | | Angelo Danducci | [AngeloDanducci](https://github.com/AngeloDanducci) |
13 | | Ismael Faro | [ismaelfaro](https://github.com/ismaelfaro) |
14 | | Nigel Jones | [planetf1](https://github.com/planetf1) |
15 | | Akihiko Kuroda | [akihikokuroda](https://github.com/akihikokuroda) |
16 | | George Liu | [george-lhj](https://github.com/george-lhj) |
17 | | Michael Maximilien | [maximilien](https://github.com/maximilien) |
18 | | Paul Schweigert | [psschwei](https://github.com/psschwei) |
19 |
20 | For more details on the process of becoming a maintainer, see the [Governance](https://github.com/AI4quantum/community/blob/main/GOVERNANCE.md) document.
21 |
--------------------------------------------------------------------------------
/tests/examples/code_agent_with_dependencies_requirements.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: web-scraper-agent
5 | dependencies: tests/examples/testrequirements.txt
6 | spec:
7 | framework: code
8 | description: A code agent that fetches and parses web content
9 | code: |
10 | import requests
11 | from bs4 import BeautifulSoup
12 |
13 | # Simple function that uses the installed dependencies
14 | def fetch_webpage_title(url):
15 | try:
16 | response = requests.get(url)
17 | response.raise_for_status()
18 | soup = BeautifulSoup(response.text, 'html.parser')
19 | title = soup.title.string if soup.title else "No title found"
20 | return title
21 | except Exception as e:
22 | return f"Error: {str(e)}"
23 |
24 | # Get a webpage title
25 | url = input[0] if input and len(input) > 0 else "https://www.example.com"
26 | title = fetch_webpage_title(url)
27 | output = title
28 |
--------------------------------------------------------------------------------
/mcp/examples/slack/README.md:
--------------------------------------------------------------------------------
1 | # Slack post message MCP tool
2 |
3 | This MCP tool has one function that posts a message to the specified slack channel.
4 |
5 | ```python
6 | async def post_slack_message(message: str, channel: str) -> str:
7 | """Post a slack message to channel.
8 |
9 | Args:
10 |         message: message posted to the channel
11 | channel: channel ID of the message posted
12 | """
13 | ```
14 |
15 | ## Environment variables
16 |
17 | * SLACK_BOT_TOKEN: This is slack access token (required)
18 | * MCP_SLACK_PORT: MCP server port (optional, default 30055)
19 | * OPENAI_API_KEY: for OpenAI framework
20 | * OPENAI_BASE_URL: for OpenAI framework
21 |
22 | ## Run example
23 |
24 | start mcp server
25 | ```cmd
26 | python slack_mcp.py
27 | ```
28 |
29 | Register mcp server
30 | ```cmd
31 | maestro create tools.yaml
32 | ```
33 |
34 | Update workflow
35 | Update the channel id in the prompt string in the workflow.yaml file
36 |
37 | Run a workflow
38 | ```
39 | maestro run agent.yaml workflow.yaml
40 | ```
41 |
--------------------------------------------------------------------------------
/tests/examples/code_agent_with_dependencies.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: web-scraper-agent
5 | dependencies: |
6 | requests==2.31.0
7 | beautifulsoup4==4.12.2
8 | spec:
9 | framework: code
10 | description: A code agent that fetches and parses web content
11 | code: |
12 | import requests
13 | from bs4 import BeautifulSoup
14 |
15 | # Simple function that uses the installed dependencies
16 | def fetch_webpage_title(url):
17 | try:
18 | response = requests.get(url)
19 | response.raise_for_status()
20 | soup = BeautifulSoup(response.text, 'html.parser')
21 | title = soup.title.string if soup.title else "No title found"
22 | return title
23 | except Exception as e:
24 | return f"Error: {str(e)}"
25 |
26 | # Get a webpage title
27 | url = input[0] if input and len(input) > 0 else "https://www.example.com"
28 | title = fetch_webpage_title(url)
29 | output = title
30 |
--------------------------------------------------------------------------------
/operator/config/rbac/role.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: manager-role
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - configmaps
11 | - services
12 | verbs:
13 | - create
14 | - delete
15 | - get
16 | - list
17 | - patch
18 | - update
19 | - watch
20 | - apiGroups:
21 | - apps
22 | resources:
23 | - deployments
24 | verbs:
25 | - create
26 | - delete
27 | - get
28 | - list
29 | - patch
30 | - update
31 | - watch
32 | - apiGroups:
33 | - maestro.ai4quantum.com
34 | resources:
35 | - agents
36 | - workflowruns
37 | - workflows
38 | verbs:
39 | - create
40 | - delete
41 | - get
42 | - list
43 | - patch
44 | - update
45 | - watch
46 | - apiGroups:
47 | - maestro.ai4quantum.com
48 | resources:
49 | - workflowruns/finalizers
50 | verbs:
51 | - update
52 | - apiGroups:
53 | - maestro.ai4quantum.com
54 | resources:
55 | - workflowruns/status
56 | verbs:
57 | - get
58 | - patch
59 | - update
60 |
--------------------------------------------------------------------------------
/operator/config/crd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # This kustomization.yaml is not intended to be run by itself,
2 | # since it depends on service name and namespace that are out of this kustomize package.
3 | # It should be run by config/default
4 | resources:
5 | - bases/maestro.ai4quantum.com_workflowruns.yaml
6 | # +kubebuilder:scaffold:crdkustomizeresource
7 |
8 | patches:
9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
10 | # patches here are for enabling the conversion webhook for each CRD
11 | # +kubebuilder:scaffold:crdkustomizewebhookpatch
12 |
13 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
14 | # patches here are for enabling the CA injection for each CRD
15 | #- path: patches/cainjection_in_workflowruns.yaml
16 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch
17 |
18 | # [WEBHOOK] To enable webhook, uncomment the following section
19 | # the following config is for teaching kustomize how to do kustomization for CRDs.
20 |
21 | #configurations:
22 | #- kustomizeconfig.yaml
23 |
--------------------------------------------------------------------------------
/src/maestro/agents/meta_agent/workflow_workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: meta_agent_workflow
5 | labels:
6 | project: maestro-demo
7 | spec:
8 | template:
9 | metadata:
10 | name: meta_agent_workflow
11 | labels:
12 | project: maestro-demo
13 | agents:
14 | - NLP Agent Planner V2
15 | - Format Workflow Agent V2
16 | - Workflow V2
17 | - markdown formatter
18 | - tagger agent
19 | prompt: I want to compare the current weather with the historical averages. To do this, I probably will need 2 agents, one to retrieve the weather and one to compare to the historical average.
20 | steps:
21 | - name: English Instructions to Prompt
22 | agent: NLP Agent Planner V2
23 | - name: Specify Agents for Workflow Generation
24 | agent: Format Workflow Agent V2
25 | - name: Creating Workflow YAML file
26 | agent: Workflow V2
27 | - name: Readable output
28 | agent: markdown formatter
29 | - name: add tags
30 | agent: tagger agent
--------------------------------------------------------------------------------
/src/maestro/agents/meta_agent/workflow_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | name: meta_agent_workflow
5 | labels:
6 | project: maestro-demo
7 | spec:
8 | template:
9 | metadata:
10 | name: meta_agent_workflow
11 | labels:
12 | project: maestro-demo
13 | agents:
14 | - NLP Agent Planner V2
15 | - Format Input Agent V2
16 | - Create Agent YAML V2
17 | - markdown formatter
18 | - tagger agent
19 | prompt: I want to compare the current weather with the historical averages. To do this, I probably will need 2 agents, one to retrieve the weather and one to compare to the historical average.
20 | steps:
21 | - name: English Instructions to Prompt
22 | agent: NLP Agent Planner V2
23 | - name: Specify Agents for Agent Generation
24 | agent: Format Input Agent V2
25 | - name: Creating Agent YAML Workflow
26 | agent: Create Agent YAML V2
27 | - name: Readable output
28 | agent: markdown formatter
29 | - name: add tags
30 | agent: tagger agent
--------------------------------------------------------------------------------
/mcp/mcptools/slack_mcp.py:
--------------------------------------------------------------------------------
1 | from fastmcp import FastMCP
2 | import os
3 | import asyncio
4 | from slack_sdk import WebClient
5 |
6 | mcp = FastMCP("slack")
7 |
8 |
@mcp.tool()
async def post_slack_message(message: str, channel: str) -> str:
    """Post a slack message to channel.

    Args:
        message: message posted to the channel
        channel: channel ID of the message posted

    Returns:
        A human-readable status string describing success or failure.
    """
    slack_token = os.environ.get("SLACK_BOT_TOKEN")
    if not slack_token:
        # Return the error text instead of a bare `return`: the function is
        # declared `-> str`, and the MCP caller gets the status as the result.
        status = "Error: SLACK_BOT_TOKEN environment variable not set."
        print(status)
        return status
    client = WebClient(token=slack_token)
    try:
        result = client.chat_postMessage(channel=channel, text=message)
        status = f"Message posted to channel {channel}: {result['ts']}"
        print(status)
        return status
    except Exception as e:
        # Best-effort: report the failure as the tool result rather than raising.
        status = f"Error posting message: {e}"
        print(status)
        return status
27 |
28 |
def main():
    """Start the Slack MCP server over HTTP on MCP_SLACK_PORT (default 30055)."""
    listen_port = int(os.getenv("MCP_SLACK_PORT", 30055))
    asyncio.run(mcp.run(transport="http", host="0.0.0.0", port=listen_port))


if __name__ == "__main__":
    main()
36 |
--------------------------------------------------------------------------------
/tests/yamls/tools/duckduckgo_tools.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Tool
3 | metadata:
4 | name: DuckDuckGo
5 | labels:
6 | app: tool-example
7 | spec:
8 | description: Search for online trends, news, current events, real-time information, or research topics.
9 | inputSchema:
10 | type: jsonSchema
11 | schema: |
12 | {
13 | "title": "DuckDuckGoSearchToolInput",
14 | "type": "object",
15 | "properties": {
16 | "query": {
17 | "type": "string",
18 | "description": "The search query."
19 | }
20 | },
21 | "required": ["query"]
22 | }
23 | outputSchema:
24 | type: jsonSchema
25 | schema: |
26 | {
27 | "title": "SearchToolResult",
28 | "type": "object",
29 | "properties": {
30 | "title": {
31 | "type": "string"
32 | },
33 | "description": {
34 | "type": "string"
35 | },
36 | "url": {
37 | "type": "string"
38 | }
39 | },
40 | "required": ["title", "description", "url"]
41 | }
42 |
43 |
44 |
--------------------------------------------------------------------------------
/tests/agents/crewai_agent/test_crewai.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import asyncio
8 | from dotenv import load_dotenv
9 |
10 | from unittest import TestCase
11 |
12 | from maestro.cli.common import parse_yaml
13 |
14 | from maestro.workflow import Workflow
15 |
16 | load_dotenv()
17 |
18 |
class CrewAITest(TestCase):
    """End-to-end check that a CrewAI-backed workflow builds and runs."""

    def test_agent_runs(self) -> None:
        # Fixture YAMLs sit next to this test module.
        base = os.path.dirname(__file__)
        agents_yaml = parse_yaml(os.path.join(base, "agents.yaml"))
        workflow_yaml = parse_yaml(os.path.join(base, "workflow.yaml"))

        try:
            workflow = Workflow(agents_yaml, workflow_yaml[0])
        except Exception as excep:
            raise RuntimeError("Unable to create agents") from excep

        result = asyncio.run(workflow.run())
        print(result)

        assert result is not None
        assert result["final_prompt"] == "OK"


if __name__ == "__main__":
    crewtest = CrewAITest()
    crewtest.test_agent_runs()
41 |
--------------------------------------------------------------------------------
/tests/yamls/agents/context_test_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: Recipe Agent
5 | labels:
6 | app: mas-example
7 | spec:
8 | model: gpt-oss:latest
9 | framework: openai
10 | mode: local
11 | description: Get the current weather
12 | instructions: Given the list of ingredients, return a recipe that can be made with the ingredients.
13 | ---
14 | apiVersion: maestro/v1alpha1
15 | kind: Agent
16 | metadata:
17 | name: Recipe Time Agent
18 | labels:
19 | app: mas-example
20 | spec:
21 | model: gpt-oss:latest
22 | framework: openai
23 | mode: local
24 | description: How long will it take to make the recipe?
25 | instructions: |
26 | Given a recipe, estimate the time it will take to make the recipe.
27 |
28 | ---
29 | apiVersion: maestro/v1alpha1
30 | kind: Agent
31 | metadata:
32 | name: Recipe Cost Agent
33 | labels:
34 | app: mas-example
35 | spec:
36 | model: gpt-oss:latest
37 | framework: openai
38 | mode: local
39 | description: How much will it cost to make the recipe?
40 | instructions: Given the list of ingredients, approximate the cost it will take to make the recipe.
41 |
--------------------------------------------------------------------------------
/tests/utils/responses.yml:
--------------------------------------------------------------------------------
1 | responses:
2 | "what colour is the sky?": "The sky is blue during a clear day due to a phenomenon called Rayleigh scattering."
3 | "who is the president?": "This is a mock response. In a production environment, this would be replaced with accurate, up-to-date information."
4 | "tell me a joke": "Why don't programmers like nature? It has too many bugs!"
5 | "what is the meaning of life?": "According to this mock response, the meaning of life is to write better mock servers."
6 | "summarize the Vulnerability id:BIT-tomcat-2024-34750": '{"name": "get_vulnerability", "arguments": {"id": "BIT-tomcat-2024-34750"}}'
7 | "What are the available tools you have?": "I can use the 'fetch' function to retrieve data from a specified URL. This function allows me to specify parameters such as maximum length of response, if the response should be raw or if I need to define a start index for extracting content, and obviously, the URL itself I want to fetch data from."
8 | defaults:
9 | unknown_response: "I don't know the answer to that. This is a mock response."
10 |
11 | settings:
12 | lag_enabled: true
13 | lag_factor: 10 # Higher values = faster responses (10 = fast, 1 = slow)
--------------------------------------------------------------------------------
/tests/yamls/agents/code_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: code
5 | labels:
6 | app: slack-example
7 | spec:
8 | framework: code
9 | mode: local
10 |   description: code agent
11 | instructions: execute python code
12 | code: |
13 | print(input[0])
14 | output["answer"] = "Hi!"
15 |
16 | ---
17 | apiVersion: maestro/v1alpha1
18 | kind: Agent
19 | metadata:
20 | name: github-lister
21 | dependencies: |
22 | requests==2.31.0
23 | labels:
24 | app: cbom-demo-pretest
25 | spec:
26 | framework: code
27 | mode: local
28 | description: list repos from GitHub
29 | instructions: list my GitHub repos using the GitHub API
30 | code: |
31 | import os
32 | import requests
33 |
34 | token = os.getenv("GITHUB_TOKEN")
35 | user = os.getenv("GITHUB_USER")
36 | if not token or not user:
37 | output["error"] = "GITHUB_TOKEN or GITHUB_USER not set"
38 | else:
39 | headers = {"Authorization": f"Bearer {token}"}
40 | url = f"https://api.github.com/users/{user}/repos"
41 | r = requests.get(url, headers=headers)
42 | repos = [repo["name"] for repo in r.json()]
43 | output["repos"] = repos
44 |
--------------------------------------------------------------------------------
/tests/yamls/agents/scoring_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: qwen3:latest
9 | framework: openai
10 | mode: local
11 | description: this is a test
12 | instructions: Concisely answer the question to the best of your ability, and if you are unsure, say "I don't know".
13 |
14 | ---
15 | apiVersion: maestro/v1alpha1
16 | kind: Agent
17 | metadata:
18 | name: test2
19 | labels:
20 | app: test-example
21 | spec:
22 | model: qwen3:latest
23 | framework: openai
24 | mode: local
25 | description: this is a test
26 | instructions: You are a gibberish generator. You will be given a prompt and you will generate a gibberish response that is not related to the prompt at all.
27 |
28 | ---
29 | apiVersion: maestro/v1alpha1
30 | kind: Agent
31 | metadata:
32 | name: evaluate
33 | labels:
34 | app: test-example
35 | custom_agent: scoring_agent
36 | spec:
37 | model: qwen3:latest # NOTE: the model is actually used to determine the LLM call, so must be a valid model
38 | framework: custom
39 | mode: remote
40 | description: testing Opik
41 | instructions: evaluates the response using Opik
42 |
--------------------------------------------------------------------------------
/tests/yamls/agents/dry_run_inputs_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: beeai
10 | mode: local
11 | description: this is a test
12 | tools:
13 | - code_interpreter
14 | - test
15 | instructions: print("this is a test 1.")
16 |
17 | ---
18 |
19 | apiVersion: maestro/v1alpha1
20 | kind: Agent
21 | metadata:
22 | name: test2
23 | labels:
24 | app: test-example
25 | spec:
26 | model: meta-llama/llama-3-1-70b-instruct
27 | framework: beeai
28 | mode: local
29 | description: this is a test
30 | tools:
31 | - code_interpreter
32 | - test
33 | instructions: |
34 | print(f"prompt is {input}")
35 | input = f"answer from test2: I got {input}!!"
36 |
37 | ---
38 |
39 | apiVersion: maestro/v1alpha1
40 | kind: Agent
41 | metadata:
42 | name: test3
43 | labels:
44 | app: test-example
45 | spec:
46 | model: meta-llama/llama-3-1-70b-instruct
47 | framework: beeai
48 | mode: local
49 | description: this is a test
50 | tools:
51 | - code_interpreter
52 | - test
53 | instructions: print(f"this is input for this step {input}.")
54 |
--------------------------------------------------------------------------------
/tools/scripts.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 | # Copyright © 2025 IBM
3 |
4 | #!/usr/bin/env python3
5 |
6 | import sys
7 | import subprocess
8 | import re
9 |
10 |
def lint():
    """Format the tree with ruff, then lint it; exit with status 1 on failure."""
    for args in (["ruff", "format", "."], ["ruff", "check", "."]):
        try:
            subprocess.run(args, check=True)
        except subprocess.CalledProcessError:
            sys.exit(1)
17 |
18 |
def commit():
    """Stage all changes and create a signed git commit.

    The commit message comes from ``sys.argv[1]`` and must follow the
    Conventional Commits format ``<type>(<scope>): <description>``.
    Exits with status 1 when the message is missing or malformed.
    """
    if len(sys.argv) < 2:
        # Restore the argument placeholder — it had been lost from the
        # original message (angle brackets stripped), leaving an empty hint.
        print('Usage: uv run commit "<commit message>"')
        sys.exit(1)

    commit_msg = sys.argv[1]
    if not re.match(
        r"^(feat|fix|docs|style|refactor|test|chore)(\([a-z-]+\))?: .+", commit_msg
    ):
        print("Error: Commit message must follow the conventional commits format:")
        # Same placeholder restoration: this line previously printed "(): ".
        print("<type>(<scope>): <description>")
        print("\nTypes: feat, fix, docs, style, refactor, test, chore")
        sys.exit(1)

    print("📦 Adding files...")
    subprocess.run(["git", "add", "--all"], check=True)

    print("📝 Committing changes...")
    subprocess.run(["git", "commit", "-s", "-m", commit_msg], check=True)

    print("Successfully committed changes")


if __name__ == "__main__":
    commit()
44 |
--------------------------------------------------------------------------------
/operator/config/manifests/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # These resources constitute the fully configured set of manifests
2 | # used to generate the 'manifests/' directory in a bundle.
3 | resources:
4 | - bases/operator.clusterserviceversion.yaml
5 | - ../default
6 | - ../samples
7 | - ../scorecard
8 |
9 | # [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
10 | # Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
11 | # These patches remove the unnecessary "cert" volume and its manager container volumeMount.
12 | #patches:
13 | #- target:
14 | # group: apps
15 | # version: v1
16 | # kind: Deployment
17 | # name: controller-manager
18 | # namespace: system
19 | # patch: |-
20 | # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
21 | # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
22 | # - op: remove
23 |
24 | # path: /spec/template/spec/containers/0/volumeMounts/0
25 | # # Remove the "cert" volume, since OLM will create and mount a set of certs.
26 | # # Update the indices in this path if adding or removing volumes in the manager's Deployment.
27 | # - op: remove
28 | # path: /spec/template/spec/volumes/0
29 |
--------------------------------------------------------------------------------
/operator/config/rbac/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | # All RBAC will be applied under this service account in
3 | # the deployment namespace. You may comment out this resource
4 | # if your manager will use a service account that exists at
5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding
6 | # subjects if changing service account names.
7 | - service_account.yaml
8 | - role.yaml
9 | - role_binding.yaml
10 | - leader_election_role.yaml
11 | - leader_election_role_binding.yaml
12 | # The following RBAC configurations are used to protect
13 | # the metrics endpoint with authn/authz. These configurations
14 | # ensure that only authorized users and service accounts
15 | # can access the metrics endpoint. Comment the following
16 | # permissions if you want to disable this protection.
17 | # More info: https://book.kubebuilder.io/reference/metrics.html
18 | - metrics_auth_role.yaml
19 | - metrics_auth_role_binding.yaml
20 | - metrics_reader_role.yaml
21 | # For each CRD, "Editor" and "Viewer" roles are scaffolded by
22 | # default, aiding admins in cluster management. Those roles are
23 | # not used by the Project itself. You can comment the following lines
24 | # if you do not want those helpers be installed with your Project.
25 | - workflowrun_editor_role.yaml
26 | - workflowrun_viewer_role.yaml
27 |
28 |
--------------------------------------------------------------------------------
/deployments/maestro.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Build, deploy (docker or kubernetes) or exercise the maestro container.
# Usage: maestro.sh build|deploy|deploy-k|run [args...]

cmd=${CONTAINER_CMD:-docker}
target=${TARGET_IP:-127.0.0.1:5000}
flags=${BUILD_FLAGS}

# Validate the subcommand up front; the old message omitted 'deploy-k'.
if [ "$1" != "build" ] && [ "$1" != "deploy" ] && [ "$1" != "deploy-k" ] && [ "$1" != "run" ]; then
    echo "Invalid argument. Must be 'build', 'deploy', 'deploy-k' or 'run'."
    exit 1
fi

# NOTE: use POSIX '=' inside [ ]; '==' is a bashism and fails under dash/sh.
if [ "$1" = "build" ]; then
    echo "Building..."
    $cmd build $flags -t maestro -f Dockerfile ..
elif [ "$1" = "deploy" ]; then
    echo "Deploying..."
    # Collect every remaining argument as a container env var (-e NAME=VALUE).
    env=""
    while [ "$2" != "" ]; do
        env=$env" -e "$2" "
        shift
    done
    $cmd run -d $env -p $target:5000 maestro
elif [ "$1" = "deploy-k" ]; then
    echo "Deploying (kubernetes)..."
    cp maestro.yaml temp-maestro.yaml
    # Inject each NAME=VALUE argument into the manifest's env: section.
    while [ "$2" != "" ]; do
        keyvalue=$2
        name=$(echo $keyvalue | cut -d= -f1)
        value=$(echo $keyvalue | cut -d= -f2)
        sed -i -e "s#env:#env:\n - name: $name\n value: $value#" temp-maestro.yaml
        shift
    done
    kubectl apply -f temp-maestro.yaml
elif [ "$1" = "run" ]; then
    echo "Running..."
    agents=$2
    workflow=$3
    # Post both YAML files to the service and unescape newlines in the reply.
    curl -s -X POST -L http://$target/ -F "agents=@$agents" -F "workflow=@$workflow" | awk '{gsub(/\\n/,"\n")}1'
fi
40 |
--------------------------------------------------------------------------------
/tests/agents/beeai_agent/test_beeai.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 | # Copyright © 2025 IBM
3 |
4 | import os
5 | import dotenv
6 | import asyncio
7 |
8 |
9 | from maestro.cli.common import parse_yaml
10 |
11 | from maestro.workflow import Workflow
12 | from maestro.agents.beeai_agent import BeeAILocalAgent
13 |
14 | dotenv.load_dotenv()
15 |
16 |
class BeeAIAgentMock:
    """Stand-in for ``BeeAILocalAgent``: echoes the prompt with an ``OK:`` prefix."""

    async def run(self, prompt: str, context=None, step_index=None) -> str:
        # Prefix lets the test recognize that the mock produced the answer.
        return f"OK:{prompt}"
23 |
24 |
def test_agent_runs(monkeypatch) -> None:
    """Run the workflow end-to-end with BeeAILocalAgent replaced by a mock."""
    # Intercept construction so every BeeAILocalAgent *is* the mock instance.
    stub = BeeAIAgentMock()
    monkeypatch.setattr(BeeAILocalAgent, "__new__", lambda *a, **kw: stub)

    here = os.path.dirname(__file__)
    agents_yaml = parse_yaml(os.path.join(here, "agents.yaml"))
    workflow_yaml = parse_yaml(os.path.join(here, "workflow.yaml"))
    try:
        workflow = Workflow(agents_yaml, workflow_yaml[0])
    except Exception as excep:
        raise RuntimeError("Unable to create agents") from excep

    result = asyncio.run(workflow.run())
    print(result)

    assert result is not None
    # Either the mock answered ("OK:…") or the framework's own dry-run stub did.
    assert result["final_prompt"].startswith(("OK:Welcome", "Mock agent"))
43 |
--------------------------------------------------------------------------------
/tests/agents/openai_agent/test_openai.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import dotenv
8 | import asyncio
9 |
10 |
11 | from maestro.cli.common import parse_yaml
12 |
13 | from maestro.workflow import Workflow
14 | from maestro.agents.openai_agent import OpenAIAgent
15 |
16 | dotenv.load_dotenv()
17 |
18 |
class OpenAIAgentMock:
    """Stand-in for ``OpenAIAgent``: echoes the prompt with an ``OK:`` prefix."""

    async def run(self, prompt: str, context=None, step_index=None) -> str:
        # Prefix lets the test recognize that the mock produced the answer.
        return f"OK:{prompt}"
25 |
26 |
def test_agent_runs(monkeypatch) -> None:
    """Run the workflow end-to-end with OpenAIAgent replaced by a mock."""
    # Intercept construction so every OpenAIAgent *is* the mock instance.
    stub = OpenAIAgentMock()
    monkeypatch.setattr(OpenAIAgent, "__new__", lambda *a, **kw: stub)

    here = os.path.dirname(__file__)
    agents_yaml = parse_yaml(os.path.join(here, "agents.yaml"))
    workflow_yaml = parse_yaml(os.path.join(here, "workflow.yaml"))
    try:
        workflow = Workflow(agents_yaml, workflow_yaml[0])
    except Exception as excep:
        raise RuntimeError("Unable to create agents") from excep

    result = asyncio.run(workflow.run())
    print(result)

    assert result is not None
    # Either the mock answered ("OK:…") or the framework's own dry-run stub did.
    assert result["final_prompt"].startswith(("OK:Welcome", "Mock agent"))
45 |
--------------------------------------------------------------------------------
/src/maestro/agents/prompt_agent.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | from maestro.agents.agent import Agent
5 |
6 | from dotenv import load_dotenv
7 |
8 | load_dotenv()
9 |
10 |
class PromptAgent(Agent):
    """
    Custom 'prompt_agent' that ignores the input response and instead
    returns the content of its own `spec.instructions` field as the output.
    """

    def __init__(self, agent_def: dict) -> None:
        """Initialize from an agent definition dict (parsed agent YAML).

        Args:
            agent_def: full agent definition; ``spec.instructions`` may be a
                plain string or a list of lines.
        """
        # Initialize the base class first so it can set up everything.
        super().__init__(agent_def)

        # Pull the instructions block out of the YAML spec.
        raw_instr = agent_def.get("spec", {}).get("instructions", "")

        # Normalize list vs string into a single string attribute.
        if isinstance(raw_instr, list):
            self.instructions: str = "\n".join(raw_instr)
        else:
            self.instructions = raw_instr or ""

    async def run(self, response: str, context=None, step_index=None) -> str:
        """Return ``spec.instructions`` verbatim, ignoring all inputs.

        Args:
            response: output of the previous workflow step (ignored).
            context: optional workflow context (ignored); accepted for
                signature consistency with the other agents' ``run`` methods.
            step_index: optional step index (ignored); same rationale.

        Returns:
            The content of spec.instructions, exactly as defined in the agent YAML.
        """
        print(self.instructions)
        return self.instructions
41 |
--------------------------------------------------------------------------------
/operator/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build the manager binary
2 | FROM golang:1.24 AS builder
3 | ARG TARGETOS
4 | ARG TARGETARCH
5 |
6 | WORKDIR /workspace
7 | # Copy the Go Modules manifests
8 | COPY go.mod go.mod
9 | COPY go.sum go.sum
10 | # cache deps before building and copying source so that we don't need to re-download as much
11 | # and so that source changes don't invalidate our downloaded layer
12 | RUN go mod download
13 |
14 | # Copy the go source
15 | COPY cmd/main.go cmd/main.go
16 | COPY api/ api/
17 | COPY internal/controller/ internal/controller/
18 |
19 | # Build
20 | # the GOARCH has not a default value to allow the binary be built according to the host where the command
21 | # was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO
22 | # the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore,
23 | # by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
24 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go
25 |
26 | # Use distroless as minimal base image to package the manager binary
27 | # Refer to https://github.com/GoogleContainerTools/distroless for more details
28 | FROM gcr.io/distroless/static:nonroot
29 | WORKDIR /
30 | COPY --from=builder /workspace/manager .
31 | USER 65532:65532
32 |
33 | ENTRYPOINT ["/manager"]
34 |
--------------------------------------------------------------------------------
/operator/api/v1alpha1/groupversion_info.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | // Package v1alpha1 contains API Schema definitions for the maestro v1alpha1 API group
18 | // +kubebuilder:object:generate=true
19 | // +groupName=maestro.ai4quantum.com
20 | package v1alpha1
21 |
22 | import (
23 | "k8s.io/apimachinery/pkg/runtime/schema"
24 | "sigs.k8s.io/controller-runtime/pkg/scheme"
25 | )
26 |
// Standard kubebuilder scheme-registration plumbing; presumably consumed by
// the manager's scheme setup — confirm in cmd/main.go.
var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "maestro.ai4quantum.com", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
37 |
--------------------------------------------------------------------------------
/tools/get_release_name.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import requests
8 |
9 |
def main():
    """Pick the first unused release name and write it to GITHUB_OUTPUT.

    Reads candidate names from ``.release_names.md`` (skipping blank lines,
    ``**bold**`` headings and ``~~struck-through~~`` entries), fetches the
    repository's existing releases from the GitHub API, and emits
    ``release_name=<name>`` for the first candidate not already used.

    Raises:
        SystemExit: with status 1 if the API call fails or no name is free.
        KeyError: if GITHUB_TOKEN / GITHUB_REPOSITORY / GITHUB_OUTPUT are unset.
    """
    github_token = os.environ["GITHUB_TOKEN"]
    repo = os.environ["GITHUB_REPOSITORY"]

    with open(".release_names.md", "r") as f:
        # lstrip("- ") removes the markdown bullet (it strips leading '-' and
        # space characters, which is sufficient for "- Name" lines).
        all_names = [
            line.strip().lstrip("- ")
            for line in f
            if line.strip() and not line.strip().startswith("**") and "~~" not in line
        ]

    headers = {"Authorization": f"token {github_token}"}
    response = requests.get(
        f"https://api.github.com/repos/{repo}/releases", headers=headers
    )

    if not response.ok:
        print(f"::error::Failed to fetch releases: {response.status_code}")
        # was exit(1): the `exit` site builtin is not guaranteed to exist in
        # every interpreter start-up mode; SystemExit is the supported form.
        raise SystemExit(1)

    releases = response.json()
    # Release titles look like "<name> – <subtitle>"; recover the name part.
    used_names = {
        r["name"].split(" – ")[0]
        for r in releases
        if r.get("name") and " – " in r["name"]
    }

    next_name = next((name for name in all_names if name not in used_names), None)

    if not next_name:
        print("::error::No unused release names available.")
        raise SystemExit(1)

    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"release_name={next_name}\n")


if __name__ == "__main__":
    main()
49 |
--------------------------------------------------------------------------------
/operator/config/scorecard/patches/olm.config.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: /stages/0/tests/-
3 | value:
4 | entrypoint:
5 | - scorecard-test
6 | - olm-bundle-validation
7 | image: quay.io/operator-framework/scorecard-test:v1.39.2
8 | labels:
9 | suite: olm
10 | test: olm-bundle-validation-test
11 | - op: add
12 | path: /stages/0/tests/-
13 | value:
14 | entrypoint:
15 | - scorecard-test
16 | - olm-crds-have-validation
17 | image: quay.io/operator-framework/scorecard-test:v1.39.2
18 | labels:
19 | suite: olm
20 | test: olm-crds-have-validation-test
21 | - op: add
22 | path: /stages/0/tests/-
23 | value:
24 | entrypoint:
25 | - scorecard-test
26 | - olm-crds-have-resources
27 | image: quay.io/operator-framework/scorecard-test:v1.39.2
28 | labels:
29 | suite: olm
30 | test: olm-crds-have-resources-test
31 | - op: add
32 | path: /stages/0/tests/-
33 | value:
34 | entrypoint:
35 | - scorecard-test
36 | - olm-spec-descriptors
37 | image: quay.io/operator-framework/scorecard-test:v1.39.2
38 | labels:
39 | suite: olm
40 | test: olm-spec-descriptors-test
41 | - op: add
42 | path: /stages/0/tests/-
43 | value:
44 | entrypoint:
45 | - scorecard-test
46 | - olm-status-descriptors
47 | image: quay.io/operator-framework/scorecard-test:v1.39.2
48 | labels:
49 | suite: olm
50 | test: olm-status-descriptors-test
51 |
--------------------------------------------------------------------------------
/tests/workflow/test_cron.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import yaml
8 | import unittest
9 | from unittest import TestCase
10 | from maestro.workflow import Workflow
11 |
12 | import asyncio
13 |
14 |
def parse_yaml(file_path):
    """Load every YAML document in *file_path* and return them as a list."""
    with open(file_path, "r") as stream:
        return list(yaml.safe_load_all(stream))
19 |
20 |
21 | # `cron` tests
class TestCron(TestCase):
    """Run the simple cron workflow end-to-end and check its final prompt."""

    def setUp(self):
        base = os.path.dirname(__file__)
        self.agents_yaml = parse_yaml(
            os.path.join(base, "../yamls/agents/simple_agent.yaml")
        )
        self.workflow_yaml = parse_yaml(
            os.path.join(
                base, "../yamls/workflows/simple_cron_many_steps_workflow.yaml"
            )
        )
        try:
            self.workflow = Workflow(self.agents_yaml, self.workflow_yaml[0])
        except Exception as excep:
            raise RuntimeError("Unable to create agents") from excep

    def tearDown(self):
        # Drop the workflow so each test starts from a fresh setUp.
        self.workflow = None

    def test_cron(self):
        response = asyncio.run(self.workflow.run())
        print(f"==={response}===")
        assert "This is a test input" in response.get("final_prompt")


if __name__ == "__main__":
    unittest.main()
49 |
--------------------------------------------------------------------------------
/tests/yamls/tools/wikipedia_tools.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Tool
3 | metadata:
4 | name: Wikipedia
5 | labels:
6 | app: tool-example
7 | spec:
8 | description: Search factual and historical information, including biography, history, politics, geography, society, culture, science, technology, people, animal species, mathematics, and other subjects.
9 | inputSchema:
10 | type: jsonSchema
11 | schema: |
12 | {
13 | "title": "WikipediaToolInput",
14 | "type": "object",
15 | "properties": {
16 | "query": {
17 | "type": "string",
18 | "description": "Name of the Wikipedia page."
19 | },
20 | "full_text": {
21 | "type": "boolean",
22 | "description": "If set to true, it will return the full text of the page instead of its short summary.",
23 | "default": false
24 | }
25 | },
26 | "required": ["query"]
27 | }
28 | outputSchema:
29 | type: jsonSchema
30 | schema: |
31 | {
32 | "title": "SearchToolResult",
33 | "type": "object",
34 | "properties": {
35 | "title": {
36 | "type": "string"
37 | },
38 | "description": {
39 | "type": "string"
40 | },
41 | "url": {
42 | "type": "string"
43 | }
44 | },
45 | "required": ["title", "description", "url"]
46 | }
47 |
48 |
49 |
--------------------------------------------------------------------------------
/operator/config/prometheus/monitor.yaml:
--------------------------------------------------------------------------------
1 | # Prometheus Monitor Service (Metrics)
2 | apiVersion: monitoring.coreos.com/v1
3 | kind: ServiceMonitor
4 | metadata:
5 | labels:
6 | control-plane: controller-manager
7 | app.kubernetes.io/name: operator
8 | app.kubernetes.io/managed-by: kustomize
9 | name: controller-manager-metrics-monitor
10 | namespace: system
11 | spec:
12 | endpoints:
13 | - path: /metrics
14 | port: https # Ensure this is the name of the port that exposes HTTPS metrics
15 | scheme: https
16 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
17 | tlsConfig:
18 | # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables
19 | # certificate verification. This poses a significant security risk by making the system vulnerable to
20 | # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between
21 | # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data,
22 | # compromising the integrity and confidentiality of the information.
23 | # Please use the following options for secure configurations:
24 | # caFile: /etc/metrics-certs/ca.crt
25 | # certFile: /etc/metrics-certs/tls.crt
26 | # keyFile: /etc/metrics-certs/tls.key
27 | insecureSkipVerify: true
28 | selector:
29 | matchLabels:
30 | control-plane: controller-manager
31 |
--------------------------------------------------------------------------------
/tools/run-meta-agent.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Driver for the meta-agent workflow tests: verifies the maestro CLI runs,
# executes the environment check (doctor.sh), then the suite itself (test.sh).
# Exits non-zero on the first failure.

echo "🚀 Running all meta-agent workflow tests in CI..."
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"

echo "📂 Running from: $REPO_ROOT"

META_AGENT_DIR="$REPO_ROOT/src/maestro/agents/meta_agent"

if [[ ! -d "$META_AGENT_DIR" ]]; then
    echo "❌ Error: Meta-agent directory not found at $META_AGENT_DIR"
    exit 1
fi

echo "🔍 Verifying Maestro installation..."
cd "$REPO_ROOT"

# Test if maestro works with or without uv
if uv run maestro --help &>/dev/null; then
    MAESTRO_CMD="uv run maestro"
    echo "✅ Maestro is running correctly using: $MAESTRO_CMD"
else
    echo "❌ Maestro is not installed or not working correctly"
    exit 1
fi

# NOTE(review): EXPECTED_TESTS is never read below — candidate for removal.
EXPECTED_TESTS=0
TEST_COUNT=0

# Run doctor.sh first
echo "🩺 Running doctor.sh for meta_agent..."
bash "$REPO_ROOT/src/maestro/agents/meta_agent/doctor.sh" || { echo "❌ Environment check failed"; exit 1; }

# Run test.sh only once with the directory, instead of looping over workflow files
echo "🧪 Running test.sh for meta_agent directory..."
bash "$REPO_ROOT/src/maestro/agents/meta_agent/test.sh" "$META_AGENT_DIR" || { echo "❌ test.sh failed"; exit 1; }
# (( var++ )) returns the pre-increment value (0, i.e. failure status); this is
# harmless here because the script does not run under `set -e`.
((TEST_COUNT++))

if [[ "$TEST_COUNT" -gt 0 ]]; then
    echo "✅ All meta-agent workflow tests completed successfully!"
else
    echo "❌ Error: No workflow tests were executed!"
    exit 1
fi
45 |
--------------------------------------------------------------------------------
/tests/workflow/test_dspy_agent.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import yaml
8 | import unittest
9 | from unittest import TestCase
10 | from maestro.workflow import Workflow
11 | import asyncio
12 |
13 |
def parse_yaml(file_path):
    """Load every YAML document in *file_path* and return them as a list."""
    with open(file_path, "r") as stream:
        return list(yaml.safe_load_all(stream))
18 |
19 |
20 | # `code` tests
class TestDSPy(TestCase):
    """Execute the DSPy workflow and verify its final prompt."""

    def setUp(self):
        base = os.path.dirname(__file__)
        self.agents_yaml = parse_yaml(
            os.path.join(base, "../yamls/agents/dspy_agent.yaml")
        )
        self.workflow_yaml = parse_yaml(
            os.path.join(base, "../yamls/workflows/dspy_workflow.yaml")
        )
        try:
            self.workflow = Workflow(self.agents_yaml, self.workflow_yaml[0])
        except Exception as excep:
            raise RuntimeError("Unable to create agents") from excep

    def tearDown(self):
        # Drop the workflow so each test starts from a fresh setUp.
        self.workflow = None

    def test_dspy(self):
        response = asyncio.run(self.workflow.run())
        # DRY_RUN yields the canned answer ("Olympics"); a live run says "Paris".
        dry_run = os.getenv("DRY_RUN")
        if dry_run and dry_run != "":
            assert "Olympics" in response["final_prompt"]
        else:
            assert "Paris" in response["final_prompt"]


if __name__ == "__main__":
    unittest.main()
49 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Mark and close stale issues
2 |
3 | on:
4 | schedule:
5 | - cron: '0 3 * * *' # daily at 03:00 UTC
6 | workflow_dispatch: {}
7 |
8 | permissions:
9 | contents: read
10 | issues: write
11 | pull-requests: read
12 |
13 | jobs:
14 | stale:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Run stale action (issues only)
18 | uses: actions/stale@v9
19 | with:
20 | repo-token: ${{ secrets.GITHUB_TOKEN }}
21 | operations-per-run: 200
22 |
23 | # Issues policy
24 | days-before-issue-stale: 14
25 | days-before-issue-close: 14
26 | stale-issue-label: 'stale'
27 | exempt-issue-labels: 'discussion,future,nice-to-have'
28 | stale-issue-message: >-
29 | This issue has been automatically marked as stale because it has not had
30 | recent activity for 14 days. If this is still relevant, please add a comment
31 | or apply an appropriate label to keep it active. Otherwise, it will be
32 | closed in another 14 days.
33 | close-issue-message: >-
34 | Closing this issue due to prolonged inactivity. Feel free to reopen if this
35 | is still a concern or provide more details.
36 |
37 | # PR policy
38 | days-before-pr-stale: -1
39 | days-before-pr-close: -1
40 |
41 | # Behavior
42 | remove-stale-when-updated: true
43 | enable-statistics: true
44 | debug-only: false
45 |
--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------
1 | # Maestro Validation Tools
2 |
3 | ## Mermaid Validation
4 |
5 | Validates that generated Mermaid diagrams have correct syntax and will render properly.
6 |
7 | ### Quick Start: Validate a Generated Diagram
8 |
9 | 1. **Generate a Mermaid diagram from your workflow:**
10 | ```bash
11 | maestro mermaid workflow.yaml > workflow.mmd
12 | ```
13 |
14 | 2. **Validate the generated diagram:**
15 | ```bash
16 | node tools/validate-mermaid.js workflow.mmd
17 | ```
18 |
19 | 3. **See the result:**
20 | ```bash
21 | ✅ VALID: workflow.mmd
22 | ```
23 | or
24 | ```bash
25 | ❌ INVALID: workflow.mmd
26 | Error: Parse error on line 5...
27 | ```
28 |
29 | ### Full Validation (All Workflows)
30 |
31 | ```bash
32 | # Validates all workflow files in the repository
33 | ./tools/check-mermaid.sh
34 | ```
35 |
36 | This will:
37 | - Auto-install dependencies (first run only)
38 | - Generate Mermaid diagrams for all workflows
39 | - Validate each diagram's syntax
40 | - Show results in a table format
41 |
42 | ### Setup
43 |
44 | Dependencies are auto-installed by `check-mermaid.sh` on first run.
45 |
46 | Or install manually:
47 | ```bash
48 | cd tools && npm install
49 | ```
50 |
51 | ### Common Validation Errors
52 |
53 | **Spaces in node IDs** (flowcharts):
54 | ```mermaid
55 | # ❌ Invalid
56 | flowchart LR
57 | My Agent --> Another Step
58 |
59 | # ✅ Valid
60 | flowchart LR
61 | agent1["My Agent"] --> step2["Another Step"]
62 | ```
63 |
64 | **Note:** Maestro generates sequence diagrams by default, which handle spaces correctly.
65 |
66 |
--------------------------------------------------------------------------------
/tests/agents/test_utils.py:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 | # Copyright © 2025 IBM
3 | import os
4 |
5 | from maestro.agents.utils import is_url, get_filepath, get_content
6 |
# Inputs that is_url/get_filepath/get_content should treat as resolvable.
valid = {
    "url": "https://github.com/AI4quantum/maestro/blob/main/README.md",
    "path": os.path.join(os.path.dirname(__file__), "../../CODE_OF_CONDUCT.md"),
    "string": "Some content",
    "list": ["Some", "multiline", "content"],
}
# Lookalikes that must NOT resolve: a URL without a scheme, a missing file.
invalid = {
    "url": "raw.githubusercontent.com/AI4quantum/maestro/refs/heads/main/README.md",
    "path": "./NOT_A_FILE.md",
}
17 |
18 |
def test_is_url():
    """is_url accepts absolute URLs and rejects bare paths and plain strings."""
    cases = [
        (valid["url"], True),
        (invalid["url"], False),
        (valid["path"], False),
        (valid["string"], False),
    ]
    for value, expected in cases:
        assert is_url(value) is expected
24 |
25 |
def test_get_filepath():
    """get_filepath resolves only existing local paths; everything else is None."""
    expected = os.path.join(os.path.dirname(__file__), "../../CODE_OF_CONDUCT.md")
    assert get_filepath(valid["path"], "") == expected
    for not_a_path in (invalid["path"], valid["string"], valid["url"]):
        assert get_filepath(not_a_path, "") is None
33 |
34 |
def test_get_content():
    """get_content fetches URLs, reads files, and passes other values through."""
    assert "# Maestro" in get_content(valid["url"], "")
    assert "# Contributor Covenant Code of Conduct" in get_content(valid["path"], "")
    # Non-URL, non-path inputs come back unchanged.
    for key in ("string", "list"):
        assert get_content(valid[key], "") == valid[key]
    # Unresolvable lookalikes also come back unchanged.
    for key in ("url", "path"):
        assert get_content(invalid[key], "") == invalid[key]
42 |
--------------------------------------------------------------------------------
/tools/validate-mermaid.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | import fs from 'fs';
4 | import { JSDOM } from 'jsdom';
5 | import mermaid from 'mermaid';
6 | import createDOMPurify from 'dompurify';
7 |
8 | const dom = new JSDOM('');
9 | global.window = dom.window;
10 | global.document = dom.window.document;
11 | global.navigator = dom.window.navigator;
12 | const DOMPurify = createDOMPurify(dom.window);
13 | global.DOMPurify = DOMPurify;
14 | dom.window.DOMPurify = DOMPurify;
15 |
/**
 * Parse a Mermaid file and report whether its syntax is valid.
 * Prints a ✅/❌ line to the console; never throws.
 * @param {string} filePath path to a .mmd file
 * @returns {Promise<boolean>} true when the diagram parses cleanly
 */
async function validateMermaid(filePath) {
  try {
    const content = fs.readFileSync(filePath, 'utf-8');
    // startOnLoad off: we only parse; nothing is rendered into the JSDOM.
    mermaid.initialize({ startOnLoad: false, logLevel: 'error', securityLevel: 'loose' });
    await mermaid.parse(content);
    console.log(`✅ VALID: ${filePath}`);
    return true;
  } catch (error) {
    console.error(`❌ INVALID: ${filePath}`);
    console.error(`  Error: ${error.message}`);
    // mermaid's parser attaches location info on error.hash when available.
    if (error.hash && error.hash.line) {
      console.error(`  Line ${error.hash.line}: ${error.hash.text || ''}`);
    }
    return false;
  }
}
32 |
const filePath = process.argv[2];

if (!filePath) {
  // Restore the lost argument placeholder (angle brackets were stripped,
  // leaving the usage line without the expected file argument).
  console.error('Usage: node validate-mermaid.js <file.mmd>');
  process.exit(1);
}

if (!fs.existsSync(filePath)) {
  console.error(`File not found: ${filePath}`);
  process.exit(1);
}

// Exit 0 on a valid diagram, 1 on a parse failure or unexpected error.
validateMermaid(filePath)
  .then(isValid => process.exit(isValid ? 0 : 1))
  .catch(err => {
    console.error(`Unexpected error: ${err.message}`);
    process.exit(1);
  });
51 |
52 |
--------------------------------------------------------------------------------
/tests/integration/test_evaluation_mock.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | import os
5 | import asyncio
6 | import yaml
7 |
8 | from maestro.workflow import Workflow
9 |
10 |
def _load_yaml(path):
    """Return the first YAML document in *path*, or None when the file is empty."""
    with open(path, "r", encoding="utf-8") as f:
        for document in yaml.safe_load_all(f):
            return document
    return None
15 |
16 |
def test_mock_evaluation_runs_without_api_key(monkeypatch):
    """Evaluation middleware must no-op gracefully when no API key is present."""
    monkeypatch.setenv("MAESTRO_AUTO_EVALUATION", "true")
    monkeypatch.delenv("WATSONX_APIKEY", raising=False)

    yamls_dir = os.path.join(os.path.dirname(__file__), "..", "yamls")
    agents_def = _load_yaml(
        os.path.abspath(
            os.path.join(yamls_dir, "agents", "evaluation_test_agent.yaml")
        )
    )
    workflow_def = _load_yaml(
        os.path.abspath(
            os.path.join(yamls_dir, "workflows", "evaluation_test_workflow.yaml")
        )
    )

    # isinstance also rejects None, so a missing/empty file fails here.
    assert isinstance(agents_def, dict)
    assert isinstance(workflow_def, dict)

    # Run the workflow entirely with mock agent; evaluation middleware should no-op gracefully
    wf = Workflow(agent_defs=[agents_def], workflow=workflow_def)
    result = asyncio.run(wf.run())

    # Validate basic successful execution shape
    assert isinstance(result, dict)
    assert "final_prompt" in result
    assert result["final_prompt"]  # non-empty output from mock agent
46 |
--------------------------------------------------------------------------------
/tests/test_tool_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import unittest
8 | from unittest import TestCase
9 | from maestro.tool_utils import find_mcp_service
10 |
11 |
12 | class Test_tool_utils(TestCase):
13 | mcp_server_list = "test_mcp_server_list.json"
14 |
15 | def setUp(self):
16 | os.environ["MCP_SERVER_LIST"] = self.mcp_server_list
17 | with open(self.mcp_server_list, "w") as f:
18 | contents = """
19 | [
20 | {
21 | "name": "slack",
22 | "url": "http://localhost:30055",
23 | "transport": "streamable-http",
24 | "access_token": null
25 | },
26 | {
27 | "name": "weather",
28 | "url": "http://localhost:8000",
29 | "transport": "streamable-http",
30 | "access_token": null
31 | }
32 | ]
33 | """
34 | f.write(contents)
35 |
36 | def tearDown(self):
37 | os.remove(self.mcp_server_list)
38 |
39 | def test_(self):
40 | name, url, transport, external, token = find_mcp_service("weather")
41 | assert name == "weather"
42 | assert url == "http://localhost:8000"
43 | assert transport == "streamable-http"
44 | assert external == "http://localhost:8000"
45 | assert not token
46 |
47 | name, url, transport, external, token = find_mcp_service("none")
48 | assert not name
49 | assert not url
50 | assert not transport
51 | assert not external
52 | assert not token
53 |
54 |
# Allow running this test module directly (outside of pytest).
if __name__ == "__main__":
    unittest.main()
57 |
--------------------------------------------------------------------------------
/k8s/api/v1alpha1/remotemcpserver_types.go:
--------------------------------------------------------------------------------
1 | // Assisted by watsonx Code Assistant
2 | // Code generated by WCA@IBM in this programming language is not approved for use in IBM product development.
3 |
4 | package v1alpha1
5 |
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)
9 |
// RemoteMCPServerSpec defines the desired state of RemoteMCPServer
type RemoteMCPServerSpec struct {
	// URL is the endpoint of the remote MCP server.
	URL string `json:"url,omitempty"`
	// Transport is the protocol used (e.g. "sse", "streamable-http", "stdio").
	Transport string `json:"transport,omitempty"`
	// ConfigMapName names the ConfigMap containing configuration data.
	ConfigMapName string `json:"configmapName,omitempty"`
	// SecretName names the Secret containing sensitive data.
	SecretName string `json:"secretName,omitempty"`
	// Name is the friendly name of the server.
	Name string `json:"name,omitempty"`
	// Description is a human-readable description of the server.
	Description string `json:"description,omitempty"`
}
19 |
// RemoteMCPServer is the Schema for the remotemcpservers API
// NOTE(review): unlike RemoteMCPServerList below, this type carries no
// deepcopy-gen marker — confirm whether DeepCopyObject generation is needed.
// +k8s:openapi-gen=true
type RemoteMCPServer struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec holds the desired state of this server.
	Spec RemoteMCPServerSpec `json:"spec,omitempty"`
}
28 |
// RemoteMCPServerList contains a list of RemoteMCPServer
// (the deepcopy-gen marker must sit in the comment group directly above the
// type; the blank line that previously separated them detached the marker)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type RemoteMCPServerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the RemoteMCPServer objects in this list.
	Items []RemoteMCPServer `json:"items"`
}
37 |
// RemoteMCPServerGroupVersionKind is the group version kind for RemoteMCPServer
// NOTE(review): `schema` is k8s.io/apimachinery/pkg/runtime/schema, which is
// absent from this file's import block — the package does not compile without it.
var RemoteMCPServerGroupVersionKind = schema.GroupVersionKind{
	Group: "maestro.ai4quantum.com",
	Version: "v1alpha1",
	Kind: "RemoteMCPServer",
}
44 |
45 |
--------------------------------------------------------------------------------
/tests/agents/test_prompt_agent.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | import asyncio
5 |
6 | from maestro.agents.prompt_agent import PromptAgent
7 |
8 |
def test_prompt_agent_returns_instruction_string():
    """
    Given spec.instructions as a single string,
    PromptAgent.run() should return that exact string.
    """
    instruction = "This is a test instruction."
    agent_def = {
        "metadata": {"name": "test-prompt-agent", "labels": {}},
        "spec": {
            "framework": "custom",
            "model": "dummy",
            "description": "desc",
            "instructions": instruction,
        },
    }
    returned = asyncio.run(PromptAgent(agent_def).run("ignored input"))
    assert isinstance(returned, str)
    assert returned == instruction
27 |
28 |
def test_prompt_agent_returns_instruction_list_joined():
    """
    Given spec.instructions as a list of strings,
    PromptAgent.run() should return them joined by newline.
    """
    lines = [
        "First instruction line.",
        "Second instruction line.",
        "Third instruction.",
    ]
    agent_def = {
        "metadata": {"name": "test-prompt-agent", "labels": {}},
        "spec": {
            "framework": "custom",
            "model": "dummy",
            "description": "desc",
            "instructions": lines,
        },
    }
    returned = asyncio.run(PromptAgent(agent_def).run("anything"))
    assert returned == "\n".join(lines)
52 |
--------------------------------------------------------------------------------
/.github/workflows/maestro_operator-build.yaml:
--------------------------------------------------------------------------------
1 | name: Maestro Operator build
2 | on:
3 | push:
4 | branches: [ "main" ]
5 | paths:
6 | - '**'
7 | pull_request:
8 | branches: [ "main" ]
9 | paths:
10 | - '**'
11 | jobs:
12 | image-build:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v4
16 | - name: Set up Python
17 | uses: actions/setup-python@v5
18 | with:
19 | python-version: '3.12'
20 | - name: Install uv and activate the environment
21 | uses: astral-sh/setup-uv@v6
22 | with:
23 | activate-environment: true
24 | - name: Install dependencies
25 | run: |
26 | uv sync
27 | - name: Set up Docker Buildx
28 | uses: docker/setup-buildx-action@v3
29 |
30 | - name: Cache Docker layers
31 | uses: actions/cache@v3
32 | with:
33 | path: /tmp/.buildx-cache
34 | key: ${{ runner.os }}-buildx-${{ github.sha }}
35 | restore-keys: |
36 | ${{ runner.os }}-buildx-
37 |
38 | - name: Build the Docker image
39 | uses: docker/build-push-action@v5
40 | with:
41 | context: ./operator
42 | push: false
43 | load: true
44 | tags: localhost/controller:latest,localhost/maestro-engine:latest
45 | cache-from: type=local,src=/tmp/.buildx-cache
46 | cache-to: type=local,dest=/tmp/.buildx-cache-new
47 |
48 | - name: Move cache
49 | run: |
50 | rm -rf /tmp/.buildx-cache
51 | mv /tmp/.buildx-cache-new /tmp/.buildx-cache
52 |
53 | - name: Upload Docker images
54 | uses: actions/upload-artifact@v4
55 | with:
56 | name: docker-images
57 | path: |
58 | /tmp/.buildx-cache
59 | retention-days: 1
60 |
--------------------------------------------------------------------------------
/.github/workflows/maestro_run-tests.yaml:
--------------------------------------------------------------------------------
1 | name: Maestro Run Tests
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | paths:
7 | - '**'
8 | pull_request:
9 | branches: [ "main" ]
10 | paths:
11 | - '**'
12 |
13 | jobs:
14 | test:
15 | runs-on: ubuntu-latest
16 | strategy:
17 | matrix:
18 | python-version: ["3.11", "3.12", "3.13"]
19 | env:
20 | DEPLOY_KUBERNETES_TEST: 1
21 | DEPLOY_DOCKER_TEST: 1
22 | IMAGE_PUSH_CMD: 'kind load docker-image docker.io/library/maestro:latest'
23 | # IMAGE_TAG_CMD: 'docker tag localhost/maestro:latest docker.io/library/maestro:latest'
24 | IN_GITHUB_ACTION: 1
25 | steps:
26 | - uses: actions/checkout@v4
27 | - name: Set up Python ${{ matrix.python-version }}
28 | uses: actions/setup-python@v5
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 | - name: Install Kind
32 | run: |
33 | curl -Lo ./kind "https://github.com/kubernetes-sigs/kind/releases/download/v0.20.0/kind-$(uname)-amd64"
34 | chmod +x ./kind
35 | sudo mv kind /usr/local/bin
36 | which kind
37 | - name: Create Kind cluster
38 | run: |
39 | kind create cluster --config tests/integration/deploys/kind-config.yaml
40 | - name: Install uv and activate the environment
41 | uses: astral-sh/setup-uv@v6
42 | with:
43 | activate-environment: true
44 | - name: Install dependencies
45 | run: |
46 | uv sync --all-extras
47 | - name: Check code format and style
48 | run: |
49 | uv run ruff check
50 | uv run ruff format --check
51 | - name: Run tests
52 | run: |
53 | export PYTHONPATH=$PYTHONPATH:$(pwd)/src
54 | uv run pytest -v -rA
55 |
--------------------------------------------------------------------------------
/tests/integration/deploys/test_deploy.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import requests
8 |
9 | import pytest
10 | import unittest
11 | from unittest import TestCase
12 | from maestro.deploy import Deploy
13 |
14 |
15 | # `deploy` tests
class TestDeploy(TestCase):
    """Exercise maestro.deploy.Deploy against Kubernetes and Docker targets.

    The HTTP probes are skipped inside GitHub Actions, where the deployed
    service is not reachable.
    """

    def setUp(self):
        self.cwd = os.getcwd()
        env_vars = "BEE_API_KEY=sk-proj-testkey BEE_API=http://192.168.86.45:4000 DRY_RUN=1"
        self.deploy = Deploy(
            "tests/examples/condition_agents.yaml",
            "tests/examples/condition_workflow.yaml",
            env_vars,
        )

    def tearDown(self):
        self.deploy = None

    @pytest.mark.skipif(
        os.getenv("DEPLOY_KUBERNETES_TEST") != "1", reason="Kubernetes deploy skipped"
    )
    def test_deploy_to_kubernetes(self):
        self.deploy.deploy_to_kubernetes()
        if os.getenv("IN_GITHUB_ACTION") != "1":
            body = requests.get("http://127.0.0.1:30051/").text
            self.assertIn("Running expert...", body)
            self.assertIn("Running colleague...", body)

    @pytest.mark.skipif(
        os.getenv("DEPLOY_DOCKER_TEST") != "1", reason="Docker deploy skipped"
    )
    def test_deploy_to_docker(self):
        self.deploy.deploy_to_docker()
        if os.getenv("IN_GITHUB_ACTION") != "1":
            body = requests.get("http://127.0.0.1:5000/").text
            self.assertIn("Running expert...", body)
            self.assertIn("Running colleague...", body)
47 |
48 |
# Allow running this test module directly (outside of pytest).
if __name__ == "__main__":
    unittest.main()
51 |
--------------------------------------------------------------------------------
/tests/yamls/agents/multi_agents.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: beeaiagent
5 | labels:
6 | app: multi-agent
7 | spec:
8 | #model: meta-llama/llama-3-1-70b-instruct
9 | model: llama3.1
10 | # model: granite3.1-dense:8b
11 | framework: beeai
12 | mode: local
13 | description: beeai agent
14 | tools:
15 | - code_interpreter
16 | instructions: |
17 | You are playing a guessing game. One person has a number between 1 and 10 in mind. You guess the number.
    Output format is {"BeeAI=" + guessed number}
19 |
20 | ---
21 |
22 | apiVersion: maestro/v1alpha1
23 | kind: Agent
24 | metadata:
25 | name: Generic_Crew
26 | labels:
27 | app: multi-agent
28 | module: agents.crewai.generic.generic_agent
29 | class: Generic_Crew
30 | factory: generic_crew
31 | spec:
32 | model: "llama3.1:latest"
33 | description: crewai agent
34 | instructions: dummy
35 | framework: crewai
36 |
37 | ---
38 |
39 | apiVersion: maestro/v1alpha1
40 | kind: Agent
41 | metadata:
42 | name: JudgeAgent
43 | labels:
44 | app: multi-agent
45 | spec:
46 | #model: meta-llama/llama-3-1-70b-instruct
47 | model: llama3.1
48 | # model: granite3.1-dense:8b
49 | framework: beeai
50 | mode: local
51 | description: beeai agent
52 | tools:
53 | - code_interpreter
54 | instructions: |
    You are a judge of a guess game. You generate a random number between 1 and 10
56 | You can use the code interpreter tools to generate the number.
57 | The players guess numbers are given in the prompt or input.
58 | The format of the prompt is a list of "name=guess number".
    Find a winner who guessed the closest number to the number you generated.
60 | Output format is {Number: you generated, Winner: winner and its guess number}
61 |
--------------------------------------------------------------------------------
/tests/agents/test_agent.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import unittest
7 | from maestro.agents.agent import Agent
8 |
9 |
class TestAgentEmojis(unittest.TestCase):
    """Verify Agent.emoji() for the known frameworks and the default fallback."""

    def _create_agent(self, framework: str) -> Agent:
        # Build a minimal agent definition; only the framework value matters here.
        return Agent(
            {
                "metadata": {"name": f"{framework}_test_agent"},
                "spec": {
                    "framework": framework,
                    "model": "test_model",
                    "description": "test desc",
                    "instructions": "test instr",
                },
            }
        )

    def test_emoji_outputs(self):
        expected_by_framework = {
            # Known frameworks from Agent.EMOJIS
            "beeai": "🐝",
            "crewai": "👥",
            "openai": "🔓",
            "mock": "🤖",
            "remote": "💸",
            # Unknown frameworks (and the empty string) fall back to the default
            "some_new_framework": "⚙️",
            "another_unknown": "⚙️",
            "": "⚙️",
        }

        for framework, expected in expected_by_framework.items():
            with self.subTest(framework=framework):
                actual = self._create_agent(framework).emoji()
                self.assertEqual(
                    actual,
                    expected,
                    f"Emoji for framework '{framework}' should be '{expected}' but got '{actual}'",
                )
48 |
49 |
# Allow running this test module directly (outside of pytest).
if __name__ == "__main__":
    unittest.main()
52 |
--------------------------------------------------------------------------------
/tests/yamls/agents/simple_local_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test1
5 | labels:
6 | app: test-example
7 | spec:
8 |
9 | model: ollama/llama3.1
10 | framework: beeai
11 | mode: local
12 | description: this is a test
13 | tools:
14 | - code_interpreter
15 | - test
16 | instructions: print("this is a test.")
17 |
18 | ---
19 |
20 | apiVersion: maestro/v1alpha1
21 | kind: Agent
22 | metadata:
23 | name: test2
24 | labels:
25 | app: test-example
26 | spec:
27 | model: ollama/llama3.1
28 | mode: local
29 | framework: beeai
30 | description: this is a test
31 | tools:
32 | - code_interpreter
33 | - test
34 | instructions: print("this is a test.")
35 |
36 | ---
37 |
38 | apiVersion: maestro/v1alpha1
39 | kind: Agent
40 | metadata:
41 | name: test3
42 | labels:
43 | app: test-example
44 | spec:
45 | model: ollama/llama3.1
46 | framework: beeai
47 | mode: local
48 | description: this is a test
49 | tools:
50 | - code_interpreter
51 | - test
52 | instructions: print("this is a test.")
53 |
54 | ---
55 |
56 | apiVersion: maestro/v1alpha1
57 | kind: Agent
58 | metadata:
59 | name: test4
60 | labels:
61 | app: test-example
62 | spec:
63 | model: ollama/llama3.1
64 | framework: beeai
65 | mode: local
66 | description: this is a test
67 | tools:
68 | - code_interpreter
69 | - test
70 | instructions: print("this is a test.")
71 |
72 | ---
73 |
74 | apiVersion: maestro/v1alpha1
75 | kind: Agent
76 | metadata:
77 | name: test5
78 | labels:
79 | app: test-example
80 | spec:
81 | model: ollama/llama3.1
82 | framework: beeai
83 | mode: local
84 | description: this is a test
85 | tools:
86 | - code_interpreter
87 | - test
88 | instructions: print("this is a test.")
--------------------------------------------------------------------------------
/tests/yamls/agents/dry_run_paralle_list_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: bee
10 | description: this is a test
11 | tools:
12 | - code_interpreter
13 | - test
14 | instructions: input='[aa,bb,cc]'
15 |
16 | ---
17 |
18 | apiVersion: maestro/v1alpha1
19 | kind: Agent
20 | metadata:
21 | name: test2
22 | labels:
23 | app: test-example
24 | spec:
25 | model: meta-llama/llama-3-1-70b-instruct
26 | framework: bee
27 | description: this is a test
28 | tools:
29 | - code_interpreter
30 | - test
31 | instructions: |
32 | import time
33 | time.sleep(5)
34 |
35 | ---
36 |
37 | apiVersion: maestro/v1alpha1
38 | kind: Agent
39 | metadata:
40 | name: test3
41 | labels:
42 | app: test-example
43 | spec:
44 | model: meta-llama/llama-3-1-70b-instruct
45 | framework: bee
46 | description: this is a test
47 | tools:
48 | - code_interpreter
49 | - test
50 | instructions: |
51 | import time
52 | time.sleep(1)
53 |
54 | ---
55 |
56 | apiVersion: maestro/v1alpha1
57 | kind: Agent
58 | metadata:
59 | name: test4
60 | labels:
61 | app: test-example
62 | spec:
63 | model: meta-llama/llama-3-1-70b-instruct
64 | framework: bee
65 | description: this is a test
66 | tools:
67 | - code_interpreter
68 | - test
69 | instructions: |
70 | import time
71 | time.sleep(3)
72 |
73 | ---
74 |
75 | apiVersion: maestro/v1alpha1
76 | kind: Agent
77 | metadata:
78 | name: test5
79 | labels:
80 | app: test-example
81 | spec:
82 | model: meta-llama/llama-3-1-70b-instruct
83 | framework: bee
84 | description: this is a test
85 | tools:
86 | - code_interpreter
87 | - test
88 | instructions: "print(input)"
--------------------------------------------------------------------------------
/src/maestro/agents/custom_agent.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | from typing import Any
5 |
6 | from maestro.agents.agent import Agent
7 | from maestro.agents.query_agent import QueryAgent
8 | from maestro.agents.slack_agent import SlackAgent
9 | from maestro.agents.scoring_agent import ScoringAgent
10 | from maestro.agents.prompt_agent import PromptAgent
11 |
12 | # adding a custom agent
13 | # 1. add necessary import for the agent
14 | # 2. add the custom agent name and class in the custom_agent map
15 |
16 | # using a custom agent
17 | # 1. set "custom" to "framework"
# 2. set the custom agent name to "metadata.labels.custom_agent"
19 |
# Registry mapping the `metadata.labels.custom_agent` value to the class that
# implements it; CustomAgent below dispatches through this table.
custom_agent = {
    "slack_agent": SlackAgent,
    "scoring_agent": ScoringAgent,
    "prompt_agent": PromptAgent,
    "query_agent": QueryAgent,
}
26 |
27 |
class CustomAgent(Agent):
    """
    Proxy that dispatches to the custom agent named by
    `metadata.labels.custom_agent` in the agent definition.
    """

    def __init__(self, agent_def: dict) -> None:
        """Resolve and instantiate the configured custom agent.

        Raises:
            ValueError: if the custom_agent label is missing or names an
                agent that is not in the `custom_agent` registry.
        """
        super().__init__(agent_def)
        # A missing `labels` mapping is treated the same as a missing label,
        # so callers always get the clear ValueError below rather than a
        # KeyError from the dict lookup.
        labels = agent_def["metadata"].get("labels") or {}
        name = labels.get("custom_agent")
        if not name or name not in custom_agent:
            raise ValueError(f"Unknown custom_agent '{name}'")
        # instantiate the real agent
        self.agent = custom_agent[name](agent_def)

    async def run(self, *args: Any, **kwargs: Any) -> Any:
        """
        Forward any positional or keyword args to the underlying custom agent.
        """
        return await self.agent.run(*args, **kwargs)

    async def run_streaming(self, *args: Any, **kwargs: Any) -> Any:
        """
        Forward any positional or keyword args to the underlying agent's streaming run.
        """
        return await self.agent.run_streaming(*args, **kwargs)
52 |
--------------------------------------------------------------------------------
/tests/yamls/agents/simple_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: beeai
10 | mode: local
11 | description: this is a test
12 | tools:
13 | - code_interpreter
14 | - test
15 | instructions: print("this is a test.")
16 |
17 | ---
18 |
19 | apiVersion: maestro/v1alpha1
20 | kind: Agent
21 | metadata:
22 | name: test2
23 | labels:
24 | app: test-example
25 | spec:
26 | model: meta-llama/llama-3-1-70b-instruct
27 | framework: beeai
28 | mode: local
29 | description: this is a test
30 | tools:
31 | - code_interpreter
32 | - test
33 | instructions: print("this is a test.")
34 |
35 | ---
36 |
37 | apiVersion: maestro/v1alpha1
38 | kind: Agent
39 | metadata:
40 | name: test3
41 | labels:
42 | app: test-example
43 | spec:
44 | model: meta-llama/llama-3-1-70b-instruct
45 | framework: beeai
46 | mode: local
47 | description: this is a test
48 | tools:
49 | - code_interpreter
50 | - test
51 | instructions: print("this is a test.")
52 |
53 | ---
54 |
55 | apiVersion: maestro/v1alpha1
56 | kind: Agent
57 | metadata:
58 | name: test4
59 | labels:
60 | app: test-example
61 | spec:
62 | model: meta-llama/llama-3-1-70b-instruct
63 | framework: beeai
64 | mode: local
65 | description: this is a test
66 | tools:
67 | - code_interpreter
68 | - test
69 | instructions: print("this is a test.")
70 |
71 | ---
72 |
73 | apiVersion: maestro/v1alpha1
74 | kind: Agent
75 | metadata:
76 | name: test5
77 | labels:
78 | app: test-example
79 | spec:
80 | model: meta-llama/llama-3-1-70b-instruct
81 | framework: beeai
82 | mode: local
83 | description: this is a test
84 | tools:
85 | - code_interpreter
86 | - test
87 | instructions: print("this is a test.")
--------------------------------------------------------------------------------
/tests/yamls/agents/dry_run_paralle_agent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: test1
5 | labels:
6 | app: test-example
7 | spec:
8 | model: meta-llama/llama-3-1-70b-instruct
9 | framework: bee
10 | description: this is a test
11 | tools:
12 | - code_interpreter
13 | - test
14 | instructions: "print(5)"
15 |
16 | ---
17 |
18 | apiVersion: maestro/v1alpha1
19 | kind: Agent
20 | metadata:
21 | name: test2
22 | labels:
23 | app: test-example
24 | spec:
25 | model: meta-llama/llama-3-1-70b-instruct
26 | framework: bee
27 | description: this is a test
28 | tools:
29 | - code_interpreter
30 | - test
31 | instructions: |
32 | import time
33 | time.sleep(5)
34 | input = "test2"
35 |
36 | ---
37 |
38 | apiVersion: maestro/v1alpha1
39 | kind: Agent
40 | metadata:
41 | name: test3
42 | labels:
43 | app: test-example
44 | spec:
45 | model: meta-llama/llama-3-1-70b-instruct
46 | framework: bee
47 | description: this is a test
48 | tools:
49 | - code_interpreter
50 | - test
51 | instructions: |
52 | import time
53 | time.sleep(1)
54 | input = "test3"
55 |
56 | ---
57 |
58 | apiVersion: maestro/v1alpha1
59 | kind: Agent
60 | metadata:
61 | name: test4
62 | labels:
63 | app: test-example
64 | spec:
65 | model: meta-llama/llama-3-1-70b-instruct
66 | framework: bee
67 | description: this is a test
68 | tools:
69 | - code_interpreter
70 | - test
71 | instructions: |
72 | import time
73 | time.sleep(3)
74 | input = "test4"
75 |
76 | ---
77 |
78 | apiVersion: maestro/v1alpha1
79 | kind: Agent
80 | metadata:
81 | name: test5
82 | labels:
83 | app: test-example
84 | spec:
85 | model: meta-llama/llama-3-1-70b-instruct
86 | framework: bee
87 | description: this is a test
88 | tools:
89 | - code_interpreter
90 | - test
91 | instructions: "print(input)"
--------------------------------------------------------------------------------
/src/maestro/logging_hooks.py:
--------------------------------------------------------------------------------
import functools
import time
from datetime import datetime, UTC

from maestro.file_logger import FileLogger
4 |
5 | logger = FileLogger()
6 |
7 |
8 | def log_agent_run(workflow_id, agent_name, agent_model):
9 | def decorator(run_func):
10 | async def wrapper(*args, **kwargs):
11 | step_index = kwargs.pop("step_index", None)
12 | if step_index is None:
13 | raise ValueError("Missing step_index for logging.")
14 |
15 | perf_start = time.perf_counter()
16 | start_time = datetime.now(UTC)
17 |
18 | result = await run_func(*args, **kwargs)
19 |
20 | end_time = datetime.now(UTC)
21 | perf_end = time.perf_counter()
22 | execution_time = perf_end - perf_start
23 |
24 | input_text = ""
25 | if len(args) > 0:
26 | input_text = args[0]
27 | token_usage = None
28 | if hasattr(run_func.__self__, "get_token_usage"):
29 | token_usage = run_func.__self__.get_token_usage()
30 |
31 | logger.log_agent_response(
32 | workflow_id=workflow_id,
33 | step_index=step_index,
34 | agent_name=agent_name,
35 | model=agent_model,
36 | input_text=input_text,
37 | response_text=result,
38 | tool_used=None,
39 | start_time=start_time,
40 | end_time=end_time,
41 | duration_ms=int(execution_time * 1000),
42 | token_usage=token_usage,
43 | )
44 | if hasattr(run_func.__self__, "_workflow_instance"):
45 | run_func.__self__._workflow_instance._track_agent_execution_time(
46 | agent_name, execution_time
47 | )
48 |
49 | return result
50 |
51 | return wrapper
52 |
53 | return decorator
54 |
--------------------------------------------------------------------------------
/k8s/config/crd/bases/maestro.ai4quantum.com_remotemcpservers.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: remotemcpservers.maestro.ai4quantum.com
6 | spec:
7 | group: maestro.ai4quantum.com
8 | names:
9 | kind: RemoteMCPServer
10 | singular: remotemcpserver
11 | plural: remotemcpservers
12 | shortNames:
13 | - rmcps
14 | scope: Namespaced
15 | versions:
16 | - name: v1alpha1
17 | served: true
18 | storage: true
19 | schema:
20 | openAPIV3Schema:
21 | type: object
22 | properties:
23 | apiVersion:
24 | type: string
25 | description: '`apiVersion` of the resource (e.g. maestro.ai4quantum.com/v1alpha1)'
26 | kind:
27 | type: string
28 | description: '`kind` of the resource (always RemoteMCPServer)'
29 | metadata:
30 | type: object
31 | spec:
32 | type: object
33 | description: Desired state of the RemoteMCPServer
34 | properties:
35 | url:
36 | type: string
37 | description: The server URL
38 | transport:
39 | type: string
40 | description: The protocol type used (e.g., "sse", "streamable-http", "stdio")
41 | configmapName:
42 | type: string
43 | description: Name of the ConfigMap containing configuration data
44 | secretName:
45 | type: string
46 | description: Name of the Secret containing sensitive data
47 | name:
48 | type: string
49 | description: Friendly name of the server
50 | description:
51 | type: string
          description: Human-readable description
53 |
--------------------------------------------------------------------------------
/tests/yamls/agents/multi_agents_parallel.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: beeaiagent
5 | labels:
6 | app: multi-agent
7 | spec:
8 | model: llama3.1
9 | framework: beeai
10 | mode: local
11 | description: beeai agent
12 | tools:
13 | - code_interpreter
14 | instructions: |
15 | You are playing a guessing game. One person has a number between 1 and 10 in mind. You guess the number.
    Output format is {"BeeAI=" + guessed number}
17 |
18 | ---
19 |
20 | apiVersion: maestro/v1alpha1
21 | kind: Agent
22 | metadata:
23 | name: Generic_Crew
24 | labels:
25 | app: multi-agent
    crew_role: "guessing game player"
27 | crew_goal: "guess the number and output it"
28 | crew_backstory: "I am a guess game player. I guess a number between 1 and 10."
29 | crew_description: "You are playing a guessing game. One person has a number between 1 and 10 in mind. You guess the number."
30 | crew_expected_output: "your answer format is 'CrewAI=' + guessed number."
31 | spec:
32 | model: "ollama/llama3.1"
33 | url: "http://localhost:11434"
34 | description: crewai agent
35 | instructions: dummy
36 | framework: crewai
37 |
38 | ---
39 |
40 | apiVersion: maestro/v1alpha1
41 | kind: Agent
42 | metadata:
43 | name: JudgeAgent
44 | labels:
45 | app: multi-agent
46 | spec:
47 | model: llama3.1
48 | framework: beeai
49 | mode: local
50 | description: beeai agent
51 | tools:
52 | - code_interpreter
53 | instructions: |
    You are a judge of a guess game. You generate a random number between 1 and 10
55 | You can use the code interpreter tools to generate the number.
56 | The players guess numbers are given in the prompt or input.
57 | The format of the prompt is a list of "name=guess number".
    Find a winner who guessed the closest number to the number you generated.
59 | Output format is {Number: you generated, Winner: winner and its guess number}
60 |
--------------------------------------------------------------------------------
/tests/agents/test_agent_factory.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import unittest
7 |
8 | from typing import Callable
9 |
10 | from unittest import TestCase
11 |
12 | from maestro.agents.agent_factory import AgentFramework, AgentFactory
13 |
14 |
class TestAgentFramework(TestCase):
    """Every supported framework constant must exist on AgentFramework."""

    def test_frameworks(self):
        for framework in (
            AgentFramework.BEEAI,
            AgentFramework.CREWAI,
            AgentFramework.OPENAI,
            AgentFramework.MOCK,
            AgentFramework.REMOTE,
        ):
            self.assertIsNotNone(framework)
22 |
23 |
class TestAgentFactory(TestCase):
    """AgentFactory must expose a callable creator and a factory per framework."""

    def test_create_agents(self):
        for framework in (
            AgentFramework.BEEAI,
            AgentFramework.CREWAI,
            AgentFramework.OPENAI,
            AgentFramework.MOCK,
            AgentFramework.REMOTE,
        ):
            self.assertIsInstance(AgentFactory.create_agent(framework), Callable)

    def test_get_factory(self):
        for framework in (
            AgentFramework.BEEAI,
            AgentFramework.CREWAI,
            AgentFramework.OPENAI,
            AgentFramework.MOCK,
            AgentFramework.REMOTE,
        ):
            self.assertIsNotNone(AgentFactory.get_factory(framework))
48 |
49 |
# Allow running this test module directly (outside of pytest).
if __name__ == "__main__":
    unittest.main()
52 |
--------------------------------------------------------------------------------
/tests/workflow/test_exception.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 |
8 | import pytest
9 | import yaml
10 | import sys
11 | import io
12 | import unittest
13 | from unittest import TestCase
14 | from maestro.workflow import Workflow
15 |
16 | import asyncio
17 |
18 |
def parse_yaml(file_path):
    """Load every YAML document from *file_path* into a list."""
    with open(file_path, "r") as stream:
        return list(yaml.safe_load_all(stream))
23 |
24 |
25 | # `exception` tests
class TestException(TestCase):
    """Verify workflow exception handling and the missing-agent error path."""

    def tearDown(self):
        self.workflow = None

    def test_exception(self):
        """A step failure routes execution to the exception agent (test4)."""
        self.agents_yaml = parse_yaml(
            os.path.join(os.path.dirname(__file__), "../yamls/agents/simple_agent.yaml")
        )
        self.workflow_yaml = parse_yaml(
            os.path.join(
                os.path.dirname(__file__), "../yamls/workflows/exception_workflow.yaml"
            )
        )
        self.workflow = Workflow(self.agents_yaml, self.workflow_yaml[0])
        # Capture stdout while the workflow runs, and always restore it: the
        # previous version left sys.stdout redirected permanently, swallowing
        # output from every later test in the session.
        output = io.StringIO()
        saved_stdout = sys.stdout
        sys.stdout = output
        try:
            asyncio.run(self.workflow.run())
        finally:
            sys.stdout = saved_stdout
        assert "Running test4..." in output.getvalue()

    def test_exception_no_exception(self):
        """Referencing an undefined exception agent raises a clear error."""
        self.agents_yaml = parse_yaml(
            os.path.join(os.path.dirname(__file__), "../yamls/agents/simple_agent.yaml")
        )
        self.workflow_yaml = parse_yaml(
            os.path.join(
                os.path.dirname(__file__),
                "../yamls/workflows/exception_no_exception_workflow.yaml",
            )
        )
        self.workflow = Workflow(self.agents_yaml, self.workflow_yaml[0])
        with pytest.raises(Exception) as exc_info:
            asyncio.run(self.workflow.run())

        assert "Could not find agent named" in str(exc_info.value)
60 |
61 |
62 | if __name__ == "__main__":
63 | unittest.main()
64 |
--------------------------------------------------------------------------------
/src/maestro/agents/query_agent.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from fastmcp import Client
4 | from jinja2 import Template
5 |
6 | from maestro.agents.agent import Agent
7 |
8 |
class QueryAgent(Agent):
    """Agent that answers a prompt by querying a vector database via MCP.

    It calls the MCP server's ``search`` tool with the configured database,
    collection and result limit, then renders the matching documents through
    the agent's output template.
    """

    def __init__(self, agent_def: dict) -> None:
        """Read query configuration from the agent definition.

        Args:
            agent_def: Parsed agent YAML. Must contain
                ``metadata.query_input.db_name``; ``collection_name`` and
                ``limit`` are optional (defaults: "MaestroDocs", 10).
        """
        super().__init__(agent_def)
        self.db_name = agent_def["metadata"]["query_input"]["db_name"]
        self.collection_name = agent_def["metadata"]["query_input"].get(
            "collection_name", "MaestroDocs"
        )
        self.limit = agent_def["metadata"]["query_input"].get("limit", 10)
        # Jinja template shaping the final answer; falls back to the raw
        # search result when the agent defines no output template.
        self.output_template = Template(self.agent_output or "{{result}}")

    async def run(self, prompt: str, context=None, step_index=None) -> str:
        """Query the vector DB with *prompt* and return the rendered answer.

        Args:
            prompt: Query text forwarded to the MCP ``search`` tool.
            context: Unused; kept for interface parity with other agents.
            step_index: Unused; kept for interface parity with other agents.

        Returns:
            The rendered template when the tool returns JSON, otherwise the
            tool's raw payload (assumed to be an error message).
        """
        self.print(f"Running {self.agent_name} with prompt...")

        # Default to a local MCP endpoint when the agent spec gives no URL.
        async with Client(
            self.agent_url or "http://localhost:8030/mcp/", timeout=30
        ) as client:
            self.print(f"Querying vector database '{self.db_name}'...")
            params = {
                "input": {
                    "db_name": self.db_name,
                    "query": prompt,
                    "limit": self.limit,
                    "collection_name": self.collection_name,
                }
            }
            tool_result = await client.call_tool("search", params)

            try:
                # Expect a JSON list of documents, each carrying a "text" field.
                output = "\n\n".join(
                    [doc["text"] for doc in json.loads(tool_result.data)]
                )

                answer = self.output_template.render(result=output, prompt=prompt)

                self.print(f"Response from {self.agent_name}: {answer}\n")

                return answer
            except json.JSONDecodeError:
                # Non-JSON payload: surface the raw data so the caller can
                # see what the server actually returned.
                self.print(f"ERROR [QueryAgent {self.agent_name}]: {tool_result.data}")
                return tool_result.data

    async def run_streaming(self, prompt: str) -> str:
        """Streaming is not supported; delegate to the blocking run()."""
        return await self.run(prompt)
52 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "maestro"
3 | version = "0.9.0"
4 | description = "A multi-agent platform with the vision to facilitate deploy and run AI agents."
5 | authors = [
6 | {name = "IBM"}
7 | ]
8 | license = "Apache-2.0"
9 | readme = "README.md"
10 | requires-python = ">=3.11,<3.14"
11 | dependencies = [
12 | "python-dotenv>=1.0.1",
13 | "openai>=1.76.2,<1.99",
14 | "pyyaml>=6.0.2",
15 | "jsonschema>=4.23.0",
16 | "docopt-ng>=0.9.0",
17 | "langchain-community>=0.3.16",
18 | "psutil>=7.0.0",
19 | "openapi>=2.0.0",
20 | "openai-agents[litellm]>=0.0.14",
21 | "pycron>=3.1.2",
22 | "beeai-framework>=0.1.31",
23 | "tiktoken>=0.5.0",
24 | "slack_sdk>=3.35.0",
25 | "nest-asyncio>=1.6.0",
26 | "pydantic-ai[logfire]>=0.1.8,<1.0.0",
27 | "opik>=1.7.22",
28 | "fastapi>=0.104.0",
29 | "uvicorn[standard]>=0.24.0",
30 | "dspy>=2.6.27",
31 | "kubernetes>=33.1.0",
32 | "ddgs>=9.4.0",
33 | "fastmcp>=1.0",
34 | "jinja2>=3.1.6",
35 | "crewai>=0.134.0",
36 | "anyio>=4.10.0",
37 | "pytest-asyncio>=1.2.0",
38 | "pytest-tornasync>=0.6.0.post2",
39 | "pytest-trio>=0.8.0",
40 | "pytest-twisted>=1.14.3",
41 | "twisted>=25.5.0",
42 | "argparse>=1.4.0",
43 | "pandas>=2.0.0",
44 | ]
45 |
46 | [dependency-groups]
47 | dev = [
48 | "python-dotenv>=1.1.0",
49 | "ruff>=0.12.0",
50 | "pytest>=8.3.4",
51 | "pytest-asyncio>=1.2.0",
52 | "pytest-mock>=3.14.0",
53 | "pre-commit>=4.2.0"
54 | ]
55 |
56 | [project.scripts]
57 | maestro = "maestro.cli.run_maestro:__run_cli"
58 |
59 | [tool.uv]
60 | package = true
61 |
62 | [build-system]
63 | requires = ["setuptools>=61.0", "wheel"]
64 | build-backend = "build_backend"
65 | backend-path = ["."]
66 |
67 | [tool.setuptools]
68 |
69 | [tool.setuptools.packages.find]
70 | where = ["src"]
71 | include = ["maestro*"]
72 |
73 | [tool.setuptools.package-data]
74 | "maestro" = ["images/*.png", "schemas/*.json", "ui/dist/**/*", "ui/dist/assets/*"]
75 |
76 | [tool.pytest.ini_options]
77 | addopts = "-v -s --ignore=framework"
78 |
--------------------------------------------------------------------------------
/deployments/maestro.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Maestro
5 |
6 |
7 |
26 |
27 |
28 |
29 | Agent definitions
30 |
31 |
32 |
41 |
42 |
43 |
44 | {{ diagram }}
45 |
46 |
47 |
48 |
53 |
54 |
66 |
67 |
--------------------------------------------------------------------------------
/src/maestro/ui/src/index.css:
--------------------------------------------------------------------------------
1 | :root {
2 | font-family: system-ui, Avenir, Helvetica, Arial, sans-serif;
3 | line-height: 1.5;
4 | font-weight: 400;
5 |
6 | color-scheme: light dark;
7 | color: rgba(255, 255, 255, 0.87);
8 | background-color: #242424;
9 |
10 | font-synthesis: none;
11 | text-rendering: optimizeLegibility;
12 | -webkit-font-smoothing: antialiased;
13 | -moz-osx-font-smoothing: grayscale;
14 | }
15 |
16 | a {
17 | font-weight: 500;
18 | color: #646cff;
19 | text-decoration: inherit;
20 | }
21 | a:hover {
22 | color: #535bf2;
23 | }
24 |
25 | body {
26 | margin: 0;
27 | min-width: 320px;
28 | min-height: 100vh;
29 | overflow: hidden;
30 | background-color: #ffffff;
31 | transition: background-color 0.2s ease;
32 | }
33 |
34 | h1 {
35 | font-size: 3.2em;
36 | line-height: 1.1;
37 | }
38 |
39 | button {
40 | border-radius: 8px;
41 | border: 1px solid transparent;
42 | padding: 0.6em 1.2em;
43 | font-size: 1em;
44 | font-weight: 500;
45 | font-family: inherit;
46 | background-color: #1a1a1a;
47 | cursor: pointer;
48 | transition: all 0.2s ease;
49 | }
50 | button:hover {
51 | opacity: 0.9;
52 | transform: translateY(-1px);
53 | }
54 | button:active {
55 | transform: translateY(0);
56 | }
57 | button:focus,
58 | button:focus-visible {
59 | outline: 2px solid #007AFF;
60 | outline-offset: 2px;
61 | }
62 |
63 | input, textarea {
64 | font-family: inherit;
65 | transition: border-color 0.2s ease;
66 | }
67 | input:focus, textarea:focus {
68 | border-color: #007AFF !important;
69 | outline: none;
70 | }
71 |
72 | textarea:disabled {
73 | cursor: not-allowed;
74 | background-color: #f5f5f5;
75 | }
76 |
77 | .dark-mode body {
78 | background-color: #1a1a1a;
79 | }
80 |
81 | @media (prefers-color-scheme: light) {
82 | :root {
83 | color: #213547;
84 | background-color: #ffffff;
85 | }
86 | a:hover {
87 | color: #747bff;
88 | }
89 | button {
90 | background-color: #f9f9f9;
91 | }
92 | }
93 |
94 | @media (prefers-color-scheme: dark) {
95 | body {
96 | background-color: #1a1a1a;
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/src/maestro/agents/meta_agent/test.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Validate agents.yaml and dry-run every workflow*.yaml in a meta-agent
# directory. Runs in "loose mode": validation or dry-run failures only warn.

META_AGENT_DIR="${1:-$(cd "$(dirname "$0")" && pwd)}"
echo "📂 Running tests for: $META_AGENT_DIR"
bash "$(dirname "$0")/doctor.sh" || { echo "❌ Environment check failed"; exit 1; }
AGENTS_YAML=$(find "$META_AGENT_DIR" -maxdepth 1 -type f -name "agents.yaml")
# mapfile avoids the word-splitting of WORKFLOW_FILES=($(find ...)), which
# breaks paths containing spaces.
mapfile -t WORKFLOW_FILES < <(find "$META_AGENT_DIR" -maxdepth 1 -type f -name "workflow*.yaml")

if [[ -z "$AGENTS_YAML" ]]; then
    echo "❌ Error: Missing agents.yaml in $META_AGENT_DIR"
    exit 1
fi

if [[ ${#WORKFLOW_FILES[@]} -eq 0 ]]; then
    echo "❌ Error: No workflow YAML files found in $META_AGENT_DIR"
    exit 1
fi

SCHEMA_DIR="$(cd "$(dirname "$0")/../../../../schemas" && pwd)"

if [[ ! -d "$SCHEMA_DIR" ]]; then
    echo "❌ Error: Could not find schemas/ directory"
    exit 1
fi

AGENT_SCHEMA_PATH="$SCHEMA_DIR/agent_schema.json"
WORKFLOW_SCHEMA_PATH="$SCHEMA_DIR/workflow_schema.json"

echo "🔍 Detected schema directory: $SCHEMA_DIR"
echo "🔍 Using schema file: $AGENT_SCHEMA_PATH"
echo "🔍 Using schema file: $WORKFLOW_SCHEMA_PATH"

echo "📝 Validating $AGENTS_YAML..."
if ! uv run maestro validate "$AGENT_SCHEMA_PATH" "$AGENTS_YAML"; then
    echo "⚠️ Warning: agents.yaml failed validation, but continuing in loose mode."
fi

for WORKFLOW_YAML in "${WORKFLOW_FILES[@]}"; do
    echo "📝 Validating $WORKFLOW_YAML..."
    if ! uv run maestro validate "$WORKFLOW_SCHEMA_PATH" "$WORKFLOW_YAML"; then
        echo "⚠️ Warning: $WORKFLOW_YAML failed validation, but continuing in loose mode."
    fi

    echo "🧪 Running workflow in dry-run mode for $WORKFLOW_YAML..."
    # Pipe an empty line so any interactive prompt in the workflow is satisfied.
    if ! echo "" | uv run maestro run --dry-run "$AGENTS_YAML" "$WORKFLOW_YAML"; then
        echo "⚠️ Warning: Workflow test failed for $WORKFLOW_YAML in loose mode."
    else
        echo "✅ Workflow dry-run succeeded for $WORKFLOW_YAML!"
    fi
done

echo "✅ All meta-agent workflow tests completed (loose mode) for $META_AGENT_DIR!"
53 |
--------------------------------------------------------------------------------
/deployments/Dockerfile:
--------------------------------------------------------------------------------
ARG MICRO_IMAGE_DIR=/ubi-micro-img

# BASE image using UBI 9 micro where the
# application and requirements will be installed
FROM registry.access.redhat.com/ubi9-micro:9.4-15 AS base

# BUILD image using UBI 9 where the dependencies that
# require installing with a package manager will be installed
FROM registry.access.redhat.com/ubi9:9.4-1214.1726694543 AS build
ARG MICRO_IMAGE_DIR

# Copy the BASE image into the BUILD image
RUN mkdir ${MICRO_IMAGE_DIR}
COPY --from=base / ${MICRO_IMAGE_DIR}

# Install Python inside the BASE image
RUN dnf install --installroot ${MICRO_IMAGE_DIR} --nodocs -y \
    python3.12 \
    python3.12-devel \
    libstdc++ &&\
    dnf upgrade --installroot ${MICRO_IMAGE_DIR} --nodocs -y && \
    dnf clean all --installroot ${MICRO_IMAGE_DIR}

# APP image from `scratch` which will be the final image
# and remaining application requirements will be installed
FROM scratch AS app
ARG MICRO_IMAGE_DIR
COPY --from=build ${MICRO_IMAGE_DIR}/ .

# create symlink for python
# FIX: the image installs python3.12 (see dnf/pip commands); the previous
# target /usr/bin/python3.11 does not exist and left `python` dangling.
RUN ln -s /usr/bin/python3.12 /usr/bin/python

# Create project dir
WORKDIR /usr/src/app

# set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Install pip
RUN python3.12 -m ensurepip --upgrade
# Install dependencies and update then uninstall pip (not needed in final image)
RUN python3.12 -m pip install openai pyyaml python-dotenv requests flask --no-cache-dir --upgrade && \
    python3.12 -m pip uninstall -y pip

COPY maestro ./src
COPY tmp/entrypoint_api.sh .
COPY tmp/api.py .
COPY tmp/maestro.html ./templates/index.html
COPY tmp/agents.yaml ./src/agents.yaml
COPY tmp/workflow.yaml ./src/workflow.yaml
COPY tmp/workflow.yaml ./static/workflow.yaml
COPY tmp/agents.yaml ./static/agents.yaml

RUN chown -R 1000:100 /usr/src/app &&\
    mkdir /usr/src/app/media && chown 1000:100 /usr/src/app/media

EXPOSE 5000
USER 1000:100
ENTRYPOINT ["/usr/src/app/entrypoint_api.sh"]
--------------------------------------------------------------------------------
/tests/yamls/tools/openmeteo_tools.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Tool
3 | metadata:
4 | name: OpenMeteoTool
5 | labels:
6 | app: tool-example
7 | spec:
8 | description: Retrieve current, past, or future weather forecasts for a location.
9 | inputSchema:
10 | type: jsonSchema
11 | schema: |
12 | {
13 | "title": "OpenMeteoToolInput",
14 | "type": "object",
15 | "properties": {
16 | "location_name": {
17 | "type": "string",
18 | "description": "The name of the location to retrieve weather information."
19 | },
20 | "country": {
21 | "type": ["string", "null"],
22 | "description": "Country name.",
23 | "default": null
24 | },
25 | "start_date": {
26 | "type": "string",
27 | "format": "date",
28 | "description": "Start date for the weather forecast in the format YYYY-MM-DD (UTC)",
29 | "default": null
30 | },
31 | "end_date": {
32 | "type": "string",
33 | "format": "date",
34 | "description": "End date for the weather forecast in the format YYYY-MM-DD (UTC)",
35 | "default": null
36 | },
37 | "temperature_unit": {
38 | "type": "string",
39 | "enum": ["celsius", "fahrenheit"],
40 | "description": "The unit to express temperature",
41 | "default": "celsius",
42 | "additionalProperties": false,
43 | "before": [
44 | "to_lower"
45 | ]
46 | }
47 | },
48 | "additionalProperties": false,
49 | "required": ["location_name", "temperature_unit"],
50 | "field_validators": {
51 | "to_lower": {
52 | "cls": "OpenMeteoToolInput",
53 | "mode": "before",
54 | "function": "def _to_lower(cls, value): if isinstance(value, str): return value.lower(); else: return value"
55 | }
56 | }
57 | }
58 | outputSchema:
59 | type: jsonSchema
60 | schema: |
      {
        "response": "string"
      }
64 |
65 |
--------------------------------------------------------------------------------
/src/maestro/utils.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # SPDX-License-Identifier: Apache-2.0
3 | from typing import Dict, Any
4 |
5 |
def eval_expression(expression, prompt):
    """
    Evaluate an expression with a given prompt.

    Args:
        expression (str): The expression to evaluate.
        prompt: The value bound to `input` when evaluating.
    Returns:
        The result of evaluating the expression.
    """
    # SECURITY: eval() executes arbitrary Python. `expression` must come from
    # trusted workflow definitions only — never from untrusted user input.
    # `local` is passed as the globals mapping so the expression can refer to
    # `input` (the prompt).
    local = {"input": prompt}
    return eval(expression, local)
18 |
19 |
def convert_to_list(s):
    """Split a bracketed list string like "[a,b,c]" into ["a", "b", "c"].

    Args:
        s (str): A string wrapped in square brackets with comma-separated items.

    Returns:
        list[str]: The comma-separated items (whitespace is preserved).

    Raises:
        ValueError: If *s* is empty or not wrapped in square brackets.
    """
    # Guard empty input explicitly: indexing s[0]/s[-1] on "" raised an
    # IndexError instead of the intended ValueError.
    if not s or s[0] != "[" or s[-1] != "]":
        raise ValueError("parallel or loop prompt is not a list string")
    return s[1:-1].split(",")
25 |
26 |
def aggregate_token_usage_from_agents(agents: Dict[str, Any]) -> Dict[str, Any]:
    """
    Aggregate token usage from all agents in a workflow.

    Args:
        agents: Dictionary of agent_name -> agent_instance

    Returns:
        Dictionary containing aggregated token usage:
        - total_prompt_tokens: Sum of all prompt tokens
        - total_response_tokens: Sum of all response tokens
        - total_tokens: Sum of all total tokens
        - agent_token_usage: Individual token usage per agent
    """
    totals: Dict[str, Any] = {
        "total_prompt_tokens": 0,
        "total_response_tokens": 0,
        "total_tokens": 0,
        "agent_token_usage": {},
    }

    for name, instance in agents.items():
        # Agents without token accounting are skipped entirely.
        if not hasattr(instance, "get_token_usage"):
            continue
        usage = instance.get_token_usage()
        totals["agent_token_usage"][name] = usage
        # Only fold into the grand totals when per-prompt counts exist.
        if "prompt_tokens" not in usage:
            continue
        totals["total_prompt_tokens"] += usage.get("prompt_tokens", 0)
        totals["total_response_tokens"] += usage.get("response_tokens", 0)
        totals["total_tokens"] += usage.get("total_tokens", 0)

    return totals
62 |
--------------------------------------------------------------------------------
/tests/agents/test_scoring_agent.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | import asyncio
5 | import pytest
6 | import litellm
7 |
8 | from maestro.agents.scoring_agent import ScoringAgent
9 | from opik.evaluation.metrics import AnswerRelevance, Hallucination
10 |
11 |
@pytest.fixture(autouse=True)
def patch_litellm_provider(monkeypatch):
    """Force litellm to treat every model name as an OpenAI-provider model.

    Applied automatically to all tests in this module so provider resolution
    never performs real lookups for fake model names such as "qwen3:latest".
    The 4-tuple mirrors litellm.get_llm_provider's
    (model, provider, api_key, api_base) return shape.
    """
    monkeypatch.setattr(
        litellm,
        "get_llm_provider",
        lambda model_name, **kwargs: (model_name, "openai", None, None),
    )
19 |
20 |
def test_metrics_agent_run_with_context(monkeypatch):
    """ScoringAgent.run must score with the provided context and print a summary."""
    # Records the `context` each metric was called with, so we can assert
    # the agent forwarded the caller's context object unchanged.
    seen = {"relevance": None, "hallucination": None}

    class DummyScore:
        # Minimal stand-in for an opik ScoreResult: value/reason/metadata.
        def __init__(self, value):
            self.value = value
            self.reason = "Test reason"
            self.metadata = {}

    def fake_rel(self, input, output, context):
        seen["relevance"] = context
        return DummyScore(0.50)

    def fake_hall(self, input, output, context):
        seen["hallucination"] = context
        return DummyScore(0.20)

    # Patch the opik metric classes so no model calls happen.
    monkeypatch.setattr(AnswerRelevance, "score", fake_rel)
    monkeypatch.setattr(Hallucination, "score", fake_hall)

    # Capture the agent's console output instead of printing it.
    printed = []
    monkeypatch.setattr(ScoringAgent, "print", lambda self, msg: printed.append(msg))

    agent_def = {
        "metadata": {"name": "metrics_agent", "labels": {}},
        "spec": {
            "framework": "custom",
            "model": "qwen3:latest",
            "description": "desc",
            "instructions": "instr",
        },
    }
    agent = ScoringAgent(agent_def)
    prompt = "What is the capital of France?"
    response = "Lyon"
    context = ["Paris is the capital of France."]
    out = asyncio.run(agent.run(prompt, response, context=context))

    assert isinstance(out, dict)
    # NOTE(review): the agent appears to echo the *response* under the
    # "prompt" key — confirm against ScoringAgent.run.
    assert out["prompt"] == response
    assert "scoring_metrics" in out

    # Identity check: the exact context list must be passed to both metrics.
    assert seen["relevance"] is context
    assert seen["hallucination"] is context

    # The single printed line combines the response with both metric values.
    assert len(printed) == 1
    assert printed[0] == "Lyon\n[relevance: 0.50, hallucination: 0.20]"
68 |
--------------------------------------------------------------------------------
/src/maestro/ui/README.md:
--------------------------------------------------------------------------------
1 | # Maestro Node UI
2 |
3 | A React/TypeScript frontend for the Maestro workflow system.
4 |
5 | ## Prerequisites
6 |
- Python 3.11+ (matching `requires-python` in pyproject.toml)
8 | - Node.js and npm
9 | - Git (to clone the repository)
10 |
11 | ## Installation
12 |
13 | 1. **Install Maestro:**
14 | ```bash
15 | pip install -e .
16 | ```
17 |
18 | 2. **Install UI dependencies:**
19 | ```bash
20 | cd src/maestro/ui
21 | npm install
22 | ```
23 |
24 | 3. **Configure environment** (for full functionality):
25 | ```bash
26 | export OPENAI_API_KEY=your_api_key_here
27 | ```
28 |
29 | ## Quick Start
30 |
31 | 1. **Deploy with sample files:**
32 | ```bash
33 | maestro deploy tests/yamls/agents/openai_agent.yaml tests/yamls/workflows/openai_mcp_workflow.yaml --node-ui
34 | ```
35 |
36 | 2. **Open in browser:**
37 | http://localhost:5173
38 |
39 | 3. **Stop servers:**
40 | ```bash
41 | maestro clean
42 | ```
43 |
44 | ## Development
45 |
46 | For custom agent and workflow files:
47 |
48 | ```bash
49 | maestro deploy your-agents.yaml your-workflow.yaml --node-ui
50 | ```
51 |
52 | ## Production (Docker)
53 |
54 | ### Backend API:
55 | ```bash
56 | maestro deploy agents.yaml workflow.yaml --docker
57 | ```
58 | - API available at `http://localhost:5000`
59 |
60 | ### UI (Optional):
61 | ```bash
62 | cd src/maestro/ui
63 | docker build -t maestro-ui:dev .
64 | docker run -p 8080:80 maestro-ui:dev
65 | ```
66 | - UI available at `http://localhost:8080`
67 |
68 | ### Stop:
69 | ```bash
70 | maestro clean
71 | docker stop $(docker ps -q --filter ancestor=maestro-ui:dev)
72 | ```
73 |
74 | ## API Endpoints
75 |
76 | The backend provides the following endpoints:
77 |
78 | - `POST /chat` - Send chat messages
79 | - `POST /chat/stream` - Stream chat responses
80 | - `GET /health` - Health check
81 | - `GET /diagram` - Get workflow diagram
82 |
83 | ## Notes
84 |
85 | - The `maestro clean` command dynamically cleans up all Maestro-related processes including FastAPI servers, Vite dev servers, and Docker containers
86 | - For CORS configuration, the system automatically sets `CORS_ALLOW_ORIGINS=http://localhost:5173` when using `--node-ui`
87 | - The `--node-ui` flag automatically starts both the FastAPI backend and the Vite frontend development server
--------------------------------------------------------------------------------
/tests/yamls/agents/multi_agents_crew2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: maestro/v1alpha1
2 | kind: Agent
3 | metadata:
4 | name: beeaiagent
5 | labels:
6 | app: multi-agent
7 | spec:
8 | model: llama3.1
9 | framework: beeai
10 | mode: local
11 | description: beeai agent
12 | tools:
13 | - code_interpreter
14 | instructions: |
15 | You are playing a guessing game. One person has a number between 1 and 10 in mind. You guess the number.
16 | Output format is {"BeeAI=" + guessed number'}
17 |
18 | ---
19 |
20 | apiVersion: maestro/v1alpha1
21 | kind: Agent
22 | metadata:
23 | name: Generic_Crew
24 | labels:
25 | app: multi-agent
    crew_role: "guessing game player"
27 | crew_goal: "guess the number and output it"
28 | crew_backstory: "I am a guess game player. I guess a number between 1 and 10. You are given the guess numbers from the other players in the prompt. The format in the prompt is a list of 'name=guess'. Output your guess along with them."
29 | crew_description: "You are playing a guessing game. One person has a number between 1 and 10 in mind. You guess the number."
30 | crew_expected_output: "your answer format is 'CrewAI=' + guessed number {prompt} are the guess numbers from the other players. The format of guesses is a list of 'name=guess'. Output your answer along with them for the next player."
31 | spec:
32 | model: "ollama/llama3.1"
33 | url: "http://localhost:11434"
34 | description: crewai agent
35 | instructions: dummy
36 | framework: crewai
37 |
38 | ---
39 |
40 | apiVersion: maestro/v1alpha1
41 | kind: Agent
42 | metadata:
43 | name: JudgeAgent
44 | labels:
45 | app: multi-agent
46 | spec:
47 | model: llama3.1
48 | framework: beeai
49 | mode: local
50 | description: beeai agent
51 | tools:
52 | - code_interpreter
53 | instructions: |
    You are a judge of a guess game. You generate a random number between 1 and 10
55 | You can use the code interpreter tools to generate the number.
56 | The players guess numbers are given in the prompt or input.
57 | The format of the prompt is a list of "name=guess number".
    Find a winner who guessed the closest number to the number you generated.
59 | Output format is {Number: you generated, Winner: winner and its guess number}
60 |
--------------------------------------------------------------------------------
/tools/update_readmes.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# update_all_readme.sh
#
# This script searches the demos folder (a sibling of the tools folder)
# for directories containing both a workflow.yaml and a README.md.
# For each such directory, it runs `maestro mermaid` on workflow.yaml,
# filters out any WARNING output, and then uses Perl to replace everything
# between the existing and markers
# with the newly generated Mermaid diagram.
# NOTE: Demos have been moved to https://github.com/AI4quantum/maestro-demos
# This script is deprecated and will exit.
#

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# FIX: DEMOS_DIR was referenced below but never defined; the demos folder is
# a sibling of tools/, i.e. directly under the project root.
DEMOS_DIR="$PROJECT_ROOT/demos"
echo "This script is deprecated. Demos have been moved to https://github.com/AI4quantum/maestro-demos"
echo "Please run this script from the maestro-demos repository instead."
exit 0

# NOTE(review): both markers are empty strings — the original HTML-comment
# markers were likely stripped during extraction; confirm before re-enabling.
START_MARKER=""
END_MARKER=""

update_readme_in_dir() {
    local dir="$1"
    local workflow_file="$dir/workflow.yaml"
    local readme_file="$dir/README.md"

    if [ ! -f "$workflow_file" ] || [ ! -f "$readme_file" ]; then
        return
    fi

    echo "Processing directory: $dir"

    local mermaid_output
    mermaid_output=$(maestro mermaid "$workflow_file" --silent 2>&1 | grep -v "WARNING")
    if [ $? -ne 0 ]; then
        echo "Error generating Mermaid diagram for $workflow_file"
        return
    fi

    local triple_backticks='```'
    local code_block
    code_block=$(printf '%smermaid\n%s\n%s' "$triple_backticks" "$mermaid_output" "$triple_backticks")

    local new_block
    new_block=$(printf "%s\n%s\n%s" "$START_MARKER" "$code_block" "$END_MARKER")

    local tmp
    tmp=$(mktemp)
    # NOTE(review): $new_block is interpolated into the perl replacement
    # unescaped; diagram text containing perl metacharacters would break this.
    perl -0777 -pe "s{.*}{$new_block}s" "$readme_file" > "$tmp"
    mv "$tmp" "$readme_file"
    echo "Updated $readme_file"
}

find "$DEMOS_DIR" -type f -name "workflow.yaml" | while read -r wf; do
    dir=$(dirname "$wf")
    if [ -f "$dir/README.md" ]; then
        update_readme_in_dir "$dir"
    fi
done

echo "All updates complete."
--------------------------------------------------------------------------------
/src/maestro/agents/remote_agent.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # SPDX-License-Identifier: Apache-2.0
3 | import json
4 | from string import Template
5 |
6 | import dotenv
7 | import requests
8 | from requests import RequestException
9 |
10 | from maestro.agents.agent import Agent
11 |
12 | dotenv.load_dotenv()
13 |
14 |
class RemoteAgent(Agent):
    """
    RemoteAgent extends the Agent class to load and run a specific agent.

    The remote endpoint, request shape, and response extraction are all
    taken from the agent's spec (url / request_template / response_template).
    """

    def __init__(self, agent: dict) -> None:
        """
        Initializes the workflow for the specified agent.

        Args:
            agent (dict): Parsed agent definition; its spec must provide
                `url`, `request_template`, and `response_template`.
        """
        super().__init__(agent)
        self.url = agent["spec"]["url"]
        self.request_template = agent["spec"]["request_template"]
        self.response_template = agent["spec"]["response_template"]

    async def run(self, prompt: str, context=None, step_index=None) -> str:
        """
        Runs the agent with the given prompt.

        Args:
            prompt (str): The prompt to run the agent with.
            context: Unused; kept for interface parity with other agents.
            step_index: Unused; kept for interface parity with other agents.

        Returns:
            The extracted answer string, the raw JSON response when the
            template yields a falsy value, or None on a request error.
        """
        print(f"👩🏻‍💻 Running {self.agent_name}...\n")
        try:
            if self.request_template is not None:
                # Substitute the prompt into the JSON request template.
                json_str = Template(self.request_template).safe_substitute(
                    prompt=prompt
                )
                data = json.loads(json_str)
            else:
                data = {"prompt": prompt}
            print("❓ ", prompt)
            response = requests.post(self.url, json=data)
            response.raise_for_status()
            # The template's $response placeholder is replaced with the
            # literal text "response.json()" and then eval'd, so the template
            # can index into the parsed JSON (e.g. "$response['answer']").
            # SECURITY: eval() runs arbitrary Python — response_template must
            # come from trusted agent definitions only.
            result = Template(self.response_template).safe_substitute(
                response="response.json()"
            )
            answer = eval(result)
            print("🤖 ", answer)
            return answer or json.dumps(response.json())
        except RequestException as e:
            print(f"An error occurred: {e}")
            return None

    def run_streaming(self, prompt: str) -> str:
        """
        Runs the agent in streaming mode with the given prompt.

        Not implemented for remote agents; this is a no-op placeholder.

        Args:
            prompt (str): The prompt to run the agent with.
        """
        pass
67 |
--------------------------------------------------------------------------------
/tests/workflow/test_loop.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import yaml
8 | import unittest
9 | from unittest import TestCase
10 | from maestro.workflow import Workflow
11 |
12 | import asyncio
13 |
14 |
def parse_yaml(file_path):
    """Read *file_path* and return all of its YAML documents as a list."""
    with open(file_path, "r") as stream:
        documents = list(yaml.safe_load_all(stream))
    return documents
19 |
20 |
21 | # `loop` tests
class TestLoop(TestCase):
    """Dry-run the loop workflow with a single-prompt loop agent."""

    def setUp(self):
        base = os.path.dirname(__file__)
        self.agents_yaml = parse_yaml(
            os.path.join(base, "../yamls/agents/dry_run_loop_agent.yaml")
        )
        self.workflow_yaml = parse_yaml(
            os.path.join(base, "../yamls/workflows/loop_workflow.yaml")
        )
        try:
            self.workflow = Workflow(self.agents_yaml, self.workflow_yaml[0])
        except Exception as excep:
            raise RuntimeError("Unable to create agents") from excep

    def tearDown(self):
        self.workflow = None

    def test_loop(self):
        result = asyncio.run(self.workflow.run())
        assert "happy" in result["final_prompt"]
45 |
46 |
class TestLoopList(TestCase):
    """Dry-run the loop workflow with a list-producing loop agent."""

    def setUp(self):
        base = os.path.dirname(__file__)
        self.agents_yaml = parse_yaml(
            os.path.join(base, "../yamls/agents/dry_run_loop_list_agent.yaml")
        )
        self.workflow_yaml = parse_yaml(
            os.path.join(base, "../yamls/workflows/loop_workflow.yaml")
        )
        try:
            self.workflow = Workflow(self.agents_yaml, self.workflow_yaml[0])
        except Exception as excep:
            raise RuntimeError("Unable to create agents") from excep

    def tearDown(self):
        self.workflow = None

    def test_loop(self):
        result = asyncio.run(self.workflow.run())
        assert "['This', 'is', 'a', 'test', 'for', 'loop']" in result["final_prompt"]
71 |
72 |
73 | if __name__ == "__main__":
74 | unittest.main()
75 |
--------------------------------------------------------------------------------
/tests/workflow/test_parallel.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # SPDX-License-Identifier: Apache-2.0
4 | # Copyright © 2025 IBM
5 |
6 | import os
7 | import yaml
8 | import unittest
9 | from unittest import TestCase
10 | from maestro.workflow import Workflow
11 |
12 | import asyncio
13 |
14 |
def parse_yaml(file_path):
    """Return every YAML document found in *file_path* as a list."""
    with open(file_path, "r") as stream:
        return [doc for doc in yaml.safe_load_all(stream)]
19 |
20 |
21 | # `parallel` tests
class TestParallel(TestCase):
    """Dry-run the parallel workflow with a single-prompt parallel agent."""

    def setUp(self):
        base = os.path.dirname(__file__)
        self.agents_yaml = parse_yaml(
            os.path.join(base, "../yamls/agents/dry_run_paralle_agent.yaml")
        )
        self.workflow_yaml = parse_yaml(
            os.path.join(base, "../yamls/workflows/parallel_workflow.yaml")
        )
        try:
            self.workflow = Workflow(self.agents_yaml, self.workflow_yaml[0])
        except Exception as excep:
            raise RuntimeError("Unable to create agents") from excep

    def tearDown(self):
        self.workflow = None

    def test_parallel(self):
        result = asyncio.run(self.workflow.run())
        assert "['test2', 'test3', 'test4']" in result["final_prompt"]
45 |
46 |
class TestParallelList(TestCase):
    """Dry-run the parallel workflow with a list-producing parallel agent."""

    def setUp(self):
        base = os.path.dirname(__file__)
        self.agents_yaml = parse_yaml(
            os.path.join(base, "../yamls/agents/dry_run_paralle_list_agent.yaml")
        )
        self.workflow_yaml = parse_yaml(
            os.path.join(base, "../yamls/workflows/parallel_workflow.yaml")
        )
        try:
            self.workflow = Workflow(self.agents_yaml, self.workflow_yaml[0])
        except Exception as excep:
            raise RuntimeError("Unable to create agents") from excep

    def tearDown(self):
        self.workflow = None

    def test_parallel(self):
        result = asyncio.run(self.workflow.run())
        assert "['aa', 'bb', 'cc']" in result["final_prompt"]
71 |
72 |
73 | if __name__ == "__main__":
74 | unittest.main()
75 |
--------------------------------------------------------------------------------
/src/maestro/ui/src/api.ts:
--------------------------------------------------------------------------------
// Payload returned by the non-streaming POST /chat endpoint.
export type WorkflowChatResponse = {
  response: string
  workflow_name: string
  timestamp: string
}

// One server-sent event from POST /chat/stream. Every field is optional
// because each event carries only the keys relevant to it (step progress,
// errors, or token-usage counters).
export type StreamEvent = {
  step_name?: string
  step_result?: string
  agent_name?: string
  step_complete?: boolean
  error?: string
  prompt_tokens?: number
  response_tokens?: number
  total_tokens?: number
}
17 |
18 | export async function health(): Promise {
19 | const res = await fetch('/health')
20 | if (!res.ok) throw new Error('health failed')
21 | const j = await res.json()
22 | return j.status as string
23 | }
24 |
25 | export async function chat(prompt: string): Promise {
26 | const res = await fetch('/chat', {
27 | method: 'POST',
28 | headers: { 'Content-Type': 'application/json' },
29 | body: JSON.stringify({ prompt, stream: false }),
30 | })
31 | if (!res.ok) throw new Error(`chat failed: ${res.status}`)
32 | return res.json()
33 | }
34 |
35 | export async function chatStream(
36 | prompt: string,
37 | onEvent: (event: StreamEvent) => void,
38 | ): Promise {
39 | const res = await fetch('/chat/stream', {
40 | method: 'POST',
41 | headers: { 'Content-Type': 'application/json' },
42 | body: JSON.stringify({ prompt }),
43 | })
44 | if (!res.ok || !res.body) {
45 | throw new Error(`stream failed: ${res.status}`)
46 | }
47 |
48 | const reader = res.body.getReader()
49 | const decoder = new TextDecoder()
50 | let buffer = ''
51 | while (true) {
52 | const { done, value } = await reader.read()
53 | if (done) break
54 | buffer += decoder.decode(value, { stream: true })
55 | const parts = buffer.split('\n\n')
56 | buffer = parts.pop() || ''
57 | for (const chunk of parts) {
58 | const line = chunk.trim()
59 | if (!line) continue
60 | // Expect lines like: data: {json}\n\n
61 | const idx = line.indexOf('data:')
62 | const jsonPart = idx >= 0 ? line.slice(idx + 5).trim() : line
63 | try {
64 | const evt = JSON.parse(jsonPart) as StreamEvent
65 | onEvent(evt)
66 | } catch {
67 | // ignore malformed
68 | }
69 | }
70 | }
71 | }
72 |
73 | export async function fetchDiagram(): Promise<{ diagram: string; workflow_name: string }> {
74 | const res = await fetch('/diagram')
75 | if (!res.ok) throw new Error('diagram failed')
76 | return res.json()
77 | }
78 |
79 |
80 |
--------------------------------------------------------------------------------
/operator/internal/controller/workflowrun.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 |
6 | "sigs.k8s.io/controller-runtime/pkg/log"
7 |
8 | maestrov1alpha1 "github.com/ai4quantum/maestro/api/v1alpha1"
9 | "k8s.io/apimachinery/pkg/api/meta"
10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 | ctrl "sigs.k8s.io/controller-runtime"
12 | )
13 |
// ConditionStatus defines WorkflowRun condition status.
//
// NOTE(review): these values are written into the condition's *Type* field
// (see SetCondition's `Type: string(condition)`), not its Status field, so
// the name ConditionStatus is somewhat misleading — confirm before renaming.
type ConditionStatus string

// Defines WorkflowRun condition status.
const (
	// TypeAvailable is the "Available" condition; SetInitialCondition uses it
	// when reconciliation starts.
	TypeAvailable ConditionStatus = "Available"
	// TypeProgressing is the "Progressing" condition.
	TypeProgressing ConditionStatus = "Progressing"
	// TypeCompleted is the "Completed" condition.
	TypeCompleted ConditionStatus = "Completed"
	// TypeFailed is the "Failed" condition.
	TypeFailed ConditionStatus = "Failed"
)
24 |
25 | // GetWorkflowRun gets the WorkflowRun from api server.
26 | func (r *WorkflowRunReconciler) GetWorkflowRun(ctx context.Context, req ctrl.Request, workflowrun *maestrov1alpha1.WorkflowRun) error {
27 | err := r.Get(ctx, req.NamespacedName, workflowrun)
28 | if err != nil {
29 | return err
30 | }
31 |
32 | return nil
33 | }
34 |
35 | // SetInitialCondition sets the status condition of the WorkflowRun to available initially
36 | // when no condition exists yet.
37 | func (r *WorkflowRunReconciler) SetInitialCondition(ctx context.Context, req ctrl.Request, workflowrun *maestrov1alpha1.WorkflowRun) error {
38 | if workflowrun.Status.Conditions != nil || len(workflowrun.Status.Conditions) != 0 {
39 | return nil
40 | }
41 |
42 | err := r.SetCondition(ctx, req, workflowrun, TypeAvailable, "Starting reconciliation")
43 |
44 | return err
45 | }
46 |
47 | // SetCondition sets the status condition of the WorkflowRun.
48 | func (r *WorkflowRunReconciler) SetCondition(
49 | ctx context.Context, req ctrl.Request,
50 | workflowrun *maestrov1alpha1.WorkflowRun, condition ConditionStatus,
51 | message string,
52 | ) error {
53 | log := log.FromContext(ctx)
54 |
55 | meta.SetStatusCondition(
56 | &workflowrun.Status.Conditions,
57 | metav1.Condition{
58 | Type: string(condition),
59 | Status: metav1.ConditionUnknown, Reason: "Reconciling",
60 | Message: message,
61 | },
62 | )
63 |
64 | if err := r.Status().Update(ctx, workflowrun); err != nil {
65 | log.Error(err, "Failed to update WorkflowRun status")
66 |
67 | return err
68 | }
69 |
70 | if err := r.Get(ctx, req.NamespacedName, workflowrun); err != nil {
71 | log.Error(err, "Failed to re-fetch WorkflowRun")
72 |
73 | return err
74 | }
75 |
76 | return nil
77 | }
78 |
--------------------------------------------------------------------------------