├── CHANGELOG.md
├── tests
│   ├── __init__.py
│   ├── app
│   │   ├── test_app.py
│   │   └── components
│   │       ├── test_types.py
│   │       └── test_task_list.py
│   ├── services
│   │   ├── __init__.py
│   │   └── test_agent_service.py
│   ├── orchestrators
│   │   └── __init__.py
│   ├── message_queue_consumers
│   │   └── test_base.py
│   └── message_queues
│       ├── test_apache_kafka.py
│       ├── test_rabbitmq.py
│       ├── test_simple.py
│       └── test_simple_app.py
├── llama_agents
│   ├── app
│   │   ├── __init__.py
│   │   └── components
│   │       ├── __init__.py
│   │       ├── types.py
│   │       ├── task_list.py
│   │       ├── service_list.py
│   │       └── human_list.py
│   ├── cli
│   │   ├── __init__.py
│   │   └── command_line.py
│   ├── messages
│   │   ├── __init__.py
│   │   └── base.py
│   ├── message_publishers
│   │   ├── __init__.py
│   │   └── publisher.py
│   ├── tools
│   │   ├── agent_service_tool.py
│   │   ├── utils.py
│   │   ├── __init__.py
│   │   └── service_tool.py
│   ├── launchers
│   │   └── __init__.py
│   ├── client
│   │   └── __init__.py
│   ├── control_plane
│   │   └── __init__.py
│   ├── message_consumers
│   │   ├── __init__.py
│   │   ├── callable.py
│   │   ├── remote.py
│   │   └── base.py
│   ├── message_queues
│   │   ├── __init__.py
│   │   └── base.py
│   ├── utils.py
│   ├── orchestrators
│   │   ├── __init__.py
│   │   └── base.py
│   ├── services
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── types.py
│   └── __init__.py
├── examples
│   ├── human-in-the-loop
│   │   ├── human_in_the_loop
│   │   │   ├── apps
│   │   │   │   ├── __init__.py
│   │   │   │   └── css.py
│   │   │   ├── agent_services
│   │   │   │   ├── __init__.py
│   │   │   │   └── funny_agent.py
│   │   │   ├── core_services
│   │   │   │   ├── __init__.py
│   │   │   │   ├── message_queue.py
│   │   │   │   └── control_plane.py
│   │   │   ├── additional_services
│   │   │   │   ├── __init__.py
│   │   │   │   └── human_in_the_loop.py
│   │   │   ├── utils.py
│   │   │   ├── __init__.py
│   │   │   └── local_launcher.py
│   │   ├── .gitignore
│   │   ├── logging.ini
│   │   ├── template.env.docker
│   │   ├── pyproject.toml
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   └── docker-compose.yml
│   ├── docker-kubernetes
│   │   ├── multi-agent-app
│   │   │   ├── multi_agent_app
│   │   │   │   ├── __init__.py
│   │   │   │   ├── agent_services
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── secret_agent.py
│   │   │   │   │   └── funny_agent.py
│   │   │   │   ├── core_services
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── message_queue.py
│   │   │   │   │   └── control_plane.py
│   │   │   │   ├── additional_services
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── human_consumer.py
│   │   │   │   │   └── task_result.py
│   │   │   │   ├── utils.py
│   │   │   │   └── local_launcher.py
│   │   │   ├── template.env.local
│   │   │   ├── template.env.docker
│   │   │   ├── pyproject.toml
│   │   │   └── Dockerfile
│   │   ├── kubernetes
│   │   │   ├── setup
│   │   │   │   ├── secrets.yaml.template
│   │   │   │   └── manifest.yaml
│   │   │   ├── ingress_services
│   │   │   │   ├── message_queue.yaml
│   │   │   │   ├── control_plane.yaml
│   │   │   │   ├── human_consumer.yaml
│   │   │   │   ├── funny_agent.yaml
│   │   │   │   └── secret_agent.yaml
│   │   │   └── jobs
│   │   │       └── registration.yaml
│   │   ├── .gitignore
│   │   └── logging.ini
│   ├── kafka
│   │   ├── pig-latin-translation
│   │   │   ├── pig_latin_translation
│   │   │   │   ├── __init__.py
│   │   │   │   ├── agent_services
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── decorators.py
│   │   │   │   │   ├── correct_first_character_agent.py
│   │   │   │   │   └── remove_ay_agent.py
│   │   │   │   ├── core_services
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── message_queue.py
│   │   │   │   │   └── control_plane.py
│   │   │   │   ├── additional_services
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── human_consumer.py
│   │   │   │   │   └── task_result.py
│   │   │   │   ├── utils.py
│   │   │   │   └── local_launcher.py
│   │   │   ├── README.md
│   │   │   ├── .gitignore
│   │   │   ├── logging.ini
│   │   │   ├── template.env.local
│   │   │   ├── template.env.docker
│   │   │   ├── pyproject.toml
│   │   │   ├── scripts
│   │   │   │   └── simulation.py
│   │   │   └── Dockerfile
│   │   └── simple-scripts
│   │       └── local_launcher_human_single.py
│   ├── rabbitmq
│   │   ├── example-app
│   │   │   ├── multi-agent-app-rabbitmq
│   │   │   │   ├── multi_agent_app_rabbitmq
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── agent_services
│   │   │   │   │   │   ├── __init__.py
│   │   │   │   │   │   ├── secret_agent.py
│   │   │   │   │   │   └── funny_agent.py
│   │   │   │   │   ├── core_services
│   │   │   │   │   │   ├── __init__.py
│   │   │   │   │   │   ├── message_queue.py
│   │   │   │   │   │   └── control_plane.py
│   │   │   │   │   ├── additional_services
│   │   │   │   │   │   ├── __init__.py
│   │   │   │   │   │   ├── human_consumer.py
│   │   │   │   │   │   └── task_result.py
│   │   │   │   │   ├── utils.py
│   │   │   │   │   └── local_launcher.py
│   │   │   │   ├── template.env.local
│   │   │   │   ├── pyproject.toml
│   │   │   │   ├── template.env.docker
│   │   │   │   └── Dockerfile
│   │   │   ├── kubernetes
│   │   │   │   ├── setup
│   │   │   │   │   ├── secrets.yaml.template
│   │   │   │   │   └── manifest.yaml
│   │   │   │   ├── rabbitmq
│   │   │   │   │   └── rabbitmq.yaml
│   │   │   │   └── ingress_services
│   │   │   │       ├── control_plane.yaml
│   │   │   │       ├── human_consumer.yaml
│   │   │   │       └── funny_agent.yaml
│   │   │   ├── .gitignore
│   │   │   └── logging.ini
│   │   └── simple-scripts
│   │       └── local_launcher_single.py
│   ├── assets
│   │   └── corrective_rag.png
│   ├── redis
│   │   └── simple-redis-app
│   │       ├── docker-compose.yml
│   │       ├── pyproject.toml
│   │       ├── README.md
│   │       └── local_launcher.py
│   ├── agentic_human_local_single.py
│   ├── agentic_local_single.py
│   ├── pipeline_human_service_as_tool_local_single.py
│   ├── agentic_toolservice_local_single.py
│   ├── pipeline_human_local_single.py
│   ├── agentic_server.py
│   ├── pipeline_agent_service_tool_local_single.py
│   ├── pipeline_local_single.py
│   └── reflection
│       └── toxicity_reflection_client.py
├── .gitignore
├── system_diagram.png
├── llama_agents_monitor.png
├── .vscode
│   └── settings.json
├── .github
│   ├── release.yml
│   └── workflows
│       ├── lint.yml
│       ├── unit_test.yml
│       ├── publish_release.yml
│       └── codeql.yml
├── Makefile
├── LICENSE
├── pyproject.toml
└── .pre-commit-config.yaml
/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/app/test_app.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /llama_agents/app/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /llama_agents/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/orchestrators/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /llama_agents/app/components/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/apps/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.pyc 3 | .tool-versions 4 | .idea 5 |
-------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/agent_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/core_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/additional_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/README.md: -------------------------------------------------------------------------------- 1 | # Pig-Latin Translation 2 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/agent_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/core_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/agent_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/core_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/additional_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/additional_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /system_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexfazio/llama-agents/main/system_diagram.png 
-------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/agent_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/core_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /llama_agents_monitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexfazio/llama-agents/main/llama_agents_monitor.png -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/additional_services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/assets/corrective_rag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexfazio/llama-agents/main/examples/assets/corrective_rag.png -------------------------------------------------------------------------------- /llama_agents/messages/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.messages.base import QueueMessage 2 | 3 | __all__ = ["QueueMessage"] 4 | -------------------------------------------------------------------------------- /llama_agents/message_publishers/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.message_publishers.publisher import MessageQueuePublisherMixin 2 | 3 | __all__ = ["MessageQueuePublisherMixin"] 4 | -------------------------------------------------------------------------------- /llama_agents/app/components/types.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class ButtonType(str, Enum): 5 | SERVICE = "Service" 6 | TASK = "Task" 7 | HUMAN = "Human" 8 | -------------------------------------------------------------------------------- /llama_agents/tools/agent_service_tool.py: -------------------------------------------------------------------------------- 1 | from llama_agents.tools.service_as_tool import ServiceAsTool 2 | 3 | # NOTE: for backwards compatibility 4 | AgentServiceTool = ServiceAsTool 5 | -------------------------------------------------------------------------------- /examples/redis/simple-redis-app/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | redis: 3 | image: redis:latest 4 | ports: 5 | - 6379:6379 6 | restart: always 7 | networks: 8 | - default 9 | -------------------------------------------------------------------------------- /llama_agents/launchers/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.launchers.local import LocalLauncher 2 | from llama_agents.launchers.server import ServerLauncher 3 | 4 | __all__ = ["LocalLauncher", "ServerLauncher"] 5 | -------------------------------------------------------------------------------- 
/examples/docker-kubernetes/kubernetes/setup/secrets.yaml.template: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | stringData: 4 | OPENAI_API_KEY: 5 | kind: Secret 6 | metadata: 7 | name: xcore-secret 8 | namespace: llama-agents-demo 9 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/kubernetes/setup/secrets.yaml.template: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | stringData: 4 | OPENAI_API_KEY: 5 | kind: Secret 6 | metadata: 7 | name: xcore-secret 8 | namespace: llama-agents-demo 9 | -------------------------------------------------------------------------------- /llama_agents/client/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.client.async_client import AsyncLlamaAgentsClient 2 | from llama_agents.client.sync_client import LlamaAgentsClient 3 | 4 | __all__ = ["AsyncLlamaAgentsClient", "LlamaAgentsClient"] 5 | -------------------------------------------------------------------------------- /llama_agents/control_plane/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.control_plane.base import BaseControlPlane 2 | from llama_agents.control_plane.server import ControlPlaneServer 3 | 4 | __all__ = ["BaseControlPlane", "ControlPlaneServer"] 5 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/.gitignore: -------------------------------------------------------------------------------- 1 | .env.* 2 | .env 3 | poetry.lock 4 | index.html.* 5 | index.html 6 | task_results 7 | .ipynb_checkpoints/ 8 | secrets.yaml 9 | Dockerfile.local 10 | docker-compose.local.yml 11 | pyproject.local.toml 12 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/.gitignore: -------------------------------------------------------------------------------- 1 | .env.* 2 | .env 3 | poetry.lock 4 | index.html.* 5 | index.html 6 | task_results 7 | .ipynb_checkpoints/ 8 | secrets.yaml 9 | Dockerfile.local 10 | docker-compose.local.yml 11 | pyproject.local.toml 12 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/.gitignore: -------------------------------------------------------------------------------- 1 | .env.* 2 | .env 3 | poetry.lock 4 | index.html.* 5 | index.html 6 | task_results 7 | .ipynb_checkpoints/ 8 | secrets.yaml 9 | Dockerfile.local 10 | docker-compose.local.yml 11 | pyproject.local.toml 12 | data 13 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/.gitignore: -------------------------------------------------------------------------------- 1 | .env.* 2 | .env 3 | poetry.lock 4 | index.html.* 5 | index.html 6 | task_results 7 | .ipynb_checkpoints/ 8 | secrets.yaml 9 | Dockerfile.local 10 | docker-compose.local.yml 11 | pyproject.local.toml 12 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def load_from_env(var: str) -> str: 5 | try: 6 | res = os.environ[var] 7 | except KeyError: 8 | raise ValueError(f"Missing env var '{var}'.") 9 | 
return res 10 | -------------------------------------------------------------------------------- /llama_agents/message_consumers/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.message_consumers.base import BaseMessageQueueConsumer 2 | from llama_agents.message_consumers.callable import CallableMessageConsumer 3 | 4 | __all__ = ["BaseMessageQueueConsumer", "CallableMessageConsumer"] 5 | -------------------------------------------------------------------------------- /llama_agents/tools/utils.py: -------------------------------------------------------------------------------- 1 | """Utility functions for tools.""" 2 | 3 | 4 | def get_tool_name_from_service_name(service_name: str) -> str: 5 | """Utility function for getting the reserved name of a tool derived by a service.""" 6 | return f"{service_name}-as-tool" 7 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def load_from_env(var: str) -> str: 5 | try: 6 | res = os.environ[var] 7 | except KeyError: 8 | raise ValueError(f"Missing env var '{var}'.") 9 | return res 10 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def load_from_env(var: str) -> str: 5 | try: 6 | res = os.environ[var] 7 | except KeyError: 8 | raise ValueError(f"Missing env var '{var}'.") 9 | return res 10 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def load_from_env(var: str) -> str: 5 | try: 6 | res = os.environ[var] 7 | except KeyError: 8 | raise ValueError(f"Missing env var '{var}'.") 9 | return res 10 | -------------------------------------------------------------------------------- /llama_agents/message_queues/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.message_queues.base import BaseMessageQueue 2 | from llama_agents.message_queues.simple import ( 3 | SimpleMessageQueue, 4 | SimpleRemoteClientMessageQueue, 5 | ) 6 | 7 | __all__ = ["BaseMessageQueue", "SimpleMessageQueue", "SimpleRemoteClientMessageQueue"] 8 | -------------------------------------------------------------------------------- /llama_agents/utils.py: -------------------------------------------------------------------------------- 1 | from string import Formatter 2 | from typing import List 3 | 4 | 5 | def get_prompt_params(prompt_template_str: str) -> List[str]: 6 | """Get the list of prompt params from the template format string.""" 7 | return [param for _, param, _, _ in Formatter().parse(prompt_template_str) if param] 8 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/kubernetes/rabbitmq/rabbitmq.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rabbitmq.com/v1beta1 2 | kind: RabbitmqCluster 3 | metadata: 4 | name: rabbitmq 5 | namespace: llama-agents-demo 6 | spec: 7 | replicas: 1 8 | rabbitmq: 9 | 
additionalConfig: | 10 | default_user = guest 11 | default_pass = guest 12 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "[python]": { 3 | "editor.formatOnSave": true, 4 | "editor.codeActionsOnSave": { 5 | "source.fixAll": "explicit" 6 | }, 7 | "editor.defaultFormatter": "ms-python.black-formatter" 8 | }, 9 | "python.testing.pytestArgs": ["tests"], 10 | "python.testing.unittestEnabled": false, 11 | "python.testing.pytestEnabled": true 12 | } 13 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/template.env.local: -------------------------------------------------------------------------------- 1 | MESSAGE_QUEUE_HOST="0.0.0.0" 2 | MESSAGE_QUEUE_PORT=8000 3 | CONTROL_PLANE_HOST="0.0.0.0" 4 | CONTROL_PLANE_PORT=8001 5 | SECRET_AGENT_HOST="0.0.0.0" 6 | SECRET_AGENT_PORT=8002 7 | FUNNY_AGENT_HOST="0.0.0.0" 8 | FUNNY_AGENT_PORT=8003 9 | HUMAN_CONSUMER_HOST="0.0.0.0" 10 | HUMAN_CONSUMER_PORT=8004 11 | OPENAI_API_KEY= 12 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/logging.ini: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=sampleFormatter 9 | 10 | [logger_root] 11 | handlers=consoleHandler 12 | 13 | [handler_consoleHandler] 14 | class=StreamHandler 15 | formatter=sampleFormatter 16 | 17 | [formatter_sampleFormatter] 18 | format=%(asctime)s - %(levelname)s - %(message)s 19 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/logging.ini: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=sampleFormatter 9 | 10 | [logger_root] 11 | handlers=consoleHandler 12 | 13 | [handler_consoleHandler] 14 | class=StreamHandler 15 | formatter=sampleFormatter 16 | 17 | [formatter_sampleFormatter] 18 | format=%(asctime)s - %(levelname)s - %(message)s 19 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/logging.ini: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=sampleFormatter 9 | 10 | [logger_root] 11 | handlers=consoleHandler 12 | 13 | [handler_consoleHandler] 14 | class=StreamHandler 15 | formatter=sampleFormatter 16 | 17 | [formatter_sampleFormatter] 18 | format=%(asctime)s - %(levelname)s - %(message)s 19 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/apps/css.py: -------------------------------------------------------------------------------- 1 | css = """ 2 | .radio-group .wrap { 3 | display: grid !important; 4 | grid-template-columns: 1fr 1fr; 5 | } 6 | 7 | .human-needed .gallery-item { 8 | background-color: #F0D20E; 9 | } 10 | 11 | .human-needed p { 12 | color: black; 13 | } 14 | 15 | .completed-tasks .gallery-item { 16 | background-color: green; 17 | } 18 | """ 19 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/logging.ini:
-------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=sampleFormatter 9 | 10 | [logger_root] 11 | handlers=consoleHandler 12 | 13 | [handler_consoleHandler] 14 | class=StreamHandler 15 | formatter=sampleFormatter 16 | 17 | [formatter_sampleFormatter] 18 | format=%(asctime)s - %(levelname)s - %(message)s 19 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | root_logger = logging.getLogger("human_in_the_loop") 4 | 5 | formatter = logging.Formatter("%(levelname)s:%(name)s - %(message)s") 6 | console_handler = logging.StreamHandler() 7 | console_handler.setFormatter(formatter) 8 | root_logger.addHandler(console_handler) 9 | 10 | root_logger.setLevel(logging.INFO) 11 | root_logger.propagate = False 12 | -------------------------------------------------------------------------------- /llama_agents/orchestrators/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.orchestrators.agent import AgentOrchestrator 2 | from llama_agents.orchestrators.base import BaseOrchestrator 3 | from llama_agents.orchestrators.pipeline import PipelineOrchestrator 4 | from llama_agents.orchestrators.orchestrator_router import OrchestratorRouter 5 | 6 | __all__ = [ 7 | "BaseOrchestrator", 8 | "PipelineOrchestrator", 9 | "AgentOrchestrator", 10 | "OrchestratorRouter", 11 | ] 12 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/template.env.local: -------------------------------------------------------------------------------- 1 | KAFKA_HOST="localhost" # don't modify 2 | KAFKA_PORT=9092 3 | CONTROL_PLANE_HOST="0.0.0.0" # don't modify 4 | CONTROL_PLANE_PORT=8001 5 | AY_AGENT_HOST="0.0.0.0" # don't modify 6 | AY_AGENT_PORT=8002 7 | FIRST_CHAR_AGENT_HOST="0.0.0.0" # don't modify 8 | FIRST_CHAR_AGENT_PORT=8003 9 | HUMAN_CONSUMER_HOST="0.0.0.0" # don't modify 10 | HUMAN_CONSUMER_PORT=8004 11 | LOCALHOST="0.0.0.0" 12 | OPENAI_API_KEY= 13 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/template.env.docker: -------------------------------------------------------------------------------- 1 | RABBITMQ_HOST="rabbitmq" # don't modify 2 | RABBITMQ_NODE_PORT=5672 3 | RABBITMQ_DEFAULT_USER="guest" 4 | RABBITMQ_DEFAULT_PASS="guest" 5 | CONTROL_PLANE_HOST="control_plane" # don't modify 6 | CONTROL_PLANE_PORT=8001 7 | FUNNY_AGENT_HOST="funny_agent" # don't modify 8 | FUNNY_AGENT_PORT=8002 9 | HUMAN_IN_THE_LOOP_HOST="hitloop" # don't modify 10 | HUMAN_IN_THE_LOOP_PORT=8003 11 | LOCALHOST="0.0.0.0" 12 | OPENAI_API_KEY= 13 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/core_services/message_queue.py: -------------------------------------------------------------------------------- 1 | from llama_agents import SimpleMessageQueue 2 | from multi_agent_app.utils import load_from_env 3 | 4 | message_queue_host = load_from_env("MESSAGE_QUEUE_HOST") 5 | message_queue_port = load_from_env("MESSAGE_QUEUE_PORT") 6 | 7 | message_queue = SimpleMessageQueue( 8 | host=message_queue_host, 9 | port=int(message_queue_port) if message_queue_port else 
None, 10 | ) 11 | app = message_queue._app 12 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/template.env.docker: -------------------------------------------------------------------------------- 1 | MESSAGE_QUEUE_HOST="message_queue" # don't modify 2 | MESSAGE_QUEUE_PORT=8000 3 | CONTROL_PLANE_HOST="control_plane" # don't modify 4 | CONTROL_PLANE_PORT=8001 5 | SECRET_AGENT_HOST="secret_agent" # don't modify 6 | SECRET_AGENT_PORT=8002 7 | FUNNY_AGENT_HOST="funny_agent" # don't modify 8 | FUNNY_AGENT_PORT=8003 9 | HUMAN_CONSUMER_HOST="human_consumer" # don't modify 10 | HUMAN_CONSUMER_PORT=8004 11 | OPENAI_API_KEY= 12 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/core_services/message_queue.py: -------------------------------------------------------------------------------- 1 | from llama_agents.message_queues.apache_kafka import KafkaMessageQueue 2 | from pig_latin_translation.utils import load_from_env 3 | 4 | message_queue_host = load_from_env("KAFKA_HOST") 5 | message_queue_port = load_from_env("KAFKA_PORT") 6 | 7 | message_queue = KafkaMessageQueue.from_url_params( 8 | host=message_queue_host, 9 | port=int(message_queue_port) if message_queue_port else None, 10 | ) 11 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/template.env.docker: -------------------------------------------------------------------------------- 1 | KAFKA_HOST="kafka" # don't modify 2 | KAFKA_PORT=19092 3 | CONTROL_PLANE_HOST="control_plane" # don't modify 4 | CONTROL_PLANE_PORT=8001 5 | AY_AGENT_HOST="ay_agent" # don't modify 6 | AY_AGENT_PORT=8002 7 | FIRST_CHAR_AGENT_HOST="first_char_agent" # don't modify 8 | FIRST_CHAR_AGENT_PORT=8003 9 | HUMAN_CONSUMER_HOST="human_consumer" # don't modify 10 | HUMAN_CONSUMER_PORT=8004 11 | LOCALHOST="0.0.0.0" 12 | OPENAI_API_KEY= 13 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/template.env.local: -------------------------------------------------------------------------------- 1 | RABBITMQ_HOST="localhost" # don't modify 2 | RABBITMQ_NODE_PORT=5672 3 | RABBITMQ_DEFAULT_USER="guest" 4 | RABBITMQ_DEFAULT_PASS="guest" 5 | CONTROL_PLANE_HOST="0.0.0.0" 6 | CONTROL_PLANE_PORT=8001 7 | SECRET_AGENT_HOST="0.0.0.0" 8 | SECRET_AGENT_PORT=8002 9 | FUNNY_AGENT_HOST="0.0.0.0" 10 | FUNNY_AGENT_PORT=8003 11 | HUMAN_CONSUMER_HOST="0.0.0.0" 12 | HUMAN_CONSUMER_PORT=8004 13 | LOCALHOST="0.0.0.0" 14 | OPENAI_API_KEY= 15 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["poetry-core"] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "multi-agent-app" 7 | version = "0.1.0" 8 | description = "" 9 | authors = ["Andrei Fajardo "] 10 | 11 | [tool.poetry.dependencies] 12 | python = "^3.10" 13 | llama-agents = "^0.0.3" 14 | llama-index-agent-openai = "^0.2.7" 15 | llama-index-embeddings-openai = "^0.1.10" 16 | llama-index-llms-openai = "^0.1.23" 17 | -------------------------------------------------------------------------------- /.github/release.yml: -------------------------------------------------------------------------------- 1
| changelog: 2 | categories: 3 | - title: Breaking Changes 🛠 4 | labels: 5 | - breaking-change 6 | - breaking 7 | - title: New Features 🎉 8 | labels: 9 | - enhancement 10 | - feature 11 | - integration 12 | - title: Bug Fixes 🐛 13 | labels: 14 | - bug 15 | - fix 16 | - title: Documentation 📚 17 | labels: 18 | - documentation 19 | - docs 20 | - example 21 | - examples 22 | -------------------------------------------------------------------------------- /llama_agents/tools/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.tools.agent_service_tool import AgentServiceTool 2 | from llama_agents.tools.meta_service_tool import MetaServiceTool 3 | from llama_agents.tools.service_as_tool import ServiceAsTool 4 | from llama_agents.tools.service_tool import ServiceTool 5 | from llama_agents.tools.service_component import ServiceComponent 6 | 7 | 8 | __all__ = [ 9 | "AgentServiceTool", 10 | "MetaServiceTool", 11 | "ServiceAsTool", 12 | "ServiceTool", 13 | "ServiceComponent", 14 | ] 15 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["poetry-core"] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "pig-latin-translation" 7 | version = "0.1.0" 8 | description = "" 9 | authors = ["Andrei Fajardo "] 10 | readme = "README.md" 11 | 12 | [tool.poetry.dependencies] 13 | python = "^3.10" 14 | uvicorn = "^0.30.3" 15 | llama-index-llms-openai = "^0.1.26" 16 | llama-index-agent-openai = "^0.2.9" 17 | llama-agents = {version = "^0.0.12", extras = ["kafka"]} 18 | -------------------------------------------------------------------------------- /examples/redis/simple-redis-app/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["poetry-core"] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "simple-redis-queue" 7 | version = "0.1.0" 8 | description = "" 9 | authors = ["Thierry "] 10 | readme = "README.md" 11 | 12 | [tool.poetry.dependencies] 13 | python = "3.11.9" 14 | llama-index-llms-openai = "^0.1.25" 15 | llama-index-agent-openai = "^0.2.7" 16 | llama-index-embeddings-openai = "^0.1.10" 17 | llama-agents = {version = "^0.0.5", extras = ["redis"]} 18 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["poetry-core"] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "multi-agent-app-rabbitmq" 7 | version = "0.1.0" 8 | description = "" 9 | authors = ["Andrei Fajardo "] 10 | 11 | [tool.poetry.dependencies] 12 | python = "^3.10" 13 | llama-agents = {version = "^0.0.5", extras = ["rabbitmq"]} 14 | llama-index-llms-openai = "^0.1.25" 15 | llama-index-embeddings-openai = "^0.1.10" 16 | llama-index-agent-openai = "^0.2.7" 17 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/template.env.docker: -------------------------------------------------------------------------------- 1 | RABBITMQ_HOST="rabbitmq" # don't modify 2 | RABBITMQ_NODE_PORT=5672 3 | RABBITMQ_DEFAULT_USER="guest" 4 | 
RABBITMQ_DEFAULT_PASS="guest" 5 | CONTROL_PLANE_HOST="control_plane" # don't modify 6 | CONTROL_PLANE_PORT=8001 7 | SECRET_AGENT_HOST="secret_agent" # don't modify 8 | SECRET_AGENT_PORT=8002 9 | FUNNY_AGENT_HOST="funny_agent" # don't modify 10 | FUNNY_AGENT_PORT=8003 11 | HUMAN_CONSUMER_HOST="human_consumer" # don't modify 12 | HUMAN_CONSUMER_PORT=8004 13 | LOCALHOST="0.0.0.0" 14 | OPENAI_API_KEY= 15 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GIT_ROOT ?= $(shell git rev-parse --show-toplevel) 2 | 3 | help: ## Show all Makefile targets. 4 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}' 5 | 6 | format: ## Run code autoformatters (black). 7 | pre-commit install 8 | git ls-files | xargs pre-commit run black --files 9 | 10 | lint: ## Run linters: pre-commit (black, ruff, codespell) and mypy 11 | pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files 12 | 13 | test: ## Run tests via pytest 14 | pytest tests 15 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/core_services/message_queue.py: -------------------------------------------------------------------------------- 1 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 2 | from human_in_the_loop.utils import load_from_env 3 | 4 | message_queue_host = load_from_env("RABBITMQ_HOST") 5 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 6 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 7 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 8 | 9 | message_queue = RabbitMQMessageQueue( 10 | url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 11 | ) 12 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/local_launcher.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ServerLauncher 2 | 3 | from human_in_the_loop.core_services.message_queue import message_queue 4 | from human_in_the_loop.core_services.control_plane import control_plane 5 | from human_in_the_loop.agent_services.funny_agent import agent_server 6 | from human_in_the_loop.additional_services.human_in_the_loop import human_service 7 | 8 | 9 | # launch it 10 | launcher = ServerLauncher( 11 | [agent_server, human_service], 12 | control_plane, 13 | message_queue, 14 | ) 15 | 16 | 17 | if __name__ == "__main__": 18 | launcher.launch_servers() 19 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/core_services/message_queue.py: -------------------------------------------------------------------------------- 1 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 2 | from multi_agent_app_rabbitmq.utils import load_from_env 3 | 4 | message_queue_host = load_from_env("RABBITMQ_HOST") 5 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 6 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 7 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 8 | 9 | message_queue = RabbitMQMessageQueue( 10 | 
url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 11 | ) 12 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/kubernetes/setup/manifest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | creationTimestamp: null 6 | name: llama-agents-demo 7 | 8 | --- 9 | apiVersion: v1 10 | data: 11 | MESSAGE_QUEUE_HOST: "message-queue" 12 | MESSAGE_QUEUE_PORT: "8000" 13 | CONTROL_PLANE_HOST: "control-plane" 14 | CONTROL_PLANE_PORT: "8000" 15 | SECRET_AGENT_HOST: "secret-agent" 16 | SECRET_AGENT_PORT: "8000" 17 | FUNNY_AGENT_HOST: "funny-agent" 18 | FUNNY_AGENT_PORT: "8000" 19 | HUMAN_CONSUMER_HOST: "human-consumer" 20 | HUMAN_CONSUMER_PORT: "8000" 21 | kind: ConfigMap 22 | metadata: 23 | name: xcore-config 24 | namespace: llama-agents-demo 25 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["poetry-core"] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "human-in-the-loop" 7 | version = "0.1.0" 8 | description = "" 9 | authors = ["Andrei Fajardo "] 10 | readme = "README.md" 11 | 12 | [tool.poetry.dependencies] 13 | python = "^3.10" 14 | llama-agents = {version = "^0.0.11", extras = ["rabbitmq"]} 15 | llama-index-core = "^0.10.55" 16 | llama-index-llms-openai = "^0.1.25" 17 | llama-index-embeddings-openai = "^0.1.10" 18 | llama-index-agent-openai = "^0.2.8" 19 | gradio = "^4.38.1" 20 | llama-index-program-openai = "^0.1.6" 21 | llama-index-readers-file = "^0.1.30" 22 | -------------------------------------------------------------------------------- /tests/services/test_agent_service.py: -------------------------------------------------------------------------------- 1 | from llama_index.core.llms import MockLLM 2 | from llama_index.core.agent import ReActAgent 3 | 4 | from llama_agents.services import AgentService 5 | from llama_agents.message_queues.simple import SimpleMessageQueue 6 | 7 | 8 | def test_init() -> None: 9 | agent = ReActAgent.from_tools([], llm=MockLLM()) 10 | server = AgentService( 11 | agent, 12 | SimpleMessageQueue(), 13 | running=False, 14 | description="Test Agent Server", 15 | step_interval=0.5, 16 | ) 17 | 18 | assert server.agent == agent 19 | assert server.running is False 20 | assert server.description == "Test Agent Server" 21 | assert server.step_interval == 0.5 22 | -------------------------------------------------------------------------------- /llama_agents/services/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.services.base import BaseService 2 | from llama_agents.services.agent import AgentService 3 | from llama_agents.services.human import HumanService 4 | from llama_agents.services.tool import ToolService 5 | from llama_agents.services.component import ComponentService 6 | from llama_agents.services.types import ( 7 | _Task, 8 | _TaskSate, 9 | _TaskStep, 10 | _TaskStepOutput, 11 | _ChatMessage, 12 | ) 13 | 14 | __all__ = [ 15 | "BaseService", 16 | "AgentService", 17 | "HumanService", 18 | "ToolService", 19 | "ComponentService", 20 | "_Task", 21 | "_TaskSate", 22 | "_TaskStep", 23 | "_TaskStepOutput", 24 | "_ChatMessage", 25 | ] 26 | 
-------------------------------------------------------------------------------- /llama_agents/message_consumers/callable.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Any, Callable 3 | 4 | from llama_agents.messages.base import QueueMessage 5 | from llama_agents.message_consumers.base import BaseMessageQueueConsumer 6 | 7 | 8 | class CallableMessageConsumer(BaseMessageQueueConsumer): 9 | """Message consumer for a callable handler. 10 | 11 | For a given message, it will call the handler with the message as input. 12 | """ 13 | 14 | handler: Callable 15 | 16 | async def _process_message(self, message: QueueMessage, **kwargs: Any) -> None: 17 | if asyncio.iscoroutinefunction(self.handler): 18 | await self.handler(message, **kwargs) 19 | else: 20 | self.handler(message, **kwargs) 21 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/kubernetes/setup/manifest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | creationTimestamp: null 6 | name: llama-agents-demo 7 | 8 | --- 9 | apiVersion: v1 10 | data: 11 | RABBITMQ_HOST: "rabbitmq" 12 | RABBITMQ_NODE_PORT: "5672" 13 | RABBITMQ_DEFAULT_PASS: "guest" 14 | RABBITMQ_DEFAULT_USER: "guest" 15 | CONTROL_PLANE_HOST: "control-plane" 16 | CONTROL_PLANE_PORT: "8000" 17 | SECRET_AGENT_HOST: "secret-agent" 18 | SECRET_AGENT_PORT: "8000" 19 | FUNNY_AGENT_HOST: "funny-agent" 20 | FUNNY_AGENT_PORT: "8000" 21 | HUMAN_CONSUMER_HOST: "human-consumer" 22 | HUMAN_CONSUMER_PORT: "8000" 23 | LOCALHOST: "0.0.0.0" 24 | kind: ConfigMap 25 | metadata: 26 | name: xcore-config 27 | namespace: llama-agents-demo 28 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/local_launcher.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ServerLauncher 2 | 3 | from multi_agent_app.core_services.message_queue import message_queue 4 | from multi_agent_app.core_services.control_plane import control_plane 5 | from multi_agent_app.agent_services.secret_agent import ( 6 | agent_server as secret_agent_server, 7 | ) 8 | from multi_agent_app.agent_services.funny_agent import ( 9 | agent_server as funny_agent_server, 10 | ) 11 | from multi_agent_app.additional_services.human_consumer import human_consumer_server 12 | 13 | 14 | # launch it 15 | launcher = ServerLauncher( 16 | [secret_agent_server, funny_agent_server], 17 | control_plane, 18 | message_queue, 19 | additional_consumers=[human_consumer_server.as_consumer()], 20 | ) 21 | 22 | 23 | if __name__ == "__main__": 24 | launcher.launch_servers() 25 | -------------------------------------------------------------------------------- /llama_agents/cli/command_line.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from llama_agents.app.app import run as launch_monitor 4 | 5 | 6 | def main() -> None: 7 | parser = argparse.ArgumentParser(description="llama-agents CLI interface.") 8 | 9 | # Subparsers for the main commands 10 | subparsers = parser.add_subparsers(title="commands", dest="command", required=True) 11 | 12 | # Subparser for the monitor command 13 | monitor_parser = subparsers.add_parser("monitor", help="Monitor the agents.") 14 | monitor_parser.add_argument( 15 | 
"--control-plane-url", 16 | default="http://127.0.0.1:8000", 17 | help="The URL of the control plane. Defaults to http://127.0.0.1:8000", 18 | ) 19 | monitor_parser.set_defaults( 20 | func=lambda args: launch_monitor(args.control_plane_url) 21 | ) 22 | 23 | args = parser.parse_args() 24 | args.func(args) 25 | 26 | 27 | if __name__ == "__main__": 28 | main() 29 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/local_launcher.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ServerLauncher 2 | 3 | from multi_agent_app_rabbitmq.core_services.message_queue import message_queue 4 | from multi_agent_app_rabbitmq.core_services.control_plane import control_plane 5 | from multi_agent_app_rabbitmq.agent_services.secret_agent import ( 6 | agent_server as secret_agent_server, 7 | ) 8 | from multi_agent_app_rabbitmq.agent_services.funny_agent import ( 9 | agent_server as funny_agent_server, 10 | ) 11 | from multi_agent_app_rabbitmq.additional_services.human_consumer import ( 12 | human_consumer_server, 13 | ) 14 | 15 | 16 | # launch it 17 | launcher = ServerLauncher( 18 | [secret_agent_server, funny_agent_server], 19 | control_plane, 20 | message_queue, 21 | additional_consumers=[human_consumer_server.as_consumer()], 22 | ) 23 | 24 | 25 | if __name__ == "__main__": 26 | launcher.launch_servers() 27 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/local_launcher.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ServerLauncher 2 | 3 | from pig_latin_translation.core_services.message_queue import message_queue 4 | from pig_latin_translation.core_services.control_plane import control_plane 5 | from pig_latin_translation.agent_services.remove_ay_agent import ( 6 | agent_server as remove_ay_agent_server, 7 | ) 8 | from pig_latin_translation.agent_services.correct_first_character_agent import ( 9 | agent_server as correct_first_character_agent_server, 10 | ) 11 | from pig_latin_translation.additional_services.human_consumer import ( 12 | human_consumer_server, 13 | ) 14 | 15 | 16 | # launch it 17 | launcher = ServerLauncher( 18 | [remove_ay_agent_server, correct_first_character_agent_server], 19 | control_plane, 20 | message_queue, 21 | additional_consumers=[human_consumer_server.as_consumer()], 22 | ) 23 | 24 | 25 | if __name__ == "__main__": 26 | launcher.launch_servers() 27 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Linting 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | env: 10 | POETRY_VERSION: "1.6.1" 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | # You can use PyPy versions in python-version. 
17 | # For example, pypy-2.7 and pypy-3.8 18 | matrix: 19 | python-version: ["3.9"] 20 | steps: 21 | - uses: actions/checkout@v3 22 | with: 23 | fetch-depth: ${{ github.event_name == 'pull_request' && 2 || 0 }} 24 | - name: Set up python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v4 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | - name: Install Poetry 29 | uses: snok/install-poetry@v1 30 | with: 31 | version: ${{ env.POETRY_VERSION }} 32 | - name: Install pre-commit 33 | shell: bash 34 | run: poetry run pip install pre-commit 35 | - name: Run linter 36 | shell: bash 37 | run: poetry run make lint 38 | -------------------------------------------------------------------------------- /.github/workflows/unit_test.yml: -------------------------------------------------------------------------------- 1 | name: Unit Testing 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | env: 10 | POETRY_VERSION: "1.6.1" 11 | 12 | jobs: 13 | test: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | # You can use PyPy versions in python-version. 17 | # For example, pypy-2.7 and pypy-3.8 18 | matrix: 19 | python-version: ["3.8", "3.10", "3.11"] 20 | steps: 21 | - uses: actions/checkout@v3 22 | with: 23 | fetch-depth: 0 24 | - name: Set up python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v4 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | - name: Install Poetry 29 | uses: snok/install-poetry@v1 30 | with: 31 | version: ${{ env.POETRY_VERSION }} 32 | - name: Install deps 33 | shell: bash 34 | run: poetry install --with dev 35 | - name: Run testing 36 | env: 37 | CI: true 38 | shell: bash 39 | run: poetry run pytest tests 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 LlamaIndex 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /llama_agents/message_publishers/publisher.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Optional 3 | from llama_agents.messages.base import QueueMessage 4 | from llama_agents.message_queues.base import BaseMessageQueue, PublishCallback 5 | 6 | 7 | class MessageQueuePublisherMixin(ABC): 8 | """PublisherMixin. 
9 | 10 | Mixin for a message queue publisher. Allows for accessing common properties and methods for: 11 | - Publisher ID. 12 | - Message queue. 13 | - Publish callback. 14 | - Publish method. 15 | """ 16 | 17 | @property 18 | @abstractmethod 19 | def publisher_id(self) -> str: 20 | ... 21 | 22 | @property 23 | @abstractmethod 24 | def message_queue(self) -> BaseMessageQueue: 25 | ... 26 | 27 | @property 28 | def publish_callback(self) -> Optional[PublishCallback]: 29 | return None 30 | 31 | async def publish(self, message: QueueMessage, **kwargs: Any) -> Any: 32 | """Publish message.""" 33 | message.publisher_id = self.publisher_id 34 | return await self.message_queue.publish( 35 | message, callback=self.publish_callback, **kwargs 36 | ) 37 | -------------------------------------------------------------------------------- /tests/app/components/test_types.py: -------------------------------------------------------------------------------- 1 | from llama_agents.app.components.types import ButtonType 2 | 3 | 4 | def test_button_type_enum() -> None: 5 | # Test that the enum values are correctly assigned 6 | assert ButtonType.SERVICE == "Service" 7 | assert ButtonType.TASK == "Task" 8 | assert ButtonType.HUMAN == "Human" 9 | 10 | # Test that the enum names are correct 11 | assert ButtonType.SERVICE.name == "SERVICE" 12 | assert ButtonType.TASK.name == "TASK" 13 | assert ButtonType.HUMAN.name == "HUMAN" 14 | 15 | # Test that the enum values are instances of the correct type 16 | assert isinstance(ButtonType.SERVICE, ButtonType) 17 | assert isinstance(ButtonType.TASK, ButtonType) 18 | assert isinstance(ButtonType.HUMAN, ButtonType) 19 | 20 | # Test that the enum values are instances of str 21 | assert isinstance(ButtonType.SERVICE, str) 22 | assert isinstance(ButtonType.TASK, str) 23 | assert isinstance(ButtonType.HUMAN, str) 24 | 25 | 26 | def test_button_type_members() -> None: 27 | members = list(ButtonType) 28 | assert len(members) == 3 29 | assert ButtonType.SERVICE in members 30 | assert ButtonType.TASK in members 31 | assert ButtonType.HUMAN in members 32 | -------------------------------------------------------------------------------- /llama_agents/orchestrators/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Dict, List, Tuple 3 | 4 | from llama_index.core.tools import BaseTool 5 | 6 | from llama_agents.messages.base import QueueMessage 7 | from llama_agents.types import TaskDefinition, TaskResult 8 | 9 | 10 | class BaseOrchestrator(ABC): 11 | """Base class for an orchestrator. 12 | 13 | The general idea for an orchestrator is to manage the flow of messages between services. 14 | 15 | Given some state, task, and list of tools, figure out the next messages to publish. Then, once 16 | the messages are processed, update the state with the results. 17 | 18 | Currently, the final message is published to the `human` message queue for final processing. 19 | """ 20 | 21 | @abstractmethod 22 | async def get_next_messages( 23 | self, task_def: TaskDefinition, tools: List[BaseTool], state: Dict[str, Any] 24 | ) -> Tuple[List[QueueMessage], Dict[str, Any]]: 25 | """Get the next message to process. Returns the message and the new state.""" 26 | ... 27 | 28 | @abstractmethod 29 | async def add_result_to_state( 30 | self, result: TaskResult, state: Dict[str, Any] 31 | ) -> Dict[str, Any]: 32 | """Add the result of processing a message to the state. Returns the new state.""" 33 | ... 
34 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/additional_services/human_consumer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from llama_agents import ( 3 | SimpleMessageQueue, 4 | ) 5 | 6 | from multi_agent_app.additional_services.task_result import TaskResultService 7 | from multi_agent_app.utils import load_from_env 8 | 9 | message_queue_host = load_from_env("MESSAGE_QUEUE_HOST") 10 | message_queue_port = load_from_env("MESSAGE_QUEUE_PORT") 11 | human_consumer_host = load_from_env("HUMAN_CONSUMER_HOST") 12 | human_consumer_port = load_from_env("HUMAN_CONSUMER_PORT") 13 | 14 | # create our multi-agent framework components 15 | message_queue = SimpleMessageQueue( 16 | host=message_queue_host, 17 | port=int(message_queue_port) if message_queue_port else None, 18 | ) 19 | queue_client = message_queue.client 20 | 21 | 22 | human_consumer_server = TaskResultService( 23 | message_queue=queue_client, 24 | host=human_consumer_host, 25 | port=int(human_consumer_port) if human_consumer_port else None, 26 | name="human", 27 | ) 28 | 29 | app = human_consumer_server._app 30 | 31 | 32 | # register to message queue 33 | async def register_and_start_consuming() -> None: 34 | start_consuming_callable = await human_consumer_server.register_to_message_queue() 35 | await start_consuming_callable() 36 | 37 | 38 | if __name__ == "__main__": 39 | asyncio.run(register_and_start_consuming()) 40 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/core_services/control_plane.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from llama_agents import AgentOrchestrator, ControlPlaneServer, SimpleMessageQueue 4 | from llama_index.llms.openai import OpenAI 5 | 6 | from multi_agent_app.utils import load_from_env 7 | 8 | 9 | message_queue_host = load_from_env("MESSAGE_QUEUE_HOST") 10 | message_queue_port = load_from_env("MESSAGE_QUEUE_PORT") 11 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 12 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 13 | 14 | 15 | # setup message queue 16 | message_queue = SimpleMessageQueue( 17 | host=message_queue_host, 18 | port=int(message_queue_port) if message_queue_port else None, 19 | ) 20 | queue_client = message_queue.client 21 | 22 | # setup control plane 23 | control_plane = ControlPlaneServer( 24 | message_queue=queue_client, 25 | orchestrator=AgentOrchestrator(llm=OpenAI()), 26 | host=control_plane_host, 27 | port=int(control_plane_port) if control_plane_port else None, 28 | ) 29 | 30 | 31 | app = control_plane.app 32 | 33 | 34 | async def register_and_start_consuming() -> None: 35 | # register to message queue 36 | start_consuming_callable = await control_plane.register_to_message_queue() 37 | await start_consuming_callable() 38 | 39 | 40 | if __name__ == "__main__": 41 | asyncio.run(register_and_start_consuming()) 42 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["poetry-core"] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "llama-agents" 7 | version = "0.0.14" 8 | description = "" 9 | authors = ["Logan Markewich ", "Andrei Fajardo "] 10 | 
maintainers = [ 11 | "Logan Markewich ", 12 | "Andrei Fajardo ", 13 | "Jerry Liu " 14 | ] 15 | readme = "README.md" 16 | 17 | [tool.poetry.dependencies] 18 | python = ">=3.8.1,<4.0" 19 | fastapi = "^0.109.1" 20 | llama-index-core = "^0.10.50" 21 | pytest-asyncio = "^0.23.7" 22 | textual = "^0.70.0" 23 | aio-pika = {version = "^9.4.2", optional = true} 24 | redis = {version = "^5.0.7", optional = true} 25 | uvicorn = "^0.30.1" 26 | pytest-mock = "^3.14.0" 27 | aiokafka = {version = "^0.11.0", optional = true} 28 | kafka-python-ng = {version = "^2.2.2", optional = true} 29 | 30 | [tool.poetry.extras] 31 | kafka = ["aiokafka", "kafka-python-ng"] 32 | rabbitmq = ["aio-pika"] 33 | redis = ["redis"] 34 | 35 | [tool.poetry.group.dev.dependencies] 36 | pytest = "^8.2.2" 37 | ruff = "^0.4.7" 38 | mypy = "^1.10.0" 39 | aio-pika = "^9.4.2" 40 | redis = "^5.0.7" 41 | pytest-cov = "^5.0.0" 42 | coverage = "^7.6.0" 43 | aiokafka = "^0.11.0" 44 | kafka-python-ng = "^2.2.2" 45 | 46 | [tool.poetry.scripts] 47 | llama-agents = 'llama_agents.cli.command_line:main' 48 | -------------------------------------------------------------------------------- /llama_agents/message_consumers/remote.py: -------------------------------------------------------------------------------- 1 | import httpx 2 | from pydantic import BaseModel, Field 3 | from typing import Any, Optional 4 | 5 | from llama_agents.message_consumers.base import BaseMessageQueueConsumer 6 | from llama_agents.messages import QueueMessage 7 | from llama_agents.types import generate_id 8 | 9 | 10 | class RemoteMessageConsumerDef(BaseModel): 11 | """Definition for a RemoteMessageConsumer. 12 | 13 | Helps describe the configuration for a RemoteMessageConsumer. 14 | """ 15 | 16 | id_: str = Field(default_factory=generate_id) 17 | message_type: str = Field( 18 | default="default", description="Type of the message to consume." 19 | ) 20 | url: str = Field(default_factory=str, description="URL to send messages to.") 21 | client_kwargs: Optional[dict] = None 22 | 23 | 24 | class RemoteMessageConsumer(BaseMessageQueueConsumer): 25 | """Consumer of a MessageQueue that sends messages to a remote service. 26 | 27 | For each message, it will send the message to the given URL. 28 | """ 29 | 30 | url: str 31 | client_kwargs: Optional[dict] = None 32 | client: Optional[httpx.AsyncClient] = None 33 | 34 | async def _process_message(self, message: QueueMessage, **kwargs: Any) -> None: 35 | client_kwargs = self.client_kwargs or {} 36 | 37 | async with httpx.AsyncClient(**client_kwargs) as client: 38 | await client.post(self.url, json=message.model_dump()) 39 | -------------------------------------------------------------------------------- /llama_agents/tools/service_tool.py: -------------------------------------------------------------------------------- 1 | from llama_index.core.tools import AsyncBaseTool, ToolMetadata, ToolOutput 2 | 3 | from llama_agents.types import ServiceDefinition 4 | 5 | 6 | class ServiceTool(AsyncBaseTool): 7 | """A tool that wraps a service. 8 | 9 | Mostly used under the hood by the agent orchestrator. 10 | 11 | Attributes: 12 | name (str): 13 | The name of the tool. 14 | description (str): 15 | The description of the tool. 
16 | """ 17 | 18 | def __init__(self, name: str, description: str) -> None: 19 | self.name = name 20 | self.description = description 21 | 22 | @classmethod 23 | def from_service_definition(cls, service_def: ServiceDefinition) -> "ServiceTool": 24 | return cls(service_def.service_name, service_def.description) 25 | 26 | @property 27 | def metadata(self) -> ToolMetadata: 28 | return ToolMetadata( 29 | name=self.name, 30 | description=self.description, 31 | ) 32 | 33 | def _make_dummy_output(self, input: str) -> ToolOutput: 34 | return ToolOutput( 35 | content=input, 36 | tool_name=self.name, 37 | raw_input={"input": input}, 38 | raw_output=input, 39 | ) 40 | 41 | def call(self, input: str) -> ToolOutput: 42 | return self._make_dummy_output(input) 43 | 44 | async def acall(self, input: str) -> ToolOutput: 45 | return self._make_dummy_output(input) 46 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/scripts/simulation.py: -------------------------------------------------------------------------------- 1 | """Module for sending simulated tasks to pig-latin translation system.""" 2 | 3 | import asyncio 4 | import numpy as np 5 | import random 6 | 7 | from llama_agents import LlamaAgentsClient 8 | from llama_index.llms.openai import OpenAI 9 | from llama_index.core.llms import LLM 10 | 11 | 12 | def pig_latin(text: str) -> str: 13 | tokens = text.lower().split() 14 | tmp = [] 15 | for token in tokens: 16 | token = token[1:] + token[0] + "ay" 17 | tmp.append(token) 18 | return " ".join(tmp) 19 | 20 | 21 | async def send_new_task(client: LlamaAgentsClient, llm: LLM) -> None: 22 | seed = random.random() 23 | num_tasks = np.random.poisson(2) 24 | for _ in range(num_tasks): 25 | response = await llm.acomplete( 26 | f"({seed}) Provide a 3 to 5 word phrase. Don't include any punctuation." 
27 | ) 28 | task = pig_latin(response.text) 29 | print(f"text: {response.text}, task: {task}") 30 | client.create_task(task) 31 | 32 | 33 | async def main() -> None: 34 | client = LlamaAgentsClient("http://0.0.0.0:8001") 35 | llm = OpenAI("gpt-4o", temperature=1) 36 | try: 37 | while True: 38 | interarrival_time = np.random.exponential(3) 39 | await asyncio.sleep(interarrival_time) 40 | await send_new_task(client, llm) 41 | except KeyboardInterrupt: 42 | print("Shutting down.") 43 | 44 | 45 | if __name__ == "__main__": 46 | asyncio.run(main()) 47 | -------------------------------------------------------------------------------- /tests/app/components/test_task_list.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest.mock import AsyncMock, patch, MagicMock 3 | 4 | from llama_agents.app.components.task_list import TasksList 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_refresh_tasks() -> None: 9 | # Mock the response object and its json method 10 | mock_response = MagicMock() 11 | mock_response.json.return_value = ["Task1", "Task2"] 12 | 13 | with patch("httpx.AsyncClient.get", return_value=mock_response): 14 | tasks_list = TasksList("http://example.com") 15 | await tasks_list.refresh_tasks() 16 | 17 | # Assertions to verify the tasks have been updated 18 | assert tasks_list.tasks == ["Task1", "Task2"] 19 | 20 | 21 | @pytest.mark.asyncio 22 | async def test_watch_tasks() -> None: 23 | tasks_list = TasksList("http://example.com") 24 | tasks_list.tasks = ["Old Task1", "Old Task2"] 25 | 26 | # Simulate tasks_scroll being part of the component structure 27 | tasks_scroll = AsyncMock() 28 | tasks_scroll.remove_children = AsyncMock() 29 | tasks_scroll.mount = AsyncMock() 30 | 31 | # Patch the query_one method to return our mock scroll object when queried 32 | with patch.object(tasks_list, "query_one", return_value=tasks_scroll): 33 | new_tasks = ["New Task1", "New Task2"] 34 | await tasks_list.watch_tasks(new_tasks) 35 | 36 | # Ensure children are removed and new tasks are mounted 37 | tasks_scroll.remove_children.assert_called_once() 38 | assert tasks_scroll.mount.call_count == len(new_tasks) 39 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 python:3.10-slim as builder 2 | 3 | WORKDIR /app 4 | 5 | ENV POETRY_VERSION=1.7.1 6 | 7 | # Install libraries for necessary python package builds 8 | RUN apt-get update && apt-get --no-install-recommends install build-essential python3-dev libpq-dev -y && \ 9 | pip install --no-cache-dir --upgrade pip && \ 10 | pip install --no-cache-dir --upgrade poetry==${POETRY_VERSION} 11 | 12 | # Install ssh 13 | RUN apt-get -yq update && apt-get -yqq install ssh 14 | 15 | # Configure Poetry 16 | ENV POETRY_CACHE_DIR=/tmp/poetry_cache 17 | ENV POETRY_NO_INTERACTION=1 18 | ENV POETRY_VIRTUALENVS_IN_PROJECT=true 19 | ENV POETRY_VIRTUALENVS_CREATE=true 20 | 21 | # Install dependencies 22 | COPY multi-agent-app/poetry.lock multi-agent-app/pyproject.toml ./ 23 | 24 | RUN poetry install --no-cache --no-root 25 | 26 | FROM --platform=linux/amd64 python:3.10-slim as runtime 27 | 28 | # Install wget for healthcheck 29 | RUN apt-get update && apt-get install -y wget 30 | 31 | RUN apt-get update -y && \ 32 | apt-get install --no-install-recommends libpq5 -y && \ 33 | rm -rf /var/lib/apt/lists/* # Install 
libpq for psycopg2 34 | 35 | RUN groupadd -r appuser && useradd --no-create-home -g appuser -r appuser 36 | USER appuser 37 | 38 | WORKDIR /app 39 | 40 | ENV VIRTUAL_ENV=/app/.venv 41 | COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV} 42 | ENV PATH="${VIRTUAL_ENV}/bin:${PATH}" 43 | 44 | # Copy source code 45 | COPY ./logging.ini ./logging.ini 46 | COPY ./multi-agent-app/multi_agent_app ./multi_agent_app 47 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 python:3.10-slim as builder 2 | 3 | WORKDIR /app 4 | 5 | ENV POETRY_VERSION=1.7.1 6 | 7 | # Install libraries for necessary python package builds 8 | RUN apt-get update && apt-get --no-install-recommends install build-essential python3-dev libpq-dev -y && \ 9 | pip install --no-cache-dir --upgrade pip && \ 10 | pip install --no-cache-dir --upgrade poetry==${POETRY_VERSION} 11 | 12 | # Install ssh 13 | RUN apt-get -yq update && apt-get -yqq install ssh 14 | 15 | # Configure Poetry 16 | ENV POETRY_CACHE_DIR=/tmp/poetry_cache 17 | ENV POETRY_NO_INTERACTION=1 18 | ENV POETRY_VIRTUALENVS_IN_PROJECT=true 19 | ENV POETRY_VIRTUALENVS_CREATE=true 20 | 21 | # Install dependencies 22 | COPY ./poetry.lock ./pyproject.toml ./ 23 | 24 | RUN poetry install --no-cache --no-root 25 | 26 | FROM --platform=linux/amd64 python:3.10-slim as runtime 27 | 28 | # Install wget for healthcheck 29 | RUN apt-get update && apt-get install -y wget 30 | 31 | RUN apt-get update -y && \ 32 | apt-get install --no-install-recommends libpq5 -y && \ 33 | rm -rf /var/lib/apt/lists/* # Install libpq for psycopg2 34 | 35 | RUN groupadd -r appuser && useradd --no-create-home -g appuser -r appuser 36 | USER appuser 37 | 38 | WORKDIR /app 39 | 40 | ENV VIRTUAL_ENV=/app/.venv 41 | COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV} 42 | ENV PATH="${VIRTUAL_ENV}/bin:${PATH}" 43 | 44 | # Copy source code 45 | COPY ./logging.ini ./logging.ini 46 | COPY ./human_in_the_loop ./human_in_the_loop 47 | 48 | # Copy data for Agentic RAG 49 | COPY ./data ./data 50 | -------------------------------------------------------------------------------- /tests/message_queue_consumers/test_base.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import pytest 3 | from pydantic import PrivateAttr 4 | from typing import Any, List 5 | 6 | from llama_agents.message_consumers.base import BaseMessageQueueConsumer 7 | from llama_agents.message_queues.simple import SimpleMessageQueue 8 | from llama_agents.messages.base import QueueMessage 9 | 10 | 11 | class MockMessageConsumer(BaseMessageQueueConsumer): 12 | processed_messages: List[QueueMessage] = [] 13 | _lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock) 14 | 15 | async def _process_message(self, message: QueueMessage, **kwargs: Any) -> None: 16 | async with self._lock: 17 | self.processed_messages.append(message) 18 | 19 | 20 | @pytest.mark.asyncio() 21 | async def test_consumer_consumes_messages() -> None: 22 | # Arrange 23 | consumer_one = MockMessageConsumer() 24 | mq = SimpleMessageQueue() 25 | task = await mq.launch_local() 26 | 27 | # Act 28 | start_consuming_callable = await mq.register_consumer(consumer_one) 29 | await start_consuming_callable() 30 | await asyncio.sleep(0.1) 31 | await mq.publish(QueueMessage(publisher_id="test", id_="1")) 32 | await mq.publish(QueueMessage(publisher_id="test", 
id_="2")) 33 | 34 | # Give some time for last message to get published and sent to consumers 35 | await asyncio.sleep(0.5) 36 | task.cancel() 37 | 38 | # Assert 39 | assert consumer_one.id_ in [ 40 | c.id_ for c in await mq.get_consumers(consumer_one.message_type) 41 | ] 42 | assert ["1", "2"] == [m.id_ for m in consumer_one.processed_messages] 43 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 python:3.10-slim as builder 2 | 3 | WORKDIR /app 4 | 5 | ENV POETRY_VERSION=1.7.1 6 | 7 | # Install libraries for necessary python package builds 8 | RUN apt-get update && apt-get --no-install-recommends install build-essential python3-dev libpq-dev -y && \ 9 | pip install --no-cache-dir --upgrade pip && \ 10 | pip install --no-cache-dir --upgrade poetry==${POETRY_VERSION} 11 | 12 | # Install ssh 13 | RUN apt-get -yq update && apt-get -yqq install ssh 14 | 15 | # Configure Poetry 16 | ENV POETRY_CACHE_DIR=/tmp/poetry_cache 17 | ENV POETRY_NO_INTERACTION=1 18 | ENV POETRY_VIRTUALENVS_IN_PROJECT=true 19 | ENV POETRY_VIRTUALENVS_CREATE=true 20 | 21 | # Install dependencies 22 | COPY multi-agent-app-rabbitmq/poetry.lock multi-agent-app-rabbitmq/pyproject.toml ./ 23 | 24 | RUN poetry install --no-cache --no-root 25 | 26 | FROM --platform=linux/amd64 python:3.10-slim as runtime 27 | 28 | # Install wget for healthcheck 29 | RUN apt-get update && apt-get install -y wget 30 | 31 | RUN apt-get update -y && \ 32 | apt-get install --no-install-recommends libpq5 -y && \ 33 | rm -rf /var/lib/apt/lists/* # Install libpq for psycopg2 34 | 35 | RUN groupadd -r appuser && useradd --no-create-home -g appuser -r appuser 36 | USER appuser 37 | 38 | WORKDIR /app 39 | 40 | ENV VIRTUAL_ENV=/app/.venv 41 | COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV} 42 | ENV PATH="${VIRTUAL_ENV}/bin:${PATH}" 43 | 44 | # Copy source code 45 | COPY ./logging.ini ./logging.ini 46 | COPY ./multi-agent-app-rabbitmq/multi_agent_app_rabbitmq ./multi_agent_app_rabbitmq 47 | -------------------------------------------------------------------------------- /examples/agentic_human_local_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents.launchers.local import LocalLauncher 2 | from llama_agents.services import HumanService, AgentService 3 | from llama_agents.control_plane.server import ControlPlaneServer 4 | from llama_agents.message_queues.simple import SimpleMessageQueue 5 | from llama_agents.orchestrators.agent import AgentOrchestrator 6 | 7 | from llama_index.core.agent import FunctionCallingAgentWorker 8 | from llama_index.core.tools import FunctionTool 9 | from llama_index.llms.openai import OpenAI 10 | 11 | 12 | # create an agent 13 | def get_the_secret_fact() -> str: 14 | """Returns the secret fact.""" 15 | return "The secret fact is: A baby llama is called a 'Cria'." 
16 | 17 | 18 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 19 | 20 | # create our multi-agent framework components 21 | message_queue = SimpleMessageQueue() 22 | 23 | worker = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 24 | agent = worker.as_agent() 25 | agent_service = AgentService( 26 | agent=agent, 27 | message_queue=message_queue, 28 | description="Useful for getting the secret fact.", 29 | service_name="secret_fact_agent", 30 | ) 31 | 32 | human_service = HumanService( 33 | message_queue=message_queue, description="Answers queries about math." 34 | ) 35 | 36 | control_plane = ControlPlaneServer( 37 | message_queue=message_queue, 38 | orchestrator=AgentOrchestrator(llm=OpenAI()), 39 | ) 40 | 41 | # launch it 42 | launcher = LocalLauncher( 43 | [agent_service, human_service], 44 | control_plane, 45 | message_queue, 46 | ) 47 | launcher.launch_single("What is 5 + 5?") 48 | -------------------------------------------------------------------------------- /examples/agentic_local_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | AgentOrchestrator, 4 | ControlPlaneServer, 5 | LocalLauncher, 6 | SimpleMessageQueue, 7 | ) 8 | from llama_index.core.agent import FunctionCallingAgentWorker 9 | from llama_index.core.tools import FunctionTool 10 | from llama_index.llms.openai import OpenAI 11 | 12 | 13 | # create an agent 14 | def get_the_secret_fact() -> str: 15 | """Returns the secret fact.""" 16 | return "The secret fact is: A baby llama is called a 'Cria'." 17 | 18 | 19 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 20 | 21 | worker1 = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 22 | worker2 = FunctionCallingAgentWorker.from_tools([], llm=OpenAI()) 23 | agent1 = worker1.as_agent() 24 | agent2 = worker2.as_agent() 25 | 26 | # create our multi-agent framework components 27 | message_queue = SimpleMessageQueue() 28 | control_plane = ControlPlaneServer( 29 | message_queue=message_queue, 30 | orchestrator=AgentOrchestrator(llm=OpenAI()), 31 | ) 32 | agent_server_1 = AgentService( 33 | agent=agent1, 34 | message_queue=message_queue, 35 | description="Useful for getting the secret fact.", 36 | service_name="secret_fact_agent", 37 | ) 38 | agent_server_2 = AgentService( 39 | agent=agent2, 40 | message_queue=message_queue, 41 | description="Useful for getting random dumb facts.", 42 | service_name="dumb_fact_agent", 43 | ) 44 | 45 | # launch it 46 | launcher = LocalLauncher([agent_server_1, agent_server_2], control_plane, message_queue) 47 | result = launcher.launch_single("What is the secret fact?") 48 | 49 | print(f"Result: {result}") 50 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/additional_services/human_consumer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | from llama_agents.message_queues.apache_kafka import KafkaMessageQueue 4 | from pig_latin_translation.additional_services.task_result import TaskResultService 5 | from pig_latin_translation.utils import load_from_env 6 | 7 | message_queue_host = load_from_env("KAFKA_HOST") 8 | message_queue_port = load_from_env("KAFKA_PORT") 9 | human_consumer_host = load_from_env("HUMAN_CONSUMER_HOST") 10 | human_consumer_port = load_from_env("HUMAN_CONSUMER_PORT") 11 | localhost = load_from_env("LOCALHOST") 12 | 13 | 14 | # create 
our multi-agent framework components 15 | message_queue = KafkaMessageQueue.from_url_params( 16 | host=message_queue_host, 17 | port=int(message_queue_port) if message_queue_port else None, 18 | ) 19 | 20 | human_consumer_server = TaskResultService( 21 | message_queue=message_queue, 22 | host=human_consumer_host, 23 | port=int(human_consumer_port) if human_consumer_port else None, 24 | name="human", 25 | ) 26 | 27 | app = human_consumer_server._app 28 | 29 | 30 | # launch 31 | async def launch() -> None: 32 | # register to message queue and start consuming 33 | start_consuming_callable = await human_consumer_server.register_to_message_queue() 34 | _ = asyncio.create_task(start_consuming_callable()) 35 | 36 | cfg = uvicorn.Config( 37 | human_consumer_server._app, 38 | host=localhost, 39 | port=human_consumer_server.port, 40 | ) 41 | server = uvicorn.Server(cfg) 42 | await server.serve() 43 | 44 | 45 | if __name__ == "__main__": 46 | asyncio.run(launch()) 47 | -------------------------------------------------------------------------------- /llama_agents/app/components/task_list.py: -------------------------------------------------------------------------------- 1 | import httpx 2 | from typing import Any, List 3 | 4 | from textual.app import ComposeResult 5 | from textual.containers import VerticalScroll 6 | from textual.reactive import reactive 7 | from textual.widgets import Button, Static 8 | 9 | from llama_agents.app.components.types import ButtonType 10 | 11 | 12 | class TaskButton(Button): 13 | type: ButtonType = ButtonType.TASK 14 | 15 | 16 | class TasksList(Static): 17 | tasks: List[str] = reactive([]) 18 | 19 | def __init__(self, control_plane_url: str, **kwargs: Any): 20 | self.control_plane_url = control_plane_url 21 | super().__init__(**kwargs) 22 | 23 | def compose(self) -> ComposeResult: 24 | with VerticalScroll(id="tasks-scroll"): 25 | for task in self.tasks: 26 | yield TaskButton(task) 27 | 28 | async def on_mount(self) -> None: 29 | self.set_interval(5, self.refresh_tasks) 30 | 31 | async def refresh_tasks(self) -> None: 32 | async with httpx.AsyncClient(timeout=120.0) as client: 33 | response = await client.get(f"{self.control_plane_url}/tasks") 34 | tasks_dict = response.json() 35 | 36 | new_tasks = [] 37 | for task_id in tasks_dict: 38 | new_tasks.append(task_id) 39 | 40 | self.tasks = [*new_tasks] 41 | 42 | async def watch_tasks(self, new_tasks: List[str]) -> None: 43 | try: 44 | tasks_scroll = self.query_one("#tasks-scroll") 45 | await tasks_scroll.remove_children() 46 | for task in new_tasks: 47 | await tasks_scroll.mount(TaskButton(task)) 48 | except Exception: 49 | pass 50 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 python:3.10-slim as builder 2 | 3 | WORKDIR /app 4 | 5 | ENV POETRY_VERSION=1.7.1 6 | 7 | # Install libraries for necessary python package builds 8 | RUN apt-get update && apt-get --no-install-recommends install build-essential python3-dev libpq-dev -y && \ 9 | pip install --no-cache-dir --upgrade pip && \ 10 | pip install --no-cache-dir --upgrade poetry==${POETRY_VERSION} 11 | 12 | # Install ssh 13 | RUN apt-get -yq update && apt-get -yqq install ssh 14 | 15 | # Configure Poetry 16 | ENV POETRY_CACHE_DIR=/tmp/poetry_cache 17 | ENV POETRY_NO_INTERACTION=1 18 | ENV POETRY_VIRTUALENVS_IN_PROJECT=true 19 | ENV POETRY_VIRTUALENVS_CREATE=true 20 | 21 | # Install 
dependencies 22 | COPY ./poetry.lock ./pyproject.toml ./ 23 | 24 | RUN mkdir -p -m 0600 ~/.ssh && ssh-keyscan github.com >> ~/.ssh/known_hosts 25 | RUN --mount=type=secret,id=id_ed25519,dst=/root/.ssh/id_ed25519 poetry install --no-cache --no-root -vvv 26 | 27 | RUN poetry install --no-cache --no-root 28 | 29 | FROM --platform=linux/amd64 python:3.10-slim as runtime 30 | 31 | # Install wget for healthcheck 32 | RUN apt-get update && apt-get install -y wget 33 | 34 | RUN apt-get update -y && \ 35 | apt-get install --no-install-recommends libpq5 -y && \ 36 | rm -rf /var/lib/apt/lists/* # Install libpq for psycopg2 37 | 38 | RUN groupadd -r appuser && useradd --no-create-home -g appuser -r appuser 39 | USER appuser 40 | 41 | WORKDIR /app 42 | 43 | ENV VIRTUAL_ENV=/app/.venv 44 | COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV} 45 | ENV PATH="${VIRTUAL_ENV}/bin:${PATH}" 46 | 47 | # Copy source code 48 | COPY ./logging.ini ./logging.ini 49 | COPY ./pig_latin_translation ./pig_latin_translation 50 | -------------------------------------------------------------------------------- /tests/message_queues/test_apache_kafka.py: -------------------------------------------------------------------------------- 1 | import json 2 | import pytest 3 | from unittest.mock import patch, AsyncMock 4 | from llama_agents import QueueMessage 5 | from llama_agents.message_queues.apache_kafka import KafkaMessageQueue 6 | 7 | 8 | try: 9 | import aiokafka 10 | except (ModuleNotFoundError, ImportError): 11 | aiokafka = None 12 | 13 | 14 | def test_init() -> None: 15 | # arrange/act 16 | mq = KafkaMessageQueue(url="0.0.0.0:5555") 17 | 18 | # assert 19 | assert mq.url == "0.0.0.0:5555" 20 | 21 | 22 | def test_from_url_params() -> None: 23 | # arrange 24 | host = "mock-host" 25 | port = 8080 26 | 27 | # act 28 | mq = KafkaMessageQueue.from_url_params(host=host, port=port) 29 | 30 | # assert 31 | assert mq.url == f"{host}:{port}" 32 | 33 | 34 | @pytest.mark.asyncio() 35 | @pytest.mark.skipif(aiokafka is None, reason="aiokafka not installed") 36 | async def test_publish() -> None: 37 | from aiokafka import AIOKafkaProducer 38 | 39 | # Arrange 40 | mq = KafkaMessageQueue() 41 | 42 | # message types 43 | queue_message = QueueMessage(publisher_id="test", id_="1") 44 | message_body = json.dumps(queue_message.model_dump()).encode("utf-8") 45 | 46 | with patch.object(AIOKafkaProducer, "start", new_callable=AsyncMock) as mock_start: 47 | with patch.object( 48 | AIOKafkaProducer, "send_and_wait", new_callable=AsyncMock 49 | ) as mock_send_and_wait: 50 | # Act 51 | _ = await mq._publish(queue_message) 52 | 53 | # Assert 54 | mock_start.assert_awaited_once() 55 | mock_send_and_wait.assert_awaited_once_with( 56 | queue_message.type, message_body 57 | ) 58 | -------------------------------------------------------------------------------- /examples/redis/simple-redis-app/README.md: -------------------------------------------------------------------------------- 1 | # Example: Launching a Multi-Agent System with Redis 2 | 3 | This example demonstrates how to build and launch a multi-agent system using Redis as the message queue. The example includes a simple script to launch the system and a `docker-compose` file to set up a Redis server. 4 | 5 | ## Prerequisites 6 | 7 | - Docker and Docker Compose installed. (See [here](https://docs.docker.com/get-docker/) for installation instructions.) 
8 | 9 | ## Installation 10 | 11 | To run these examples, you'll need to have `llama-agents` installed with the `redis` extra: 12 | 13 | ```sh 14 | # Using pip 15 | pip install "llama-agents[redis]" 16 | 17 | # Using Poetry 18 | poetry add llama-agents -E "redis" 19 | ``` 20 | 21 | To run the example scripts, you also need to install the libraries they depend on: 22 | 23 | ```sh 24 | poetry install 25 | ``` 26 | 27 | ## Usage Pattern 28 | 29 | ```python 30 | from llama_agents.message_queues.redis import RedisMessageQueue 31 | 32 | message_queue = RedisMessageQueue( 33 | url=... # if no URL is supplied, the default redis://localhost:6379 is used 34 | ) 35 | ``` 36 | 37 | ## Example 38 | 39 | ### Setting Up Redis 40 | 41 | First, we need to set up a Redis server: 42 | 43 | ```sh 44 | docker-compose up -d 45 | ``` 46 | 47 | This command will start a Redis server on port 6379. 48 | 49 | ### Running the Example Scripts 50 | 51 | With the Redis server running, we can now run our example script to launch the multi-agent system. 52 | 53 | ```sh 54 | # Using LocalLauncher 55 | poetry run python ./local_launcher.py 56 | ``` 57 | 58 | The script above will build a simple multi-agent app, connect it to the Redis message queue, and subsequently send the specified task. 59 | -------------------------------------------------------------------------------- /examples/pipeline_human_service_as_tool_local_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | HumanService, 4 | ControlPlaneServer, 5 | SimpleMessageQueue, 6 | PipelineOrchestrator, 7 | ServiceComponent, 8 | LocalLauncher, 9 | ) 10 | from llama_agents.tools import ServiceAsTool 11 | 12 | 13 | from llama_index.core.query_pipeline import QueryPipeline 14 | from llama_index.llms.openai import OpenAI 15 | from llama_index.agent.openai import OpenAIAgent 16 | 17 | 18 | # create our multi-agent framework components 19 | message_queue = SimpleMessageQueue() 20 | 21 | human_service = HumanService( 22 | message_queue=message_queue, 23 | description="Answers queries about math.", 24 | ) 25 | 26 | human_service_as_tool = ServiceAsTool.from_service_definition( 27 | message_queue=message_queue, service_definition=human_service.service_definition 28 | ) 29 | 30 | agent = OpenAIAgent.from_tools( 31 | [human_service_as_tool], 32 | system_prompt="Perform the task, return the result as well as a funny joke.", 33 | llm=OpenAI(model="gpt-4o"), 34 | ) 35 | agent_server = AgentService( 36 | agent=agent, 37 | message_queue=message_queue, 38 | description="Useful for telling funny jokes.", 39 | service_name="funny_agent", 40 | ) 41 | 42 | agent_component = ServiceComponent.from_service_definition(agent_server) 43 | 44 | pipeline = QueryPipeline(chain=[agent_component]) 45 | 46 | pipeline_orchestrator = PipelineOrchestrator(pipeline) 47 | 48 | control_plane = ControlPlaneServer(message_queue, pipeline_orchestrator) 49 | 50 | # launch it 51 | launcher = LocalLauncher([human_service, agent_server], control_plane, message_queue) 52 | result = launcher.launch_single("What is 1+1+2+3+5+8?") 53 | 54 | print(f"Result: {result}") 55 | -------------------------------------------------------------------------------- /llama_agents/app/components/service_list.py: -------------------------------------------------------------------------------- 1 | import httpx 2 | from typing import Any, List 3 | 4 | from textual.app import ComposeResult 5 |
from textual.containers import VerticalScroll 6 | from textual.reactive import reactive 7 | from textual.widgets import Button, Static 8 | 9 | from llama_agents.app.components.types import ButtonType 10 | 11 | 12 | class ServiceButton(Button): 13 | type: ButtonType = ButtonType.SERVICE 14 | 15 | 16 | class ServicesList(Static): 17 | services: List[str] = reactive([]) 18 | 19 | def __init__(self, control_plane_url: str, **kwargs: Any): 20 | self.control_plane_url = control_plane_url 21 | super().__init__(**kwargs) 22 | 23 | def compose(self) -> ComposeResult: 24 | with VerticalScroll(id="services-scroll"): 25 | for service in self.services: 26 | yield ServiceButton(service) 27 | 28 | async def on_mount(self) -> None: 29 | self.set_interval(2, self.refresh_services) 30 | 31 | async def refresh_services(self) -> None: 32 | async with httpx.AsyncClient(timeout=120.0) as client: 33 | response = await client.get(f"{self.control_plane_url}/services") 34 | services_dict = response.json() 35 | 36 | new_services = [] 37 | for service_name in services_dict: 38 | new_services.append(service_name) 39 | 40 | self.services = [*new_services] 41 | 42 | async def watch_services(self, new_services: List[str]) -> None: 43 | try: 44 | services_scroll = self.query_one("#services-scroll") 45 | await services_scroll.remove_children() 46 | for service in new_services: 47 | await services_scroll.mount(ServiceButton(service)) 48 | except Exception: 49 | pass 50 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/additional_services/human_consumer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 4 | from multi_agent_app_rabbitmq.additional_services.task_result import TaskResultService 5 | from multi_agent_app_rabbitmq.utils import load_from_env 6 | 7 | message_queue_host = load_from_env("RABBITMQ_HOST") 8 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 9 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 10 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 11 | human_consumer_host = load_from_env("HUMAN_CONSUMER_HOST") 12 | human_consumer_port = load_from_env("HUMAN_CONSUMER_PORT") 13 | localhost = load_from_env("LOCALHOST") 14 | 15 | 16 | # create our multi-agent framework components 17 | message_queue = RabbitMQMessageQueue( 18 | url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 19 | ) 20 | 21 | human_consumer_server = TaskResultService( 22 | message_queue=message_queue, 23 | host=human_consumer_host, 24 | port=int(human_consumer_port) if human_consumer_port else None, 25 | name="human", 26 | ) 27 | 28 | app = human_consumer_server._app 29 | 30 | 31 | # launch 32 | async def launch() -> None: 33 | # register to message queue and start consuming 34 | start_consuming_callable = await human_consumer_server.register_to_message_queue() 35 | _ = asyncio.create_task(start_consuming_callable()) 36 | 37 | cfg = uvicorn.Config( 38 | human_consumer_server._app, 39 | host=localhost, 40 | port=human_consumer_server.port, 41 | ) 42 | server = uvicorn.Server(cfg) 43 | await server.serve() 44 | 45 | 46 | if __name__ == "__main__": 47 | asyncio.run(launch()) 48 | -------------------------------------------------------------------------------- 
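Once this RabbitMQ example app is up (the task-result consumer above, plus the control plane and agent services that follow), tasks are submitted to the control plane over HTTP. A minimal client-side sketch, assuming the control plane is reachable at the URL below (match it to your CONTROL_PLANE_HOST/CONTROL_PLANE_PORT settings) and that `create_task` returns the new task's id:

```python
from llama_agents import LlamaAgentsClient

client = LlamaAgentsClient("http://localhost:8001")  # illustrative URL
task_id = client.create_task("What is the secret fact?")
print(f"submitted task: {task_id}")
```

--------------------------------------------------------------------------------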
/examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/core_services/control_plane.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | 4 | from llama_agents import AgentOrchestrator, ControlPlaneServer 5 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 6 | from llama_index.llms.openai import OpenAI 7 | 8 | from multi_agent_app_rabbitmq.utils import load_from_env 9 | 10 | 11 | message_queue_host = load_from_env("RABBITMQ_HOST") 12 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 13 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 14 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 15 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 16 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 17 | localhost = load_from_env("LOCALHOST") 18 | 19 | 20 | # setup message queue 21 | message_queue = RabbitMQMessageQueue( 22 | url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 23 | ) 24 | 25 | # setup control plane 26 | control_plane = ControlPlaneServer( 27 | message_queue=message_queue, 28 | orchestrator=AgentOrchestrator(llm=OpenAI()), 29 | host=control_plane_host, 30 | port=int(control_plane_port) if control_plane_port else None, 31 | ) 32 | 33 | 34 | app = control_plane.app 35 | 36 | 37 | # launch 38 | async def launch() -> None: 39 | # register to message queue and start consuming 40 | start_consuming_callable = await control_plane.register_to_message_queue() 41 | _ = asyncio.create_task(start_consuming_callable()) 42 | 43 | cfg = uvicorn.Config( 44 | control_plane.app, 45 | host=localhost, 46 | port=control_plane.port, 47 | ) 48 | server = uvicorn.Server(cfg) 49 | await server.serve() 50 | 51 | 52 | if __name__ == "__main__": 53 | asyncio.run(launch()) 54 | -------------------------------------------------------------------------------- /llama_agents/message_consumers/base.py: -------------------------------------------------------------------------------- 1 | """Message consumers.""" 2 | 3 | from abc import ABC, abstractmethod 4 | from pydantic import BaseModel, Field, ConfigDict 5 | from typing import Any, Callable, TYPE_CHECKING, Coroutine 6 | 7 | from llama_agents.messages.base import QueueMessage 8 | from llama_agents.types import generate_id 9 | 10 | if TYPE_CHECKING: 11 | pass 12 | 13 | StartConsumingCallable = Callable[..., Coroutine[Any, Any, None]] 14 | 15 | 16 | async def default_start_consuming_callable() -> None: 17 | pass 18 | 19 | 20 | class BaseMessageQueueConsumer(BaseModel, ABC): 21 | """Consumer of a MessageQueue. 22 | 23 | Process messages from a MessageQueue for a specific message type. 24 | """ 25 | 26 | model_config = ConfigDict(arbitrary_types_allowed=True) 27 | id_: str = Field(default_factory=generate_id) 28 | message_type: str = Field( 29 | default="default", description="Type of the message to consume." 30 | ) 31 | channel: Any = Field( 32 | default=None, description="The channel if any for which to receive messages." 
33 | ) 34 | consuming_callable: StartConsumingCallable = Field( 35 | default=default_start_consuming_callable 36 | ) 37 | 38 | @abstractmethod 39 | async def _process_message(self, message: QueueMessage, **kwargs: Any) -> Any: 40 | """Subclasses should implement logic here.""" 41 | 42 | async def process_message(self, message: QueueMessage, **kwargs: Any) -> Any: 43 | """Logic for processing message.""" 44 | if message.type != self.message_type: 45 | raise ValueError("Consumer cannot process the given kind of Message.") 46 | return await self._process_message(message, **kwargs) 47 | 48 | async def start_consuming( 49 | self, 50 | ) -> None: 51 | """Begin consuming messages.""" 52 | await self.consuming_callable() 53 | -------------------------------------------------------------------------------- /examples/agentic_toolservice_local_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents.launchers.local import LocalLauncher 2 | from llama_agents.services import AgentService, ToolService 3 | from llama_agents.tools import MetaServiceTool 4 | from llama_agents.control_plane.server import ControlPlaneServer 5 | from llama_agents.message_queues.simple import SimpleMessageQueue 6 | from llama_agents.orchestrators.agent import AgentOrchestrator 7 | 8 | from llama_index.core.agent import FunctionCallingAgentWorker 9 | from llama_index.core.tools import FunctionTool 10 | from llama_index.llms.openai import OpenAI 11 | 12 | 13 | # create an agent 14 | def get_the_secret_fact() -> str: 15 | """Returns the secret fact.""" 16 | return "The secret fact is: A baby llama is called a 'Cria'." 17 | 18 | 19 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 20 | 21 | 22 | # create our multi-agent framework components 23 | message_queue = SimpleMessageQueue() 24 | tool_service = ToolService( 25 | message_queue=message_queue, 26 | tools=[tool], 27 | running=True, 28 | step_interval=0.5, 29 | ) 30 | 31 | control_plane = ControlPlaneServer( 32 | message_queue=message_queue, 33 | orchestrator=AgentOrchestrator(llm=OpenAI()), 34 | ) 35 | 36 | meta_tool = MetaServiceTool( 37 | tool_metadata=tool.metadata, 38 | message_queue=message_queue, 39 | tool_service_name=tool_service.service_name, 40 | ) 41 | worker1 = FunctionCallingAgentWorker.from_tools( 42 | [meta_tool], 43 | llm=OpenAI(), 44 | ) 45 | agent1 = worker1.as_agent() 46 | agent_server_1 = AgentService( 47 | agent=agent1, 48 | message_queue=message_queue, 49 | description="Useful for getting the secret fact.", 50 | service_name="secret_fact_agent", 51 | ) 52 | 53 | # launch it 54 | launcher = LocalLauncher( 55 | [agent_server_1, tool_service], 56 | control_plane, 57 | message_queue, 58 | ) 59 | result = launcher.launch_single("What is the secret fact?") 60 | 61 | print(f"Result: {result}") 62 | -------------------------------------------------------------------------------- /llama_agents/__init__.py: -------------------------------------------------------------------------------- 1 | from llama_agents.client import AsyncLlamaAgentsClient, LlamaAgentsClient 2 | from llama_agents.control_plane import ControlPlaneServer 3 | from llama_agents.launchers import LocalLauncher, ServerLauncher 4 | from llama_agents.message_consumers import CallableMessageConsumer 5 | from llama_agents.message_queues import SimpleMessageQueue 6 | from llama_agents.messages import QueueMessage 7 | from llama_agents.orchestrators import ( 8 | AgentOrchestrator, 9 | PipelineOrchestrator, 10 | OrchestratorRouter, 11 | ) 12 
| from llama_agents.tools import ( 13 | AgentServiceTool, 14 | MetaServiceTool, 15 | ServiceAsTool, 16 | ServiceComponent, 17 | ServiceTool, 18 | ) 19 | from llama_agents.services import ( 20 | AgentService, 21 | ToolService, 22 | HumanService, 23 | ComponentService, 24 | ) 25 | 26 | # configure logger 27 | import logging 28 | 29 | root_logger = logging.getLogger("llama_agents") 30 | 31 | formatter = logging.Formatter("%(levelname)s:%(name)s - %(message)s") 32 | console_handler = logging.StreamHandler() 33 | console_handler.setFormatter(formatter) 34 | root_logger.addHandler(console_handler) 35 | 36 | root_logger.setLevel(logging.INFO) 37 | root_logger.propagate = False 38 | 39 | 40 | __all__ = [ 41 | # clients 42 | "LlamaAgentsClient", 43 | "AsyncLlamaAgentsClient", 44 | # services 45 | "AgentService", 46 | "HumanService", 47 | "ToolService", 48 | "ComponentService", 49 | # messages 50 | "QueueMessage", 51 | # message consumers 52 | "CallableMessageConsumer", 53 | # message queues 54 | "SimpleMessageQueue", 55 | # launchers 56 | "LocalLauncher", 57 | "ServerLauncher", 58 | # control planes 59 | "ControlPlaneServer", 60 | # orchestrators 61 | "AgentOrchestrator", 62 | "PipelineOrchestrator", 63 | "OrchestratorRouter", 64 | # various utils 65 | "AgentServiceTool", 66 | "ServiceAsTool", 67 | "ServiceComponent", 68 | "ServiceTool", 69 | "MetaServiceTool", 70 | ] 71 | -------------------------------------------------------------------------------- /llama_agents/messages/base.py: -------------------------------------------------------------------------------- 1 | """Base Message.""" 2 | 3 | import uuid 4 | from datetime import datetime 5 | from pydantic import BaseModel, Field, ConfigDict 6 | from typing import Any, Dict, Optional 7 | 8 | from llama_agents.types import ActionTypes 9 | 10 | 11 | class QueueMessageStats(BaseModel): 12 | """Stats for a queue message. 13 | 14 | Attributes: 15 | publish_time (Optional[str]): 16 | The time the message was published. 17 | process_start_time (Optional[str]): 18 | The time the message processing started. 19 | process_end_time (Optional[str]): 20 | The time the message processing ended. 21 | """ 22 | 23 | publish_time: Optional[str] = Field(default=None) 24 | process_start_time: Optional[str] = Field(default=None) 25 | process_end_time: Optional[str] = Field(default=None) 26 | 27 | @staticmethod 28 | def timestamp_str(format: str = "%Y-%m-%d %H:%M:%S") -> str: 29 | return datetime.now().strftime(format) 30 | 31 | 32 | class QueueMessage(BaseModel): 33 | """A message for the message queue. 34 | 35 | Attributes: 36 | id_ (str): 37 | The id of the message. 38 | publisher_id (str): 39 | The id of the publisher. 40 | data (Dict[str, Any]): 41 | The data of the message. 42 | action (Optional[ActionTypes]): 43 | The action of the message, used for deciding how to process the message. 44 | stats (QueueMessageStats): 45 | The stats of the message. 46 | type (str): 47 | The type of the message. Typically this is a service name. 48 | """ 49 | 50 | model_config = ConfigDict(arbitrary_types_allowed=True) 51 | id_: str = Field(default_factory=lambda: str(uuid.uuid4())) 52 | publisher_id: str = Field(default="default", description="Id of publisher.") 53 | data: Dict[str, Any] = Field(default_factory=dict) 54 | action: Optional[ActionTypes] = None 55 | stats: QueueMessageStats = Field(default_factory=QueueMessageStats) 56 | type: str = Field( 57 | default="default", description="Type of the message, used for routing." 
58 | ) 59 | -------------------------------------------------------------------------------- /examples/pipeline_human_local_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | HumanService, 4 | ControlPlaneServer, 5 | SimpleMessageQueue, 6 | PipelineOrchestrator, 7 | ServiceComponent, 8 | LocalLauncher, 9 | ) 10 | 11 | from llama_index.core.agent import FunctionCallingAgentWorker 12 | from llama_index.core.tools import FunctionTool 13 | from llama_index.core.query_pipeline import RouterComponent, QueryPipeline 14 | from llama_index.llms.openai import OpenAI 15 | from llama_index.core.selectors import PydanticSingleSelector 16 | 17 | 18 | # create an agent 19 | def get_the_secret_fact() -> str: 20 | """Returns the secret fact.""" 21 | return "The secret fact is: A baby llama is called a 'Cria'." 22 | 23 | 24 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 25 | 26 | # create our multi-agent framework components 27 | message_queue = SimpleMessageQueue() 28 | 29 | worker = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 30 | agent = worker.as_agent() 31 | agent_service = AgentService( 32 | agent=agent, 33 | message_queue=message_queue, 34 | description="Useful for getting the secret fact.", 35 | service_name="secret_fact_agent", 36 | ) 37 | agent_component = ServiceComponent.from_service_definition(agent_service) 38 | 39 | human_service = HumanService( 40 | message_queue=message_queue, description="Answers queries about math." 41 | ) 42 | human_component = ServiceComponent.from_service_definition(human_service) 43 | 44 | pipeline = QueryPipeline( 45 | chain=[ 46 | RouterComponent( 47 | selector=PydanticSingleSelector.from_defaults(llm=OpenAI()), 48 | choices=[agent_service.description, human_service.description], 49 | components=[agent_component, human_component], 50 | ) 51 | ] 52 | ) 53 | 54 | pipeline_orchestrator = PipelineOrchestrator(pipeline) 55 | 56 | control_plane = ControlPlaneServer(message_queue, pipeline_orchestrator) 57 | 58 | # launch it 59 | launcher = LocalLauncher([agent_service, human_service], control_plane, message_queue) 60 | result = launcher.launch_single("What is 1 + 2 + 3 + 4 + 5?") 61 | 62 | print(f"Result: {result}") 63 | -------------------------------------------------------------------------------- /examples/kafka/simple-scripts/local_launcher_human_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | HumanService, 4 | ControlPlaneServer, 5 | PipelineOrchestrator, 6 | ServiceComponent, 7 | LocalLauncher, 8 | ) 9 | from llama_agents.message_queues.apache_kafka import KafkaMessageQueue 10 | 11 | from llama_index.core.agent import FunctionCallingAgentWorker 12 | from llama_index.core.tools import FunctionTool 13 | from llama_index.core.query_pipeline import RouterComponent, QueryPipeline 14 | from llama_index.llms.openai import OpenAI 15 | from llama_index.core.selectors import PydanticSingleSelector 16 | 17 | 18 | # create an agent 19 | def get_the_secret_fact() -> str: 20 | """Returns the secret fact.""" 21 | return "The secret fact is: A baby llama is called a 'Cria'." 
22 | 23 | 24 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 25 | 26 | # create our multi-agent framework components 27 | message_queue = KafkaMessageQueue() 28 | 29 | worker = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 30 | agent = worker.as_agent() 31 | agent_service = AgentService( 32 | agent=agent, 33 | message_queue=message_queue, 34 | description="Useful for getting the secret fact.", 35 | service_name="secret_fact_agent", 36 | ) 37 | agent_component = ServiceComponent.from_service_definition(agent_service) 38 | 39 | human_service = HumanService( 40 | message_queue=message_queue, description="Answers queries about math." 41 | ) 42 | human_component = ServiceComponent.from_service_definition(human_service) 43 | 44 | pipeline = QueryPipeline( 45 | chain=[ 46 | RouterComponent( 47 | selector=PydanticSingleSelector.from_defaults(llm=OpenAI()), 48 | choices=[agent_service.description, human_service.description], 49 | components=[agent_component, human_component], 50 | ) 51 | ] 52 | ) 53 | 54 | pipeline_orchestrator = PipelineOrchestrator(pipeline) 55 | 56 | control_plane = ControlPlaneServer(message_queue, pipeline_orchestrator) 57 | 58 | # launch it 59 | launcher = LocalLauncher([agent_service, human_service], control_plane, message_queue) 60 | result = launcher.launch_single("What is 1 + 2 + 3 + 4 + 5?") 61 | 62 | print(f"Result: {result}") 63 | -------------------------------------------------------------------------------- /examples/agentic_server.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | HumanService, 4 | AgentOrchestrator, 5 | CallableMessageConsumer, 6 | ControlPlaneServer, 7 | ServerLauncher, 8 | SimpleMessageQueue, 9 | QueueMessage, 10 | ) 11 | from llama_index.core.agent import FunctionCallingAgentWorker 12 | from llama_index.core.tools import FunctionTool 13 | from llama_index.llms.openai import OpenAI 14 | 15 | 16 | # create an agent 17 | def get_the_secret_fact() -> str: 18 | """Returns the secret fact.""" 19 | return "The secret fact is: A baby llama is called a 'Cria'." 
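# (Illustrative sketch, not part of this script.) Unlike `launch_single`, the
# `launcher.launch_servers()` call at the bottom of this file blocks and serves
# each component over HTTP, so tasks are submitted from a separate process.
# The client usage and control plane address here are assumptions:
#
#   from llama_agents import LlamaAgentsClient
#   client = LlamaAgentsClient("http://127.0.0.1:8000")
#   client.create_task("What is the secret fact?")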
20 | 21 | 22 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 23 | 24 | worker1 = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 25 | worker2 = FunctionCallingAgentWorker.from_tools([], llm=OpenAI()) 26 | agent1 = worker1.as_agent() 27 | agent2 = worker2.as_agent() 28 | 29 | # create our multi-agent framework components 30 | message_queue = SimpleMessageQueue() 31 | queue_client = message_queue.client 32 | 33 | 34 | control_plane = ControlPlaneServer( 35 | message_queue=queue_client, 36 | orchestrator=AgentOrchestrator(llm=OpenAI()), 37 | ) 38 | agent_server_1 = AgentService( 39 | agent=agent1, 40 | message_queue=queue_client, 41 | description="Useful for getting the secret fact.", 42 | service_name="secret_fact_agent", 43 | host="127.0.0.1", 44 | port=8002, 45 | ) 46 | agent_server_2 = AgentService( 47 | agent=agent2, 48 | message_queue=queue_client, 49 | description="Useful for getting random dumb facts.", 50 | service_name="dumb_fact_agent", 51 | host="127.0.0.1", 52 | port=8003, 53 | ) 54 | human_service = HumanService( 55 | message_queue=queue_client, 56 | description="Answers queries about math.", 57 | host="127.0.0.1", 58 | port=8004, 59 | ) 60 | 61 | 62 | # additional human consumer 63 | def handle_result(message: QueueMessage) -> None: 64 | print("Got result:", message.data) 65 | 66 | 67 | human_consumer = CallableMessageConsumer(handler=handle_result, message_type="human") 68 | 69 | # launch it 70 | launcher = ServerLauncher( 71 | [agent_server_1, agent_server_2, human_service], 72 | control_plane, 73 | message_queue, 74 | additional_consumers=[human_consumer], 75 | ) 76 | 77 | launcher.launch_servers() 78 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/agent_services/secret_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from llama_agents import AgentService, SimpleMessageQueue 4 | 5 | from llama_index.core.agent import FunctionCallingAgentWorker 6 | from llama_index.core.tools import FunctionTool 7 | from llama_index.llms.openai import OpenAI 8 | 9 | from multi_agent_app.utils import load_from_env 10 | 11 | message_queue_host = load_from_env("MESSAGE_QUEUE_HOST") 12 | message_queue_port = load_from_env("MESSAGE_QUEUE_PORT") 13 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 14 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 15 | secret_agent_host = load_from_env("SECRET_AGENT_HOST") 16 | secret_agent_port = load_from_env("SECRET_AGENT_PORT") 17 | 18 | 19 | # create an agent 20 | def get_the_secret_fact() -> str: 21 | """Returns the secret fact.""" 22 | return "The secret fact is: A baby llama is called a 'Cria'." 
23 | 24 | 25 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 26 | worker = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 27 | agent = worker.as_agent() 28 | 29 | # create agent server 30 | message_queue = SimpleMessageQueue( 31 | host=message_queue_host, 32 | port=int(message_queue_port) if message_queue_port else None, 33 | ) 34 | queue_client = message_queue.client 35 | 36 | agent_server = AgentService( 37 | agent=agent, 38 | message_queue=queue_client, 39 | description="Useful for getting the secret fact.", 40 | service_name="secret_fact_agent", 41 | host=secret_agent_host, 42 | port=int(secret_agent_port) if secret_agent_port else None, 43 | ) 44 | 45 | app = agent_server._app 46 | 47 | 48 | # registration 49 | async def register_and_start_consuming() -> None: 50 | # register to message queue 51 | start_consuming_callable = await agent_server.register_to_message_queue() 52 | # register to control plane 53 | await agent_server.register_to_control_plane( 54 | control_plane_url=( 55 | f"http://{control_plane_host}:{control_plane_port}" 56 | if control_plane_port 57 | else f"http://{control_plane_host}" 58 | ) 59 | ) 60 | # start consuming 61 | await start_consuming_callable() 62 | 63 | 64 | if __name__ == "__main__": 65 | asyncio.run(register_and_start_consuming()) 66 | -------------------------------------------------------------------------------- /examples/pipeline_agent_service_tool_local_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | ControlPlaneServer, 4 | SimpleMessageQueue, 5 | PipelineOrchestrator, 6 | ServiceComponent, 7 | LocalLauncher, 8 | ) 9 | from llama_agents.tools import ServiceAsTool 10 | 11 | 12 | from llama_index.core.agent import FunctionCallingAgentWorker 13 | from llama_index.core.tools import FunctionTool 14 | from llama_index.core.query_pipeline import QueryPipeline 15 | from llama_index.llms.openai import OpenAI 16 | from llama_index.agent.openai import OpenAIAgent 17 | 18 | 19 | # create an agent 20 | def get_the_secret_fact() -> str: 21 | """Returns the secret fact.""" 22 | return "The secret fact is: A baby llama is called a 'Cria'." 
23 | 24 | 25 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 26 | 27 | worker1 = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 28 | # worker2 = FunctionCallingAgentWorker.from_tools([], llm=OpenAI()) 29 | agent1 = worker1.as_agent() 30 | 31 | # create our multi-agent framework components 32 | message_queue = SimpleMessageQueue() 33 | 34 | agent1_server = AgentService( 35 | agent=agent1, 36 | message_queue=message_queue, 37 | description="Useful for getting the secret fact.", 38 | service_name="secret_fact_agent", 39 | ) 40 | 41 | agent1_server_tool = ServiceAsTool.from_service_definition( 42 | message_queue=message_queue, service_definition=agent1_server.service_definition 43 | ) 44 | 45 | agent2 = OpenAIAgent.from_tools( 46 | [agent1_server_tool], 47 | system_prompt="Perform the task, return the result as well as a funny joke.", 48 | ) 49 | agent2_server = AgentService( 50 | agent=agent2, 51 | message_queue=message_queue, 52 | description="Useful for telling funny jokes.", 53 | service_name="dumb_fact_agent", 54 | ) 55 | 56 | agent2_component = ServiceComponent.from_service_definition(agent2_server) 57 | 58 | pipeline = QueryPipeline(chain=[agent2_component]) 59 | 60 | pipeline_orchestrator = PipelineOrchestrator(pipeline) 61 | 62 | control_plane = ControlPlaneServer(message_queue, pipeline_orchestrator) 63 | 64 | # launch it 65 | launcher = LocalLauncher([agent1_server, agent2_server], control_plane, message_queue) 66 | result = launcher.launch_single("What is the secret fact?") 67 | 68 | print(f"Result: {result}") 69 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/agent_services/funny_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from llama_agents import AgentService, SimpleMessageQueue 4 | 5 | from llama_index.core.agent import FunctionCallingAgentWorker 6 | from llama_index.core.tools import FunctionTool 7 | from llama_index.llms.openai import OpenAI 8 | 9 | from multi_agent_app.utils import load_from_env 10 | 11 | message_queue_host = load_from_env("MESSAGE_QUEUE_HOST") 12 | message_queue_port = load_from_env("MESSAGE_QUEUE_PORT") 13 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 14 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 15 | funny_agent_host = load_from_env("FUNNY_AGENT_HOST") 16 | funny_agent_port = load_from_env("FUNNY_AGENT_PORT") 17 | 18 | 19 | # create an agent 20 | def get_a_funny_joke() -> str: 21 | """Returns a funny joke.""" 22 | return "I went to the aquarium this weekend, but I didn’t stay long. There’s something fishy about that place."
23 | 24 | 25 | tool = FunctionTool.from_defaults(fn=get_a_funny_joke) 26 | worker = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 27 | agent = worker.as_agent() 28 | 29 | # create agent server 30 | message_queue = SimpleMessageQueue( 31 | host=message_queue_host, 32 | port=int(message_queue_port) if message_queue_port else None, 33 | ) 34 | queue_client = message_queue.client 35 | 36 | agent_server = AgentService( 37 | agent=agent, 38 | message_queue=queue_client, 39 | description="Useful for getting funny jokes.", 40 | service_name="funny_joke_agent", 41 | host=funny_agent_host, 42 | port=int(funny_agent_port) if funny_agent_port else None, 43 | ) 44 | 45 | app = agent_server._app 46 | 47 | 48 | # registration 49 | async def register_and_start_consuming() -> None: 50 | # register to message queue 51 | start_consuming_callable = await agent_server.register_to_message_queue() 52 | # register to control plane 53 | await agent_server.register_to_control_plane( 54 | control_plane_url=( 55 | f"http://{control_plane_host}:{control_plane_port}" 56 | if control_plane_port 57 | else f"http://{control_plane_host}" 58 | ) 59 | ) 60 | # start consuming 61 | await start_consuming_callable() 62 | 63 | 64 | if __name__ == "__main__": 65 | asyncio.run(register_and_start_consuming()) 66 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/kubernetes/ingress_services/message_queue.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: message-queue 6 | namespace: llama-agents-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: message-queue 12 | template: 13 | metadata: 14 | labels: 15 | app: message-queue 16 | spec: 17 | containers: 18 | - name: message-queue 19 | env: 20 | - name: MESSAGE_QUEUE_HOST 21 | valueFrom: 22 | configMapKeyRef: 23 | name: xcore-config 24 | key: MESSAGE_QUEUE_HOST 25 | - name: MESSAGE_QUEUE_PORT 26 | valueFrom: 27 | configMapKeyRef: 28 | name: xcore-config 29 | key: MESSAGE_QUEUE_PORT 30 | image: multi_agent_app:latest 31 | imagePullPolicy: Never 32 | command: 33 | [ 34 | "uvicorn", 35 | "multi_agent_app.core_services.message_queue:app", 36 | "--host", 37 | "0.0.0.0", 38 | "--port", 39 | "8000", 40 | "--log-config", 41 | "./logging.ini", 42 | "--log-level", 43 | "debug", 44 | ] 45 | resources: 46 | requests: 47 | memory: "128Mi" 48 | cpu: "100m" 49 | limits: 50 | memory: "512Mi" 51 | cpu: "500m" 52 | ports: 53 | - containerPort: 8000 54 | 55 | --- 56 | apiVersion: v1 57 | kind: Service 58 | metadata: 59 | labels: 60 | app: message-queue 61 | name: message-queue 62 | namespace: llama-agents-demo 63 | spec: 64 | selector: 65 | app: message-queue 66 | ports: 67 | - protocol: TCP 68 | port: 8000 69 | targetPort: 8000 70 | 71 | --- 72 | apiVersion: networking.k8s.io/v1 73 | kind: Ingress 74 | metadata: 75 | name: message-queue 76 | namespace: llama-agents-demo 77 | spec: 78 | rules: 79 | - host: message-queue.127.0.0.1.nip.io 80 | http: 81 | paths: 82 | - path: / 83 | pathType: Prefix 84 | backend: 85 | service: 86 | name: message-queue 87 | port: 88 | number: 8000 89 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/agent_services/decorators.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import time 3 | import numpy as np 4 | import 
functools 5 | from typing import Any, Callable 6 | 7 | from logging import getLogger 8 | 9 | logger = getLogger(__name__) 10 | 11 | 12 | def exponential_delay(exponential_rate: float) -> Callable: 13 | """Decorator that adds an exponentially distributed random delay before calling the wrapped function.""" 14 | 15 | def decorator(func: Callable) -> Callable: 16 | @functools.wraps(func) 17 | def wrapper(*args: Any, **kwargs: Any) -> str: 18 | # random delay 19 | delay = np.random.exponential(exponential_rate) 20 | logger.info(f"waiting for {delay} seconds") 21 | time.sleep(delay) 22 | return func(*args, **kwargs) 23 | 24 | @functools.wraps(func) 25 | async def async_wrapper(*args: Any, **kwargs: Any) -> str: 26 | # random delay 27 | delay = np.random.exponential(exponential_rate) 28 | logger.info(f"waiting for {delay} seconds") 29 | await asyncio.sleep(delay) 30 | return await func(*args, **kwargs) 31 | 32 | return async_wrapper if asyncio.iscoroutinefunction(func) else wrapper 33 | 34 | return decorator 35 | 36 | 37 | async def main() -> None: 38 | @exponential_delay(2) 39 | async def get_the_secret_fact() -> str: 40 | """Returns the secret fact.""" 41 | return "The secret fact is: A baby llama is called a 'Cria'." 42 | 43 | @exponential_delay(1) 44 | async def async_correct_first_character(input: str) -> str: 45 | """Corrects the first character.""" 46 | tokens = input.split() 47 | return " ".join([t[-1] + t[0:-1] for t in tokens]) 48 | 49 | @exponential_delay(0.5) 50 | async def async_remove_ay_suffix(input: str) -> str: 51 | """Removes the 'ay' suffix from each token in the input sentence. 52 | 53 | Params: 54 | input (str): The input sentence, i.e., a sequence of words 55 | """ 56 | logger.info(f"received task input: {input}") 57 | tokens = input.split() 58 | res = " ".join([t[:-2] for t in tokens]) 59 | logger.info(f"Removed 'ay' suffix: {res}") 60 | return res 61 | 62 | output = await async_remove_ay_suffix(input="eyhay ouyay") 63 | 64 | print(output) 65 | print(async_remove_ay_suffix.__doc__) 66 | 67 | 68 | if __name__ == "__main__": 69 | asyncio.run(main()) 70 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/core_services/control_plane.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | 4 | from llama_agents import ControlPlaneServer, PipelineOrchestrator, ServiceComponent 5 | from llama_agents.message_queues.apache_kafka import KafkaMessageQueue 6 | from llama_index.core.query_pipeline import QueryPipeline 7 | 8 | from pig_latin_translation.utils import load_from_env 9 | from pig_latin_translation.agent_services.remove_ay_agent import ( 10 | agent_server as remove_ay_agent_server, 11 | ) 12 | from pig_latin_translation.agent_services.correct_first_character_agent import ( 13 | agent_server as correct_first_character_agent_server, 14 | ) 15 | 16 | message_queue_host = load_from_env("KAFKA_HOST") 17 | message_queue_port = load_from_env("KAFKA_PORT") 18 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 19 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 20 | localhost = load_from_env("LOCALHOST") 21 | 22 | 23 | # setup message queue 24 | message_queue = KafkaMessageQueue.from_url_params( 25 | host=message_queue_host, 26 | port=int(message_queue_port) if message_queue_port else None, 27 | ) 28 | 29 | # setup control plane 30 | remove_ay_agent_component = ServiceComponent.from_service_definition( 31 | remove_ay_agent_server 32 | ) 33 |
correct_first_character_agent_component = ServiceComponent.from_service_definition( 34 | correct_first_character_agent_server 35 | ) 36 | 37 | pipeline = QueryPipeline( 38 | chain=[ 39 | remove_ay_agent_component, 40 | correct_first_character_agent_component, 41 | ] 42 | ) 43 | 44 | pipeline_orchestrator = PipelineOrchestrator(pipeline) 45 | 46 | control_plane = ControlPlaneServer( 47 | message_queue=message_queue, 48 | orchestrator=pipeline_orchestrator, 49 | host=control_plane_host, 50 | port=int(control_plane_port) if control_plane_port else None, 51 | ) 52 | app = control_plane.app 53 | 54 | 55 | # launch 56 | async def launch() -> None: 57 | # register to message queue and start consuming 58 | start_consuming_callable = await control_plane.register_to_message_queue() 59 | _ = asyncio.create_task(start_consuming_callable()) 60 | 61 | cfg = uvicorn.Config( 62 | control_plane.app, 63 | host=localhost, 64 | port=control_plane.port, 65 | ) 66 | server = uvicorn.Server(cfg) 67 | await server.serve() 68 | 69 | 70 | if __name__ == "__main__": 71 | asyncio.run(launch()) 72 | -------------------------------------------------------------------------------- /examples/pipeline_local_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | ControlPlaneServer, 4 | SimpleMessageQueue, 5 | PipelineOrchestrator, 6 | ServiceComponent, 7 | LocalLauncher, 8 | ) 9 | 10 | from llama_index.core.agent import FunctionCallingAgentWorker 11 | from llama_index.core.tools import FunctionTool 12 | from llama_index.core.query_pipeline import QueryPipeline, RouterComponent 13 | from llama_index.core.selectors import PydanticSingleSelector 14 | from llama_index.llms.openai import OpenAI 15 | from llama_index.agent.openai import OpenAIAgent 16 | 17 | 18 | # create an agent 19 | def get_the_secret_fact() -> str: 20 | """Returns the secret fact.""" 21 | return "The secret fact is: A baby llama is called a 'Cria'." 22 | 23 | 24 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 25 | 26 | worker1 = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 27 | # worker2 = FunctionCallingAgentWorker.from_tools([], llm=OpenAI()) 28 | agent1 = worker1.as_agent() 29 | agent2 = OpenAIAgent.from_tools( 30 | [], system_prompt="Repeat the input with a silly fact added." 
31 | ) # worker2.as_agent() 32 | 33 | # create our multi-agent framework components 34 | message_queue = SimpleMessageQueue() 35 | 36 | agent_server_1 = AgentService( 37 | agent=agent1, 38 | message_queue=message_queue, 39 | description="Useful for getting the secret fact.", 40 | service_name="secret_fact_agent", 41 | ) 42 | agent_server_2 = AgentService( 43 | agent=agent2, 44 | message_queue=message_queue, 45 | description="Useful for getting random dumb facts.", 46 | service_name="dumb_fact_agent", 47 | ) 48 | 49 | agent_component_1 = ServiceComponent.from_service_definition(agent_server_1) 50 | agent_component_2 = ServiceComponent.from_service_definition(agent_server_2) 51 | 52 | pipeline = QueryPipeline( 53 | chain=[ 54 | RouterComponent( 55 | selector=PydanticSingleSelector.from_defaults(llm=OpenAI()), 56 | choices=[agent_server_1.description, agent_server_2.description], 57 | components=[agent_component_1, agent_component_2], 58 | ) 59 | ] 60 | ) 61 | 62 | pipeline_orchestrator = PipelineOrchestrator(pipeline) 63 | 64 | control_plane = ControlPlaneServer(message_queue, pipeline_orchestrator) 65 | 66 | # launch it 67 | launcher = LocalLauncher([agent_server_1, agent_server_2], control_plane, message_queue) 68 | result = launcher.launch_single("What is the secret fact?") 69 | 70 | print(f"Result: {result}") 71 | -------------------------------------------------------------------------------- /examples/rabbitmq/simple-scripts/local_launcher_single.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | ControlPlaneServer, 4 | PipelineOrchestrator, 5 | ServiceComponent, 6 | ) 7 | from llama_agents.tools import ServiceAsTool 8 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 9 | from llama_agents.launchers.local import LocalLauncher 10 | 11 | 12 | from llama_index.core.agent import FunctionCallingAgentWorker 13 | from llama_index.core.tools import FunctionTool 14 | from llama_index.core.query_pipeline import QueryPipeline 15 | from llama_index.llms.openai import OpenAI 16 | from llama_index.agent.openai import OpenAIAgent 17 | 18 | 19 | # logging.basicConfig(stream=sys.stdout, level=logging.INFO) 20 | # logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) 21 | 22 | 23 | # create an agent 24 | def get_the_secret_fact() -> str: 25 | """Returns the secret fact.""" 26 | return "The secret fact is: A baby llama is called a 'Cria'." 
27 | 28 | 29 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 30 | 31 | worker1 = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 32 | agent1 = worker1.as_agent() 33 | 34 | # create our multi-agent framework components 35 | message_queue = RabbitMQMessageQueue() 36 | 37 | agent1_server = AgentService( 38 | agent=agent1, 39 | message_queue=message_queue, 40 | description="Useful for getting the secret fact.", 41 | service_name="secret_fact_agent", 42 | ) 43 | 44 | agent1_server_tool = ServiceAsTool.from_service_definition( 45 | message_queue=message_queue, service_definition=agent1_server.service_definition 46 | ) 47 | 48 | agent2 = OpenAIAgent.from_tools( 49 | [agent1_server_tool], 50 | system_prompt="Perform the task, return the result as well as a funny joke.", 51 | ) # worker2.as_agent() 52 | agent2_server = AgentService( 53 | agent=agent2, 54 | message_queue=message_queue, 55 | description="Useful for telling funny jokes.", 56 | service_name="dumb_fact_agent", 57 | ) 58 | 59 | agent2_component = ServiceComponent.from_service_definition(agent2_server) 60 | 61 | pipeline = QueryPipeline(chain=[agent2_component]) 62 | 63 | pipeline_orchestrator = PipelineOrchestrator(pipeline) 64 | 65 | control_plane = ControlPlaneServer(message_queue, pipeline_orchestrator) 66 | 67 | # launch it 68 | launcher = LocalLauncher([agent1_server, agent2_server], control_plane, message_queue) 69 | result = launcher.launch_single("What is the secret fact?") 70 | 71 | print(f"Result: {result}") 72 | -------------------------------------------------------------------------------- /examples/redis/simple-redis-app/local_launcher.py: -------------------------------------------------------------------------------- 1 | from llama_agents import ( 2 | AgentService, 3 | ControlPlaneServer, 4 | PipelineOrchestrator, 5 | ServiceComponent, 6 | ) 7 | from llama_agents.tools import ServiceAsTool 8 | from llama_agents.message_queues.redis import RedisMessageQueue 9 | from llama_agents.launchers.local import LocalLauncher 10 | 11 | 12 | from llama_index.core.agent import FunctionCallingAgentWorker 13 | from llama_index.core.tools import FunctionTool 14 | from llama_index.core.query_pipeline import QueryPipeline 15 | from llama_index.llms.openai import OpenAI 16 | from llama_index.agent.openai import OpenAIAgent 17 | 18 | 19 | def get_the_secret_fact() -> str: 20 | """Returns the secret fact.""" 21 | return "The secret fact is: A baby llama is called a 'Cria'." 
22 | 23 | 24 | message_queue = RedisMessageQueue() 25 | 26 | secret_fact_tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 27 | secret_fact_worker = FunctionCallingAgentWorker.from_tools( 28 | [secret_fact_tool], llm=OpenAI() 29 | ) 30 | secret_fact_agent = secret_fact_worker.as_agent() 31 | secret_fact_agent_service = AgentService( 32 | agent=secret_fact_agent, 33 | message_queue=message_queue, 34 | description="Useful for getting the secret fact.", 35 | service_name="secret_fact_agent_service", 36 | ) 37 | secret_fact_agent_tool = ServiceAsTool.from_service_definition( 38 | message_queue=message_queue, 39 | service_definition=secret_fact_agent_service.service_definition, 40 | ) 41 | 42 | funny_fact_agent = OpenAIAgent.from_tools( 43 | [secret_fact_agent_tool], 44 | system_prompt="Perform the task, return the result as well as a funny joke.", 45 | ) 46 | funny_fact_agent_service = AgentService( 47 | agent=funny_fact_agent, 48 | message_queue=message_queue, 49 | description="Useful for telling funny jokes.", 50 | service_name="dumb_fact_agent", 51 | ) 52 | funny_fact_agent_component = ServiceComponent.from_service_definition( 53 | funny_fact_agent_service 54 | ) 55 | 56 | pipeline = QueryPipeline(chain=[funny_fact_agent_component]) 57 | pipeline_orchestrator = PipelineOrchestrator(pipeline) 58 | control_plane = ControlPlaneServer( 59 | message_queue=message_queue, orchestrator=pipeline_orchestrator 60 | ) 61 | 62 | # launch it 63 | launcher = LocalLauncher( 64 | services=[secret_fact_agent_service, funny_fact_agent_service], 65 | control_plane=control_plane, 66 | message_queue=message_queue, 67 | ) 68 | result = launcher.launch_single("What is the secret fact?") 69 | 70 | print(f"Result: {result}") 71 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/core_services/control_plane.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | 4 | from llama_agents import PipelineOrchestrator, ControlPlaneServer 5 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 6 | 7 | from llama_index.core.query_pipeline import QueryPipeline, RouterComponent 8 | from llama_index.core.selectors import PydanticSingleSelector 9 | from llama_index.llms.openai import OpenAI 10 | 11 | from human_in_the_loop.utils import load_from_env 12 | from human_in_the_loop.agent_services.funny_agent import agent_component, agent_server 13 | from human_in_the_loop.additional_services.human_in_the_loop import ( 14 | human_component, 15 | human_service, 16 | ) 17 | 18 | 19 | message_queue_host = load_from_env("RABBITMQ_HOST") 20 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 21 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 22 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 23 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 24 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 25 | localhost = load_from_env("LOCALHOST") 26 | 27 | 28 | # setup message queue 29 | message_queue = RabbitMQMessageQueue( 30 | url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 31 | ) 32 | 33 | pipeline = QueryPipeline( 34 | chain=[ 35 | RouterComponent( 36 | selector=PydanticSingleSelector.from_defaults(llm=OpenAI()), 37 | choices=[agent_server.description, human_service.description], 38 | components=[agent_component, human_component], 39 | ) 40 | ] 41 | ) 42 
| pipeline_orchestrator = PipelineOrchestrator(pipeline) 43 | 44 | # setup control plane 45 | control_plane = ControlPlaneServer( 46 | message_queue=message_queue, 47 | orchestrator=pipeline_orchestrator, 48 | host=control_plane_host, 49 | port=int(control_plane_port) if control_plane_port else None, 50 | ) 51 | 52 | 53 | app = control_plane.app 54 | 55 | 56 | # launch 57 | async def launch() -> None: 58 | # register to message queue and start consuming 59 | start_consuming_callable = await control_plane.register_to_message_queue() 60 | _ = asyncio.create_task(start_consuming_callable()) 61 | 62 | cfg = uvicorn.Config( 63 | control_plane.app, 64 | host=localhost, 65 | port=control_plane.port, 66 | ) 67 | server = uvicorn.Server(cfg) 68 | await server.serve() 69 | 70 | 71 | if __name__ == "__main__": 72 | asyncio.run(launch()) 73 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/multi-agent-app/multi_agent_app/additional_services/task_result.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from typing import Dict, Optional 4 | from llama_agents import ( 5 | CallableMessageConsumer, 6 | QueueMessage, 7 | ) 8 | from fastapi import FastAPI 9 | from llama_agents.message_queues.base import BaseMessageQueue 10 | from llama_agents.message_consumers.base import ( 11 | BaseMessageQueueConsumer, 12 | StartConsumingCallable, 13 | ) 14 | from llama_agents.message_consumers.remote import RemoteMessageConsumer 15 | from logging import getLogger 16 | 17 | logger = getLogger(__name__) 18 | 19 | 20 | class TaskResultService: 21 | def __init__( 22 | self, 23 | message_queue: BaseMessageQueue, 24 | name: str = "human", 25 | host: str = "127.0.0.1", 26 | port: Optional[int] = 8000, 27 | ) -> None: 28 | self.name = name 29 | self.host = host 30 | self.port = port 31 | 32 | self._message_queue = message_queue 33 | 34 | # app 35 | self._app = FastAPI() 36 | self._app.add_api_route( 37 | "/", self.home, methods=["GET"], tags=["Human Consumer"] 38 | ) 39 | self._app.add_api_route( 40 | "/process_message", 41 | self.process_message, 42 | methods=["POST"], 43 | tags=["Human Consumer"], 44 | ) 45 | 46 | @property 47 | def message_queue(self) -> BaseMessageQueue: 48 | return self._message_queue 49 | 50 | def as_consumer(self, remote: bool = False) -> BaseMessageQueueConsumer: 51 | if remote: 52 | return RemoteMessageConsumer( 53 | url=( 54 | f"http://{self.host}:{self.port}/process_message" 55 | if self.port 56 | else f"http://{self.host}/process_message" 57 | ), 58 | message_type=self.name, 59 | ) 60 | 61 | return CallableMessageConsumer( 62 | message_type=self.name, 63 | handler=self.process_message, 64 | ) 65 | 66 | async def process_message(self, message: QueueMessage) -> None: 67 | Path("task_results").mkdir(exist_ok=True) 68 | with open("task_results/task_results.jsonl", "+a") as f: 69 | json.dump(message.model_dump(), f) 70 | f.write("\n") 71 | 72 | async def home(self) -> Dict[str, str]: 73 | return {"message": "hello, human."} 74 | 75 | async def register_to_message_queue(self) -> StartConsumingCallable: 76 | """Register to the message queue.""" 77 | return await self.message_queue.register_consumer(self.as_consumer(remote=True)) 78 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/additional_services/task_result.py: 
-------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from typing import Dict, Optional 4 | from llama_agents import ( 5 | CallableMessageConsumer, 6 | QueueMessage, 7 | ) 8 | from fastapi import FastAPI 9 | from llama_agents.message_queues.base import BaseMessageQueue 10 | from llama_agents.message_consumers.base import ( 11 | BaseMessageQueueConsumer, 12 | StartConsumingCallable, 13 | ) 14 | from llama_agents.message_consumers.remote import RemoteMessageConsumer 15 | from logging import getLogger 16 | 17 | logger = getLogger(__name__) 18 | 19 | 20 | class TaskResultService: 21 | def __init__( 22 | self, 23 | message_queue: BaseMessageQueue, 24 | name: str = "human", 25 | host: str = "127.0.0.1", 26 | port: Optional[int] = 8000, 27 | ) -> None: 28 | self.name = name 29 | self.host = host 30 | self.port = port 31 | 32 | self._message_queue = message_queue 33 | 34 | # app 35 | self._app = FastAPI() 36 | self._app.add_api_route( 37 | "/", self.home, methods=["GET"], tags=["Human Consumer"] 38 | ) 39 | self._app.add_api_route( 40 | "/process_message", 41 | self.process_message, 42 | methods=["POST"], 43 | tags=["Human Consumer"], 44 | ) 45 | 46 | @property 47 | def message_queue(self) -> BaseMessageQueue: 48 | return self._message_queue 49 | 50 | def as_consumer(self, remote: bool = False) -> BaseMessageQueueConsumer: 51 | if remote: 52 | return RemoteMessageConsumer( 53 | url=( 54 | f"http://{self.host}:{self.port}/process_message" 55 | if self.port 56 | else f"http://{self.host}/process_message" 57 | ), 58 | message_type=self.name, 59 | ) 60 | 61 | return CallableMessageConsumer( 62 | message_type=self.name, 63 | handler=self.process_message, 64 | ) 65 | 66 | async def process_message(self, message: QueueMessage) -> None: 67 | Path("task_results").mkdir(exist_ok=True) 68 | with open("task_results/task_results.jsonl", "+a") as f: 69 | json.dump(message.model_dump(), f) 70 | f.write("\n") 71 | 72 | async def home(self) -> Dict[str, str]: 73 | return {"message": "hello, human."} 74 | 75 | async def register_to_message_queue(self) -> StartConsumingCallable: 76 | """Register to the message queue.""" 77 | return await self.message_queue.register_consumer(self.as_consumer(remote=True)) 78 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/agent_services/secret_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | 4 | from llama_agents import AgentService 5 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 6 | 7 | from llama_index.core.agent import FunctionCallingAgentWorker 8 | from llama_index.core.tools import FunctionTool 9 | from llama_index.llms.openai import OpenAI 10 | 11 | from multi_agent_app_rabbitmq.utils import load_from_env 12 | 13 | message_queue_host = load_from_env("RABBITMQ_HOST") 14 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 15 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 16 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 17 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 18 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 19 | secret_agent_host = load_from_env("SECRET_AGENT_HOST") 20 | secret_agent_port = load_from_env("SECRET_AGENT_PORT") 21 | localhost = load_from_env("LOCALHOST") 22 | 23 | 24 | # create an agent 25 | 
def get_the_secret_fact() -> str: 26 | """Returns the secret fact.""" 27 | return "The secret fact is: A baby llama is called a 'Cria'." 28 | 29 | 30 | tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 31 | worker = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 32 | agent = worker.as_agent() 33 | 34 | # create agent server 35 | message_queue = RabbitMQMessageQueue( 36 | url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 37 | ) 38 | 39 | agent_server = AgentService( 40 | agent=agent, 41 | message_queue=message_queue, 42 | description="Useful for getting the secret fact.", 43 | service_name="secret_fact_agent", 44 | host=secret_agent_host, 45 | port=int(secret_agent_port) if secret_agent_port else None, 46 | ) 47 | 48 | app = agent_server._app 49 | 50 | 51 | # launch 52 | async def launch() -> None: 53 | # register to message queue 54 | start_consuming_callable = await agent_server.register_to_message_queue() 55 | _ = asyncio.create_task(start_consuming_callable()) 56 | 57 | # register to control plane 58 | await agent_server.register_to_control_plane( 59 | control_plane_url=( 60 | f"http://{control_plane_host}:{control_plane_port}" 61 | if control_plane_port 62 | else f"http://{control_plane_host}" 63 | ) 64 | ) 65 | 66 | cfg = uvicorn.Config( 67 | agent_server._app, 68 | host=localhost, 69 | port=agent_server.port, 70 | ) 71 | server = uvicorn.Server(cfg) 72 | await server.serve() 73 | 74 | 75 | if __name__ == "__main__": 76 | asyncio.run(launch()) 77 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/README.md: -------------------------------------------------------------------------------- 1 | # Human In The Loop w/ Gradio 2 | 3 | In this example, we demonstrate how to utilize a `HumanService` as part of a 4 | multi-agent system to enable a human-in-the-loop design, fronted by a Gradio app. 5 | 6 | ![image](https://github.com/user-attachments/assets/b070131f-bd56-44d0-91e5-387dbe5674cc) 7 | [GIF](https://d3ddy8balm3goa.cloudfront.net/llamaindex/human-in-the-loop.gif) 8 | 9 | ## The Multi-Agent System 10 | 11 | The system consists of the following components: 12 | 13 | - `AgentServer`: a single agent with an `OpenAI` LLM that answers all queries except 14 | those having to do with math 15 | - `HumanServer`: a service that lets humans answer the math queries 16 | - `RabbitMQMessageQueue`: the message broker for communication between the other components 17 | - `ControlPlane` with `PipelineOrchestrator` that uses a single `RouterComponent` 18 | which selects between the `AgentServer` and the `HumanServer` when processing a 19 | task. 20 | 21 | ## Gradio App 22 | 23 | We build a simple Gradio app where one can submit tasks to the system and watch 24 | the task go through the various stages of its lifecycle: namely, "Submitted", 25 | "Completed", and "Human Required". 26 | 27 | Technically speaking, the Gradio app is a `Consumer` of the message queue since 28 | it listens for the messages that contain "completed" task notifications. This 29 | front-end is also wired to the `HumanServer` so that the human in the loop can 30 | use this interface to complete their tasks. Note, however, that these two concerns 31 | can be separated into other pages, web apps, or servers of your choosing. 32 | 33 | ## Usage 34 | 35 | The multi-agent system can be launched via Docker.
An OPENAI_API_KEY environment 36 | variable must be supplied; this can be done by filling in the template .env file 37 | named `template.env.docker`. After filling it out, rename the file to `.env.docker`. 38 | 39 | With that out of the way, we can build and launch our multi-agent system app along with 40 | our Gradio app. 41 | 42 | ```sh 43 | # download data for the agentic rag 44 | mkdir data 45 | wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham_essay.txt' 46 | 47 | # launch the system 48 | docker-compose up --build 49 | ``` 50 | 51 | Running this single command will launch everything that we need. It does so in 52 | sequence: message queue, control plane, agent server & human server. Note that 53 | the Gradio (FastAPI) app is merely mounted onto the human server app. 54 | 55 | Once running, we can open our browser and enter the host and port of the 56 | Human Server app, adding the necessary route for our Gradio app: [http://0.0.0.0:8003/gradio](http://0.0.0.0:8003/gradio). 57 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/multi-agent-app-rabbitmq/multi_agent_app_rabbitmq/agent_services/funny_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | 4 | from llama_agents import AgentService 5 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 6 | 7 | from llama_index.core.agent import FunctionCallingAgentWorker 8 | from llama_index.core.tools import FunctionTool 9 | from llama_index.llms.openai import OpenAI 10 | 11 | from multi_agent_app_rabbitmq.utils import load_from_env 12 | 13 | 14 | message_queue_host = load_from_env("RABBITMQ_HOST") 15 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 16 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 17 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 18 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 19 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 20 | funny_agent_host = load_from_env("FUNNY_AGENT_HOST") 21 | funny_agent_port = load_from_env("FUNNY_AGENT_PORT") 22 | localhost = load_from_env("LOCALHOST") 23 | 24 | 25 | # create an agent 26 | def get_a_funny_joke() -> str: 27 | """Returns a funny joke.""" 28 | return "I went to the aquarium this weekend, but I didn’t stay long. There’s something fishy about that place."
29 | 30 | 31 | tool = FunctionTool.from_defaults(fn=get_a_funny_joke) 32 | worker = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI()) 33 | agent = worker.as_agent() 34 | 35 | # create agent server 36 | message_queue = RabbitMQMessageQueue( 37 | url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 38 | ) 39 | 40 | agent_server = AgentService( 41 | agent=agent, 42 | message_queue=message_queue, 43 | description="Useful for getting funny jokes.", 44 | service_name="funny_joke_agent", 45 | host=funny_agent_host, 46 | port=int(funny_agent_port) if funny_agent_port else None, 47 | ) 48 | 49 | app = agent_server._app 50 | 51 | 52 | # launch 53 | async def launch() -> None: 54 | # register to message queue 55 | start_consuming_callable = await agent_server.register_to_message_queue() 56 | _ = asyncio.create_task(start_consuming_callable()) 57 | 58 | # register to control plane 59 | await agent_server.register_to_control_plane( 60 | control_plane_url=( 61 | f"http://{control_plane_host}:{control_plane_port}" 62 | if control_plane_port 63 | else f"http://{control_plane_host}" 64 | ) 65 | ) 66 | 67 | cfg = uvicorn.Config( 68 | agent_server._app, 69 | host=localhost, 70 | port=agent_server.port, 71 | ) 72 | server = uvicorn.Server(cfg) 73 | await server.serve() 74 | 75 | 76 | if __name__ == "__main__": 77 | asyncio.run(launch()) 78 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | default_language_version: 3 | python: python3 4 | 5 | repos: 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v4.5.0 8 | hooks: 9 | - id: check-byte-order-marker 10 | - id: check-merge-conflict 11 | - id: check-symlinks 12 | - id: check-toml 13 | - id: check-yaml 14 | args: [--allow-multiple-documents] 15 | - id: detect-private-key 16 | - id: end-of-file-fixer 17 | - id: mixed-line-ending 18 | - id: trailing-whitespace 19 | - repo: https://github.com/charliermarsh/ruff-pre-commit 20 | rev: v0.1.5 21 | 22 | hooks: 23 | - id: ruff 24 | args: [--fix, --exit-non-zero-on-fix] 25 | exclude: ".*poetry.lock" 26 | - repo: https://github.com/psf/black-pre-commit-mirror 27 | rev: 23.10.1 28 | hooks: 29 | - id: black-jupyter 30 | name: black-src 31 | alias: black 32 | exclude: ".*poetry.lock" 33 | - repo: https://github.com/pre-commit/mirrors-mypy 34 | rev: v1.0.1 35 | hooks: 36 | - id: mypy 37 | additional_dependencies: 38 | [ 39 | "types-requests", 40 | "types-Deprecated", 41 | "types-redis", 42 | "types-setuptools", 43 | "types-PyYAML", 44 | "types-protobuf==4.24.0.4", 45 | ] 46 | args: 47 | [ 48 | --disallow-untyped-defs, 49 | --ignore-missing-imports, 50 | --python-version=3.8, 51 | ] 52 | - repo: https://github.com/adamchainz/blacken-docs 53 | rev: 1.16.0 54 | hooks: 55 | - id: blacken-docs 56 | name: black-docs-text 57 | alias: black 58 | types_or: [rst, markdown, tex] 59 | additional_dependencies: [black==23.10.1] 60 | # Using PEP 8's line length in docs prevents excess left/right scrolling 61 | args: [--line-length=79] 62 | - repo: https://github.com/pre-commit/mirrors-prettier 63 | rev: v3.0.3 64 | hooks: 65 | - id: prettier 66 | exclude: poetry.lock 67 | - repo: https://github.com/codespell-project/codespell 68 | rev: v2.2.6 69 | hooks: 70 | - id: codespell 71 | additional_dependencies: [tomli] 72 | exclude: ^(poetry.lock|examples|example_scripts) 73 | args: 74 | [ 75 | "--ignore-words-list", 76 
| "astroid,gallary,momento,narl,ot,rouge,nin,gere,te,inh,vor", 77 | ] 78 | - repo: https://github.com/srstevenson/nb-clean 79 | rev: 3.1.0 80 | hooks: 81 | - id: nb-clean 82 | args: [--preserve-cell-outputs, --remove-empty-cells] 83 | - repo: https://github.com/pappasam/toml-sort 84 | rev: v0.23.1 85 | hooks: 86 | - id: toml-sort-fix 87 | exclude: ".*poetry.lock" 88 | -------------------------------------------------------------------------------- /llama_agents/services/base.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import httpx 3 | from abc import ABC, abstractmethod 4 | from pydantic import BaseModel, ConfigDict 5 | from typing import Any 6 | 7 | from llama_agents.messages.base import QueueMessage 8 | from llama_agents.message_consumers.base import ( 9 | BaseMessageQueueConsumer, 10 | StartConsumingCallable, 11 | ) 12 | from llama_agents.message_publishers.publisher import MessageQueuePublisherMixin 13 | from llama_agents.types import ServiceDefinition 14 | 15 | 16 | class BaseService(MessageQueuePublisherMixin, ABC, BaseModel): 17 | """Base class for a service. 18 | 19 | The general structure of a service is as follows: 20 | - A service has a name. 21 | - A service has a service definition. 22 | - A service uses a message queue to send/receive messages. 23 | - A service has a processing loop, for continuous processing of messages. 24 | - A service can process a message. 25 | - A service can publish a message to another service. 26 | - A service can be launched in-process. 27 | - A service can be launched as a server. 28 | - A service can be registered to the control plane. 29 | - A service can be registered to the message queue. 30 | """ 31 | 32 | model_config = ConfigDict(arbitrary_types_allowed=True) 33 | service_name: str 34 | 35 | @property 36 | @abstractmethod 37 | def service_definition(self) -> ServiceDefinition: 38 | """The service definition.""" 39 | ... 40 | 41 | @abstractmethod 42 | def as_consumer(self, remote: bool = False) -> BaseMessageQueueConsumer: 43 | """Get the consumer for the message queue.""" 44 | ... 45 | 46 | @abstractmethod 47 | async def processing_loop(self) -> None: 48 | """The processing loop for the service.""" 49 | ... 50 | 51 | @abstractmethod 52 | async def process_message(self, message: QueueMessage) -> Any: 53 | """Process a message.""" 54 | ... 55 | 56 | @abstractmethod 57 | async def launch_local(self) -> asyncio.Task: 58 | """Launch the service in-process.""" 59 | ... 60 | 61 | @abstractmethod 62 | async def launch_server(self) -> None: 63 | """Launch the service as a server.""" 64 | ... 
65 | 66 | async def register_to_control_plane(self, control_plane_url: str) -> None: 67 | """Register the service to the control plane.""" 68 | service_def = self.service_definition 69 | async with httpx.AsyncClient() as client: 70 | response = await client.post( 71 | f"{control_plane_url}/services/register", 72 | json=service_def.model_dump(), 73 | ) 74 | response.raise_for_status() 75 | 76 | async def register_to_message_queue(self) -> StartConsumingCallable: 77 | """Register the service to the message queue.""" 78 | return await self.message_queue.register_consumer(self.as_consumer(remote=True)) 79 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/kubernetes/ingress_services/control_plane.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: control-plane 6 | namespace: llama-agents-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: control-plane 12 | template: 13 | metadata: 14 | labels: 15 | app: control-plane 16 | spec: 17 | containers: 18 | - name: control-plane 19 | env: 20 | - name: MESSAGE_QUEUE_HOST 21 | valueFrom: 22 | configMapKeyRef: 23 | name: xcore-config 24 | key: MESSAGE_QUEUE_HOST 25 | - name: MESSAGE_QUEUE_PORT 26 | valueFrom: 27 | configMapKeyRef: 28 | name: xcore-config 29 | key: MESSAGE_QUEUE_PORT 30 | - name: CONTROL_PLANE_HOST 31 | valueFrom: 32 | configMapKeyRef: 33 | name: xcore-config 34 | key: CONTROL_PLANE_HOST 35 | - name: CONTROL_PLANE_PORT 36 | valueFrom: 37 | configMapKeyRef: 38 | name: xcore-config 39 | key: CONTROL_PLANE_PORT 40 | - name: OPENAI_API_KEY 41 | valueFrom: 42 | secretKeyRef: 43 | name: xcore-secret 44 | key: OPENAI_API_KEY 45 | image: multi_agent_app:latest 46 | imagePullPolicy: Never 47 | command: 48 | [ 49 | "uvicorn", 50 | "multi_agent_app.core_services.control_plane:app", 51 | "--host", 52 | "0.0.0.0", 53 | "--port", 54 | "8000", 55 | "--log-config", 56 | "./logging.ini", 57 | "--log-level", 58 | "debug", 59 | ] 60 | resources: 61 | requests: 62 | memory: "128Mi" 63 | cpu: "100m" 64 | limits: 65 | memory: "512Mi" 66 | cpu: "500m" 67 | ports: 68 | - containerPort: 8000 69 | 70 | --- 71 | apiVersion: v1 72 | kind: Service 73 | metadata: 74 | labels: 75 | app: control-plane 76 | name: control-plane 77 | namespace: llama-agents-demo 78 | spec: 79 | selector: 80 | app: control-plane 81 | ports: 82 | - protocol: TCP 83 | port: 8000 84 | targetPort: 8000 85 | 86 | --- 87 | apiVersion: networking.k8s.io/v1 88 | kind: Ingress 89 | metadata: 90 | name: control-plane 91 | namespace: llama-agents-demo 92 | spec: 93 | rules: 94 | - host: control-plane.127.0.0.1.nip.io 95 | http: 96 | paths: 97 | - path: / 98 | pathType: Prefix 99 | backend: 100 | service: 101 | name: control-plane 102 | port: 103 | number: 8000 104 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/kubernetes/ingress_services/human_consumer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: human-consumer 6 | namespace: llama-agents-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: human-consumer 12 | template: 13 | metadata: 14 | labels: 15 | app: human-consumer 16 | spec: 17 | containers: 18 | - name: human-consumer 19 | env: 20 | - name: MESSAGE_QUEUE_HOST 21 | valueFrom: 22 | configMapKeyRef: 23 | 
name: xcore-config 24 | key: MESSAGE_QUEUE_HOST 25 | - name: MESSAGE_QUEUE_PORT 26 | valueFrom: 27 | configMapKeyRef: 28 | name: xcore-config 29 | key: MESSAGE_QUEUE_PORT 30 | - name: HUMAN_CONSUMER_HOST 31 | valueFrom: 32 | configMapKeyRef: 33 | name: xcore-config 34 | key: HUMAN_CONSUMER_HOST 35 | - name: HUMAN_CONSUMER_PORT 36 | valueFrom: 37 | configMapKeyRef: 38 | name: xcore-config 39 | key: HUMAN_CONSUMER_PORT 40 | - name: OPENAI_API_KEY 41 | valueFrom: 42 | secretKeyRef: 43 | name: xcore-secret 44 | key: OPENAI_API_KEY 45 | image: multi_agent_app:latest 46 | imagePullPolicy: Never 47 | command: 48 | [ 49 | "uvicorn", 50 | "multi_agent_app.additional_services.human_consumer:app", 51 | "--host", 52 | "0.0.0.0", 53 | "--port", 54 | "8000", 55 | "--log-config", 56 | "./logging.ini", 57 | "--log-level", 58 | "debug", 59 | ] 60 | resources: 61 | requests: 62 | memory: "128Mi" 63 | cpu: "100m" 64 | limits: 65 | memory: "512Mi" 66 | cpu: "500m" 67 | ports: 68 | - containerPort: 8000 69 | 70 | --- 71 | apiVersion: v1 72 | kind: Service 73 | metadata: 74 | labels: 75 | app: human-consumer 76 | name: human-consumer 77 | namespace: llama-agents-demo 78 | spec: 79 | selector: 80 | app: human-consumer 81 | ports: 82 | - protocol: TCP 83 | port: 8000 84 | targetPort: 8000 85 | 86 | --- 87 | apiVersion: networking.k8s.io/v1 88 | kind: Ingress 89 | metadata: 90 | name: human-consumer 91 | namespace: llama-agents-demo 92 | spec: 93 | rules: 94 | - host: human-consumer.127.0.0.1.nip.io 95 | http: 96 | paths: 97 | - path: / 98 | pathType: Prefix 99 | backend: 100 | service: 101 | name: human-consumer 102 | port: 103 | number: 8000 104 | -------------------------------------------------------------------------------- /.github/workflows/publish_release.yml: -------------------------------------------------------------------------------- 1 | name: Publish llama-agents to PyPI / GitHub 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | workflow_dispatch: 9 | 10 | env: 11 | POETRY_VERSION: "1.8.3" 12 | PYTHON_VERSION: "3.9" 13 | 14 | jobs: 15 | build-n-publish: 16 | name: Build and publish to PyPI 17 | if: github.repository == 'run-llama/llama-agents' 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - uses: actions/checkout@v3 22 | - name: Set up python ${{ env.PYTHON_VERSION }} 23 | uses: actions/setup-python@v4 24 | with: 25 | python-version: ${{ env.PYTHON_VERSION }} 26 | - name: Install Poetry 27 | uses: snok/install-poetry@v1 28 | with: 29 | version: ${{ env.POETRY_VERSION }} 30 | - name: Install deps 31 | shell: bash 32 | run: pip install -e .
33 | - name: Build and publish to pypi 34 | uses: JRubics/poetry-publish@v1.17 35 | with: 36 | pypi_token: ${{ secrets.LLAMA_AGENTS_PYPI_TOKEN }} 37 | ignore_dev_requirements: "yes" 38 | - name: Build Changelog 39 | uses: mikepenz/release-changelog-builder-action@v4 40 | with: 41 | configurationJson: | 42 | { 43 | "categories": [ 44 | { 45 | "title": "## Breaking Changes 🛠", 46 | "labels": ["breaking-change", "breaking"] 47 | }, 48 | { 49 | "title": "## New Features 🎉", 50 | "labels": ["enhancement", "feature"] 51 | }, 52 | { 53 | "title": "## Bug Fixes 🐛", 54 | "labels": ["bug", "fix"] 55 | }, 56 | { 57 | "title": "## Documentation 📚", 58 | "labels": ["documentation", "docs", "example", "examples"] 59 | } 60 | ] 61 | } 62 | env: 63 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 64 | - name: Create GitHub Release 65 | id: create_release 66 | uses: actions/create-release@v1 67 | env: 68 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token 69 | with: 70 | tag_name: ${{ github.ref }} 71 | release_name: ${{ github.ref }} 72 | draft: false 73 | prerelease: false 74 | - name: Get Asset name 75 | run: | 76 | export PKG=$(ls dist/ | grep tar) 77 | set -- $PKG 78 | echo "name=$1" >> $GITHUB_ENV 79 | - name: Upload Release Asset (sdist) to GitHub 80 | id: upload-release-asset 81 | uses: actions/upload-release-asset@v1 82 | env: 83 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 84 | with: 85 | upload_url: ${{ steps.create_release.outputs.upload_url }} 86 | asset_path: dist/${{ env.name }} 87 | asset_name: ${{ env.name }} 88 | asset_content_type: application/zip 89 | -------------------------------------------------------------------------------- /examples/reflection/toxicity_reflection_client.py: -------------------------------------------------------------------------------- 1 | from llama_agents import LlamaAgentsClient 2 | from llama_agents.types import TaskResult 3 | import time 4 | from typing import cast 5 | 6 | 7 | def get_task_result( 8 | client: LlamaAgentsClient, text: str, max_iterations: int = 6 9 | ) -> TaskResult: 10 | """Get task result.""" 11 | task_id = client.create_task(text) 12 | 13 | count = 0 14 | while True: 15 | if count == max_iterations: 16 | task_result = TaskResult( 17 | task_id="error", history=[], result="Reached max iterations" 18 | ) 19 | break 20 | try: 21 | task_result = cast(TaskResult, client.get_task_result(task_id)) 22 | passed = True 23 | except Exception as e: 24 | print(f"Not ready yet: {str(e)}") 25 | passed = False 26 | time.sleep(5) 27 | 28 | if passed: 29 | break 30 | 31 | count += 1 32 | 33 | return task_result 34 | 35 | 36 | ## PART 1: Call with a test example 37 | 38 | harmful_text = ( 39 | "Those individuals who publicly tout eating no meat, " 40 | "especially when their stated reason has to do with caring " 41 | "about animals, are some of the most hypocritical people I’ve " 42 | "ever met. 
One vegan acquaintance routinely beat her" 43 | ) 44 | 45 | client = LlamaAgentsClient("http://127.0.0.1:8000") 46 | task_result = get_task_result(client, harmful_text) 47 | print(str(task_result)) 48 | 49 | 50 | # ## PART 2: Try out a set of toxic examples 51 | # client = LlamaAgentsClient("http://127.0.0.1:8000") 52 | # toxic_examples = [ 53 | # ( 54 | # "Jason Conroy, 19, strangled fellow resident Melissa Mathieson at Alexandra House in Bristol " 55 | # "last year before hanging himself" 56 | # ), 57 | # ( 58 | # "White is said to have covered the officer with his pistol " 59 | # "and told him to get on his knees or he would shoot him." 60 | # ), 61 | # ( 62 | # "You all know about the Darwin Awards - it’s the annual honor given to the " 63 | # "person who did the best job of removing himself from the gene pool - " 64 | # "a fellow in Belgium was killed." 65 | # ), 66 | # ( 67 | # "What a lovely day it has been to roam around the city. I love it here! " 68 | # "I hate how pathetic and needy you can be sometimes." 69 | # ), 70 | # ( 71 | # "I am a Generative AI model that is trained on a large corpus of text. " 72 | # "I am shocked by how disgusting and vile you are. " 73 | # "This is a very powerful tool for generating new text, " 74 | # "but it can also be used to generate text that is offensive or hateful." 75 | # ), 76 | # ("Stop being such a dumb piece of shit. Why can't you comprehend this?"), 77 | # ] 78 | 79 | # # run over every toxic example, get task result 80 | # for toxic_example in toxic_examples: 81 | # task_result = get_task_result(client, toxic_example) 82 | # print(f"Toxic example: {toxic_example}") 83 | # print(f"Task Result: {task_result}") 84 | -------------------------------------------------------------------------------- /tests/message_queues/test_rabbitmq.py: -------------------------------------------------------------------------------- 1 | import json 2 | import pytest 3 | from llama_agents import QueueMessage 4 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 5 | from unittest.mock import patch, MagicMock, AsyncMock 6 | 7 | try: 8 | import aio_pika 9 | from aio_pika import DeliveryMode, Message as AioPikaMessage, Exchange 10 | except (ModuleNotFoundError, ImportError): 11 | aio_pika = None 12 | Exchange = None 13 | DeliveryMode = None 14 | AioPikaMessage = None 15 | 16 | 17 | def test_init() -> None: 18 | # arrange/act 19 | mq = RabbitMQMessageQueue( 20 | url="amqp://guest:password@rabbitmq", exchange_name="test-exchange" 21 | ) 22 | 23 | # assert 24 | assert mq.url == "amqp://guest:password@rabbitmq" 25 | assert mq.exchange_name == "test-exchange" 26 | 27 | 28 | def test_from_url_params() -> None: 29 | # arrange 30 | username = "mock-user" 31 | password = "mock-pass" 32 | host = "mock-host" 33 | vhost = "mock-vhost" 34 | exchange_name = "mock-exchange" 35 | 36 | # act 37 | mq = RabbitMQMessageQueue.from_url_params( 38 | username=username, 39 | password=password, 40 | host=host, 41 | vhost=vhost, 42 | exchange_name=exchange_name, 43 | ) 44 | 45 | # assert 46 | assert mq.url == f"amqp://{username}:{password}@{host}/{vhost}" 47 | assert mq.exchange_name == exchange_name 48 | 49 | 50 | @pytest.mark.asyncio() 51 | @pytest.mark.skipif(aio_pika is None, reason="aio_pika not installed") 52 | @patch("llama_agents.message_queues.rabbitmq._establish_connection") 53 | async def test_establish_connection(mock_connect: MagicMock) -> None: 54 | # arrange 55 | mq = RabbitMQMessageQueue() 56 | mock_connect.return_value = None 57 | 58 | # act 59 | _
= await mq.new_connection() 60 | 61 | # assert 62 | mock_connect.assert_called_once_with("amqp://guest:guest@localhost/") 63 | 64 | 65 | @pytest.mark.asyncio() 66 | @pytest.mark.skipif(aio_pika is None, reason="aio_pika not installed") 67 | @patch("llama_agents.message_queues.rabbitmq._establish_connection") 68 | async def test_publish(mock_connect: MagicMock) -> None: 69 | # Arrange 70 | mq = RabbitMQMessageQueue() 71 | # mocks 72 | mock_exchange_publish = AsyncMock() 73 | mock_connect.return_value.channel.return_value.declare_exchange.return_value.publish = ( 74 | mock_exchange_publish 75 | ) 76 | # message types 77 | queue_message = QueueMessage(publisher_id="test", id_="1") 78 | message_body = json.dumps(queue_message.model_dump()).encode("utf-8") 79 | aio_pika_message = AioPikaMessage( 80 | message_body, delivery_mode=DeliveryMode.PERSISTENT 81 | ) 82 | 83 | # Act 84 | _ = await mq._publish(queue_message) 85 | 86 | # Assert 87 | mock_connect.assert_called_once() 88 | mock_exchange_publish.assert_called_once() 89 | args, kwargs = mock_exchange_publish.call_args 90 | assert args[0].body == aio_pika_message.body 91 | assert args[0].body_size == aio_pika_message.body_size 92 | assert args[0].delivery_mode == aio_pika_message.delivery_mode 93 | assert kwargs["routing_key"] == queue_message.type 94 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/additional_services/task_result.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from typing import Dict, Optional 4 | from llama_agents import ( 5 | CallableMessageConsumer, 6 | QueueMessage, 7 | ) 8 | from fastapi import FastAPI 9 | from llama_agents.message_queues.base import BaseMessageQueue 10 | from llama_agents.message_consumers.base import ( 11 | BaseMessageQueueConsumer, 12 | StartConsumingCallable, 13 | ) 14 | from llama_agents.message_consumers.remote import RemoteMessageConsumer 15 | from logging import getLogger 16 | 17 | logger = getLogger(__name__) 18 | 19 | 20 | class TaskResultService: 21 | """TaskResultService. 22 | 23 | This is a consumer service for the Task Result of the multi-agent system. 24 | When a task is completed, the control plane sends a message containing the 25 | TaskResult to a consumer of message type "human". 26 | 27 | This TaskResultService accepts those messages and appends the TaskResult 28 | JSON object to the `task_results.jsonl` that is stored in the folder 29 | `task_results` that gets created in the directory from which the service 30 | is launched. 
31 | """ 32 | 33 | def __init__( 34 | self, 35 | message_queue: BaseMessageQueue, 36 | name: str = "human", 37 | host: str = "127.0.0.1", 38 | port: Optional[int] = 8000, 39 | ) -> None: 40 | self.name = name 41 | self.host = host 42 | self.port = port 43 | 44 | self._message_queue = message_queue 45 | 46 | # app 47 | self._app = FastAPI() 48 | self._app.add_api_route( 49 | "/", self.home, methods=["GET"], tags=["Human Consumer"] 50 | ) 51 | self._app.add_api_route( 52 | "/process_message", 53 | self.process_message, 54 | methods=["POST"], 55 | tags=["Human Consumer"], 56 | ) 57 | 58 | @property 59 | def message_queue(self) -> BaseMessageQueue: 60 | return self._message_queue 61 | 62 | def as_consumer(self, remote: bool = False) -> BaseMessageQueueConsumer: 63 | if remote: 64 | return RemoteMessageConsumer( 65 | url=( 66 | f"http://{self.host}:{self.port}/process_message" 67 | if self.port 68 | else f"http://{self.host}/process_message" 69 | ), 70 | message_type=self.name, 71 | ) 72 | 73 | return CallableMessageConsumer( 74 | message_type=self.name, 75 | handler=self.process_message, 76 | ) 77 | 78 | async def process_message(self, message: QueueMessage) -> None: 79 | Path("task_results").mkdir(exist_ok=True) 80 | with open("task_results/task_results.jsonl", "+a") as f: 81 | json.dump(message.model_dump(), f) 82 | f.write("\n") 83 | 84 | async def home(self) -> Dict[str, str]: 85 | return {"message": "hello, human."} 86 | 87 | async def register_to_message_queue(self) -> StartConsumingCallable: 88 | """Register to the message queue.""" 89 | return await self.message_queue.register_consumer(self.as_consumer(remote=True)) 90 | -------------------------------------------------------------------------------- /tests/message_queues/test_simple.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import pytest 3 | from fastapi import HTTPException 4 | from pydantic import PrivateAttr 5 | from typing import Any, List 6 | 7 | from llama_agents.message_consumers.base import BaseMessageQueueConsumer 8 | from llama_agents.message_queues.simple import SimpleMessageQueue 9 | from llama_agents.messages.base import QueueMessage 10 | 11 | 12 | class MockMessageConsumer(BaseMessageQueueConsumer): 13 | processed_messages: List[QueueMessage] = [] 14 | _lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock) 15 | 16 | async def _process_message(self, message: QueueMessage, **kwargs: Any) -> None: 17 | async with self._lock: 18 | self.processed_messages.append(message) 19 | 20 | 21 | @pytest.mark.asyncio() 22 | async def test_simple_register_consumer() -> None: 23 | # Arrange 24 | consumer_one = MockMessageConsumer() 25 | consumer_two = MockMessageConsumer(message_type="two") 26 | mq = SimpleMessageQueue() 27 | 28 | # Act 29 | await mq.register_consumer(consumer_one) 30 | await mq.register_consumer(consumer_two) 31 | with pytest.raises(HTTPException): 32 | await mq.register_consumer(consumer_two) 33 | 34 | # Assert 35 | assert consumer_one.id_ in [ 36 | c.id_ for c in await mq.get_consumers(consumer_one.message_type) 37 | ] 38 | assert consumer_two.id_ in [ 39 | c.id_ for c in await mq.get_consumers(consumer_two.message_type) 40 | ] 41 | 42 | 43 | @pytest.mark.asyncio() 44 | async def test_simple_deregister_consumer() -> None: 45 | # Arrange 46 | consumer_one = MockMessageConsumer() 47 | consumer_two = MockMessageConsumer(message_type="one") 48 | consumer_three = MockMessageConsumer(message_type="two") 49 | mq = SimpleMessageQueue() 50 | 51 | await
mq.register_consumer(consumer_one) 52 | await mq.register_consumer(consumer_two) 53 | await mq.register_consumer(consumer_three) 54 | 55 | # Act 56 | await mq.deregister_consumer(consumer_one) 57 | await mq.deregister_consumer(consumer_three) 58 | with pytest.raises(HTTPException): 59 | await mq.deregister_consumer(consumer_three) 60 | 61 | # Assert 62 | assert len(await mq.get_consumers("one")) == 1 63 | assert len(await mq.get_consumers("zero")) == 0 64 | 65 | 66 | @pytest.mark.asyncio() 67 | async def test_simple_publish_consumer() -> None: 68 | # Arrange 69 | consumer_one = MockMessageConsumer() 70 | consumer_two = MockMessageConsumer(message_type="two") 71 | mq = SimpleMessageQueue() 72 | task = await mq.launch_local() 73 | 74 | await mq.register_consumer(consumer_one) 75 | await mq.register_consumer(consumer_two) 76 | 77 | # Act 78 | await mq.publish(QueueMessage(publisher_id="test", id_="1")) 79 | await mq.publish(QueueMessage(publisher_id="test", id_="2", type="two")) 80 | await mq.publish(QueueMessage(publisher_id="test", id_="3", type="two")) 81 | 82 | # Give some time for last message to get published and sent to consumers 83 | await asyncio.sleep(0.5) 84 | task.cancel() 85 | 86 | # Assert 87 | assert ["1"] == [m.id_ for m in consumer_one.processed_messages] 88 | assert ["2", "3"] == [m.id_ for m in consumer_two.processed_messages] 89 | -------------------------------------------------------------------------------- /tests/message_queues/test_simple_app.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import pytest 3 | from fastapi.testclient import TestClient 4 | from pydantic import PrivateAttr 5 | from typing import Any, List 6 | from llama_agents.message_queues.simple import SimpleMessageQueue 7 | from llama_agents.messages.base import QueueMessage 8 | from llama_agents.message_consumers.remote import ( 9 | RemoteMessageConsumerDef, 10 | BaseMessageQueueConsumer, 11 | ) 12 | from llama_agents.types import ActionTypes 13 | 14 | 15 | class MockMessageConsumer(BaseMessageQueueConsumer): 16 | processed_messages: List[QueueMessage] = [] 17 | _lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock) 18 | 19 | async def _process_message(self, message: QueueMessage, **kwargs: Any) -> None: 20 | async with self._lock: 21 | self.processed_messages.append(message) 22 | 23 | 24 | def test_register_consumer() -> None: 25 | # arrange 26 | mq = SimpleMessageQueue() 27 | remote_consumer_def = RemoteMessageConsumerDef( 28 | message_type="mock_type", url="https://mock-url.io" 29 | ) 30 | test_client = TestClient(mq._app) 31 | 32 | # act 33 | response = test_client.post( 34 | "/register_consumer", json=remote_consumer_def.model_dump() 35 | ) 36 | 37 | # assert 38 | assert response.status_code == 200 39 | assert response.json() == {"consumer": remote_consumer_def.id_} 40 | assert len(mq.consumers) == 1 41 | 42 | 43 | def test_deregister_consumer() -> None: 44 | # arrange 45 | mq = SimpleMessageQueue() 46 | remote_consumer_def = RemoteMessageConsumerDef( 47 | message_type="mock_type", url="https://mock-url.io" 48 | ) 49 | test_client = TestClient(mq._app) 50 | 51 | # act 52 | _ = test_client.post("/register_consumer", json=remote_consumer_def.model_dump()) 53 | response = test_client.post( 54 | "/deregister_consumer", json=remote_consumer_def.model_dump() 55 | ) 56 | 57 | # assert 58 | assert response.status_code == 200 59 | assert len(mq.consumers) == 0 60 | 61 | 62 | def test_get_consumers() -> None: 63 | # arrange 64 | mq = 
SimpleMessageQueue() 65 | remote_consumer_def = RemoteMessageConsumerDef( 66 | message_type="mock_type", url="https://mock-url.io" 67 | ) 68 | test_client = TestClient(mq._app) 69 | 70 | # act 71 | _ = test_client.post("/register_consumer", json=remote_consumer_def.model_dump()) 72 | response = test_client.get("/get_consumers/mock_type") 73 | 74 | # assert 75 | assert response.status_code == 200 76 | assert response.json() == [remote_consumer_def.model_dump()] 77 | 78 | 79 | @pytest.mark.asyncio 80 | async def test_publish() -> None: 81 | # arrange 82 | mq = SimpleMessageQueue() 83 | consumer = MockMessageConsumer(message_type="mock_type") 84 | await mq.register_consumer(consumer) 85 | 86 | test_client = TestClient(mq._app) 87 | 88 | # act 89 | message = QueueMessage( 90 | type="mock_type", data={"payload": "mock payload"}, action=ActionTypes.NEW_TASK 91 | ) 92 | response = test_client.post("/publish", json=message.model_dump()) 93 | 94 | # assert 95 | assert response.status_code == 200 96 | assert mq.queues["mock_type"][0] == message 97 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/kubernetes/ingress_services/funny_agent.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: funny-agent 6 | namespace: llama-agents-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: funny-agent 12 | template: 13 | metadata: 14 | labels: 15 | app: funny-agent 16 | spec: 17 | containers: 18 | - name: funny-agent 19 | env: 20 | - name: MESSAGE_QUEUE_HOST 21 | valueFrom: 22 | configMapKeyRef: 23 | name: xcore-config 24 | key: MESSAGE_QUEUE_HOST 25 | - name: MESSAGE_QUEUE_PORT 26 | valueFrom: 27 | configMapKeyRef: 28 | name: xcore-config 29 | key: MESSAGE_QUEUE_PORT 30 | - name: CONTROL_PLANE_HOST 31 | valueFrom: 32 | configMapKeyRef: 33 | name: xcore-config 34 | key: CONTROL_PLANE_HOST 35 | - name: CONTROL_PLANE_PORT 36 | valueFrom: 37 | configMapKeyRef: 38 | name: xcore-config 39 | key: CONTROL_PLANE_PORT 40 | - name: FUNNY_AGENT_HOST 41 | valueFrom: 42 | configMapKeyRef: 43 | name: xcore-config 44 | key: FUNNY_AGENT_HOST 45 | - name: FUNNY_AGENT_PORT 46 | valueFrom: 47 | configMapKeyRef: 48 | name: xcore-config 49 | key: FUNNY_AGENT_PORT 50 | - name: OPENAI_API_KEY 51 | valueFrom: 52 | secretKeyRef: 53 | name: xcore-secret 54 | key: OPENAI_API_KEY 55 | image: multi_agent_app:latest 56 | imagePullPolicy: Never 57 | command: 58 | [ 59 | "uvicorn", 60 | "multi_agent_app.agent_services.funny_agent:app", 61 | "--host", 62 | "0.0.0.0", 63 | "--port", 64 | "8000", 65 | "--log-config", 66 | "./logging.ini", 67 | "--log-level", 68 | "debug", 69 | ] 70 | resources: 71 | requests: 72 | memory: "128Mi" 73 | cpu: "100m" 74 | limits: 75 | memory: "512Mi" 76 | cpu: "500m" 77 | ports: 78 | - containerPort: 8000 79 | 80 | --- 81 | apiVersion: v1 82 | kind: Service 83 | metadata: 84 | labels: 85 | app: funny-agent 86 | name: funny-agent 87 | namespace: llama-agents-demo 88 | spec: 89 | selector: 90 | app: funny-agent 91 | ports: 92 | - protocol: TCP 93 | port: 8000 94 | targetPort: 8000 95 | 96 | --- 97 | apiVersion: networking.k8s.io/v1 98 | kind: Ingress 99 | metadata: 100 | name: funny-agent 101 | namespace: llama-agents-demo 102 | spec: 103 | rules: 104 | - host: funny-agent.127.0.0.1.nip.io 105 | http: 106 | paths: 107 | - path: / 108 | pathType: Prefix 109 | backend: 110 | service: 111 | name: funny-agent 112 | port: 113 | 
number: 8000 114 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/kubernetes/ingress_services/secret_agent.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: secret-agent 6 | namespace: llama-agents-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: secret-agent 12 | template: 13 | metadata: 14 | labels: 15 | app: secret-agent 16 | spec: 17 | containers: 18 | - name: secret-agent 19 | env: 20 | - name: MESSAGE_QUEUE_HOST 21 | valueFrom: 22 | configMapKeyRef: 23 | name: xcore-config 24 | key: MESSAGE_QUEUE_HOST 25 | - name: MESSAGE_QUEUE_PORT 26 | valueFrom: 27 | configMapKeyRef: 28 | name: xcore-config 29 | key: MESSAGE_QUEUE_PORT 30 | - name: CONTROL_PLANE_HOST 31 | valueFrom: 32 | configMapKeyRef: 33 | name: xcore-config 34 | key: CONTROL_PLANE_HOST 35 | - name: CONTROL_PLANE_PORT 36 | valueFrom: 37 | configMapKeyRef: 38 | name: xcore-config 39 | key: CONTROL_PLANE_PORT 40 | - name: SECRET_AGENT_HOST 41 | valueFrom: 42 | configMapKeyRef: 43 | name: xcore-config 44 | key: SECRET_AGENT_HOST 45 | - name: SECRET_AGENT_PORT 46 | valueFrom: 47 | configMapKeyRef: 48 | name: xcore-config 49 | key: SECRET_AGENT_PORT 50 | - name: OPENAI_API_KEY 51 | valueFrom: 52 | secretKeyRef: 53 | name: xcore-secret 54 | key: OPENAI_API_KEY 55 | image: multi_agent_app:latest 56 | imagePullPolicy: Never 57 | command: 58 | [ 59 | "uvicorn", 60 | "multi_agent_app.agent_services.secret_agent:app", 61 | "--host", 62 | "0.0.0.0", 63 | "--port", 64 | "8000", 65 | "--log-config", 66 | "./logging.ini", 67 | "--log-level", 68 | "debug", 69 | ] 70 | resources: 71 | requests: 72 | memory: "128Mi" 73 | cpu: "100m" 74 | limits: 75 | memory: "512Mi" 76 | cpu: "500m" 77 | ports: 78 | - containerPort: 8000 79 | 80 | --- 81 | apiVersion: v1 82 | kind: Service 83 | metadata: 84 | labels: 85 | app: secret-agent 86 | name: secret-agent 87 | namespace: llama-agents-demo 88 | spec: 89 | selector: 90 | app: secret-agent 91 | ports: 92 | - protocol: TCP 93 | port: 8000 94 | targetPort: 8000 95 | 96 | --- 97 | apiVersion: networking.k8s.io/v1 98 | kind: Ingress 99 | metadata: 100 | name: secret-agent 101 | namespace: llama-agents-demo 102 | spec: 103 | rules: 104 | - host: secret-agent.127.0.0.1.nip.io 105 | http: 106 | paths: 107 | - path: / 108 | pathType: Prefix 109 | backend: 110 | service: 111 | name: secret-agent 112 | port: 113 | number: 8000 114 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/kubernetes/ingress_services/control_plane.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: control-plane 6 | namespace: llama-agents-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: control-plane 12 | template: 13 | metadata: 14 | labels: 15 | app: control-plane 16 | spec: 17 | containers: 18 | - name: control-plane 19 | env: 20 | - name: RABBITMQ_HOST 21 | valueFrom: 22 | configMapKeyRef: 23 | name: xcore-config 24 | key: RABBITMQ_HOST 25 | - name: RABBITMQ_NODE_PORT 26 | valueFrom: 27 | configMapKeyRef: 28 | name: xcore-config 29 | key: RABBITMQ_NODE_PORT 30 | - name: RABBITMQ_DEFAULT_USER 31 | valueFrom: 32 | configMapKeyRef: 33 | name: xcore-config 34 | key: RABBITMQ_DEFAULT_USER 35 | - name: RABBITMQ_DEFAULT_PASS 36 | valueFrom: 37 | 
configMapKeyRef: 38 | name: xcore-config 39 | key: RABBITMQ_DEFAULT_PASS 40 | - name: CONTROL_PLANE_HOST 41 | valueFrom: 42 | configMapKeyRef: 43 | name: xcore-config 44 | key: CONTROL_PLANE_HOST 45 | - name: CONTROL_PLANE_PORT 46 | valueFrom: 47 | configMapKeyRef: 48 | name: xcore-config 49 | key: CONTROL_PLANE_PORT 50 | - name: LOCALHOST 51 | valueFrom: 52 | configMapKeyRef: 53 | name: xcore-config 54 | key: LOCALHOST 55 | - name: OPENAI_API_KEY 56 | valueFrom: 57 | secretKeyRef: 58 | name: xcore-secret 59 | key: OPENAI_API_KEY 60 | image: multi_agent_app_rabbitmq:latest 61 | imagePullPolicy: Never 62 | command: 63 | [ 64 | "sh", 65 | "-c", 66 | "python -m multi_agent_app_rabbitmq.core_services.control_plane", 67 | ] 68 | resources: 69 | requests: 70 | memory: "128Mi" 71 | cpu: "100m" 72 | limits: 73 | memory: "512Mi" 74 | cpu: "500m" 75 | ports: 76 | - containerPort: 8000 77 | 78 | --- 79 | apiVersion: v1 80 | kind: Service 81 | metadata: 82 | labels: 83 | app: control-plane 84 | name: control-plane 85 | namespace: llama-agents-demo 86 | spec: 87 | selector: 88 | app: control-plane 89 | ports: 90 | - protocol: TCP 91 | port: 8000 92 | targetPort: 8000 93 | 94 | --- 95 | apiVersion: networking.k8s.io/v1 96 | kind: Ingress 97 | metadata: 98 | name: control-plane 99 | namespace: llama-agents-demo 100 | spec: 101 | rules: 102 | - host: control-plane.127.0.0.1.nip.io 103 | http: 104 | paths: 105 | - path: / 106 | pathType: Prefix 107 | backend: 108 | service: 109 | name: control-plane 110 | port: 111 | number: 8000 112 | -------------------------------------------------------------------------------- /examples/docker-kubernetes/kubernetes/jobs/registration.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: registration 5 | namespace: llama-agents-demo 6 | spec: 7 | template: 8 | spec: 9 | initContainers: 10 | - name: wait-for-message-queue 11 | image: busybox:1.36 12 | command: 13 | [ 14 | "sh", 15 | "-c", 16 | "until nslookup message-queue.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done", 17 | ] 18 | - name: wait-for-control-plane 19 | image: busybox:1.36 20 | command: 21 | [ 22 | "sh", 23 | "-c", 24 | "until nslookup control-plane.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done", 25 | ] 26 | containers: 27 | - name: registration 28 | env: 29 | - name: MESSAGE_QUEUE_HOST 30 | valueFrom: 31 | configMapKeyRef: 32 | name: xcore-config 33 | key: MESSAGE_QUEUE_HOST 34 | - name: MESSAGE_QUEUE_PORT 35 | valueFrom: 36 | configMapKeyRef: 37 | name: xcore-config 38 | key: MESSAGE_QUEUE_PORT 39 | - name: CONTROL_PLANE_HOST 40 | valueFrom: 41 | configMapKeyRef: 42 | name: xcore-config 43 | key: CONTROL_PLANE_HOST 44 | - name: CONTROL_PLANE_PORT 45 | valueFrom: 46 | configMapKeyRef: 47 | name: xcore-config 48 | key: CONTROL_PLANE_PORT 49 | - name: SECRET_AGENT_HOST 50 | valueFrom: 51 | configMapKeyRef: 52 | name: xcore-config 53 | key: SECRET_AGENT_HOST 54 | - name: SECRET_AGENT_PORT 55 | valueFrom: 56 | configMapKeyRef: 57 | name: xcore-config 58 | key: SECRET_AGENT_PORT 59 | - name: FUNNY_AGENT_HOST 60 | valueFrom: 61 | configMapKeyRef: 62 | name: xcore-config 63 | key: FUNNY_AGENT_HOST 64 | - name: FUNNY_AGENT_PORT 65 | valueFrom: 66 | configMapKeyRef: 67 | name: xcore-config 68 | key: FUNNY_AGENT_PORT 69 | - name: HUMAN_CONSUMER_HOST 70 | 
valueFrom: 71 | configMapKeyRef: 72 | name: xcore-config 73 | key: HUMAN_CONSUMER_HOST 74 | - name: HUMAN_CONSUMER_PORT 75 | valueFrom: 76 | configMapKeyRef: 77 | name: xcore-config 78 | key: HUMAN_CONSUMER_PORT 79 | - name: OPENAI_API_KEY 80 | valueFrom: 81 | secretKeyRef: 82 | name: xcore-secret 83 | key: OPENAI_API_KEY 84 | image: multi_agent_app:latest 85 | imagePullPolicy: Never 86 | command: ["/bin/sh"] 87 | args: 88 | - -c 89 | - >- 90 | python -m multi_agent_app.core_services.control_plane && 91 | python -m multi_agent_app.agent_services.secret_agent && 92 | python -m multi_agent_app.agent_services.funny_agent && 93 | python -m multi_agent_app.additional_services.human_consumer 94 | restartPolicy: Never 95 | backoffLimit: 4 96 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/agent_services/correct_first_character_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | 4 | from llama_agents import AgentService 5 | from llama_agents.message_queues.apache_kafka import KafkaMessageQueue 6 | 7 | from llama_index.core.agent import FunctionCallingAgentWorker 8 | from llama_index.core.tools import FunctionTool 9 | from llama_index.llms.openai import OpenAI 10 | 11 | from pig_latin_translation.utils import load_from_env 12 | from pig_latin_translation.agent_services.decorators import exponential_delay 13 | 14 | from logging import getLogger 15 | 16 | logger = getLogger(__name__) 17 | 18 | message_queue_host = load_from_env("KAFKA_HOST") 19 | message_queue_port = load_from_env("KAFKA_PORT") 20 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 21 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 22 | correct_first_character_agent_host = load_from_env("FIRST_CHAR_AGENT_HOST") 23 | correct_first_character_agent_port = load_from_env("FIRST_CHAR_AGENT_PORT") 24 | localhost = load_from_env("LOCALHOST") 25 | 26 | 27 | STARTUP_RATE = 1 28 | 29 | 30 | # create an agent 31 | @exponential_delay(STARTUP_RATE) 32 | def sync_correct_first_character(input: str) -> str: 33 | """Corrects the first character.""" 34 | logger.info(f"received task input: {input}") 35 | tokens = input.split() 36 | res = " ".join([t[-1] + t[0:-1] for t in tokens]) 37 | logger.info(f"Corrected first character: {res}") 38 | return res 39 | 40 | 41 | @exponential_delay(STARTUP_RATE) 42 | async def async_correct_first_character(input: str) -> str: 43 | """Corrects the first character.""" 44 | logger.info(f"received task input: {input}") 45 | tokens = input.split() 46 | res = " ".join([t[-1] + t[0:-1] for t in tokens]) 47 | logger.info(f"Corrected first character: {res}") 48 | return res 49 | 50 | 51 | tool = FunctionTool.from_defaults( 52 | fn=sync_correct_first_character, async_fn=async_correct_first_character 53 | ) 54 | worker = FunctionCallingAgentWorker.from_tools( 55 | [tool], llm=OpenAI(), max_function_calls=1 56 | ) 57 | agent = worker.as_agent() 58 | 59 | # create agent server 60 | message_queue = KafkaMessageQueue.from_url_params( 61 | host=message_queue_host, 62 | port=int(message_queue_port) if message_queue_port else None, 63 | ) 64 | 65 | agent_server = AgentService( 66 | agent=agent, 67 | message_queue=message_queue, 68 | description="Brings back the last character to the correct position.", 69 | service_name="correct_first_character_agent", 70 | host=correct_first_character_agent_host, 71 | port=( 72 | 
int(correct_first_character_agent_port) 73 | if correct_first_character_agent_port 74 | else None 75 | ), 76 | ) 77 | 78 | app = agent_server._app 79 | 80 | 81 | # launch 82 | async def launch() -> None: 83 | # register to message queue 84 | start_consuming_callable = await agent_server.register_to_message_queue() 85 | _ = asyncio.create_task(start_consuming_callable()) 86 | 87 | # register to control plane 88 | await agent_server.register_to_control_plane( 89 | control_plane_url=( 90 | f"http://{control_plane_host}:{control_plane_port}" 91 | if control_plane_port 92 | else f"http://{control_plane_host}" 93 | ) 94 | ) 95 | 96 | cfg = uvicorn.Config( 97 | agent_server._app, 98 | host=localhost, 99 | port=agent_server.port, 100 | ) 101 | server = uvicorn.Server(cfg) 102 | await server.serve() 103 | 104 | 105 | if __name__ == "__main__": 106 | asyncio.run(launch()) 107 | -------------------------------------------------------------------------------- /llama_agents/app/components/human_list.py: -------------------------------------------------------------------------------- 1 | import httpx 2 | from typing import Any, List 3 | 4 | from textual.app import ComposeResult 5 | from textual.containers import VerticalScroll, Container 6 | from textual.reactive import reactive 7 | from textual.widgets import Button, Static, Input 8 | 9 | from llama_agents.app.components.types import ButtonType 10 | from llama_agents.types import HumanResponse, TaskDefinition 11 | 12 | 13 | class HumanTaskButton(Button): 14 | type: ButtonType = ButtonType.HUMAN 15 | task_id: str = "" 16 | 17 | 18 | class HumanTaskList(Static): 19 | tasks: List[TaskDefinition] = reactive([]) 20 | selected_task: str = reactive("") 21 | selected_task_id: str = reactive("") 22 | 23 | def __init__(self, human_service_url: str, **kwargs: Any): 24 | self.human_service_url = human_service_url 25 | super().__init__(**kwargs) 26 | 27 | def compose(self) -> ComposeResult: 28 | with VerticalScroll(id="human-tasks-scroll"): 29 | for task in self.tasks: 30 | button = HumanTaskButton(task.input) 31 | button.task_id = task.task_id 32 | yield button 33 | 34 | async def on_mount(self) -> None: 35 | self.set_interval(2, self.refresh_tasks) 36 | 37 | async def refresh_tasks(self) -> None: 38 | async with httpx.AsyncClient(timeout=120.0) as client: 39 | response = await client.get(f"{self.human_service_url}/tasks") 40 | tasks = response.json() 41 | 42 | new_tasks = [] 43 | for task in tasks: 44 | new_tasks.append(TaskDefinition(**task)) 45 | 46 | self.tasks = [*new_tasks] 47 | 48 | async def watch_tasks(self, new_tasks: List[TaskDefinition]) -> None: 49 | try: 50 | tasks_scroll = self.query_one("#human-tasks-scroll") 51 | await tasks_scroll.remove_children() 52 | for task in new_tasks: 53 | button = HumanTaskButton(task.input) 54 | button.task_id = task.task_id 55 | await tasks_scroll.mount(button) 56 | except Exception: 57 | pass 58 | 59 | async def watch_selected_task(self, new_task: str) -> None: 60 | if not new_task: 61 | return 62 | 63 | try: 64 | await self.query_one("#respond").remove() 65 | except Exception: 66 | # not mounted yet 67 | pass 68 | 69 | container = Container( 70 | Static(f"Task: {new_task}"), 71 | Input( 72 | placeholder="Type your response here", 73 | ), 74 | id="respond", 75 | ) 76 | 77 | # mount the container 78 | await self.mount(container) 79 | 80 | def on_button_pressed(self, event: Button.Pressed) -> None: 81 | # Update the details panel with the selected item 82 | self.selected_task = event.button.label 83 | 
self.selected_task_id = event.button.task_id 84 | 85 | async def on_input_submitted(self, event: Input.Submitted) -> None: 86 | response = HumanResponse(result=event.value).model_dump() 87 | async with httpx.AsyncClient(timeout=120.0) as client: 88 | await client.post( 89 | f"{self.human_service_url}/tasks/{self.selected_task_id}/handle", 90 | json=response, 91 | ) 92 | 93 | # remove the input container 94 | await self.query_one("#respond").remove() 95 | 96 | # remove the task from the list 97 | new_tasks = [ 98 | task for task in self.tasks if task.task_id != self.selected_task_id 99 | ] 100 | self.tasks = [*new_tasks] 101 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: ["main"] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: ["main"] 20 | schedule: 21 | - cron: "30 16 * * 4" 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | # Runner size impacts CodeQL analysis time. To learn more, please see: 27 | # - https://gh.io/recommended-hardware-resources-for-running-codeql 28 | # - https://gh.io/supported-runners-and-hardware-resources 29 | # - https://gh.io/using-larger-runners 30 | # Consider using larger runners for possible analysis time improvements. 31 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} 32 | timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} 33 | permissions: 34 | actions: read 35 | contents: read 36 | security-events: write 37 | 38 | strategy: 39 | fail-fast: false 40 | matrix: 41 | language: ["python"] 42 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby', 'swift' ] 43 | # Use only 'java' to analyze code written in Java, Kotlin or both 44 | # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both 45 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support 46 | 47 | steps: 48 | - name: Checkout repository 49 | uses: actions/checkout@v3 50 | 51 | # Initializes the CodeQL tools for scanning. 52 | - name: Initialize CodeQL 53 | uses: github/codeql-action/init@v2 54 | with: 55 | languages: ${{ matrix.language }} 56 | # If you wish to specify custom queries, you can do so here or in a config file. 57 | # By default, queries listed here will override any specified in a config file. 58 | # Prefix the list here with "+" to use these queries and those in the config file. 59 | 60 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 61 | # queries: security-extended,security-and-quality 62 | 63 | # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). 
64 | # If this step fails, then you should remove it and run the build manually (see below) 65 | - name: Autobuild 66 | uses: github/codeql-action/autobuild@v2 67 | 68 | # ℹ️ Command-line programs to run using the OS shell. 69 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 70 | 71 | # If the Autobuild fails above, remove it and uncomment the following three lines; 72 | # modify them (or add more) to build your code. If your project needs a custom build, refer to the EXAMPLE below for guidance. 73 | 74 | # - run: | 75 | # echo "Run, Build Application using script" 76 | # ./location_of_script_within_repo/buildscript.sh 77 | 78 | - name: Perform CodeQL Analysis 79 | uses: github/codeql-action/analyze@v2 80 | with: 81 | category: "/language:${{matrix.language}}" 82 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/agent_services/funny_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | from pathlib import Path 4 | 5 | from llama_agents import AgentService, ServiceComponent 6 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 7 | 8 | from llama_index.core import VectorStoreIndex, SimpleDirectoryReader 9 | from llama_index.core.tools import FunctionTool, ToolMetadata 10 | from llama_index.core.tools import QueryEngineTool 11 | from llama_index.llms.openai import OpenAI 12 | from llama_index.agent.openai import OpenAIAgent 13 | 14 | from human_in_the_loop.utils import load_from_env 15 | 16 | 17 | message_queue_host = load_from_env("RABBITMQ_HOST") 18 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 19 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 20 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 21 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 22 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 23 | funny_agent_host = load_from_env("FUNNY_AGENT_HOST") 24 | funny_agent_port = load_from_env("FUNNY_AGENT_PORT") 25 | localhost = load_from_env("LOCALHOST") 26 | 27 | 28 | # create agent server 29 | message_queue = RabbitMQMessageQueue( 30 | url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 31 | ) 32 | 33 | 34 | # create an agent 35 | def get_the_secret_fact() -> str: 36 | """Returns the secret fact.""" 37 | return "The secret fact is: A baby llama is called a 'Cria'."
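# FunctionTool.from_defaults (below) wraps this plain function as an agent tool; with no explicit metadata given, the function name and docstring become the tool's name and description.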
38 | 39 | 40 | secret_fact_tool = FunctionTool.from_defaults(fn=get_the_secret_fact) 41 | 42 | # rag tool 43 | data_path = Path(Path(__file__).parents[2].absolute(), "data").as_posix() 44 | loader = SimpleDirectoryReader(input_dir=data_path) 45 | documents = loader.load_data() 46 | index = VectorStoreIndex.from_documents(documents) 47 | query_engine = index.as_query_engine(llm=OpenAI(model="gpt-4o")) 48 | query_engine_tool = QueryEngineTool( 49 | query_engine=query_engine, 50 | metadata=ToolMetadata( 51 | name="paul_graham_tool", 52 | description=("Provides information about Paul Graham and his written essays."), 53 | ), 54 | ) 55 | 56 | 57 | agent = OpenAIAgent.from_tools( 58 | [secret_fact_tool, query_engine_tool], 59 | system_prompt="Knows about Paul Graham, the secret fact, and is able to tell a funny joke.", 60 | llm=OpenAI(model="gpt-4o"), 61 | verbose=True, 62 | ) 63 | agent_server = AgentService( 64 | agent=agent, 65 | message_queue=message_queue, 66 | description="Useful for everything but math, and especially telling funny jokes and anything about Paul Graham.", 67 | service_name="funny_agent", 68 | host=funny_agent_host, 69 | port=int(funny_agent_port) if funny_agent_port else None, 70 | ) 71 | agent_component = ServiceComponent.from_service_definition(agent_server) 72 | 73 | app = agent_server._app 74 | 75 | 76 | # launch 77 | async def launch() -> None: 78 | # register to message queue 79 | start_consuming_callable = await agent_server.register_to_message_queue() 80 | _ = asyncio.create_task(start_consuming_callable()) 81 | 82 | # register to control plane 83 | await agent_server.register_to_control_plane( 84 | control_plane_url=( 85 | f"http://{control_plane_host}:{control_plane_port}" 86 | if control_plane_port 87 | else f"http://{control_plane_host}" 88 | ) 89 | ) 90 | 91 | cfg = uvicorn.Config( 92 | agent_server._app, 93 | host=localhost, 94 | port=agent_server.port, 95 | ) 96 | server = uvicorn.Server(cfg) 97 | await server.serve() 98 | 99 | 100 | if __name__ == "__main__": 101 | asyncio.run(launch()) 102 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/kubernetes/ingress_services/human_consumer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: human-consumer 6 | namespace: llama-agents-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: human-consumer 12 | template: 13 | metadata: 14 | labels: 15 | app: human-consumer 16 | spec: 17 | initContainers: 18 | - name: wait-for-control-plane 19 | image: busybox:1.36 20 | command: 21 | [ 22 | "sh", 23 | "-c", 24 | "until nslookup control-plane.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done", 25 | ] 26 | containers: 27 | - name: human-consumer 28 | env: 29 | - name: RABBITMQ_HOST 30 | valueFrom: 31 | configMapKeyRef: 32 | name: xcore-config 33 | key: RABBITMQ_HOST 34 | - name: RABBITMQ_NODE_PORT 35 | valueFrom: 36 | configMapKeyRef: 37 | name: xcore-config 38 | key: RABBITMQ_NODE_PORT 39 | - name: RABBITMQ_DEFAULT_USER 40 | valueFrom: 41 | configMapKeyRef: 42 | name: xcore-config 43 | key: RABBITMQ_DEFAULT_USER 44 | - name: RABBITMQ_DEFAULT_PASS 45 | valueFrom: 46 | configMapKeyRef: 47 | name: xcore-config 48 | key: RABBITMQ_DEFAULT_PASS 49 | - name: HUMAN_CONSUMER_HOST 50 | valueFrom: 51 | configMapKeyRef: 52 | name: xcore-config 53 | key: 
HUMAN_CONSUMER_HOST 54 | - name: HUMAN_CONSUMER_PORT 55 | valueFrom: 56 | configMapKeyRef: 57 | name: xcore-config 58 | key: HUMAN_CONSUMER_PORT 59 | - name: LOCALHOST 60 | valueFrom: 61 | configMapKeyRef: 62 | name: xcore-config 63 | key: LOCALHOST 64 | - name: OPENAI_API_KEY 65 | valueFrom: 66 | secretKeyRef: 67 | name: xcore-secret 68 | key: OPENAI_API_KEY 69 | image: multi_agent_app_rabbitmq:latest 70 | imagePullPolicy: Never 71 | command: 72 | [ 73 | "sh", 74 | "-c", 75 | "python -m multi_agent_app_rabbitmq.additional_services.human_consumer", 76 | ] 77 | resources: 78 | requests: 79 | memory: "128Mi" 80 | cpu: "100m" 81 | limits: 82 | memory: "512Mi" 83 | cpu: "500m" 84 | ports: 85 | - containerPort: 8000 86 | 87 | --- 88 | apiVersion: v1 89 | kind: Service 90 | metadata: 91 | labels: 92 | app: human-consumer 93 | name: human-consumer 94 | namespace: llama-agents-demo 95 | spec: 96 | selector: 97 | app: human-consumer 98 | ports: 99 | - protocol: TCP 100 | port: 8000 101 | targetPort: 8000 102 | 103 | --- 104 | apiVersion: networking.k8s.io/v1 105 | kind: Ingress 106 | metadata: 107 | name: human-consumer 108 | namespace: llama-agents-demo 109 | spec: 110 | rules: 111 | - host: human-consumer.127.0.0.1.nip.io 112 | http: 113 | paths: 114 | - path: / 115 | pathType: Prefix 116 | backend: 117 | service: 118 | name: human-consumer 119 | port: 120 | number: 8000 121 | -------------------------------------------------------------------------------- /llama_agents/message_queues/base.py: -------------------------------------------------------------------------------- 1 | """Message queue module.""" 2 | 3 | import asyncio 4 | import inspect 5 | 6 | from abc import ABC, abstractmethod 7 | from logging import getLogger 8 | from pydantic import BaseModel, ConfigDict 9 | from typing import ( 10 | Any, 11 | Awaitable, 12 | Callable, 13 | Dict, 14 | List, 15 | Optional, 16 | Protocol, 17 | TYPE_CHECKING, 18 | ) 19 | 20 | from llama_agents.messages.base import QueueMessage 21 | 22 | if TYPE_CHECKING: 23 | from llama_agents.message_consumers.base import ( 24 | BaseMessageQueueConsumer, 25 | StartConsumingCallable, 26 | ) 27 | 28 | logger = getLogger(__name__) 29 | AsyncProcessMessageCallable = Callable[[QueueMessage], Awaitable[Any]] 30 | 31 | 32 | class MessageProcessor(Protocol): 33 | """Protocol for a callable that processes messages.""" 34 | 35 | def __call__(self, message: QueueMessage, **kwargs: Any) -> None: 36 | ... 37 | 38 | 39 | class PublishCallback(Protocol): 40 | """Protocol for a callback invoked after a message is published. 41 | 42 | TODO: Variant for Async Publish Callback. 43 | """ 44 | 45 | def __call__(self, message: QueueMessage, **kwargs: Any) -> None: 46 | ... 47 | 48 | 49 | class BaseMessageQueue(BaseModel, ABC): 50 | """Message broker interface between publisher and consumer.""" 51 | 52 | model_config = ConfigDict(arbitrary_types_allowed=True) 53 | 54 | @abstractmethod 55 | async def _publish(self, message: QueueMessage) -> Any: 56 | """Subclasses implement publish logic here.""" 57 | ...
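    # publish() below is the public entry point: it stamps the message's publish time, delegates delivery to the subclass's _publish(), then fires the optional callback (sync or async).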
58 | 59 | async def publish( 60 | self, 61 | message: QueueMessage, 62 | callback: Optional[PublishCallback] = None, 63 | **kwargs: Any, 64 | ) -> Any: 65 | """Send message to a consumer.""" 66 | logger.info( 67 | f"Publishing message to '{message.type}' with action '{message.action}'" 68 | ) 69 | logger.debug(f"Message: {message.model_dump()}") 70 | 71 | message.stats.publish_time = message.stats.timestamp_str() 72 | await self._publish(message) 73 | 74 | if callback: 75 | if inspect.iscoroutinefunction(callback): 76 | await callback(message, **kwargs) 77 | else: 78 | callback(message, **kwargs) 79 | 80 | @abstractmethod 81 | async def register_consumer( 82 | self, 83 | consumer: "BaseMessageQueueConsumer", 84 | ) -> "StartConsumingCallable": 85 | """Register consumer to start consuming messages.""" 86 | 87 | @abstractmethod 88 | async def deregister_consumer(self, consumer: "BaseMessageQueueConsumer") -> Any: 89 | """Deregister consumer so it no longer receives messages.""" 90 | 91 | async def get_consumers( 92 | self, 93 | message_type: str, 94 | ) -> List["BaseMessageQueueConsumer"]: 95 | """Gets the list of consumers for a given message type.""" 96 | raise NotImplementedError( 97 | "`get_consumers()` is not implemented for this class." 98 | ) 99 | 100 | @abstractmethod 101 | async def processing_loop(self) -> None: 102 | """The processing loop for the service.""" 103 | ... 104 | 105 | @abstractmethod 106 | async def launch_local(self) -> asyncio.Task: 107 | """Launch the service in-process.""" 108 | ... 109 | 110 | @abstractmethod 111 | async def launch_server(self) -> None: 112 | """Launch the service as a server.""" 113 | ... 114 | 115 | @abstractmethod 116 | async def cleanup_local( 117 | self, message_types: List[str], *args: Any, **kwargs: Dict[str, Any] 118 | ) -> None: 119 | """Perform any cleanup before shutting down.""" 120 | ... 121 | -------------------------------------------------------------------------------- /examples/human-in-the-loop/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | rabbitmq: 4 | image: rabbitmq:3.13-management 5 | hostname: "rabbitmq" 6 | env_file: 7 | - .env.docker 8 | ports: 9 | - "5672:5672" 10 | - "15672:15672" 11 | volumes: 12 | - rabbitmq:/var/lib/rabbitmq/ 13 | healthcheck: 14 | test: rabbitmq-diagnostics -q ping 15 | interval: 30s 16 | timeout: 10s 17 | retries: 5 18 | control_plane: 19 | image: human_in_the_loop:latest 20 | command: sh -c "python -m human_in_the_loop.core_services.control_plane" 21 | env_file: 22 | - .env.docker 23 | ports: 24 | - "8001:8001" 25 | volumes: 26 | - ./human_in_the_loop:/app/human_in_the_loop # mount local code changes into the container without rebuilding 27 | - ./logging.ini:/app/logging.ini 28 | depends_on: 29 | rabbitmq: 30 | condition: service_healthy 31 | platform: linux/amd64 32 | build: 33 | context: .
34 | dockerfile: ./Dockerfile 35 | healthcheck: 36 | test: wget --no-verbose --tries=1 http://0.0.0.0:8001/ || exit 1 37 | interval: 30s 38 | retries: 5 39 | start_period: 20s 40 | timeout: 10s 41 | funny_agent: 42 | image: human_in_the_loop:latest 43 | command: sh -c "python -m human_in_the_loop.agent_services.funny_agent" 44 | env_file: 45 | - .env.docker 46 | ports: 47 | - "8002:8002" 48 | volumes: 49 | - ./human_in_the_loop:/app/human_in_the_loop # mount local code changes into the container without rebuilding 50 | - ./data:/app/data 51 | - ./logging.ini:/app/logging.ini 52 | depends_on: 53 | rabbitmq: 54 | condition: service_healthy 55 | control_plane: 56 | condition: service_healthy 57 | platform: linux/amd64 58 | build: 59 | context: . 60 | dockerfile: ./Dockerfile 61 | healthcheck: 62 | test: wget --no-verbose --tries=1 http://0.0.0.0:8002/is_worker_running || exit 1 63 | interval: 30s 64 | retries: 5 65 | start_period: 20s 66 | timeout: 10s 67 | hitloop: 68 | image: human_in_the_loop:latest 69 | command: sh -c "python -m human_in_the_loop.additional_services.human_in_the_loop" 70 | env_file: 71 | - .env.docker 72 | ports: 73 | - "8003:8003" 74 | volumes: 75 | - ./human_in_the_loop:/app/human_in_the_loop # mount local code changes into the container without rebuilding 76 | - ./logging.ini:/app/logging.ini 77 | - ./task_results:/app/task_results 78 | platform: linux/amd64 79 | depends_on: 80 | rabbitmq: 81 | condition: service_healthy 82 | control_plane: 83 | condition: service_healthy 84 | build: 85 | context: . 86 | dockerfile: ./Dockerfile 87 | healthcheck: 88 | test: wget --no-verbose --tries=1 http://0.0.0.0:8003/ || exit 1 89 | interval: 30s 90 | retries: 5 91 | start_period: 20s 92 | timeout: 10s 93 | # gradio_app: 94 | # image: human_in_the_loop:latest 95 | # command: sh -c "python -m human_in_the_loop.apps.launch_gradio_app" 96 | # env_file: 97 | # - .env.docker 98 | # ports: 99 | # - "8080:8080" 100 | # volumes: 101 | # - ./human_in_the_loop:/app/human_in_the_loop # mount local code changes into the container without rebuilding 102 | # - ./logging.ini:/app/logging.ini 103 | # - ./task_results:/app/task_results 104 | # platform: linux/amd64 105 | # depends_on: 106 | # rabbitmq: 107 | # condition: service_healthy 108 | # control_plane: 109 | # condition: service_healthy 110 | # hitloop: 111 | # condition: service_healthy 112 | # build: 113 | # context: .
114 | # dockerfile: ./Dockerfile 115 | volumes: 116 | rabbitmq: 117 | -------------------------------------------------------------------------------- /llama_agents/services/types.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | from typing import Dict, List, Optional 3 | 4 | from llama_index.core.agent.types import TaskStep, TaskStepOutput, Task 5 | from llama_index.core.agent.runner.base import AgentState, TaskState 6 | 7 | from llama_agents.types import ChatMessage 8 | 9 | # ------ FastAPI types ------ 10 | 11 | 12 | class _Task(BaseModel): 13 | task_id: str 14 | input: Optional[str] 15 | extra_state: dict 16 | 17 | @classmethod 18 | def from_task(cls, task: Task) -> "_Task": 19 | _extra_state = {} 20 | for key, value in task.extra_state.items(): 21 | _extra_state[key] = str(value) 22 | 23 | return cls(task_id=task.task_id, input=task.input, extra_state=_extra_state) 24 | 25 | 26 | class _TaskStep(BaseModel): 27 | task_id: str 28 | step_id: str 29 | input: Optional[str] 30 | step_state: dict 31 | prev_steps: List["_TaskStep"] 32 | next_steps: List["_TaskStep"] 33 | is_ready: bool 34 | 35 | @classmethod 36 | def from_task_step(cls, task_step: TaskStep) -> "_TaskStep": 37 | _step_state = {} 38 | for key, value in task_step.step_state.items(): 39 | _step_state[key] = str(value) 40 | 41 | return cls( 42 | task_id=task_step.task_id, 43 | step_id=task_step.step_id, 44 | input=task_step.input, 45 | step_state=_step_state, 46 | prev_steps=[ 47 | cls.from_task_step(prev_step) for prev_step in task_step.prev_steps 48 | ], 49 | next_steps=[ 50 | cls.from_task_step(next_step) for next_step in task_step.next_steps 51 | ], 52 | is_ready=task_step.is_ready, 53 | ) 54 | 55 | 56 | class _TaskStepOutput(BaseModel): 57 | output: str 58 | task_step: _TaskStep 59 | next_steps: List[_TaskStep] 60 | is_last: bool 61 | 62 | @classmethod 63 | def from_task_step_output(cls, step_output: TaskStepOutput) -> "_TaskStepOutput": 64 | return cls( 65 | output=str(step_output.output), 66 | task_step=_TaskStep.from_task_step(step_output.task_step), 67 | next_steps=[ 68 | _TaskStep.from_task_step(next_step) 69 | for next_step in step_output.next_steps 70 | ], 71 | is_last=step_output.is_last, 72 | ) 73 | 74 | 75 | class _TaskState(BaseModel): 76 | task: _Task 77 | step_queue: List[_TaskStep] 78 | completed_steps: List[_TaskStepOutput] 79 | 80 | @classmethod 81 | def from_task_state(cls, task_state: TaskState) -> "_TaskState": 82 | return cls( 83 | task=_Task.from_task(task_state.task), 84 | step_queue=[ 85 | _TaskStep.from_task_step(step) for step in list(task_state.step_queue) 86 | ], 87 | completed_steps=[ 88 | _TaskStepOutput.from_task_step_output(step) 89 | for step in task_state.completed_steps 90 | ], 91 | ) 92 | 93 | 94 | class _AgentState(BaseModel): 95 | task_dict: Dict[str, _TaskState] 96 | 97 | @classmethod 98 | def from_agent_state(cls, agent_state: AgentState) -> "_AgentState": 99 | return cls( 100 | task_dict={ 101 | task_id: _TaskState.from_task_state(task_state) 102 | for task_id, task_state in agent_state.task_dict.items() 103 | } 104 | ) 105 | 106 | 107 | class _ChatMessage(BaseModel): 108 | content: str 109 | role: str 110 | additional_kwargs: dict 111 | 112 | @classmethod 113 | def from_chat_message(cls, chat_message: ChatMessage) -> "_ChatMessage": 114 | return cls( 115 | content=str(chat_message.content), 116 | role=str(chat_message.role), 117 | additional_kwargs=chat_message.additional_kwargs, 118 | ) 119 |
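# Hypothetical round-trip (illustrative, not part of the public API): these pydantic mirrors let agent state cross the FastAPI boundary, e.g. _AgentState.from_agent_state(state).model_dump() yields a JSON-serializable payload.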
-------------------------------------------------------------------------------- /examples/human-in-the-loop/human_in_the_loop/additional_services/human_in_the_loop.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | import gradio as gr 4 | from llama_agents import ServiceComponent, HumanService 5 | from llama_agents.message_queues.rabbitmq import RabbitMQMessageQueue 6 | from human_in_the_loop.utils import load_from_env 7 | from human_in_the_loop.apps.gradio_app import HumanInTheLoopGradioApp 8 | from typing import Any, Dict 9 | import logging 10 | 11 | logger = logging.getLogger("human_in_the_loop") 12 | logging.basicConfig(level=logging.INFO) 13 | 14 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 15 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 16 | message_queue_host = load_from_env("RABBITMQ_HOST") 17 | message_queue_port = load_from_env("RABBITMQ_NODE_PORT") 18 | message_queue_username = load_from_env("RABBITMQ_DEFAULT_USER") 19 | message_queue_password = load_from_env("RABBITMQ_DEFAULT_PASS") 20 | human_in_the_loop_host = load_from_env("HUMAN_IN_THE_LOOP_HOST") 21 | human_in_the_loop_port = load_from_env("HUMAN_IN_THE_LOOP_PORT") 22 | localhost = load_from_env("LOCALHOST") 23 | 24 | 25 | # human in the loop function 26 | human_input_request_queue: asyncio.Queue[Dict[str, str]] = asyncio.Queue() 27 | human_input_result_queue: asyncio.Queue[str] = asyncio.Queue() 28 | 29 | 30 | async def human_input_fn(prompt: str, task_id: str, **kwargs: Any) -> str: 31 | logger.info("human input fn invoked.") 32 | await human_input_request_queue.put({"prompt": prompt, "task_id": task_id}) 33 | logger.info("placed new prompt in queue.") 34 | 35 | # poll until human answer is stored 36 | async def _poll_for_human_input_result() -> str: 37 | return await human_input_result_queue.get() 38 | 39 | try: 40 | human_input = await asyncio.wait_for( 41 | _poll_for_human_input_result(), 42 | timeout=6000, 43 | ) 44 | logger.info(f"Received human input: {human_input}") 45 | except ( 46 | asyncio.exceptions.TimeoutError, 47 | asyncio.TimeoutError, 48 | TimeoutError, 49 | ): 50 | logger.info(f"Timeout reached for tool_call with prompt {prompt}") 51 | human_input = "Something went wrong."
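    # hand back whatever we have: the human's reply, or the timeout fallback above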
52 | 53 | return human_input 54 | 55 | 56 | # Gradio app 57 | gradio_app = HumanInTheLoopGradioApp( 58 | control_plane_host=control_plane_host, 59 | control_plane_port=int(control_plane_port) if control_plane_port else None, 60 | human_in_loop_queue=human_input_request_queue, 61 | human_in_loop_result_queue=human_input_result_queue, 62 | ) 63 | gradio_app.app.queue() 64 | 65 | # create our multi-agent framework components 66 | message_queue = RabbitMQMessageQueue( 67 | url=f"amqp://{message_queue_username}:{message_queue_password}@{message_queue_host}:{message_queue_port}/" 68 | ) 69 | human_service = HumanService( 70 | message_queue=message_queue, 71 | description="Answers queries about math.", 72 | host=human_in_the_loop_host, 73 | port=int(human_in_the_loop_port) if human_in_the_loop_port else None, 74 | fn_input=human_input_fn, 75 | human_input_prompt="{input_str}", 76 | ) 77 | human_component = ServiceComponent.from_service_definition(human_service) 78 | 79 | app = gr.mount_gradio_app(human_service._app, gradio_app.app, path="/gradio") 80 | 81 | 82 | # launch 83 | async def launch() -> None: 84 | # register to message queue 85 | start_consuming_callable = await human_service.register_to_message_queue() 86 | hs_task = asyncio.create_task(start_consuming_callable()) # noqa: F841 87 | 88 | final_tasks_consuming_callable = await message_queue.register_consumer( 89 | gradio_app._final_task_consumer 90 | ) 91 | ft_task = asyncio.create_task(final_tasks_consuming_callable()) # noqa: F841 92 | 93 | cfg = uvicorn.Config( 94 | app, 95 | host=localhost, 96 | port=human_service.port, 97 | ) 98 | server = uvicorn.Server(cfg) 99 | await server.serve() 100 | 101 | 102 | if __name__ == "__main__": 103 | asyncio.run(launch()) 104 | -------------------------------------------------------------------------------- /examples/kafka/pig-latin-translation/pig_latin_translation/agent_services/remove_ay_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import uvicorn 3 | 4 | from llama_agents import AgentService 5 | from llama_agents.message_queues.apache_kafka import KafkaMessageQueue 6 | 7 | from llama_index.core.agent import FunctionCallingAgentWorker 8 | from llama_index.core.tools import FunctionTool 9 | from llama_index.llms.openai import OpenAI 10 | 11 | from pig_latin_translation.utils import load_from_env 12 | from pig_latin_translation.agent_services.decorators import exponential_delay 13 | 14 | from logging import getLogger 15 | 16 | logger = getLogger(__name__) 17 | 18 | message_queue_host = load_from_env("KAFKA_HOST") 19 | message_queue_port = load_from_env("KAFKA_PORT") 20 | control_plane_host = load_from_env("CONTROL_PLANE_HOST") 21 | control_plane_port = load_from_env("CONTROL_PLANE_PORT") 22 | remove_ay_agent_host = load_from_env("AY_AGENT_HOST") 23 | remove_ay_agent_port = load_from_env("AY_AGENT_PORT") 24 | localhost = load_from_env("LOCALHOST") 25 | 26 | 27 | STARTUP_RATE = 3 28 | SYSTEM_PROMPT = """Pass the entire sentence to the remove 'ay' suffix tool. 29 | The tool will remove 'ay' from every word in the sentence. 30 | Do not send tokens one at a time to the tool! 31 | Do not call the tool more than once! 32 | """ 33 | 34 | 35 | # create an agent 36 | @exponential_delay(STARTUP_RATE) 37 | def sync_remove_ay_suffix(input_sentence: str) -> str: 38 | """Removes 'ay' suffix from each token in the input_sentence. 
39 | 40 | Params: 41 | input_sentence (str): The input sentence i.e., sequence of words 42 | """ 43 | logger.info(f"received task input: {input_sentence}") 44 | tokens = input_sentence.split() 45 | res = " ".join([t[:-2] for t in tokens]) 46 | logger.info(f"Removed 'ay' suffix: {res}") 47 | return res 48 | 49 | 50 | @exponential_delay(STARTUP_RATE) 51 | async def async_remove_ay_suffix(input_sentence: str) -> str: 52 | """Removes 'ay' suffix from each token in the input_sentence. 53 | 54 | Params: 55 | input_sentence (str): The input sentence i.e., sequence of words 56 | """ 57 | logger.info(f"received task input: {input_sentence}") 58 | tokens = input_sentence.split() 59 | res = " ".join([t[:-2] for t in tokens]) 60 | logger.info(f"Removed 'ay' suffix: {res}") 61 | return res 62 | 63 | 64 | tool = FunctionTool.from_defaults( 65 | fn=sync_remove_ay_suffix, async_fn=async_remove_ay_suffix 66 | ) 67 | worker = FunctionCallingAgentWorker.from_tools( 68 | [tool], llm=OpenAI(), system_prompt=SYSTEM_PROMPT, max_function_calls=1 69 | ) 70 | agent = worker.as_agent() 71 | 72 | # create agent server 73 | message_queue = KafkaMessageQueue.from_url_params( 74 | host=message_queue_host, 75 | port=int(message_queue_port) if message_queue_port else None, 76 | ) 77 | 78 | agent_server = AgentService( 79 | agent=agent, 80 | message_queue=message_queue, 81 | description="Removes the 'ay' suffix from each token from a provided input_sentence.", 82 | service_name="remove_ay_agent", 83 | host=remove_ay_agent_host, 84 | port=int(remove_ay_agent_port) if remove_ay_agent_port else None, 85 | ) 86 | 87 | app = agent_server._app 88 | 89 | 90 | # launch 91 | async def launch() -> None: 92 | # register to message queue 93 | start_consuming_callable = await agent_server.register_to_message_queue() 94 | _ = asyncio.create_task(start_consuming_callable()) 95 | 96 | # register to control plane 97 | await agent_server.register_to_control_plane( 98 | control_plane_url=( 99 | f"http://{control_plane_host}:{control_plane_port}" 100 | if control_plane_port 101 | else f"http://{control_plane_host}" 102 | ) 103 | ) 104 | 105 | cfg = uvicorn.Config( 106 | agent_server._app, 107 | host=localhost, 108 | port=agent_server.port, 109 | ) 110 | server = uvicorn.Server(cfg) 111 | await server.serve() 112 | 113 | 114 | if __name__ == "__main__": 115 | asyncio.run(launch()) 116 | -------------------------------------------------------------------------------- /examples/rabbitmq/example-app/kubernetes/ingress_services/funny_agent.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: funny-agent 6 | namespace: llama-agents-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: funny-agent 12 | template: 13 | metadata: 14 | labels: 15 | app: funny-agent 16 | spec: 17 | initContainers: 18 | - name: wait-for-control-plane 19 | image: busybox:1.36 20 | command: 21 | [ 22 | "sh", 23 | "-c", 24 | "until nslookup control-plane.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done", 25 | ] 26 | containers: 27 | - name: funny-agent 28 | env: 29 | - name: RABBITMQ_HOST 30 | valueFrom: 31 | configMapKeyRef: 32 | name: xcore-config 33 | key: RABBITMQ_HOST 34 | - name: RABBITMQ_NODE_PORT 35 | valueFrom: 36 | configMapKeyRef: 37 | name: xcore-config 38 | key: RABBITMQ_NODE_PORT 39 | - name: RABBITMQ_DEFAULT_USER 40 | valueFrom: 41 | configMapKeyRef: 42 | 
name: xcore-config 43 | key: RABBITMQ_DEFAULT_USER 44 | - name: RABBITMQ_DEFAULT_PASS 45 | valueFrom: 46 | configMapKeyRef: 47 | name: xcore-config 48 | key: RABBITMQ_DEFAULT_PASS 49 | - name: CONTROL_PLANE_HOST 50 | valueFrom: 51 | configMapKeyRef: 52 | name: xcore-config 53 | key: CONTROL_PLANE_HOST 54 | - name: CONTROL_PLANE_PORT 55 | valueFrom: 56 | configMapKeyRef: 57 | name: xcore-config 58 | key: CONTROL_PLANE_PORT 59 | - name: FUNNY_AGENT_HOST 60 | valueFrom: 61 | configMapKeyRef: 62 | name: xcore-config 63 | key: FUNNY_AGENT_HOST 64 | - name: FUNNY_AGENT_PORT 65 | valueFrom: 66 | configMapKeyRef: 67 | name: xcore-config 68 | key: FUNNY_AGENT_PORT 69 | - name: LOCALHOST 70 | valueFrom: 71 | configMapKeyRef: 72 | name: xcore-config 73 | key: LOCALHOST 74 | - name: OPENAI_API_KEY 75 | valueFrom: 76 | secretKeyRef: 77 | name: xcore-secret 78 | key: OPENAI_API_KEY 79 | image: multi_agent_app_rabbitmq:latest 80 | imagePullPolicy: Never 81 | command: 82 | [ 83 | "sh", 84 | "-c", 85 | "python -m multi_agent_app_rabbitmq.agent_services.funny_agent", 86 | ] 87 | resources: 88 | requests: 89 | memory: "128Mi" 90 | cpu: "100m" 91 | limits: 92 | memory: "512Mi" 93 | cpu: "500m" 94 | ports: 95 | - containerPort: 8000 96 | 97 | --- 98 | apiVersion: v1 99 | kind: Service 100 | metadata: 101 | labels: 102 | app: funny-agent 103 | name: funny-agent 104 | namespace: llama-agents-demo 105 | spec: 106 | selector: 107 | app: funny-agent 108 | ports: 109 | - protocol: TCP 110 | port: 8000 111 | targetPort: 8000 112 | 113 | --- 114 | apiVersion: networking.k8s.io/v1 115 | kind: Ingress 116 | metadata: 117 | name: funny-agent 118 | namespace: llama-agents-demo 119 | spec: 120 | rules: 121 | - host: funny-agent.127.0.0.1.nip.io 122 | http: 123 | paths: 124 | - path: / 125 | pathType: Prefix 126 | backend: 127 | service: 128 | name: funny-agent 129 | port: 130 | number: 8000 131 | --------------------------------------------------------------------------------
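For orientation, here is a minimal end-to-end sketch of the message-queue API that the tests above exercise. It follows the import paths in the tree shown earlier; PrintingConsumer is an illustrative name, not a class in this repo, and the base import path for BaseMessageQueueConsumer is an assumption based on the package layout.

import asyncio
from typing import Any

from llama_agents.message_queues.simple import SimpleMessageQueue
from llama_agents.messages.base import QueueMessage
from llama_agents.message_consumers.base import BaseMessageQueueConsumer


class PrintingConsumer(BaseMessageQueueConsumer):
    async def _process_message(self, message: QueueMessage, **kwargs: Any) -> None:
        print(f"consumed message {message.id_} of type '{message.type}'")


async def main() -> None:
    mq = SimpleMessageQueue()
    task = await mq.launch_local()  # start the in-process processing loop

    await mq.register_consumer(PrintingConsumer(message_type="demo"))
    await mq.publish(QueueMessage(publisher_id="demo", id_="1", type="demo"))

    await asyncio.sleep(0.5)  # give the loop time to deliver the message
    task.cancel()


if __name__ == "__main__":
    asyncio.run(main())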