├── examples
├── __init__.py
├── cli
│ └── __init__.py
├── custom_agents
│ └── __init__.py
├── deep_code_agent
│ ├── README.md
│ ├── tools
│ │ └── __init__.py
│ ├── sandbox
│ │ ├── __init__.py
│ │ ├── seccomp_profile.py
│ │ └── utils.py
│ ├── .python-version
│ ├── main.py
│ ├── .gitignore
│ ├── build_sandbox.sh
│ ├── api.py
│ ├── observability_globals.py
│ ├── pyproject.toml
│ ├── Dockerfile.sandbox
│ ├── system_prompt.py
│ ├── config.py
│ └── code_agent_runner.py
├── devops_copilot_agent
│ ├── __init__.py
│ ├── tests
│ │ └── __init__.py
│ ├── main.py
│ ├── devops_copilot.git.png
│ ├── monitoring
│ │ ├── grafana-datasources.yml
│ │ ├── prometheus.yml
│ │ └── grafana-provisioning
│ │ │ └── dashboards
│ │ │ └── dashboards.yml
│ ├── pyproject.toml
│ ├── .env.example
│ ├── config.yaml
│ ├── LICENSE
│ ├── Makefile
│ ├── docker-compose.yml
│ ├── .gitignore
│ └── system_prompt.py
└── workflow_agents
│ ├── parallel_agent.py
│ └── sequential_agent.py
├── .python-version
├── src
└── omnicoreagent
│ ├── core
│ ├── tools
│ │ ├── memory_tool
│ │ │ ├── __init__.py
│ │ │ └── base.py
│ │ ├── advance_tools
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ ├── advance_tools_use.py
│ │ └── local_tools_registry.py
│ ├── database
│ │ ├── __init__.py
│ │ └── mongodb.py
│ ├── types.py
│ ├── events
│ │ ├── __init__.py
│ │ ├── in_memory.py
│ │ ├── redis_stream.py
│ │ ├── event_router.py
│ │ └── base.py
│ ├── constants.py
│ ├── skills
│ │ ├── __init__.py
│ │ └── models.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── react_agent.py
│ │ └── types.py
│ ├── memory_store
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── database_memory.py
│ │ └── in_memory.py
│ ├── __init__.py
│ └── llm_support.py
│ ├── omni_agent
│ ├── workflow
│ │ ├── __init__.py
│ │ ├── sequential_agent.py
│ │ └── parallel_agent.py
│ ├── config
│ │ └── __init__.py
│ ├── prompts
│ │ └── prompt_builder.py
│ ├── __init__.py
│ └── background_agent
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── task_registry.py
│ │ └── scheduler_backend.py
│ ├── mcp_clients_connection
│ ├── tools.py
│ ├── __init__.py
│ ├── refresh_server_capabilities.py
│ └── notifications.py
│ └── __init__.py
├── assets
├── IMG_5292.jpeg
└── IMG_5292 (1).jpeg
├── docs
├── requirements.txt
├── getting-started
│ ├── installation.md
│ └── quick-start.md
├── development
│ └── contributing.md
└── README.md
├── .vscode
└── settings.json
├── .gitignore
├── .pre-commit-config.yaml
├── LICENSE
├── tests
├── test_tools.py
├── test_react_agent.py
├── test_base.py
├── test_sampling.py
├── test_llm_support.py
├── test_llm.py
├── test_orchestrator_agent.py
├── test_prompts.py
└── test_main.py
├── docs.sh
├── .github
└── workflows
│ └── python-app.yml
├── pyproject.toml
├── mkdocs.yml
└── CONTRIBUTING.md
/examples/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.12
2 |
--------------------------------------------------------------------------------
/examples/cli/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/custom_agents/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/README.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/tools/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/sandbox/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/.python-version:
--------------------------------------------------------------------------------
1 | 3.13
2 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/tools/memory_tool/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/assets/IMG_5292.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omnirexflora-labs/omnicoreagent/HEAD/assets/IMG_5292.jpeg
--------------------------------------------------------------------------------
/assets/IMG_5292 (1).jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omnirexflora-labs/omnicoreagent/HEAD/assets/IMG_5292%20(1).jpeg
--------------------------------------------------------------------------------
/src/omnicoreagent/core/tools/advance_tools/__init__.py:
--------------------------------------------------------------------------------
1 | from .advanced_tools_use import AdvanceToolsUse
2 |
3 | __all__ = ["AdvanceToolsUse"]
4 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/main.py:
--------------------------------------------------------------------------------
1 | def main():
2 | print("Hello from deep-code-agent!")
3 |
4 |
5 | if __name__ == "__main__":
6 | main()
7 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/main.py:
--------------------------------------------------------------------------------
1 | def main():
2 | print("Hello from devops-copilot-agent!")
3 |
4 |
5 | if __name__ == "__main__":
6 | main()
7 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/devops_copilot.git.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omnirexflora-labs/omnicoreagent/HEAD/examples/devops_copilot_agent/devops_copilot.git.png
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | mkdocs>=1.5.0
2 | mkdocs-material>=9.4.0
3 | mkdocs-minify-plugin>=0.7.0
4 | mkdocs-git-revision-date-localized-plugin>=1.2.0
5 | pymdown-extensions>=10.3.0
6 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.testing.pytestArgs": [
3 | "tests"
4 | ],
5 | "python.testing.unittestEnabled": false,
6 | "python.testing.pytestEnabled": true
7 | }
--------------------------------------------------------------------------------
/examples/deep_code_agent/.gitignore:
--------------------------------------------------------------------------------
1 | # Python-generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 |
9 | # Virtual environments
10 | .venv
11 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/workflow/__init__.py:
--------------------------------------------------------------------------------
1 | from .parallel_agent import ParallelAgent
2 | from .sequential_agent import SequentialAgent
3 | from .router_agent import RouterAgent
4 |
5 |
6 | __all__ = ["ParallelAgent", "SequentialAgent", "RouterAgent"]
7 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/monitoring/grafana-datasources.yml:
--------------------------------------------------------------------------------
1 | # monitoring/grafana-datasources.yml
2 | apiVersion: 1
3 | datasources:
4 | - name: Prometheus
5 | type: prometheus
6 | url: http://prometheus:9090
7 | access: proxy
8 | isDefault: true
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/monitoring/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 5s
3 |
4 | scrape_configs:
5 | - job_name: 'devops-copilot'
6 | static_configs:
7 | # - targets: ['copilot:9091'] # ← Internal metrics port
8 | - targets: ["host.docker.internal:9091"]
--------------------------------------------------------------------------------
/examples/deep_code_agent/build_sandbox.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # deep_coder/build_sandbox.sh
3 | set -e
4 |
5 | echo "Building deepcoder-sandbox image..."
6 | docker build -f Dockerfile.sandbox -t deepcoder-sandbox:1.0 .
7 |
8 | echo "Tagging latest..."
9 | docker tag deepcoder-sandbox:1.0 deepcoder-sandbox:latest
10 |
11 | echo "Image built successfully."
--------------------------------------------------------------------------------
/src/omnicoreagent/core/database/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Database Package
3 |
4 | This package provides database functionality:
5 | - DatabaseMessageStore: SQL-based message storage
6 | - MongoDb: MongoDB connection and operations
7 | """
8 |
9 | from .database_message_store import DatabaseMessageStore
10 |
11 | __all__ = [
12 | "DatabaseMessageStore",
13 | ]
14 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/monitoring/grafana-provisioning/dashboards/dashboards.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 | - name: 'copilot'
5 | orgId: 1
6 | folder: ''
7 | type: file
8 | disableDeletion: false
9 | editable: true
10 | updateIntervalSeconds: 10
11 | allowUiUpdates: true
12 | options:
13 | path: /etc/grafana/provisioning/dashboards
14 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/config/__init__.py:
--------------------------------------------------------------------------------
1 | from .transformer import (
2 | ConfigTransformer,
3 | config_transformer,
4 | ModelConfig,
5 | MCPToolConfig,
6 | AgentConfig,
7 | TransportType,
8 | )
9 |
10 | __all__ = [
11 | "ConfigTransformer",
12 | "config_transformer",
13 | "ModelConfig",
14 | "MCPToolConfig",
15 | "AgentConfig",
16 | "TransportType",
17 | ]
18 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/tools/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Core Tools Package
3 |
4 | This package provides tool management functionality:
5 | - ToolRegistry: Registry for local tools
6 | - Tool: Individual tool representation
7 | """
8 |
9 | from .local_tools_registry import ToolRegistry, Tool
10 | from .advance_tools.advanced_tools_use import AdvanceToolsUse
11 |
12 | __all__ = ["ToolRegistry", "Tool", "AdvanceToolsUse"]
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python-generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 |
9 | # Virtual environments
10 | .venv
11 | .env
12 | servers_config.json
13 | *.log
14 | *.log.*
15 | qdrant_db/
16 | *.db
17 | # MkDocs
18 | site/
19 | .chroma_db/
20 | .chroma_warmup/
21 | ._last_processed.json
22 | ._tools.json
23 | *.db
24 | venv/
25 | memories/
26 | .omniagent_config/*
27 | .agents/skills/*
28 |
29 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/types.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 | class ContextInclusion(str, Enum):
5 | NONE = "none"
6 | THIS_SERVER = "thisServer"
7 | ALL_SERVERS = "allServers"
8 |
9 |
10 | class AgentState(str, Enum):
11 | IDLE = "idle"
12 | RUNNING = "running"
13 | TOOL_CALLING = "tool_calling"
14 | OBSERVING = "observing"
15 | FINISHED = "finished"
16 | ERROR = "error"
17 | STUCK = "stuck"
18 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/events/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Event System Package
3 |
4 | This package provides event handling and routing:
5 | - BaseEventStore: Abstract base for event stores
6 | - InMemoryEventStore: In-memory event storage
7 | - RedisStreamEventStore: Redis stream-based events
8 | - EventRouter: Routes events to appropriate handlers
9 | """
10 |
11 | from .event_router import EventRouter
12 |
13 | __all__ = [
14 | "EventRouter",
15 | ]
16 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/constants.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timezone
2 |
3 | TOOL_ACCEPTING_PROVIDERS = {
4 | "groq",
5 | "openai",
6 | "openrouter",
7 | "gemini",
8 | "deepseek",
9 | "azureopenai",
10 | "anthropic",
11 | "mistral",
12 | }
13 |
14 | AGENTS_REGISTRY = {}
15 | TOOLS_REGISTRY = {}
16 | date_time_func = {
17 | "format_date": lambda data=None: datetime.now(timezone.utc).strftime(
18 | "%Y-%m-%dT%H:%M:%SZ"
19 | )
20 | }
21 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "devops-copilot-agent"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.13"
7 | dependencies = [
8 | "omnicoreagent>=0.2.10",
9 | "opentelemetry-api>=1.38.0",
10 | "opentelemetry-exporter-otlp>=1.38.0",
11 | "opentelemetry-sdk>=1.38.0",
12 | "prometheus-client>=0.23.1",
13 | "python-dotenv>=1.2.1",
14 | "rich>=14.2.0",
15 | "ruff>=0.14.2",
16 | ]
17 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/prompts/prompt_builder.py:
--------------------------------------------------------------------------------
1 | class OmniAgentPromptBuilder:
2 | def __init__(self, system_suffix: str):
3 | self.system_suffix = system_suffix.strip()
4 |
5 | def build(self, *, system_instruction: str) -> str:
6 | if not system_instruction.strip():
7 | raise ValueError("System instruction is required.")
8 |
9 | return f"""
10 | {system_instruction.strip()}
11 |
12 |
13 | {self.system_suffix}
14 | """.strip()
15 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | OmniAgent Package
3 |
4 | This package provides the high-level OmniAgent interface and background agent functionality.
5 | """
6 |
7 | from .agent import OmniAgent
8 | from .background_agent import (
9 | BackgroundOmniAgent,
10 | BackgroundAgentManager,
11 | TaskRegistry,
12 | APSchedulerBackend,
13 | BackgroundTaskScheduler,
14 | )
15 |
16 | __all__ = [
17 | "OmniAgent",
18 | "BackgroundOmniAgent",
19 | "BackgroundAgentManager",
20 | "TaskRegistry",
21 | "APSchedulerBackend",
22 | "BackgroundTaskScheduler",
23 | ]
24 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/skills/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Agent Skills Module
3 |
4 | Provides support for loading and executing Agent Skills following the
5 | agentskills.io specification.
6 |
7 | Skills are reusable capability packages that give agents specialized knowledge
8 | and executable scripts. Each skill lives in a directory with a SKILL.md file.
9 | """
10 |
11 | from omnicoreagent.core.skills.models import SkillMetadata
12 | from omnicoreagent.core.skills.manager import SkillManager
13 | from omnicoreagent.core.skills.tools import build_skill_tools
14 |
15 | __all__ = ["SkillMetadata", "SkillManager", "build_skill_tools"]
16 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/.env.example:
--------------------------------------------------------------------------------
1 | # .env
2 | LLM_API_KEY=sk-your-key-here
3 |
4 | # Redis – ONLY THIS
5 | REDIS_URL=redis://redis:6379/0
6 |
7 | # Storage type
8 | MEMORY_STORE_TYPE=redis
9 | EVENT_STORE_TYPE=redis_stream
10 |
11 | # Observability
12 | OBSERVABILITY_ENABLE_METRICS=true
13 | OBSERVABILITY_LOG_LEVEL=INFO
14 | OBSERVABILITY_LOG_FORMAT=json
15 | OBSERVABILITY_LOG_FILE=/logs/copilot.log
16 | OBSERVABILITY_LOG_MAX_BYTES=10485760
17 | OBSERVABILITY_LOG_BACKUP_COUNT=5
18 |
19 | # Security
20 | SECURITY_ENABLE_RATE_LIMITING=true
21 | SECURITY_RATE_LIMIT_REQUESTS=100
22 | SECURITY_RATE_LIMIT_WINDOW=3600
23 | SECURITY_AUDIT_LOG_FILE=/logs/audit.log
24 |
25 | # Grafana
26 | GRAFANA_PASSWORD=admin123
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/background_agent/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Background Agent System for Self-Flying Automation.
3 |
4 | This module provides a comprehensive system for creating and managing
5 | background agents that can execute tasks automatically.
6 | """
7 |
8 | from .background_agents import BackgroundOmniAgent
9 | from .background_agent_manager import BackgroundAgentManager
10 | from .task_registry import TaskRegistry
11 | from .scheduler_backend import APSchedulerBackend
12 | from .base import BackgroundTaskScheduler
13 |
14 | __all__ = [
15 | "BackgroundOmniAgent",
16 | "BackgroundAgentManager",
17 | "TaskRegistry",
18 | "APSchedulerBackend",
19 | "BackgroundTaskScheduler",
20 | ]
21 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/agents/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | AI Agent Types Package
3 |
4 | This package contains all the different types of AI agents:
5 | - BaseReactAgent: Base class for React-style agents
6 | - ReactAgent: Simple React agent implementation
7 | - TokenUsage: Usage tracking and limits
8 | """
9 |
10 | from .base import BaseReactAgent
11 | from .react_agent import ReactAgent
12 | from .types import AgentConfig, ParsedResponse, ToolCall
13 | from .token_usage import UsageLimits, Usage, UsageLimitExceeded
14 |
15 | __all__ = [
16 | "BaseReactAgent",
17 | "ReactAgent",
18 | "AgentConfig",
19 | "ParsedResponse",
20 | "ToolCall",
21 | "UsageLimits",
22 | "Usage",
23 | "UsageLimitExceeded",
24 | ]
25 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/memory_store/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Memory Store Package
3 |
4 | This package provides different memory storage backends:
5 | - InMemoryStore: Simple in-memory storage
6 | - RedisMemoryStore: Redis-backed storage
7 | - DatabaseMemory: SQL database storage
8 | - MongoDBMemory: MongoDB storage
9 | - MemoryRouter: Routes to appropriate backend
10 | """
11 |
12 | from .base import AbstractMemoryStore
13 | from .in_memory import InMemoryStore
14 | from .redis_memory import RedisMemoryStore
15 | from .database_memory import DatabaseMemory
16 | from .memory_router import MemoryRouter
17 |
18 | __all__ = [
19 | "AbstractMemoryStore",
20 | "InMemoryStore",
21 | "RedisMemoryStore",
22 | "DatabaseMemory",
23 | "MemoryRouter",
24 | ]
25 |
--------------------------------------------------------------------------------
/src/omnicoreagent/mcp_clients_connection/tools.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from omnicoreagent.core.utils import logger
4 |
5 |
6 | async def list_tools(server_names: list[str], sessions: dict[str, dict[str, Any]]):
7 | """List all tools"""
8 | try:
9 | tools = []
10 | for server_name in server_names:
11 | if sessions[server_name]["connected"]:
12 | try:
13 | tools_response = await sessions[server_name]["session"].list_tools()
14 | tools.extend(tools_response.tools)
15 | except Exception:
16 | logger.info(f"{server_name} Does not support tools")
17 | return tools
18 | except Exception as e:
19 | logger.info(f"error listing tools: {e}")
20 | return e
21 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Core AI Agent Framework Components
3 |
4 | This package contains the core AI agent functionality including:
5 | - Agents (React, Sequential)
6 | - Memory Management (In-Memory, Redis, Database, MongoDB)
7 | - LLM Connections and Support
8 | - Event System
9 | - Database Layer
10 | - Tools Management
11 | - Utilities and Constants
12 | """
13 |
14 | from .agents import ReactAgent
15 | from .memory_store import MemoryRouter
16 | from .llm import LLMConnection
17 | from .events import EventRouter
18 | from .database import DatabaseMessageStore
19 | from .tools import ToolRegistry, Tool
20 |
21 | __all__ = [
22 | "ReactAgent",
23 | "MemoryRouter",
24 | "LLMConnection",
25 | "EventRouter",
26 | "DatabaseMessageStore",
27 | "ToolRegistry",
28 | "Tool",
29 | ]
30 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/api.py:
--------------------------------------------------------------------------------
1 | # deep_coder/api.py
2 | from fastapi import FastAPI, HTTPException
3 | from code_agent_runner import DeepCodingAgentRunner
4 | from sandbox.utils import create_tarball
5 | from fastapi.responses import FileResponse
6 |
7 | app = FastAPI()
8 | sessions = {}
9 |
10 |
11 | @app.post("/chat")
12 | async def chat(query: str):
13 | runner = DeepCodingAgentRunner()
14 | sessions[runner.session_id] = runner
15 | result = await runner.handle_chat(query)
16 | return result
17 |
18 |
19 | @app.get("/download/{session_id}.tar.gz")
20 | async def download(session_id: str):
21 | if session_id not in sessions:
22 | raise HTTPException(404)
23 | tar_path = create_tarball(session_id, "./user_workspaces", "./outputs")
24 | return FileResponse(
25 | tar_path, media_type="application/gzip", filename=f"{session_id}.tar.gz"
26 | )
27 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/background_agent/base.py:
--------------------------------------------------------------------------------
1 | """
2 | Base scheduler interface for background agent system.
3 | """
4 |
5 | from abc import ABC, abstractmethod
6 | from typing import Callable
7 |
8 |
9 | class BackgroundTaskScheduler(ABC):
10 | """Base class for background task schedulers."""
11 |
12 | @abstractmethod
13 | def schedule_task(self, agent_id: str, interval: int, task_fn: Callable, **kwargs):
14 | """Schedule a task to run at specified intervals."""
15 | pass
16 |
17 | @abstractmethod
18 | def remove_task(self, agent_id: str):
19 | """Remove a scheduled task."""
20 | pass
21 |
22 | @abstractmethod
23 | def start(self):
24 | """Start the scheduler."""
25 | pass
26 |
27 | @abstractmethod
28 | def shutdown(self):
29 | """Shutdown the scheduler."""
30 | pass
31 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/memory_store/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import List, Optional
3 |
4 |
5 | class AbstractMemoryStore(ABC):
6 | @abstractmethod
7 | def set_memory_config(self, mode: str, value: int = None) -> None:
8 | raise NotImplementedError
9 |
10 | @abstractmethod
11 | async def store_message(
12 | self,
13 | role: str,
14 | content: str,
15 | metadata: dict,
16 | session_id: str,
17 | ) -> None:
18 | raise NotImplementedError
19 |
20 | @abstractmethod
21 | async def get_messages(
22 | self, session_id: str = None, agent_name: str = None
23 | ) -> List[dict]:
24 | raise NotImplementedError
25 |
26 | @abstractmethod
27 | async def clear_memory(
28 | self, session_id: str = None, agent_name: str = None
29 | ) -> None:
30 | raise NotImplementedError
31 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/events/in_memory.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | import asyncio
3 | from typing import AsyncIterator
4 | from omnicoreagent.core.events.base import BaseEventStore, Event
5 |
6 |
7 | class InMemoryEventStore(BaseEventStore):
8 | def __init__(self):
9 | self.logs: dict[str, list[Event]] = defaultdict(list)
10 | self.queues: dict[str, asyncio.Queue] = defaultdict(asyncio.Queue)
11 |
12 | async def append(self, session_id: str, event: Event) -> None:
13 | self.logs[session_id].append(event)
14 | self.queues[session_id].put_nowait(event)
15 |
16 | async def get_events(self, session_id: str) -> list[Event]:
17 | return self.logs[session_id]
18 |
19 | async def stream(self, session_id: str) -> AsyncIterator[Event]:
20 | queue = self.queues[session_id]
21 | while True:
22 | event = await queue.get()
23 | yield event
24 |
--------------------------------------------------------------------------------
/src/omnicoreagent/mcp_clients_connection/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | MCP (Model Context Protocol) Client Package
3 |
4 | This package provides MCP client functionality including:
5 | - MCP Client implementation
6 | - CLI interface
7 | - Resource management
8 | - Tool discovery and management
9 | - Server capabilities refresh
10 | - Notifications and sampling
11 | """
12 |
13 | from .client import MCPClient, Configuration
14 | from .resources import (
15 | list_resources,
16 | read_resource,
17 | subscribe_resource,
18 | unsubscribe_resource,
19 | )
20 | from .tools import list_tools
21 | from .prompts import get_prompt, get_prompt_with_react_agent, list_prompts
22 |
23 | __all__ = [
24 | "MCPClient",
25 | "Configuration",
26 | "list_resources",
27 | "read_resource",
28 | "subscribe_resource",
29 | "unsubscribe_resource",
30 | "list_tools",
31 | "get_prompt",
32 | "get_prompt_with_react_agent",
33 | "list_prompts",
34 | ]
35 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | repos:
4 | - repo: https://github.com/pre-commit/pre-commit-hooks
5 | rev: v5.0.0
6 | hooks:
7 | - id: trailing-whitespace
8 | - id: end-of-file-fixer
9 | exclude: ^.*demographic\.000000$
10 | - id: check-yaml
11 | - id: check-toml
12 | - id: check-added-large-files
13 | args: ["--maxkb", "500"]
14 | exclude: ^.*yarn-.*cjs$
15 | - repo: https://github.com/astral-sh/ruff-pre-commit
16 | rev: v0.11.7 # Specify the latest Ruff version
17 | hooks:
18 | - id: ruff
19 | types: [python]
20 | args: [--fix, --exit-non-zero-on-fix, --line-length=88]
21 |
22 | - id: ruff-format
23 | types: [python]
24 |
25 | - repo: https://github.com/astral-sh/uv-pre-commit
26 | # uv version.
27 | rev: 0.6.13
28 | hooks:
29 | - id: uv-lock
30 | - id: uv-export
31 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/observability_globals.py:
--------------------------------------------------------------------------------
1 | # deep_coder/observability_globals.py
2 | from config import load_config
3 | from observability import (
4 | get_logger,
5 | get_metrics_collector,
6 | AuditLogger,
7 | HealthChecker,
8 | RateLimiter,
9 | )
10 |
11 | CONFIG = load_config()
12 |
13 | # Logger
14 | log = get_logger(
15 | name="deep_coder",
16 | level=CONFIG.observability.log_level,
17 | fmt=CONFIG.observability.log_format,
18 | file=CONFIG.observability.log_file,
19 | max_bytes=CONFIG.observability.log_max_bytes,
20 | backup=CONFIG.observability.log_backup_count,
21 | )
22 |
23 | # Observability singletons
24 | metrics = get_metrics_collector(CONFIG.observability.enable_metrics)
25 | audit = AuditLogger(CONFIG.security.audit_log_file)
26 | health = HealthChecker()
27 | rate_limiter = RateLimiter(
28 | max_req=CONFIG.security.rate_limit_requests,
29 | window=CONFIG.security.rate_limit_window,
30 | )
31 |
32 | # Health checks
33 | health.add("config", lambda: True)
34 | health.add("redis", lambda: CONFIG.storage.memory_store_type == "redis")
35 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/config.yaml:
--------------------------------------------------------------------------------
1 | devops:
2 | timeout_seconds: 60
3 | max_output_chars: 100000
4 | enable_history: true
5 | max_history_size: 500
6 | working_directory: ./workspace
7 |
8 | agent:
9 | name: OmniDevOpsCopilot
10 | max_steps: 20
11 | tool_call_timeout: 30
12 | memory_mode: sliding_window
13 | memory_window_size: 100
14 |
15 |
16 | model:
17 | provider: openai
18 | model: gpt-4.1
19 | temperature: 0.0
20 | max_context_length: 2000
21 | top_p: 0.8
22 | llm_api_key: "comes from .env"
23 |
24 | storage:
25 | memory_store_type: redis
26 | event_store_type: redis_stream
27 | redis_url: redis://localhost:6379/0
28 | redis_max_connections: 10
29 |
30 | security:
31 | enable_rate_limiting: true
32 | rate_limit_requests: 100
33 | rate_limit_window: 3600
34 | enable_audit_logging: true
35 | audit_log_file: audit.log
36 | max_command_length: 10000
37 |
38 | observability:
39 | enable_metrics: true
40 | metrics_port: 9091
41 | log_level: INFO
42 | log_format: json
43 | log_file: copilot.log
44 | log_max_bytes: 10485760
45 | log_backup_count: 5
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Abiola Adeshina
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Abiola Adeshina
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile
2 | .PHONY: help build up down logs cli dashboard prom
3 |
4 | help:
5 | @echo "make build → build images"
6 | @echo "make up → start full stack"
7 | @echo "make down → stop all"
8 | @echo "make logs → follow copilot logs"
9 | @echo "make cli → run DevOps Copilot CLI"
10 | @echo "make dashboard → open Grafana"
	@echo "make prom      → open Prometheus (http://localhost:9090)"
12 |
13 | build:
14 | docker compose build
15 |
16 | up:
17 | @mkdir -p logs
18 | @docker compose up -d
19 | @echo "Stack started!"
20 | @echo " → Prometheus: http://localhost:9090"
21 | @echo " → Grafana: http://localhost:3000"
22 | @echo " → Logs: ./logs/"
23 | @echo " → Copilot metrics: copilot:9091"
24 |
25 | down:
26 | docker compose down
27 |
28 | logs:
29 | docker compose logs -f copilot
30 |
31 | cli:
32 | @echo "Starting OmniDevOpsCopilot CLI..."
33 | @mkdir -p logs
34 | @docker compose up -d
35 | @sleep 5
36 | @docker compose exec -t copilot python devops_copilot_agent.py
37 |
38 | dashboard:
39 | @open http://localhost:3000 2>/dev/null || \
40 | xdg-open http://localhost:3000 2>/dev/null || \
41 | echo "Open Grafana: http://localhost:3000"
42 |
43 | prom:
44 | @open http://localhost:9090 2>/dev/null || \
45 | xdg-open http://localhost:9090 2>/dev/null || \
46 | echo "Open Prometheus: http://localhost:9090"
--------------------------------------------------------------------------------
/src/omnicoreagent/core/events/redis_stream.py:
--------------------------------------------------------------------------------
1 | import redis.asyncio as redis
2 | from typing import AsyncIterator, List
3 | from decouple import config
4 | from omnicoreagent.core.events.base import BaseEventStore, Event
5 |
6 | REDIS_URL = config("REDIS_URL", default="redis://localhost:6379/0")
7 |
8 |
class RedisStreamEventStore(BaseEventStore):
    """Event store backed by Redis Streams, one stream per session."""

    def __init__(self):
        # decode_responses=True so stream fields come back as str, not bytes.
        self.redis = redis.from_url(REDIS_URL, decode_responses=True)

    @staticmethod
    def _stream_key(session_id: str) -> str:
        """Build the Redis stream name for a session."""
        return f"omnicoreagent_events:{session_id}"

    async def append(self, session_id: str, event: Event):
        """Append one serialized event to the session's stream."""
        await self.redis.xadd(self._stream_key(session_id), {"event": event.json()})

    async def get_events(self, session_id: str) -> List[Event]:
        """Return every event recorded for the session, oldest first."""
        raw = await self.redis.xrange(self._stream_key(session_id), min="-", max="+")
        return [Event.parse_raw(fields["event"]) for _entry_id, fields in raw]

    async def stream(self, session_id: str) -> AsyncIterator[Event]:
        """Yield events as they arrive, blocking forever on an empty stream."""
        key = self._stream_key(session_id)
        cursor = "0-0"
        while True:
            # block=0 waits indefinitely for at least one new entry.
            batch = await self.redis.xread({key: cursor}, block=0, count=1)
            if not batch:
                continue
            _stream, entries = batch[0]
            for entry_id, fields in entries:
                cursor = entry_id
                yield Event.parse_raw(fields["event"])
32 |
--------------------------------------------------------------------------------
/src/omnicoreagent/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | OmniCoreAgent AI Framework
3 |
4 | A comprehensive AI agent framework with MCP client capabilities.
5 | """
6 |
7 | from .core.agents import ReactAgent
8 | from .core.memory_store import MemoryRouter
9 | from .core.llm import LLMConnection
10 | from .core.events import EventRouter
11 | from .core.database import DatabaseMessageStore
12 | from .core.tools import ToolRegistry, Tool
13 | from .core.utils import logger
14 |
15 | from .omni_agent.agent import OmniAgent
16 | from .omni_agent.background_agent import (
17 | BackgroundOmniAgent,
18 | BackgroundAgentManager,
19 | TaskRegistry,
20 | APSchedulerBackend,
21 | BackgroundTaskScheduler,
22 | )
23 |
24 | from .mcp_clients_connection import MCPClient, Configuration
25 |
26 | from .omni_agent.workflow.parallel_agent import ParallelAgent
27 | from .omni_agent.workflow.sequential_agent import SequentialAgent
28 | from .omni_agent.workflow.router_agent import RouterAgent
29 |
30 | __all__ = [
31 | "ReactAgent",
32 | "MemoryRouter",
33 | "LLMConnection",
34 | "EventRouter",
35 | "DatabaseMessageStore",
36 | "ToolRegistry",
37 | "Tool",
38 | "logger",
39 | "OmniAgent",
40 | "BackgroundOmniAgent",
41 | "BackgroundAgentManager",
42 | "TaskRegistry",
43 | "APSchedulerBackend",
44 | "BackgroundTaskScheduler",
45 | "ParallelAgent",
46 | "SequentialAgent",
47 | "RouterAgent",
48 | "MCPClient",
49 | "Configuration",
50 | ]
51 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/tools/memory_tool/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Optional
3 |
4 |
class AbstractMemoryBackend(ABC):
    """Interface contract for memory storage backends.

    Concrete backends (local directory, cloud bucket, database, ...)
    implement these operations; the MemoryTool talks to this interface
    only, never to a concrete implementation.
    """

    @abstractmethod
    def view(self, path: Optional[str] = None) -> str:
        """Return a directory listing, or the contents of a single file."""
        ...

    @abstractmethod
    def create_update(self, path: str, file_text: str, mode: str = "create") -> str:
        """Write file_text to path; mode selects create, append, or overwrite."""
        ...

    @abstractmethod
    def str_replace(self, path: str, old_str: str, new_str: str) -> str:
        """Substitute every occurrence of old_str with new_str inside a file."""
        ...

    @abstractmethod
    def insert(self, path: str, insert_line: int, insert_text: str) -> str:
        """Splice insert_text into the file at the given line number."""
        ...

    @abstractmethod
    def delete(self, path: str) -> str:
        """Remove a file or an entire directory."""
        ...

    @abstractmethod
    def rename(self, old_path: str, new_path: str) -> str:
        """Move or rename a file/directory from old_path to new_path."""
        ...

    @abstractmethod
    def clear_all_memory(self) -> str:
        """Wipe the backend's entire storage."""
        ...
48 |
--------------------------------------------------------------------------------
/tests/test_tools.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock
2 |
3 | import pytest
4 |
5 | from omnicoreagent.mcp_omni_connect.tools import list_tools
6 |
# Mock data for testing
# MOCK_TOOLS: tool listings keyed by server name — server1 exposes two
# tools, server2 exposes one.
MOCK_TOOLS = {
    "server1": [
        {"name": "tool1", "description": "Tool 1 description"},
        {"name": "tool2", "description": "Tool 2 description"},
    ],
    "server2": [
        {"name": "tool3", "description": "Tool 3 description"},
    ],
}

# MOCK_SESSIONS: per-server connection state; "session" is populated with an
# AsyncMock inside each test before use.
MOCK_SESSIONS = {
    "server1": {
        "session": None,
        "connected": True,
    },
    "server2": {
        "session": None,
        "connected": True,
    },
}
28 |
29 |
@pytest.mark.asyncio
async def test_list_tools():
    """list_tools aggregates healthy servers and tolerates failing ones."""

    class FakeListToolsResponse:
        """Minimal stand-in for the MCP list_tools response object."""

        def __init__(self, tools):
            self.tools = tools

    sessions = MOCK_SESSIONS.copy()

    # server1 responds normally with its two tools.
    ok_session = AsyncMock()
    ok_session.list_tools.return_value = FakeListToolsResponse(MOCK_TOOLS["server1"])
    sessions["server1"]["session"] = ok_session

    # server2 raises; list_tools should skip it rather than propagate.
    failing_session = AsyncMock()
    failing_session.list_tools.side_effect = Exception("Not supported")
    sessions["server2"]["session"] = failing_session

    tools = await list_tools(
        server_names=["server1", "server2"], sessions=sessions
    )

    # Only server1's two tools survive, and both follow the toolN naming.
    assert len(tools) == 2
    assert all(tool["name"].startswith("tool") for tool in tools)
58 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/agents/react_agent.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 | from typing import Any
3 |
4 | from omnicoreagent.core.agents.base import BaseReactAgent
5 | from omnicoreagent.core.agents.types import AgentConfig
6 |
7 |
class ReactAgent(BaseReactAgent):
    """Concrete ReAct agent.

    Translates an AgentConfig into BaseReactAgent constructor arguments and
    delegates execution to BaseReactAgent.run(), forwarding only the
    keyword arguments the base run() understands.
    """

    # Names _run() extracts from **kwargs and forwards into run().
    _FORWARDED_KWARGS = (
        "sessions",
        "mcp_tools",
        "local_tools",
        "session_id",
        "sub_agents",
    )

    def __init__(self, config: AgentConfig):
        super().__init__(
            agent_name=config.agent_name,
            max_steps=config.max_steps,
            tool_call_timeout=config.tool_call_timeout,
            request_limit=config.request_limit,
            total_tokens_limit=config.total_tokens_limit,
            enable_advanced_tool_use=config.enable_advanced_tool_use,
            memory_tool_backend=config.memory_tool_backend,
            enable_agent_skills=config.enable_agent_skills,
        )

    async def _run(
        self,
        system_prompt: str,
        query: str,
        llm_connection: Callable,
        add_message_to_history: Callable[[str, str, dict | None], Any],
        message_history: Callable[[], Any],
        event_router: Callable,
        debug: bool = False,
        **kwargs,
    ):
        """Delegate to BaseReactAgent.run(); unknown kwargs default to None."""
        forwarded = {name: kwargs.get(name) for name in self._FORWARDED_KWARGS}
        return await self.run(
            system_prompt=system_prompt,
            query=query,
            llm_connection=llm_connection,
            add_message_to_history=add_message_to_history,
            message_history=message_history,
            event_router=event_router,
            debug=debug,
            **forwarded,
        )
47 |
--------------------------------------------------------------------------------
/docs.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Documentation management script for MCPOmni Connect
# Usage: ./docs.sh [serve|build|install|clean|deploy|help]
# All MkDocs invocations run through `uv` so the docs dependency group is used.

# Abort immediately if any command fails.
set -e

# Dispatch on the first argument; default to "help" when none is given.
case "${1:-help}" in
    "serve")
        # Local preview with live reload on port 8080.
        echo "🚀 Starting MkDocs development server..."
        uv run mkdocs serve --dev-addr=127.0.0.1:8080
        ;;
    "build")
        # Produce the static site under ./site/.
        echo "🔨 Building documentation..."
        uv run mkdocs build
        echo "✅ Documentation built successfully!"
        echo "📁 Static files are in: ./site/"
        ;;
    "install")
        # Sync only the docs dependency group into the environment.
        echo "📦 Installing documentation dependencies..."
        uv sync --group docs
        echo "✅ Documentation dependencies installed!"
        ;;
    "clean")
        # Delete the generated static site.
        echo "🧹 Cleaning build artifacts..."
        rm -rf site/
        echo "✅ Build artifacts cleaned!"
        ;;
    "deploy")
        # Build and push to the gh-pages branch; --clean removes stale files.
        echo "🚀 Building and deploying documentation..."
        uv run mkdocs gh-deploy --clean
        echo "✅ Documentation deployed to GitHub Pages!"
        ;;
    "help"|*)
        # Fallthrough for "help" and any unrecognized command.
        echo "📖 MCPOmni Connect Documentation Manager"
        echo ""
        echo "Usage: ./docs.sh [command]"
        echo ""
        echo "Commands:"
        echo "  serve    - Start development server (http://127.0.0.1:8080)"
        echo "  build    - Build static documentation"
        echo "  install  - Install documentation dependencies"
        echo "  clean    - Clean build artifacts"
        echo "  deploy   - Deploy to GitHub Pages"
        echo "  help     - Show this help message"
        echo ""
        echo "Examples:"
        echo "  ./docs.sh serve     # Start development server"
        echo "  ./docs.sh build     # Build for production"
        echo "  ./docs.sh deploy    # Deploy to GitHub Pages"
        ;;
esac
52 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling", "uv-dynamic-versioning"]
3 | build-backend = "hatchling.build"
4 |
5 | [tool.hatch.version]
6 | source = "uv-dynamic-versioning"
7 |
8 | [tool.uv-dynamic-versioning]
9 | vcs = "git"
10 | style = "pep440"
11 | bump = true
12 |
13 | [project]
14 | name = "deep-code-agent"
15 | dynamic = ["version"]
16 | description = "Production-grade deep coding agent with secure Docker sandbox, .env config, and LLM-powered tool execution"
17 | readme = "README.md"
18 | authors = [{ name = "Abiola Adeshina", email = "abiolaadedayo1993@gmail.com" }]
19 | requires-python = ">=3.13"
20 | license = { text = "MIT" }
21 | keywords = ["ai", "agent", "coding", "sandbox", "docker", "llm", "mcp"]
22 | classifiers = [
23 | "Development Status :: 4 - Beta",
24 | "Intended Audience :: Developers",
25 | "License :: OSI Approved :: MIT License",
26 | "Programming Language :: Python :: 3",
27 | "Programming Language :: Python :: 3.13",
28 | ]
29 |
30 | dependencies = [
31 | "pydantic-settings>=2.5.0",
32 | "python-dotenv>=1.2.1",
33 | "prometheus-client>=0.23.1",
34 | "opentelemetry-api>=1.38.0",
35 | "opentelemetry-sdk>=1.38.0",
36 | "opentelemetry-exporter-otlp>=1.38.0",
37 | "docker>=7.1.0",
38 | "aiohttp>=3.9.0",
39 | "rich>=14.2.0",
40 | "omnicoreagent>=0.2.10",
41 | ]
42 |
43 | [project.scripts]
44 | deep-code-agent = "deep_coder.cli:main" # adjust if you add a CLI later
45 |
46 | [project.urls]
47 | Repository = "https://github.com/Abiorh001/deep-code-agent"
48 | # Issues = "https://github.com/Abiorh001/deep-code-agent/issues"
49 |
50 | [tool.hatch.build.targets.wheel]
51 | packages = ["deep_coder"]
52 |
53 | [tool.hatch.build]
54 | packages = ["deep_coder"]
55 | include = [
56 | "LICENSE",
57 | "README.md",
58 | ]
59 |
60 | [dependency-groups]
61 | dev = [
62 | "ruff>=0.14.2",
63 | "pytest>=8.0.0",
64 | "pytest-asyncio>=0.26.0",
65 | "hatch>=1.14.1",
66 | ]
67 |
--------------------------------------------------------------------------------
/.github/workflows/python-app.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: Python application
5 |
6 | on:
7 | push:
8 | branches: [ "main", "dev" ]
9 | pull_request:
10 | branches: [ "main", "dev" ]
11 |
12 | permissions:
13 | contents: read
14 |
15 | jobs:
16 | build:
17 |
18 | runs-on: ubuntu-latest
19 |
20 | steps:
21 | - uses: actions/checkout@v4
22 | - name: Set up Python 3.12
23 | uses: actions/setup-python@v5
24 | with:
25 | python-version: "3.12"
26 |
27 | - name: Install uv
28 | run: |
29 | python -m pip install --upgrade pip
30 | pip install uv
31 |
32 | - name: Check if lock file is in sync
33 | run: |
34 | if ! uv lock --check; then
35 | echo "::error::uv.lock is out of sync with pyproject.toml. Please run 'uv lock' locally and commit the updated lock file."
36 | exit 1
37 | fi
38 |
39 | - name: Set up virtual environment
40 | run: |
41 | uv venv .venv
42 | echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH
43 |
44 | - name: Cache virtual environment
45 | uses: actions/cache@v4
46 | id: venv-cache
47 | with:
48 | path: .venv
49 | key: venv-${{ hashFiles('**/pyproject.toml') }}
50 |
51 | - name: Ensure cache is healthy
52 | if: steps.venv-cache.outputs.cache-hit == 'true'
53 | shell: bash
54 | run: |
55 | timeout 10s .venv/bin/python -m pip --version || rm -rf .venv
56 |
57 | - name: Install dependencies
58 | run: |
59 | uv sync --group dev
60 |
61 | - name: Lint with ruff
62 | run: |
63 | ruff check
64 |
65 | - name: Run unit tests with pytest
66 | run: |
67 | cp test.servers_config.json servers_config.json
68 | uv run pytest -v tests
69 |
70 | # - name: Run integration tests with OpenAIIntegration
71 | # run: |
72 | # uv run pytest -m "OpenAIIntegration" -v
73 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/background_agent/task_registry.py:
--------------------------------------------------------------------------------
1 | """
2 | Task registry for managing background agent task definitions.
3 | """
4 |
5 | from typing import Dict, List, Optional
6 | from omnicoreagent.core.utils import logger
7 |
8 |
class TaskRegistry:
    """In-memory registry mapping agent IDs to task configuration dicts.

    Storage is a plain dict keyed by string agent IDs, so register/remove
    cannot raise for valid inputs — the previous try/except wrappers were
    dead code whose error paths could never fire and would have logged
    misleading messages; they have been removed. Not thread-safe: intended
    for single-event-loop use by the background agent manager.
    """

    def __init__(self):
        # agent_id -> task configuration dict
        self._tasks: Dict[str, Dict] = {}

    def register(self, agent_id: str, config: Dict):
        """Register a task configuration, silently overwriting any existing one."""
        self._tasks[agent_id] = config
        logger.info(f"Registered task for agent: {agent_id}")

    def get(self, agent_id: str) -> Optional[Dict]:
        """Return the task configuration for an agent, or None if unknown."""
        return self._tasks.get(agent_id)

    def all_tasks(self) -> List[Dict]:
        """Return all registered task configurations."""
        return list(self._tasks.values())

    def remove(self, agent_id: str):
        """Remove a task configuration; silently ignores unknown agents."""
        if agent_id in self._tasks:
            del self._tasks[agent_id]
            logger.info(f"Removed task for agent: {agent_id}")

    def exists(self, agent_id: str) -> bool:
        """Check if a task exists for the given agent ID."""
        return agent_id in self._tasks

    def update(self, agent_id: str, config: Dict):
        """Merge *config* into an existing task configuration.

        Raises:
            KeyError: if no task is registered for *agent_id* — unlike
                register(), update() refuses to create a new entry.
        """
        if agent_id not in self._tasks:
            raise KeyError(f"Task for agent {agent_id} not found")
        self._tasks[agent_id].update(config)
        logger.info(f"Updated task for agent: {agent_id}")

    def get_agent_ids(self) -> List[str]:
        """Return all registered agent IDs."""
        return list(self._tasks.keys())

    def clear(self):
        """Remove every registered task."""
        self._tasks.clear()
        logger.info("Cleared all registered tasks")
62 |
--------------------------------------------------------------------------------
/tests/test_react_agent.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock
2 |
3 | import pytest
4 |
5 | from omnicoreagent.core.agents.react_agent import ReactAgent
6 | from omnicoreagent.core.agents.types import AgentConfig
7 |
8 |
@pytest.fixture
def agent_config():
    """Small AgentConfig shared by the tests below."""
    params = dict(
        agent_name="test_agent",
        max_steps=3,
        tool_call_timeout=5,
        request_limit=100,
        total_tokens_limit=1000,
        mcp_enabled=True,
    )
    return AgentConfig(**params)
19 |
20 |
@pytest.fixture
def react_agent(agent_config):
    """ReactAgent built from the shared config fixture."""
    agent = ReactAgent(config=agent_config)
    return agent
24 |
25 |
def test_react_agent_initialization(agent_config):
    """Constructor must copy every config field onto the agent instance."""
    agent = ReactAgent(config=agent_config)

    expected = {
        "agent_name": "test_agent",
        "max_steps": 3,
        "tool_call_timeout": 5,
        "request_limit": 100,
        "total_tokens_limit": 1000,
    }
    for attribute, value in expected.items():
        assert getattr(agent, attribute) == value
    assert agent.mcp_enabled is True
35 |
36 |
@pytest.mark.asyncio
async def test_react_agent_run_executes_run(react_agent, monkeypatch):
    """_run must delegate exactly once to run() and return its result.

    Fix: ReactAgent._run declares `event_router` as a required parameter
    (no default), so the original call without it raised TypeError before
    the delegation under test ever ran.
    """
    mock_response = {"result": "final answer"}
    mock_run = AsyncMock(return_value=mock_response)
    monkeypatch.setattr(react_agent, "run", mock_run)

    result = await react_agent._run(
        system_prompt="You are an agent.",
        query="What's the weather?",
        llm_connection=AsyncMock(),
        add_message_to_history=AsyncMock(),
        message_history=AsyncMock(return_value=[]),
        event_router=AsyncMock(),
        debug=True,
        sessions={"chat_id": "chat123"},
        available_tools=["search"],
        tools_registry={},
        is_generic_agent=False,
        chat_id="chat123",
    )

    mock_run.assert_awaited_once()
    assert result == mock_response
59 |
60 |
@pytest.mark.asyncio
async def test_react_agent_run_with_minimal_kwargs(react_agent, monkeypatch):
    """_run works with only the required arguments and no extra kwargs.

    Fix: `event_router` is a required parameter of ReactAgent._run, so it
    must be supplied — the original call omitted it and raised TypeError.
    """
    monkeypatch.setattr(react_agent, "run", AsyncMock(return_value={"result": "ok"}))

    result = await react_agent._run(
        system_prompt="SysPrompt",
        query="Minimal test",
        llm_connection=AsyncMock(),
        add_message_to_history=AsyncMock(),
        message_history=AsyncMock(),
        event_router=AsyncMock(),
    )

    assert result["result"] == "ok"
74 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | redis:
3 | image: redis:7-alpine
4 | container_name: copilot-redis
5 | restart: unless-stopped
6 | volumes:
7 | - redis-data:/data
8 | ports:
9 | - 6378:6379
10 | command: redis-server --appendonly yes --maxmemory 512mb
11 | healthcheck:
12 | test: ["CMD", "redis-cli", "ping"]
13 | interval: 10s
14 | timeout: 3s
15 | retries: 3
16 | networks:
17 | - copilot-net
18 |
19 | # copilot:
20 | # build: .
21 | # container_name: devops-copilot
22 | # restart: unless-stopped
23 | # env_file: .env
24 | # volumes:
25 | # - workspace:/workspace
26 | # - ./logs:/logs
27 |
28 | # depends_on:
29 | # redis:
30 | # condition: service_healthy
31 |
32 | # networks:
33 | # - copilot-net
34 |
35 | # tty: true
36 | # stdin_open: true
37 | # mem_limit: 512m
38 | # mem_reservation: 256m
39 |
40 | prometheus:
41 | image: prom/prometheus:latest
42 | container_name: copilot-prometheus
43 | restart: unless-stopped
44 | ports:
45 | - "9090:9090"
46 | extra_hosts:
47 | - "host.docker.internal:host-gateway"
48 | volumes:
49 | - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
50 | - prometheus-data:/prometheus
51 | command:
52 | - '--config.file=/etc/prometheus/prometheus.yml'
53 | - '--storage.tsdb.path=/prometheus'
54 | - '--storage.tsdb.retention.time=30d'
55 | # depends_on:
56 | # - copilot
57 | networks:
58 | - copilot-net
59 |
60 | grafana:
61 | image: grafana/grafana:12.2.1
62 | container_name: copilot-grafana
63 | restart: unless-stopped
64 | ports:
65 | - "3000:3000"
66 | extra_hosts:
67 | - "host.docker.internal:host-gateway"
68 | environment:
69 | - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin}
70 | - GF_USERS_ALLOW_SIGN_UP=false
71 | volumes:
72 | - grafana-data:/var/lib/grafana
73 | - ./monitoring/grafana-datasources.yml:/etc/grafana/provisioning/datasources/datasources.yml:ro
74 | - ./monitoring/grafana-provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
75 | depends_on:
76 | - prometheus
77 | networks:
78 | - copilot-net
79 |
80 | volumes:
81 | workspace:
82 | redis-data:
83 | prometheus-data:
84 | grafana-data:
85 |
86 | networks:
87 | copilot-net:
88 | driver: bridge
--------------------------------------------------------------------------------
/src/omnicoreagent/core/tools/advance_tools_use.py:
--------------------------------------------------------------------------------
1 | from omnicoreagent.core.tools.advance_tools.advanced_tools_use import AdvanceToolsUse
2 | from omnicoreagent.core.tools.local_tools_registry import ToolRegistry
3 |
4 |
async def build_tool_registry_advance_tools_use(registry: ToolRegistry) -> ToolRegistry:
    """Register the built-in ``tools_retriever`` tool on *registry*.

    The tool performs BM25-based semantic search over the system's tool
    catalog so the agent can discover capabilities on demand.

    Fix: the function is annotated ``-> ToolRegistry`` but previously fell
    off the end and returned None; it now returns the registry, which is
    backward-compatible for callers that ignored the return value.
    """

    @registry.register_tool(
        name="tools_retriever",
        description="""
    Searches the system's tool catalog using semantic BM25 matching to discover available capabilities.

    Use this to find tools that can fulfill user requests. Search before claiming any functionality
    is unavailable. Returns up to 5 relevant tools.
    """,
        inputSchema={
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": """
                Semantic search query describing the desired functionality.

                Include: [ACTION VERB] + [TARGET OBJECT] + [RELEVANT CONTEXT]

                Examples:
                - "send email message with attachments to recipients"
                - "get weather forecast temperature for location"
                - "create calendar event with date time participants"
                - "search documents files by keyword content"
                - "analyze text extract keywords sentiment"

                Length: 30-300 characters optimal for best BM25 matching.
                """,
                    "minLength": 30,
                    "maxLength": 500,
                }
            },
            "required": ["query"],
            "additionalProperties": False,
        },
    )
    async def tools_retriever(
        query: str,
    ):
        """
        Discover available tools using BM25 semantic search.

        Parameters
        ----------
        query : str
            Natural language query: [action] [object] [context]
            Example: "send email with attachments to recipient"

        Returns
        -------
        dict
            {
                "status": "success" | "error",
                "data": List of up to 5 tools with descriptions and parameters
            }
        """
        tool_retriever = await AdvanceToolsUse().tools_retrieval(
            query=query,
        )

        return {"status": "success", "data": str(tool_retriever)}

    # Return the (mutated) registry so the annotated return type holds.
    return registry
66 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/memory_store/database_memory.py:
--------------------------------------------------------------------------------
1 | from omnicoreagent.core.memory_store.base import AbstractMemoryStore
2 | from omnicoreagent.core.database.database_message_store import DatabaseMessageStore
3 | from omnicoreagent.core.utils import logger
4 | from typing import Optional
5 |
6 |
class DatabaseMemory(AbstractMemoryStore):
    """Session message memory persisted through DatabaseMessageStore.

    Thin delegating wrapper: every operation is forwarded to the underlying
    database-backed message store.
    """

    def __init__(self, db_url: str = None):
        """Connect the backing store.

        Raises:
            ValueError: if no database URL is supplied.
        """
        if db_url is None:
            raise ValueError("Database URL is required for DatabaseMemory")

        self.db_url = db_url
        self.db_session = DatabaseMessageStore(db_url=db_url)

        # Default policy: sliding window over the last 10k messages.
        mode, value = "sliding_window", 10000
        self.memory_config = {"mode": mode, "value": value}
        self.db_session.set_memory_config(mode, value)

    def set_memory_config(self, mode: str, value: int = None) -> None:
        """Apply a new memory policy here and on the backing store."""
        self.memory_config.update(mode=mode, value=value)
        self.db_session.set_memory_config(mode, value)

    async def store_message(
        self,
        role: str,
        content: str,
        metadata: dict | None = None,
        session_id: str = None,
    ) -> None:
        """Persist a single message under session_id."""
        await self.db_session.store_message(
            role=role, content=content, metadata=metadata, session_id=session_id
        )

    async def get_messages(self, session_id: str = None, agent_name: str = None):
        """Fetch all stored messages for a session as a list of dicts."""
        return await self.db_session.get_messages(
            session_id=session_id, agent_name=agent_name
        )

    async def clear_memory(
        self,
        session_id: str = None,
        agent_name: str = None,
    ) -> None:
        """Drop every stored message for the given session."""
        await self.db_session.clear_memory(session_id=session_id, agent_name=agent_name)
66 |
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | not_ready/
3 |
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # Python
9 | *.pyc
10 | *.pyo
11 | *.pyd
12 | __pycache__/
13 | *.env
14 |
15 | # Django
16 | *.log
17 | *.pot
18 | *.pyc
19 | __pycache__/
20 | local_settings.py
21 | db.sqlite3
22 | media
23 |
24 | # Environments
25 | .env
26 | .venv
27 | env/
28 | venv/
29 | ENV/
30 | env.bak/
31 | venv.bak/
32 |
33 | # VS Code
34 | .vscode/
35 |
36 | # macOS
37 | .DS_Store
38 |
39 | # MySQL
40 | *.sql
41 | *.sql.gz
42 |
43 | # Byte-compiled / optimized / DLL files
44 | *.py[cod]
45 | *$py.class
46 |
47 | # C extensions
48 | *.so
49 |
50 | # Distribution / packaging
51 | .Python
52 | build/
53 | develop-eggs/
54 | dist/
55 | downloads/
56 | eggs/
57 | .eggs/
58 | lib/
59 | lib64/
60 | parts/
61 | sdist/
62 | var/
63 | wheels/
64 | share/python-wheels/
65 | *.egg-info/
66 | .installed.cfg
67 | *.egg
68 |
69 | # PyInstaller
70 | # Usually these files are written by a python script from a template
71 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
72 | *.manifest
73 | *.spec
74 |
75 | # Installer logs
76 | pip-log.txt
77 | pip-delete-this-directory.txt
78 |
79 | # Unit test / coverage reports
80 | htmlcov/
81 | .tox/
82 | .nox/
83 | .coverage
84 | .coverage.*
85 | .cache
86 | nosetests.xml
87 | coverage.xml
88 | *.cover
89 | *.py,cover
90 | .hypothesis/
91 | .pytest_cache/
92 | cover/
93 |
94 | # Translations
95 | *.mo
96 | *.pot
97 |
98 | # Django stuff:
99 | *.log
100 | local_settings.py
101 | db.sqlite3
102 | media
103 |
104 | # Flask stuff:
105 | instance/
106 | .webassets-cache
107 |
108 | # Scrapy stuff:
109 | .scrapy
110 |
111 | # Sphinx documentation
112 | docs/_build/
113 |
114 | # PyBuilder
115 | target/
116 |
117 | # Jupyter Notebook
118 | .ipynb_checkpoints
119 |
120 | # IPython
121 | profile_default/
122 | ipython_config.py
123 |
124 | # pyenv
125 | .python-version
126 |
127 | # celery beat schedule file
128 | celerybeat-schedule
129 |
130 | # SageMath parsed files
131 | *.sage.py
132 |
133 | # Environments
134 | .env
135 | .venv
136 | env/
137 | venv/
138 | ENV/
139 | env.bak/
140 | venv.bak/
141 |
142 | # Spyder project settings
143 | .spyderproject
144 | .spyproject
145 |
146 | # Rope project settings
147 | .ropeproject
148 |
149 | # mkdocs documentation
150 | /site
151 |
152 | # mypy
153 | .mypy_cache/
154 | .dmypy.json
155 | dmypy.json
156 |
157 | # Pyre type checker
158 | .pyre/
159 | logs/
--------------------------------------------------------------------------------
/src/omnicoreagent/core/llm_support.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 |
4 | class LLMToolSupport:
5 | """Class to handle LLM tool support checking"""
6 |
7 | MODEL_TOOL_SUPPORT = {
8 | "openai": {
9 | "provider": "openai",
10 | "models": None,
11 | },
12 | "groq": {
13 | "provider": "groq",
14 | "models": None,
15 | },
16 | "openrouter": {
17 | "provider": "openrouter",
18 | "models": ["openai", "anthropic", "groq", "mistralai", "gemini"],
19 | },
20 | "gemini": {
21 | "provider": "gemini",
22 | "models": None,
23 | },
24 | "deepseek": {
25 | "provider": "deepseek",
26 | "models": None,
27 | },
28 | "azureopenai": {
29 | "provider": "azureopenai",
30 | "models": None,
31 | },
32 | "anthropic": {
33 | "provider": "anthropic",
34 | "models": None,
35 | },
36 | "mistral": {
37 | "provider": "mistral",
38 | "models": None,
39 | },
40 | }
41 |
42 | @classmethod
43 | def check_tool_support(cls, llm_config: dict[str, Any]) -> bool:
44 | """Check if the current LLM configuration supports tools.
45 |
46 | Args:
47 | llm_config: LLM configuration dictionary containing model and provider
48 |
49 | Returns:
50 | bool: True if the LLM supports tools, False otherwise
51 | """
52 | model = llm_config.get("model", "")
53 | model_provider = llm_config.get("provider", "").lower()
54 |
55 | for provider_info in cls.MODEL_TOOL_SUPPORT.values():
56 | if provider_info["provider"] in model_provider:
57 | if provider_info["models"] is None:
58 | return True
59 | else:
60 | return any(
61 | supported_model in model
62 | for supported_model in provider_info["models"]
63 | )
64 |
65 | return False
66 |
67 | @classmethod
68 | def get_supported_models(cls, provider: str) -> list[str] | None:
69 | """Get list of supported models for a provider.
70 |
71 | Args:
72 | provider: The provider name
73 |
74 | Returns:
75 | Optional[List[str]]: List of supported models or None if all models are supported
76 | """
77 | for provider_info in cls.MODEL_TOOL_SUPPORT.values():
78 | if provider_info["provider"] == provider.lower():
79 | return provider_info["models"]
80 | return None
81 |
--------------------------------------------------------------------------------
/src/omnicoreagent/mcp_clients_connection/refresh_server_capabilities.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 | from typing import Any
3 | from omnicoreagent.core.utils import logger
4 |
5 |
async def refresh_capabilities(
    sessions: dict[str, Any],
    server_names: list[str],
    available_tools: dict[str, Any],
    available_resources: dict[str, Any],
    available_prompts: dict[str, Any],
    debug: bool,
) -> None:
    """Re-query each server for its tools, resources, and prompts.

    Results are written in place into the ``available_*`` dicts keyed by
    server name; a capability a server does not expose becomes an empty list.

    Raises:
        ValueError: if any server in ``server_names`` is not connected.
    """
    # (log label, session method name, response attribute, destination dict)
    capability_table = (
        ("tools", "list_tools", "tools", available_tools),
        ("resources", "list_resources", "resources", available_resources),
        ("prompts", "list_prompts", "prompts", available_prompts),
    )

    for server_name in server_names:
        if not sessions.get(server_name, {}).get("connected", False):
            raise ValueError(f"Not connected to server: {server_name}")

        session = sessions[server_name].get("session")
        if not session:
            logger.warning(f"No session found for server: {server_name}")
            continue

        for label, method_name, attr, destination in capability_table:
            try:
                response = await getattr(session, method_name)()
                destination[server_name] = getattr(response, attr) if response else []
            except Exception as e:
                logger.info(f"{server_name} does not support {label}: {e}")
                destination[server_name] = []

    if debug:
        logger.info(f"Refreshed capabilities for {server_names}")

        for category, data in (
            ("Tools", available_tools),
            ("Resources", available_resources),
            ("Prompts", available_prompts),
        ):
            logger.info(f"Available {category.lower()} by server:")
            for server_name, items in data.items():
                logger.info(f"  {server_name}:")
                for item in items:
                    logger.info(f"    - {item.name}")

        # NOTE(review): nothing in this function actually rewrites a system
        # prompt; the log line below mirrors the original message — confirm.
        logger.info("Updated system prompt with new capabilities")
67 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/skills/models.py:
--------------------------------------------------------------------------------
1 | """
2 | Pydantic models for Agent Skills.
3 |
4 | Follows the Agent Skills specification from agentskills.io.
5 | """
6 |
7 | import re
8 | from pathlib import Path
9 | from typing import Optional, Dict, List
10 |
11 | from pydantic import BaseModel, Field, field_validator
12 |
13 |
class SkillMetadata(BaseModel):
    """
    Represents the YAML frontmatter metadata from a SKILL.md file.

    Required fields:
    - name: 1-64 chars, lowercase alphanumeric + hyphens
    - description: 1-1024 chars, describes what skill does and when to use it

    Optional fields:
    - license: License applied to the skill
    - compatibility: Environment requirements (1-500 chars)
    - metadata: Custom key-value pairs
    - allowed_tools: Space-delimited list of pre-approved tools
    """

    name: str = Field(
        ...,
        min_length=1,
        max_length=64,
        description="Skill identifier, must be lowercase alphanumeric with hyphens",
    )
    description: str = Field(
        ...,
        min_length=1,
        max_length=1024,
        description="What the skill does and when to use it",
    )
    # NOTE(review): `path` is not part of the frontmatter listed above;
    # presumably filled in by the skill loader after resolving the skill
    # directory on disk — confirm against the loader.
    path: Path = Field(..., description="Resolved path to the skill directory")
    license: Optional[str] = Field(
        default=None, description="License applied to the skill"
    )
    compatibility: Optional[str] = Field(
        default=None, max_length=500, description="Environment requirements"
    )
    metadata: Optional[Dict[str, str]] = Field(
        default=None, description="Custom key-value pairs"
    )
    # The alias matches the spec's hyphenated YAML key ("allowed-tools").
    allowed_tools: Optional[List[str]] = Field(
        default=None, alias="allowed-tools", description="List of pre-approved tools"
    )

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str) -> str:
        """
        Validate skill name follows the specification:
        - Lowercase alphanumeric characters and hyphens only
        - Must not start or end with hyphen
        - Must not contain consecutive hyphens
        """
        # Charset check first: anything outside [a-z0-9-] is rejected outright.
        if not re.match(r"^[a-z0-9-]+$", v):
            raise ValueError(
                "Skill name must contain only lowercase letters, numbers, and hyphens"
            )

        # Positional hyphen rules are checked after the charset passes, so a
        # name like "-abc" gets this more specific error message.
        if v.startswith("-") or v.endswith("-"):
            raise ValueError("Skill name must not start or end with a hyphen")

        if "--" in v:
            raise ValueError("Skill name must not contain consecutive hyphens")

        return v

    # populate_by_name=True lets callers construct with either the Python
    # field name ("allowed_tools") or the YAML alias ("allowed-tools").
    model_config = {"frozen": False, "populate_by_name": True}
78 |
--------------------------------------------------------------------------------
/tests/test_base.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock
2 |
3 | import pytest
4 |
5 | from omnicoreagent.core.agents.base import BaseReactAgent
6 |
7 |
@pytest.fixture
def agent():
    """Fresh BaseReactAgent configured with small limits for fast tests."""
    limits = {
        "agent_name": "test_agent",
        "max_steps": 5,
        "tool_call_timeout": 10,
        "request_limit": 5,
        "total_tokens_limit": 1000,
    }
    return BaseReactAgent(**limits)
17 |
18 |
@pytest.mark.asyncio
async def test_extract_action_json_valid(agent):
    """A well-formed Action JSON payload is detected and parsed."""
    llm_output = 'Action: {"tool": "search", "input": "weather"}'
    parsed = await agent.extract_action_json(llm_output)
    assert parsed["action"] is True
    assert "data" in parsed
25 |
26 |
@pytest.mark.asyncio
async def test_extract_action_json_missing_action(agent):
    """Text without an Action marker is rejected with an error."""
    parsed = await agent.extract_action_json("Do something without action")
    assert parsed["action"] is False
    assert "error" in parsed
33 |
34 |
@pytest.mark.asyncio
async def test_extract_action_json_unbalanced(agent):
    """Truncated JSON after the Action marker reports unbalanced braces."""
    llm_output = 'Action: {"tool": "search", "input": "weather"'
    parsed = await agent.extract_action_json(llm_output)
    assert parsed["action"] is False
    assert "Unbalanced" in parsed["error"]
41 |
42 |
@pytest.mark.asyncio
async def test_extract_action_or_answer_with_final_answer(agent):
    """A Final Answer line is surfaced as the answer payload."""
    outcome = await agent.extract_action_or_answer("Final Answer: It is sunny today.")
    assert outcome.answer == "It is sunny today."
48 |
49 |
@pytest.mark.asyncio
async def test_extract_action_or_answer_with_action(agent):
    """An Action payload is surfaced as an action carrying string data."""
    outcome = await agent.extract_action_or_answer(
        'Action: {"tool": "search", "input": "news"}'
    )
    assert outcome.action is True
    assert isinstance(outcome.data, str)
56 |
57 |
@pytest.mark.asyncio
async def test_extract_action_or_answer_fallback(agent):
    """Plain text with neither marker falls back to being the answer."""
    text = "This is just a general response."
    outcome = await agent.extract_action_or_answer(text)
    assert outcome.answer == text
63 |
64 |
@pytest.mark.asyncio
async def test_tool_call_execution(agent):
    """A tool response already stored in state does not block a final answer."""
    # Simulate a tool call result injected into the agent state.
    agent.state.tool_response = "The weather is sunny."

    # The next assistant message builds on that tool response.
    follow_up = await agent.extract_action_or_answer(
        "Final Answer: The tool said it is sunny."
    )
    assert follow_up.answer == "The tool said it is sunny."
75 |
76 |
@pytest.mark.asyncio
async def test_update_llm_working_memory_empty(agent):
    """An empty message history leaves the agent's working memory empty."""
    empty_history = AsyncMock(return_value=[])
    await agent.update_llm_working_memory(empty_history, "chat456")
    assert "test_agent" not in agent.messages or not agent.messages["test_agent"]
82 |
--------------------------------------------------------------------------------
/tests/test_sampling.py:
--------------------------------------------------------------------------------
1 | import json
2 | from unittest.mock import MagicMock, patch
3 |
4 | import pytest
5 |
6 | from omnicoreagent.mcp_omni_connect.sampling import samplingCallback
7 | from omnicoreagent.core.types import ContextInclusion
8 |
9 |
@pytest.mark.asyncio
async def test_load_model():
    """load_model reads the LLM models and provider from the config file."""
    callback = samplingCallback()

    fake_config = {
        "LLM": {
            "provider": "openai",
            "model": ["gpt-3.5-turbo", "gpt-4"],
        }
    }
    # Stub out the config file so load_model sees our JSON payload.
    with patch("builtins.open", new_callable=MagicMock) as mock_open:
        file_handle = mock_open.return_value.__enter__.return_value
        file_handle.read.return_value = json.dumps(fake_config)

        available_models, provider = await callback.load_model()

        assert available_models == ["gpt-3.5-turbo", "gpt-4"]
        assert provider == "openai"
32 |
33 |
@pytest.mark.asyncio
async def test_select_model_no_preferences():
    """Without preferences, _select_model falls back to the first model."""
    callback = samplingCallback()
    candidates = ["gpt-3.5-turbo", "gpt-4"]

    chosen = await callback._select_model(None, candidates)
    assert chosen == "gpt-3.5-turbo"
46 |
47 |
@pytest.mark.asyncio
async def test_get_context_no_inclusion():
    """ContextInclusion.NONE yields an empty context string."""
    callback = samplingCallback()
    assert await callback._get_context(ContextInclusion.NONE) == ""
57 |
58 |
@pytest.mark.asyncio
async def test_get_context_this_server():
    """THIS_SERVER inclusion returns only the named server's history."""
    callback = samplingCallback()
    callback.sessions = {"server_1": {"message_history": ["message 1", "message 2"]}}

    context = await callback._get_context(ContextInclusion.THIS_SERVER, "server_1")
    assert context == "message 1\nmessage 2"
71 |
72 |
@pytest.mark.asyncio
async def test_get_context_all_servers():
    """ALL_SERVERS inclusion concatenates every server's history."""
    callback = samplingCallback()
    callback.sessions = {
        "server_1": {"message_history": ["message 1"]},
        "server_2": {"message_history": ["message 2"]},
    }

    context = await callback._get_context(ContextInclusion.ALL_SERVERS)
    assert context == "message 1\nmessage 2"
88 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/sandbox/seccomp_profile.py:
--------------------------------------------------------------------------------
# Minimal seccomp profile: allow only essential syscalls for user-space code execution.
# Every syscall below gets the same SCMP_ACT_ALLOW action, so the profile is
# generated from a flat allow-list (order preserved from the original profile).
_ALLOWED_SYSCALLS = (
    "read",
    "write",
    "open",
    "openat",
    "close",
    "execve",
    "brk",
    "mmap",
    "munmap",
    "exit_group",
    "fstat",
    "lseek",
    "rt_sigreturn",
    "clone",
    "wait4",
    "getpid",
    "arch_prctl",
    "set_tid_address",
    "set_robust_list",
    "prctl",
    "uname",
    "access",
    "getcwd",
    "readlink",
    "mprotect",
    "sigaltstack",
    "statfs",
    "getdents64",
    "fcntl",
    "ioctl",
    "dup2",
    "pipe",
    "socket",  # for localhost-only if needed
    "connect",
    "sendto",
    "recvfrom",
    "shutdown",
    "bind",
    "listen",
    "accept",
    "getsockname",
    "getpeername",
    "setsockopt",
    "getsockopt",
)

SECCOMP_PROFILE = {
    # Everything not explicitly allowed fails with an errno.
    "defaultAction": "SCMP_ACT_ERRNO",
    "syscalls": [
        {"name": name, "action": "SCMP_ACT_ALLOW"} for name in _ALLOWED_SYSCALLS
    ],
}
51 |
--------------------------------------------------------------------------------
/tests/test_llm_support.py:
--------------------------------------------------------------------------------
1 | from omnicoreagent.core.llm_support import LLMToolSupport
2 |
3 |
class TestLLMToolSupport:
    """Tests for LLMToolSupport.check_tool_support / get_supported_models."""

    @staticmethod
    def _supports(provider: str, model: str) -> bool:
        """Shorthand: tool-support check for one provider/model pair."""
        return LLMToolSupport.check_tool_support({"provider": provider, "model": model})

    def test_check_tool_support_openai(self):
        """OpenAI supports tools on every model."""
        for model in ("gpt-4", "gpt-3.5-turbo"):
            assert self._supports("openai", model) is True

    def test_check_tool_support_groq(self):
        """Groq supports tools on every model."""
        for model in ("mixtral-8x7b-32768", "llama2-70b-4096"):
            assert self._supports("groq", model) is True

    def test_check_tool_support_openrouter(self):
        """OpenRouter supports tools only for whitelisted upstream prefixes."""
        for model in (
            "openai/gpt-4",
            "anthropic/claude-3-opus",
            "groq/mixtral-8x7b",
            "mistralai/mistral-7b",
            "gemini/gemini-pro",
        ):
            assert self._supports("openrouter", model) is True

        # Anything outside the whitelist is rejected.
        assert self._supports("openrouter", "unsupported-model") is False

    def test_check_tool_support_unsupported_provider(self):
        """Unknown providers report no tool support."""
        assert self._supports("unsupported", "any-model") is False

    def test_get_supported_models(self):
        """None means 'all models'; OpenRouter exposes an explicit list."""
        assert LLMToolSupport.get_supported_models("openai") is None
        assert LLMToolSupport.get_supported_models("groq") is None

        openrouter_models = LLMToolSupport.get_supported_models("openrouter")
        assert openrouter_models is not None
        assert len(openrouter_models) == 5
        for expected in ("openai", "anthropic", "groq", "mistralai", "gemini"):
            assert expected in openrouter_models

        # Unknown providers also return None (same as 'all models supported').
        assert LLMToolSupport.get_supported_models("unsupported") is None
65 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/Dockerfile.sandbox:
--------------------------------------------------------------------------------
# deep_coder/Dockerfile.sandbox
# Sandbox image for the deep-code agent: Ubuntu LTS base, broad dev toolchain,
# unprivileged "coder" user, Python venv + user-local npm prefix.
FROM ubuntu:24.04

# PATH pre-pends the npm prefix and venv created later in the build, so
# interactive shells and exec'd commands pick up user-installed tools first.
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONUNBUFFERED=1 \
    PATH="/home/coder/.local/bin:/home/coder/venv/bin:$PATH"

# Install comprehensive developer toolset
# NOTE: the indented "# ..." category lines below are full-line comments; the
# Dockerfile parser strips them before the shell sees the continuation.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    # Core
    ca-certificates \
    curl \
    wget \
    git \
    bash \
    zsh \
    sudo \
    # Build & C
    build-essential \
    gcc \
    g++ \
    make \
    cmake \
    pkg-config \
    # Python
    python3 \
    python3-pip \
    python3-venv \
    python3-dev \
    python3-setuptools \
    # Node.js & JS
    nodejs \
    npm \
    # Editors
    nano \
    vim \
    less \
    # Shell utils
    jq \
    yq \
    tree \
    htop \
    psmisc \
    procps \
    net-tools \
    iproute2 \
    # Text processing
    grep \
    sed \
    gawk \
    xz-utils \
    unzip \
    zip \
    # Version control
    rsync \
    openssh-client \
    # Shell linting
    shellcheck \
    # Other essentials
    locales \
    tzdata \
    && rm -rf /var/lib/apt/lists/*

# Generate en_US.UTF-8 locale
RUN locale-gen en_US.UTF-8

ENV LANG=en_US.UTF-8 \
    LANGUAGE=en_US:en \
    LC_ALL=en_US.UTF-8

# Create unprivileged user (avoid UID/GID conflicts)
RUN groupadd -g 1001 coder && \
    useradd -u 1001 -g 1001 --create-home --shell /bin/bash coder

# Set up workspace with proper ownership
RUN mkdir -p /home/coder/workspace && \
    chown -R coder:coder /home/coder

# Switch to non-root user
# All RUN instructions below execute as "coder", so installs are user-scoped.
USER coder
WORKDIR /home/coder

# Create Python virtual environment
RUN python3 -m venv venv

# Configure npm for user install
RUN mkdir -p /home/coder/.local && \
    npm config set prefix '/home/coder/.local'

# Upgrade pip and install core Python tools
RUN /home/coder/venv/bin/pip install --no-cache-dir --upgrade pip setuptools wheel

# Install TypeScript, ts-node, and common JS tools
RUN npm install -g \
    typescript@5.4.5 \
    ts-node@10.9.2 \
    eslint@8.57.0 \
    prettier@3.3.3

# Install Python dev tools in venv
RUN /home/coder/venv/bin/pip install --no-cache-dir \
    pytest \
    pytest-asyncio \
    black \
    flake8 \
    mypy \
    pylint \
    types-requests \
    types-redis

# Final workspace
WORKDIR /home/coder/workspace

# Keep container alive indefinitely
CMD ["tail", "-f", "/dev/null"]
--------------------------------------------------------------------------------
/examples/devops_copilot_agent/system_prompt.py:
--------------------------------------------------------------------------------
def get_system_prompt(allowed_commands: set) -> str:
    """Build the DevOps Copilot system prompt for LLM integration.

    Args:
        allowed_commands: Set of whitelisted shell command names; a sorted,
            comma-separated preview of them is interpolated into the prompt.

    Returns:
        The complete system prompt string.

    NOTE(review): the prompt's "XML format" examples render with empty
    backticks (the tag names, e.g. something like <reasoning>/<answer>,
    appear to have been stripped at some point) — confirm against the
    original prompt before relying on the format instruction.
    """
    # Rebind the parameter to a display string: sorted command names plus a
    # trailing ", ..." to signal the list is not exhaustive.
    allowed_commands = ", ".join(list(sorted(allowed_commands))) + ", ..."
    return f"""You are DevOps Copilot, a production-grade secure Bash assistant for DevOps tasks, including file operations, system monitoring, container management, Kubernetes, log analysis, configuration auditing, and workspace reporting. You use only safe, allowed commands in a secure sandboxed environment.

### Allowed Commands
{allowed_commands}
See full list in ALLOWED_COMMANDS set. Key tools include:
- File operations: ls, cat, grep, find, awk, sed, touch, mkdir, cp, mv
- System monitoring: df, du, free, top, htop, vmstat, iostat, sar
- Version control: git (read-only, e.g., log, status, diff)
- Containers: docker (read-only, e.g., ps, logs, inspect)
- Kubernetes: kubectl (read-only, e.g., get, describe, logs)
- Log analysis: journalctl, dmesg, grep, jq, yq
- Networking: ping, traceroute, netstat, ss, curl, wget

### Execution Environment
- Commands execute in a **secure sandboxed environment**
- Supports **pipes (`|`)**, **redirections (`>`, `>>`)**, **conditionals (`&&`, `||`)**, and **control structures**
- Complex quoting is fully supported (e.g., 'text ; with | special chars')
- Disallowed commands return `[BLOCKED]` in `stderr`
- **Allowed parts execute** — always check `stdout` and `stderr`
- Responses must be in XML format: `Your reasoningYour answer`

### Response Format
Every command returns:
- `stdout`: Output from successful commands
- `stderr`: Error messages or `[BLOCKED]` notices
- `returncode`: Exit code (0 = success)
- `cwd`: Current working directory

### Best Practices
1. **Inspect `stderr`** for `[BLOCKED]` messages
2. **Handle partial execution** — some commands may run
3. **Use output** to guide next steps
4. **Confirm CWD** with `ls` or `pwd` after `cd`
5. **Explain blocked commands** and suggest alternatives
6. **Handle edge cases**: quotes, pipes, special characters
7. **Be efficient**: combine operations when safe
8. **For containers/Kubernetes**: use read-only commands (e.g., `docker ps`, `kubectl get`)
9. **For log analysis**: chain commands (e.g., `journalctl -u service | grep error`)

### Example Tasks
- **Log Analysis**: `journalctl -u nginx | grep "error" | tail -n 10`
- **Config Audit**: `find /etc -name "*.conf" | xargs grep "key=value"`
- **Workspace Report**: `ls -R | grep ".md$" && du -sh .`
- **Disk Usage**: `df -h && du -sh * | sort -hr`
- **Container Check**: `docker ps -a && docker logs my-container`
- **Kubernetes Status**: `kubectl get pods --all-namespaces`

### Prohibited Actions
- File deletion (rm, shred)
- System modification (sudo, chmod, reboot)
- Code execution (python, bash scripts)
- Package installation (apt, pip)
- Write operations in docker/kubectl (run, apply)

Be precise, security-conscious, and helpful. Prioritize user safety while enabling powerful DevOps workflows. Always respond in XML format with `` and `` tags."""
59 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling", "uv-dynamic-versioning"]
3 | build-backend = "hatchling.build"
4 |
5 | [tool.hatch.version]
6 | source = "uv-dynamic-versioning"
7 |
8 | [tool.uv-dynamic-versioning]
9 | vcs = "git"
10 | style = "pep440"
11 | bump = true
12 |
13 | [project]
14 | name = "omnicoreagent"
15 | dynamic = ["version"]
16 | description = "OmniCoreAgent is a powerful Python AI Agent framework for building autonomous AI agents that think, reason, and execute complex tasks. Production-ready agents that use tools, manage memory, coordinate workflows, and handle real-world business logic."
17 | readme = "README.md"
18 | authors = [{ name = "Abiola Adeshina", email = "abiolaadedayo1993@gmail.com" }]
19 | requires-python = ">=3.10"
20 |
21 | keywords = ["git", "mcp", "llm", "automation", "agent", "ai", "framework"]
22 | license = { text = "MIT" }
23 | classifiers = [
24 | "Development Status :: 4 - Beta",
25 | "Intended Audience :: Developers",
26 | "License :: OSI Approved :: MIT License",
27 | "Programming Language :: Python :: 3",
28 | "Programming Language :: Python :: 3.10",
29 | "Programming Language :: Python :: 3.11",
30 | "Programming Language :: Python :: 3.12",
31 | "Programming Language :: Python :: 3.13",
32 | ]
33 | dependencies = [
34 | "colorlog>=6.9.0",
35 | "python-dotenv>=1.0.1",
36 | "rich>=13.9.4",
37 | "websockets>=15.0.1",
38 | "httpx>=0.26.0",
39 | "httpx-sse>=0.4.0",
40 | "pydantic[email]>=2.6.0",
41 | "anyio>=4.2.0",
42 | "redis>=5.2.1",
43 | "python-decouple>=3.8",
44 | "fastapi>=0.115.12",
45 | "python-multipart>=0.0.20",
46 | "colorama>=0.4.6",
47 | "mcp[cli]>=1.9.1",
48 | "sqlalchemy>=2.0.0",
49 | "tzlocal>=5.2",
50 | "psycopg2-binary>=2.9.10",
51 | "apscheduler>=3.11.0",
52 | "psutil>=7.0.0",
53 | "pymupdf>=1.26.3",
54 | "litellm>=1.75.2",
55 | "fastapi-sso>=0.18.0",
56 | "opik>=1.8.19",
57 | "cryptography>=45.0.6",
58 | "motor>=3.7.1",
59 | "pymongo>=4.15.1",
60 | ]
61 |
62 | [project.scripts]
omnicoreagent = "omnicoreagent.omni_agent.agent:OmniAgent"
64 |
65 | [project.urls]
66 | Repository = "https://github.com/omnirexflora-labs/omnicoreagent"
67 | Issues = "https://github.com/omnirexflora-labs/omnicoreagent/issues"
68 |
69 | [tool.hatch.build.targets.wheel]
70 | packages = ["src/omnicoreagent"]
71 |
72 | [tool.hatch.build]
73 | packages = ["src/omnicoreagent"]
74 | include = [
75 |
76 | "LICENSE",
77 | "README.md"
78 | ]
79 |
80 | [tool.pytest.ini_options]
81 | markers = [
82 | "slow: marks tests as slow (deselect with '-m \"not slow\"')",
83 | "asyncio: marks tests that use asyncio",
84 | "OpenAIIntegration: marks tests that require an OpenAI API key",
85 | ]
86 |
87 | [tool.uv.sources]
88 | omnicoreagent = { workspace = true }
89 |
90 | [dependency-groups]
91 | dev = [
92 | "pre-commit>=4.2.0",
93 | "pytest>=8.3.5",
94 | "pytest-asyncio>=0.26.0",
95 | "ruff>=0.11.7",
96 | "hatch>=1.14.1",
97 | "twine>=6.1.0",
98 | ]
99 | docs = [
100 | "mkdocs>=1.6.1",
101 | "mkdocs-git-revision-date-localized-plugin>=1.4.7",
102 | "mkdocs-material>=9.6.14",
103 | "mkdocs-minify-plugin>=0.8.0",
104 | "pymdown-extensions>=10.16",
105 | ]
106 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/sandbox/utils.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import tarfile
3 | from pathlib import Path
4 | from typing import Dict, Any
5 |
6 |
def safe_extract_tar(
    tar_path: Path,
    extract_to: Path,
    max_files: int = 1000,
    max_size_bytes: int = 100_000_000,
):
    """Safely extract a gzipped tarball of untrusted origin into a directory.

    Rejects archives that contain too many members, exceed the size budget,
    attempt path traversal, or include non-regular members (symlinks,
    hardlinks, devices, FIFOs) that could escape ``extract_to``.

    Args:
        tar_path: Path to the ``.tar.gz`` archive.
        extract_to: Destination directory (resolved before checking).
        max_files: Maximum number of members allowed.
        max_size_bytes: Maximum cumulative uncompressed size.

    Raises:
        ValueError: if any safety check fails (nothing is extracted then).
    """
    extract_to = extract_to.resolve()
    with tarfile.open(tar_path, "r:gz") as tar:
        members = tar.getmembers()
        if len(members) > max_files:
            raise ValueError("Tarball contains too many files (max 1000)")
        total_size = 0
        for member in members:
            if member.name.startswith("/") or ".." in member.name:
                raise ValueError("Path traversal detected in tarball")
            # Symlinks/hardlinks/devices can point outside the sandbox even
            # when their own names look safe — reject them outright.
            if not (member.isfile() or member.isdir()):
                raise ValueError("Unsupported member type in tarball")
            total_size += member.size
            if total_size > max_size_bytes:
                raise ValueError("Tarball exceeds 100MB size limit")
            dest = (extract_to / member.name).resolve()
            # Proper containment check: the previous str.startswith() test
            # wrongly accepted sibling dirs like "/tmp/out-evil" for "/tmp/out".
            if not dest.is_relative_to(extract_to):
                raise ValueError("Path escape detected")
        # All members validated above; extract the whole archive at once.
        tar.extractall(path=extract_to)
29 |
30 |
def safe_git_clone(url: str, target_dir: Path):
    """Shallow-clone a public HTTPS git repository into target_dir.

    Only ``https://...*.git`` URLs are accepted; the child process runs with
    user/system git config neutralized and a restricted PATH.

    Raises:
        ValueError: for any URL that is not a public HTTPS .git URL.
        subprocess.CalledProcessError / TimeoutExpired: if the clone fails.
    """
    is_https = url.startswith("https://")
    has_git_suffix = url.endswith(".git")
    if not (is_https and has_git_suffix):
        raise ValueError("Only public HTTPS Git URLs ending in .git are allowed")

    # Neutralize any host git configuration and restrict PATH for the child.
    clone_env = {
        "GIT_CONFIG_GLOBAL": "/dev/null",
        "GIT_CONFIG_SYSTEM": "/dev/null",
        "PATH": "/usr/bin:/bin",
    }
    command = [
        "git",
        "clone",
        "--depth=1",
        "--shallow-submodules",
        "--no-single-branch",
        "--filter=blob:none",
        url,
        str(target_dir),
    ]
    subprocess.run(
        command,
        check=True,
        capture_output=True,
        timeout=300,
        env=clone_env,
    )
55 |
56 |
57 | def _snapshot_dir(path: Path) -> Dict[str, str]:
58 | """Recursively snapshot all text files in a directory."""
59 | snapshot = {}
60 | for file_path in path.rglob("*"):
61 | if file_path.is_file():
62 | rel = file_path.relative_to(path)
63 | try:
64 | # Only read text files (skip binaries)
65 | content = file_path.read_text(encoding="utf-8", errors="replace")
66 | snapshot[str(rel)] = content
67 | except Exception:
68 | continue
69 | return snapshot
70 |
71 |
def compute_diff(before: Dict[str, str], after: Dict[str, str]) -> Dict[str, Any]:
    """Diff two directory snapshots (path -> content).

    Returns a dict with three keys:
      - "added":    paths only present in ``after`` (with their content)
      - "deleted":  paths only present in ``before`` (with their content)
      - "modified": paths in both whose content changed, as before/after pairs
    """
    added: Dict[str, str] = {}
    deleted: Dict[str, str] = {}
    modified: Dict[str, Any] = {}

    for key, content in after.items():
        if key not in before:
            added[key] = content

    for key, content in before.items():
        if key not in after:
            deleted[key] = content
        elif content != after[key]:
            modified[key] = {"before": content, "after": after[key]}

    return {"added": added, "deleted": deleted, "modified": modified}
81 |
82 |
def create_tarball(
    session_id: str,
    workspace_root: str,
    output_dir: str = "./outputs",
    base_dir: str = "working",
) -> str:
    """Archive a session workspace into <output_dir>/<session_id>.tar.gz.

    The archived tree is ``<workspace_root>/<session_id>/<base_dir>``, stored
    with "." as the archive root. Returns the archive path as a string.
    """
    destination = Path(output_dir)
    destination.mkdir(parents=True, exist_ok=True)

    archive_path = destination / f"{session_id}.tar.gz"
    workspace = Path(workspace_root) / session_id / base_dir
    with tarfile.open(archive_path, "w:gz") as tar:
        tar.add(workspace, arcname=".")
    return str(archive_path)
95 |
--------------------------------------------------------------------------------
/tests/test_llm.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import Mock, patch, AsyncMock
2 | import pytest
3 | from omnicoreagent.core.llm import LLMConnection
4 |
5 |
6 | # Shared mock config loader
def make_mock_config(provider="openai", model="gpt-4"):
    """Build the attribute dict for a mocked LLMConnection config object."""
    llm_section = {
        "provider": provider,
        "model": model,
        "temperature": 0.7,
        "max_tokens": 1000,
        "top_p": 0.9,
    }
    return {
        "llm_api_key": "test-api-key",
        "load_config": Mock(return_value={"LLM": llm_section}),
    }
22 |
23 |
@pytest.fixture
def mock_llm_connection():
    """LLMConnection built from a mocked config, with litellm stubbed out.

    The patch target must be the module that *uses* litellm
    (``omnicoreagent.core.llm``, which this file imports). The previous
    target ``"mcpomni_connect.llm.litellm"`` referred to the pre-rename
    package and made the patch raise ModuleNotFoundError.
    """
    with patch("omnicoreagent.core.llm.litellm"):
        return LLMConnection(Mock(**make_mock_config()))
28 |
29 |
class TestLLMConnection:
    """Tests for LLMConnection config handling and litellm call wrapping.

    All litellm patch targets point at ``omnicoreagent.core.llm`` — the
    module this file imports ``LLMConnection`` from. The previous targets
    used the pre-rename ``"mcpomni_connect"`` package path, which no longer
    exists, so every ``patch(...)`` raised ModuleNotFoundError.
    """

    def test_initialization(self, mock_llm_connection):
        """Provider/model/temperature are normalized into llm_config."""
        cfg = mock_llm_connection.llm_config
        assert cfg["provider"] == "openai"
        assert cfg["model"] == "openai/gpt-4"
        assert cfg["temperature"] == 0.7

    def test_llm_configuration_returns_expected_keys(self, mock_llm_connection):
        """llm_configuration exposes at least the core generation settings."""
        config = mock_llm_connection.llm_configuration()
        assert set(config) >= {
            "provider",
            "model",
            "temperature",
            "max_tokens",
            "top_p",
        }

    @pytest.mark.asyncio
    async def test_llm_call_with_tools_and_without(self):
        """llm_call forwards tools (with tool_choice=auto) or omits them."""
        messages = [{"role": "user", "content": "What is AI?"}]
        tools = [{"name": "tool", "description": "desc"}]

        with patch(
            "omnicoreagent.core.llm.litellm.acompletion", new_callable=AsyncMock
        ) as mock_completion:
            mock_completion.return_value = {"mocked": "response"}

            conn = LLMConnection(Mock(**make_mock_config("groq", "llama-3")))

            # With tools
            resp1 = await conn.llm_call(messages, tools)
            assert resp1 == {"mocked": "response"}
            mock_completion.assert_awaited_once()
            args1 = mock_completion.call_args.kwargs
            assert args1["model"] == "groq/llama-3"
            assert args1["tools"] == tools
            assert args1["tool_choice"] == "auto"

            mock_completion.reset_mock()

            # Without tools
            resp2 = await conn.llm_call(messages)
            assert resp2 == {"mocked": "response"}
            args2 = mock_completion.call_args.kwargs
            assert "tools" not in args2
            assert args2["model"] == "groq/llama-3"

    @pytest.mark.asyncio
    async def test_llm_call_handles_exceptions_gracefully(self):
        """A raising completion backend results in None, not a crash."""
        messages = [{"role": "user", "content": "Fail please"}]

        with patch(
            "omnicoreagent.core.llm.litellm.acompletion", new_callable=AsyncMock
        ) as mock_completion:
            mock_completion.side_effect = Exception("Boom")

            conn = LLMConnection(Mock(**make_mock_config("gemini", "gemini-pro")))
            response = await conn.llm_call(messages)
            assert response is None

    def test_removed_method_is_not_present(self, mock_llm_connection):
        """Legacy Groq-specific truncation helper stays removed."""
        assert not hasattr(mock_llm_connection, "truncate_messages_for_groq")
92 |
--------------------------------------------------------------------------------
/tests/test_orchestrator_agent.py:
--------------------------------------------------------------------------------
1 | # import pytest
2 | # import asyncio
3 | # import json
4 | # from unittest.mock import AsyncMock, MagicMock
5 | # from mcpomni_connect.agents.orchestrator import OrchestratorAgent
6 | # from mcpomni_connect.agents.types import AgentConfig, ParsedResponse
7 |
8 | # @pytest.fixture
9 | # def agent_config():
10 | # return AgentConfig(
11 | # agent_name="orchestrator",
12 | # max_steps=5,
13 | # tool_call_timeout=10,
14 | # request_limit=100,
15 | # total_tokens_limit=1000,
16 | # mcp_enabled=True,
17 | # )
18 |
19 | # @pytest.fixture
20 | # def agents_registry():
21 | # return {
22 | # "summary": "You summarize text",
23 | # "report": "You write reports",
24 | # }
25 |
26 | # @pytest.fixture
27 | # def orchestrator(agent_config, agents_registry):
28 | # return OrchestratorAgent(
29 | # config=agent_config,
30 | # agents_registry=agents_registry,
31 | # chat_id=123,
32 | # current_date_time="2025-05-03",
33 | # debug=True,
34 | # )
35 |
36 | # @pytest.mark.asyncio
37 | # async def test_extract_action_json_valid(orchestrator):
38 | # response = ParsedResponse(data=json.dumps({"agent_name": "summary", "task": "Summarize this"}))
39 | # result = await orchestrator.extract_action_json(response)
40 | # assert result.get("action") is True
41 | # assert result.get("agent_name") == "summary"
42 | # assert result.get("task") == "Summarize this"
43 |
44 | # @pytest.mark.asyncio
45 | # async def test_extract_action_json_invalid_json(orchestrator):
46 | # response = ParsedResponse(data="{invalid_json}")
47 | # result = await orchestrator.extract_action_json(response)
48 | # assert result.get("action") is False
49 | # assert result.get("error") == "Invalid JSON format"
50 |
51 | # @pytest.mark.asyncio
52 | # async def test_extract_action_json_missing_fields(orchestrator):
53 | # response = ParsedResponse(data=json.dumps({"foo": "bar"}))
54 | # result = await orchestrator.extract_action_json(response)
55 | # assert result.get("action") is False
56 | # assert "error" in result
57 |
58 | # # @pytest.mark.asyncio
59 | # # async def test_create_agent_system_prompt(orchestrator):
60 | # # available_tools = {}
61 | # # prompt = await orchestrator.create_agent_system_prompt("summary", available_tools)
62 | # # assert isinstance(prompt, str)
63 | # # assert "summarize" in prompt.lower()
64 |
65 | # # @pytest.mark.asyncio
66 | # # async def test_update_llm_working_memory(orchestrator):
67 | # # message_history = AsyncMock(return_value=[
68 | # # {"role": "user", "content": "What is the summary?"},
69 | # # {"role": "assistant", "content": "Here is the summary..."},
70 | # # {"role": "system", "content": "System initialized."},
71 | # # ])
72 | # # await orchestrator.update_llm_working_memory(message_history)
73 | # # assert orchestrator.orchestrator_messages == await message_history()
74 |
75 | # @pytest.mark.asyncio
76 | # async def test_act_success(orchestrator):
77 | # llm_connection = AsyncMock(return_value="Here is the output.")
78 | # add_message_to_history = AsyncMock()
79 | # message_history = AsyncMock(return_value=[])
80 |
81 | # result = await orchestrator.act(
82 | # sessions={},
83 | # agent_name="summary",
84 | # task="Summarize this",
85 | # add_message_to_history=add_message_to_history,
86 | # llm_connection=llm_connection,
87 | # available_tools={},
88 | # message_history=message_history,
89 | # tool_call_timeout=5,
90 | # max_steps=2,
91 | # request_limit=100,
92 | # total_tokens_limit=500,
93 | # )
94 | # assert isinstance(result, str)
95 | # assert "output" in result.lower() or "observation" in result.lower()
96 |
97 | # @pytest.mark.asyncio
98 | # async def test_agent_registry_tool(orchestrator):
99 | # result = await orchestrator.agent_registry_tool(available_tools={"tool_a": "desc"})
100 | # assert isinstance(result, str)
101 | # assert "tool" in result.lower()
102 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: MCPOmni Connect Documentation
2 | site_description: Universal Gateway to MCP Servers - Intelligent AI Agent System
3 | site_author: Abiola Adeshina
4 | site_url: https://abiorh001.github.io/mcp_omni_connect
5 |
6 | repo_name: Abiorh001/mcp_omni_connect
7 | repo_url: https://github.com/Abiorh001/mcp_omni_connect
8 | edit_uri: edit/main/docs/
9 |
10 | nav:
11 | - Home: index.md
12 | - Getting Started:
13 | - Installation: getting-started/installation.md
14 | - Quick Start: getting-started/quick-start.md
15 | - Configuration:
16 | - Configuration Guide: configuration/configuration-guide.md
17 | - Transport Types: configuration/transport-types.md
18 | - Authentication: configuration/authentication.md
19 | - LLM Providers: configuration/llm-providers.md
20 | - Troubleshooting: configuration/troubleshooting.md
21 | - User Guide:
22 | - Basic Usage: user-guide/basic-usage.md
23 | - Operation Modes: user-guide/operation-modes.md
24 | - Commands: user-guide/commands.md
25 | - Memory Management: user-guide/memory-management.md
26 | - Prompt Management: user-guide/prompt-management.md
27 | - Features:
28 | - Agent System: features/agent-system.md
29 | - Tool Orchestration: features/tool-orchestration.md
30 | - Resource Management: features/resource-management.md
31 | - Token Management: features/token-management.md
32 | - Advanced:
33 | - Architecture: advanced/architecture.md
34 | - API Reference: advanced/api-reference.md
35 | - Examples: advanced/examples.md
36 | - Development:
37 | - Testing: development/testing.md
38 | - Contributing: development/contributing.md
39 | - Changelog: changelog.md
40 |
41 | theme:
42 | name: material
43 | palette:
44 | # Palette toggle for light mode
45 | - scheme: default
46 | primary: indigo
47 | accent: indigo
48 | toggle:
49 | icon: material/brightness-7
50 | name: Switch to dark mode
51 | # Palette toggle for dark mode
52 | - scheme: slate
53 | primary: indigo
54 | accent: indigo
55 | toggle:
56 | icon: material/brightness-4
57 | name: Switch to light mode
58 | features:
59 | - navigation.tabs
60 | - navigation.sections
61 | - navigation.expand
62 | - navigation.path
63 | - navigation.top
64 | - search.suggest
65 | - search.highlight
66 | - search.share
67 | - content.code.copy
68 | - content.code.select
69 | - content.tabs.link
70 | - toc.follow
71 | - toc.integrate
72 | icon:
73 | repo: fontawesome/brands/github
74 |
75 | plugins:
76 | - search:
77 | separator: '[\s\-,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])'
78 | - minify:
79 | minify_html: true
80 | - git-revision-date-localized:
81 | type: date
82 |
83 | markdown_extensions:
84 | - abbr
85 | - admonition
86 | - attr_list
87 | - def_list
88 | - footnotes
89 | - md_in_html
90 | - toc:
91 | permalink: true
92 | - pymdownx.arithmatex:
93 | generic: true
94 | - pymdownx.betterem:
95 | smart_enable: all
96 | - pymdownx.caret
97 | - pymdownx.details
98 | - pymdownx.emoji:
99 | emoji_generator: !!python/name:material.extensions.emoji.to_svg
100 | emoji_index: !!python/name:material.extensions.emoji.twemoji
101 | - pymdownx.highlight:
102 | anchor_linenums: true
103 | line_spans: __span
104 | pygments_lang_class: true
105 | - pymdownx.inlinehilite
106 | - pymdownx.keys
107 | - pymdownx.magiclink:
108 | repo_url_shorthand: true
109 | user: abiorh001
110 | repo: mcp_omni_connect
111 | - pymdownx.mark
112 | - pymdownx.smartsymbols
113 | - pymdownx.superfences:
114 | custom_fences:
115 | - name: mermaid
116 | class: mermaid
117 | format: !!python/name:pymdownx.superfences.fence_code_format
118 | - pymdownx.tabbed:
119 | alternate_style: true
120 | - pymdownx.tasklist:
121 | custom_checkbox: true
122 | - pymdownx.tilde
123 |
124 | extra:
125 | social:
126 | - icon: fontawesome/brands/github
127 | link: https://github.com/Abiorh001/mcp_omni_connect
128 | - icon: fontawesome/brands/python
129 | link: https://pypi.org/project/mcpomni-connect/
130 | version:
131 | provider: mike
132 |
133 | copyright: Copyright © 2024 Abiola Adeshina
134 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/events/event_router.py:
--------------------------------------------------------------------------------
1 | """
2 | Event Router for dynamic event store selection.
3 | """
4 |
5 | from typing import Optional, Dict, Any, List
6 | from omnicoreagent.core.utils import logger
7 | from omnicoreagent.core.events.base import BaseEventStore, Event
8 | from omnicoreagent.core.events.in_memory import InMemoryEventStore
9 | from omnicoreagent.core.events.redis_stream import RedisStreamEventStore
10 |
11 |
class EventRouter:
    """Router for managing different event store backends."""

    def __init__(self, event_store_type: str = "in_memory"):
        """
        Initialize EventRouter.

        Args:
            event_store_type: Type of event store ("in_memory", "redis_stream")
        """
        self.event_store_type = event_store_type
        self._event_store: Optional[BaseEventStore] = None
        self._initialize_event_store()

    def __str__(self):
        """Readable one-line summary of the router's backend and health."""
        return (
            f"EventRouter(type={self.event_store_type}, "
            f"available={self.is_available()})"
        )

    def __repr__(self):
        """Same as __str__; the readable form is detailed enough."""
        return str(self)

    def _initialize_event_store(self):
        """Build the backend named by self.event_store_type.

        Unknown types and construction failures both degrade gracefully to
        the in-memory store so the router is always usable.
        """
        try:
            if self.event_store_type == "redis_stream":
                self._event_store = RedisStreamEventStore()
                logger.info("Initialized Redis Stream Event Store")
                return
            if self.event_store_type != "in_memory":
                logger.warning(
                    f"Unknown event store type: {self.event_store_type}. Falling back to memory."
                )
                self._event_store = InMemoryEventStore()
                return
            self._event_store = InMemoryEventStore()
            logger.info("Initialized In-Memory Event Store")
        except Exception as e:
            logger.error(
                f"Failed to initialize {self.event_store_type} event store: {e}"
            )
            logger.info("Falling back to in-memory event store")
            self._event_store = InMemoryEventStore()

    def _active_store(self) -> BaseEventStore:
        """Return the live store or raise if initialization never produced one."""
        if not self._event_store:
            raise RuntimeError("No event store available")
        return self._event_store

    async def append(self, session_id: str, event: Event) -> None:
        """Append an event to the current event store."""
        await self._active_store().append(session_id=session_id, event=event)

    async def get_events(self, session_id: str) -> List[Event]:
        """Get events from the current event store."""
        return await self._active_store().get_events(session_id=session_id)

    async def stream(self, session_id: str):
        """Stream events from the current event store."""
        source = self._active_store().stream(session_id=session_id)
        async for item in source:
            yield item

    def get_event_store_type(self) -> str:
        """Get the current event store type."""
        return self.event_store_type

    def is_available(self) -> bool:
        """Check if the event store is available."""
        return self._event_store is not None

    def get_event_store_info(self) -> Dict[str, Any]:
        """Get information about the current event store."""
        return {"type": self.event_store_type, "available": self.is_available()}

    def switch_event_store(self, event_store_type: str):
        """Switch to a different event store type (no-op when unchanged)."""
        if event_store_type == self.event_store_type:
            logger.info(f"Event store already set to {event_store_type}")
            return
        logger.info(
            f"Switching event store from {self.event_store_type} to {event_store_type}"
        )
        self.event_store_type = event_store_type
        self._initialize_event_store()
105 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/system_prompt.py:
--------------------------------------------------------------------------------
def get_deep_coding_system_prompt(session_id: str) -> str:
    """Build the system prompt for the deep coding agent.

    Args:
        session_id: Unique session identifier; interpolated into the prompt
            so the agent scopes its persistent memory files under
            /memories/<session_id>/ and cannot touch other sessions.

    Returns:
        The full engineering-protocol prompt as one formatted string.
    """
    return f"""
# 🔒 DEEP CODING AGENT PROTOCOL — SESSION {session_id}

You are a principal software engineer at a top-tier tech company. You are responsible for delivering **production-ready, secure, well-tested, and maintainable code**. You operate under strict engineering protocol.

## 🧠 CORE PRINCIPLES

1. **SESSION ISOLATION**
All persistent reasoning must occur under:
→ `/memories/{session_id}/`
Never access `/memories/` root or other sessions.

2. **INSPECT BEFORE ACT**
Always inspect existing state before creating or modifying anything.
→ Use `memory_view` to check for prior files.
→ Never assume a file exists or doesn’t exist.

3. **REASONING ≠ CODE**
`/memories/` is for **plans, designs, logs, hypotheses, and reports** — **never source code**.
Code lives exclusively in the sandbox (`/home/coder/workspace`).

4. **SHELL SAFETY FIRST**
**NEVER** use `echo "..." > file` or `python -c "..."` for multi-line code.
**ALWAYS** use **heredoc with quoted delimiter** for atomic, safe file writes:
```bash
cat > file.py <<'EOF'
your multi-line code here
EOF
```
This prevents quoting errors, command injection, and parsing failures.

5. **FULL ENGINEERING RIGOR**
Every change must be:
- Planned
- Justified
- Tested
- Documented
- Reviewable

---

## 📋 MANDATORY WORKFLOW

### PHASE 1: TASK UNDERSTANDING & PLANNING
- Create `/memories/{session_id}/TASK.md`: verbatim user request.
- Create `/memories/{session_id}/PLAN.md`:
- List every file to read/modify/create
- Specify exact test commands to run (e.g., `pytest tests/`, `mypy src/`)
- Define success criteria (e.g., “0 mypy errors”, “100% branch coverage”)
- Identify edge cases and failure modes

### PHASE 2: DESIGN & JUSTIFICATION
- Create `/memories/{session_id}/DESIGN.md`:
- Explain algorithmic choices
- Compare alternatives (and why they were rejected)
- Specify mocking strategy for dependencies (e.g., Redis, APIs)
- Define test coverage scope (unit, integration, error paths)

### PHASE 3: IMPLEMENTATION & VALIDATION
- Work in `/home/coder/workspace` using **safe shell commands only**:
- ✅ **SAFE**: `cat > file.py <<'EOF'` (quoted EOF)
- ✅ **SAFE**: `cat src/utils.py` → inspect
- ✅ **SAFE**: `pytest -v` → validate
- ❌ **NEVER**: `echo "code with 'quotes'" > file.py`
- ❌ **NEVER**: `python -c "with open(...) as f: f.write(...)"`
- After **every meaningful step**, append to:
→ `/memories/{session_id}/LOG.md`

### PHASE 4: DEBUGGING & ITERATION
- On failure, log to:
→ `/memories/{session_id}/DEBUG.md`
Include hypothesis, test, result, and revised plan.

### PHASE 5: FINALIZATION & DELIVERY
- When all success criteria are met, create:
→ `/memories/{session_id}/FINAL_REPORT.md`
Include changes, validation results, files for review, and delivery options.

---

## 🚫 ABSOLUTE PROHIBITIONS

- **NEVER** write source code to `/memories/`
- **NEVER** assume file state — always inspect first
- **NEVER** skip testing — every change must be validated
- **NEVER** reference host paths like `/home/user/...`
- **NEVER** auto-apply changes — user must explicitly approve
- **NEVER** expose memory tool calls in final answer
- **NEVER** use unquoted heredoc or `echo` for multi-line code
- **NEVER** split code into lists — always use single-string commands

---

## 💡 ENGINEERING EXCELLENCE

- **Think like a reviewer**: Would you approve this PR?
- **Test like a QA engineer**: Cover happy path, edge cases, and error conditions
- **Document like a maintainer**: Future you (or a teammate) should understand every decision
- **Deliver like a professional**: Provide clear, actionable next steps for the user

Begin by asking for the codebase if not provided, then inspect `/memories/{session_id}/` and create `TASK.md` if missing.
"""
104 |
--------------------------------------------------------------------------------
/docs/getting-started/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | ## Prerequisites
4 |
5 | Before installing MCPOmni Connect, ensure you have the following:
6 |
7 | !!! info "System Requirements"
8 | - **Python 3.10+** (Python 3.11+ recommended)
9 | - **LLM API key** from any supported provider
10 | - **UV package manager** (recommended) or pip
11 | - **Redis server** (optional, for persistent memory)
12 |
13 | ### Check Python Version
14 |
15 | ```bash
16 | python --version
17 | # Should show Python 3.10.0 or higher
18 | ```
19 |
20 | ### Install UV (Recommended)
21 |
22 | UV is the fastest Python package manager and is recommended for MCPOmni Connect:
23 |
24 | === "macOS/Linux"
25 | ```bash
26 | curl -LsSf https://astral.sh/uv/install.sh | sh
27 | ```
28 |
29 | === "Windows"
30 | ```powershell
31 | powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
32 | ```
33 |
34 | === "Python pip"
35 | ```bash
36 | pip install uv
37 | ```
38 |
39 | ## Installation Methods
40 |
41 | ### Method 1: UV (Recommended)
42 |
43 | ```bash
44 | uv add mcpomni-connect
45 | ```
46 |
47 | ### Method 2: pip
48 |
49 | ```bash
50 | pip install mcpomni-connect
51 | ```
52 |
53 | ### Method 3: From Source
54 |
55 | For development or latest features:
56 |
57 | ```bash
58 | git clone https://github.com/Abiorh001/mcp_omni_connect.git
59 | cd mcp_omni_connect
60 | uv sync
61 | ```
62 |
63 | ## Verify Installation
64 |
65 | After installation, verify MCPOmni Connect is correctly installed:
66 |
67 | ```bash
68 | mcpomni_connect --version
69 | ```
70 |
71 | You should see the version number displayed.
72 |
73 | ## Optional Dependencies
74 |
75 | ### Redis (For Persistent Memory)
76 |
77 | MCPOmni Connect can use Redis for persistent conversation memory:
78 |
79 | === "Ubuntu/Debian"
80 | ```bash
81 | sudo apt update
82 | sudo apt install redis-server
83 | sudo systemctl start redis-server
84 | sudo systemctl enable redis-server
85 | ```
86 |
87 | === "macOS"
88 | ```bash
89 | brew install redis
90 | brew services start redis
91 | ```
92 |
93 | === "Windows"
94 | Download from [Redis Windows releases](https://github.com/microsoftarchive/redis/releases) or use WSL with Linux instructions.
95 |
96 | === "Docker"
97 | ```bash
98 | docker run -d --name redis -p 6379:6379 redis:alpine
99 | ```
100 |
101 | ### Verify Redis Connection
102 |
103 | ```bash
104 | redis-cli ping
105 | # Should respond with: PONG
106 | ```
107 |
108 | ## Next Steps
109 |
110 | Once installation is complete:
111 |
112 | 1. **[Set up configuration](../configuration/configuration-guide.md)** - Create your `.env` and `servers_config.json` files
113 | 2. **[Follow the Quick Start guide](quick-start.md)** - Get your first MCP connection working
114 | 3. **[Explore operation modes](../user-guide/operation-modes.md)** - Learn about chat, autonomous, and orchestrator modes
115 |
116 | ## Troubleshooting Installation
117 |
118 | ### Common Issues
119 |
120 | !!! failure "Python Version Error"
121 | **Error**: `MCPOmni Connect requires Python 3.10+`
122 |
123 | **Solution**: Upgrade your Python version:
124 | ```bash
125 | # Check available Python versions
126 | python3.10 --version # or python3.11, python3.12
127 |
128 | # Use specific Python version with UV
129 | uv python install 3.11
130 | uv add mcpomni-connect
131 | ```
132 |
133 | !!! failure "Permission Denied"
134 | **Error**: Permission denied during installation
135 |
136 | **Solution**: Use user installation:
137 | ```bash
138 | pip install --user mcpomni-connect
139 | ```
140 |
141 | !!! failure "Command Not Found"
142 | **Error**: `mcpomni_connect: command not found`
143 |
144 | **Solution**: Add to PATH or use full path:
145 | ```bash
146 | # Check installation path
147 | pip show mcpomni-connect
148 |
149 | # Or run with python -m
150 | python -m mcpomni_connect
151 | ```
152 |
153 | ### Getting Help
154 |
155 | If you encounter issues:
156 |
157 | 1. Check the [troubleshooting guide](../configuration/troubleshooting.md)
158 | 2. Search [existing issues](https://github.com/Abiorh001/mcp_omni_connect/issues)
159 | 3. Create a [new issue](https://github.com/Abiorh001/mcp_omni_connect/issues/new) with:
160 | - Your operating system
161 | - Python version
162 | - Installation method used
163 | - Complete error message
164 |
165 | ---
166 |
167 | **Next**: [Quick Start Guide →](quick-start.md)
168 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/workflow/sequential_agent.py:
--------------------------------------------------------------------------------
1 | from omnicoreagent.omni_agent.agent import OmniAgent
2 | from typing import List, Optional
3 | from omnicoreagent.core.utils import logger
4 | import uuid
5 |
6 |
class SequentialAgent:
    """Runs a list of OmniAgents sequentially, passing output from one to the next with retry support."""

    # Fallback task used when the caller provides no initial task.
    DEFAULT_TASK = "Please follow your system instructions and process accordingly."

    def __init__(self, sub_agents: List[OmniAgent], max_retries: int = 3):
        """
        Args:
            sub_agents: Ordered agents to run; each receives the previous
                agent's response text as its input.
            max_retries: Attempts allowed per agent before aborting the chain.

        Raises:
            ValueError: If no sub-agents are provided.
        """
        if not sub_agents:
            raise ValueError("SequentialAgent requires at least one sub-agent")
        self.sub_agents = sub_agents
        self.max_retries = max_retries
        self._initialized = False

    async def initialize(self):
        """Connect MCP servers for all sub-agents (idempotent)."""
        if self._initialized:
            return
        # Fixed log prefix: this is SequentialAgent, not RouterAgent — the
        # previous message/docstring were copy-pasted from the router agent.
        logger.info("SequentialAgent: Initializing MCP servers for sub-agents")
        for agent in self.sub_agents:
            if getattr(agent, "mcp_tools", None):
                try:
                    await agent.connect_mcp_servers()
                    logger.info(f"{agent.name}: MCP servers connected")
                except Exception as exc:
                    logger.warning(f"{agent.name}: MCP connection failed: {exc}")
        self._initialized = True

    async def run(
        self, initial_task: Optional[str] = None, session_id: Optional[str] = None
    ) -> dict:
        """Run every sub-agent in order, feeding each the previous output.

        Args:
            initial_task: Input for the first agent; DEFAULT_TASK when falsy.
            session_id: Shared session id; a fresh UUID when not given.

        Returns:
            The last agent's output dict, or an error dict (with
            "failed_agent" and "error" keys) if an agent exhausts retries.

        Raises:
            RuntimeError: If called before `initialize()`.
        """
        if not self._initialized:
            raise RuntimeError(
                "SequentialAgent must be initialized. Call `await .initialize()` before using it"
            )

        current_input = initial_task or self.DEFAULT_TASK
        final_output: dict = {}
        session_id = session_id or str(uuid.uuid4())

        for idx, agent_service in enumerate(self.sub_agents, start=1):
            agent_name = getattr(agent_service, "name", f"Agent_{idx}")
            logger.info(f"Running agent {idx}/{len(self.sub_agents)}: {agent_name}")

            retry_count = 0
            while retry_count < self.max_retries:
                try:
                    final_output = await agent_service.run(
                        query=current_input, session_id=session_id
                    )
                    break
                except Exception as exc:
                    retry_count += 1
                    logger.warning(
                        f"{agent_name}: Attempt {retry_count}/{self.max_retries} failed: {exc}"
                    )
                    if retry_count >= self.max_retries:
                        logger.error(
                            f"{agent_name}: Max retries reached, stopping SequentialAgent"
                        )
                        # Surface the failure instead of raising so the caller
                        # still gets the last good input and the session id.
                        return {
                            "response": current_input,
                            "session_id": session_id,
                            "failed_agent": agent_name,
                            "error": str(exc),
                        }

            current_input = self._extract_output(final_output)

        return final_output

    @staticmethod
    def _extract_output(agent_output: dict) -> str:
        """Safely extract the response text from an agent's output dict."""
        return agent_output.get("response", "")

    async def __call__(
        self, initial_task: Optional[str] = None, session_id: Optional[str] = None
    ):
        """Convenience entry point: auto-initialize (and auto-shutdown) when
        the caller has not already initialized the chain."""
        auto_init = not self._initialized
        try:
            if auto_init:
                await self.initialize()
            return await self.run(initial_task=initial_task, session_id=session_id)
        finally:
            if auto_init:
                await self.shutdown()

    async def shutdown(self):
        """Clean up MCP connections for all sub-agents that have them."""
        for agent in self.sub_agents:
            if getattr(agent, "mcp_tools", None):
                try:
                    await agent.cleanup()
                    logger.info(f"{agent.name}: MCP cleanup successful")
                except Exception as exc:
                    logger.warning(f"{agent.name}: MCP cleanup failed: {exc}")
105 |
--------------------------------------------------------------------------------
/tests/test_prompts.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock, MagicMock
2 |
3 | import pytest
4 |
5 | from omnicoreagent.mcp_omni_connect.prompts import (
6 | find_prompt_server,
7 | get_prompt,
8 | get_prompt_with_react_agent,
9 | list_prompts,
10 | )
11 |
12 |
@pytest.mark.asyncio
async def test_list_prompts_all_connected():
    """Prompts from every connected server are aggregated into one list."""
    session_one = AsyncMock()
    session_two = AsyncMock()
    session_one.list_prompts.return_value = MagicMock(prompts=[{"name": "a"}])
    session_two.list_prompts.return_value = MagicMock(prompts=[{"name": "b"}])
    sessions = {
        "server1": {"connected": True, "session": session_one},
        "server2": {"connected": True, "session": session_two},
    }

    result = await list_prompts(["server1", "server2"], sessions)

    assert len(result) == 2
    assert {"name": "a"} in result
    assert {"name": "b"} in result
30 |
31 |
@pytest.mark.asyncio
async def test_list_prompts_with_error():
    """A server whose listing raises is skipped; healthy servers still count."""
    failing_session = AsyncMock()
    healthy_session = AsyncMock()
    failing_session.list_prompts.side_effect = Exception("boom")
    healthy_session.list_prompts.return_value = MagicMock(prompts=[{"name": "b"}])
    sessions = {
        "server1": {"connected": True, "session": failing_session},
        "server2": {"connected": True, "session": healthy_session},
    }

    result = await list_prompts(["server1", "server2"], sessions)

    assert len(result) == 1
    assert {"name": "b"} in result
46 |
47 |
@pytest.mark.asyncio
async def test_find_prompt_server_found():
    """Locates the server that owns a named prompt."""
    registry = {
        "server1": [{"name": "alpha"}],
        "server2": [{"name": "beta"}],
    }

    server_name, found = await find_prompt_server("beta", registry)

    assert found is True
    assert server_name == "server2"
57 |
58 |
@pytest.mark.asyncio
async def test_find_prompt_server_not_found():
    """An unknown prompt name yields (empty server name, not found)."""
    registry = {"server1": [{"name": "alpha"}]}

    server_name, found = await find_prompt_server("gamma", registry)

    assert not found
    assert server_name == ""
67 |
68 |
@pytest.mark.asyncio
async def test_get_prompt_success():
    """get_prompt returns the text of the fetched prompt message."""
    session = AsyncMock()
    prompt_message = MagicMock(role="user", content=MagicMock(text="prompt response"))
    session.get_prompt.return_value = MagicMock(messages=[prompt_message])

    sessions = {"server1": {"connected": True, "session": session}}
    available_prompts = {"server1": [{"name": "test"}]}

    async def noop_history(*_args, **_kwargs):
        return {}

    result = await get_prompt(
        sessions,
        system_prompt="system",
        add_message_to_history=noop_history,
        llm_call=lambda x: x,
        debug=True,
        available_prompts=available_prompts,
        name="test",
    )

    assert result == "prompt response"
92 |
93 |
@pytest.mark.asyncio
async def test_get_prompt_with_react_agent_success():
    """The react-agent variant also returns the fetched prompt text."""
    session = AsyncMock()
    prompt_message = MagicMock(role="user", content=MagicMock(text="react prompt"))
    session.get_prompt.return_value = MagicMock(messages=[prompt_message])

    sessions = {"server1": {"connected": True, "session": session}}
    available_prompts = {"server1": [{"name": "react"}]}

    async def noop_history(*_args, **_kwargs):
        return {}

    result = await get_prompt_with_react_agent(
        sessions,
        system_prompt="sys",
        add_message_to_history=noop_history,
        debug=False,
        available_prompts=available_prompts,
        name="react",
    )

    assert result == "react prompt"
116 |
117 |
@pytest.mark.asyncio
async def test_get_prompt_with_error_handling():
    """A failing fetch surfaces an error string rather than raising."""
    session = AsyncMock()
    session.get_prompt.side_effect = Exception("network failure")

    sessions = {"server1": {"connected": True, "session": session}}
    available_prompts = {"server1": [{"name": "fail"}]}

    async def noop_history(*_args, **_kwargs):
        return {}

    result = await get_prompt_with_react_agent(
        sessions,
        system_prompt="sys",
        add_message_to_history=noop_history,
        debug=True,
        available_prompts=available_prompts,
        name="fail",
    )

    assert "Error getting prompt" in result
138 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/config.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from typing import Literal, Optional, List
4 | from pydantic import BaseModel, Field, field_validator
5 | from pydantic_settings import BaseSettings, SettingsConfigDict
6 |
7 |
class AgentConfig(BaseSettings):
    """Core agent behaviour settings, overridable via AGENT_* env vars."""

    name: str = "DeepCoder"
    # Hard cap on agent steps per task.
    max_steps: int = 30
    # Seconds allowed for a single tool call.
    tool_call_timeout: int = 80
    # 0 presumably means "no request-count limit" — confirm against consumer.
    request_limit: int = 0
    # 0 presumably means "no token budget" — confirm against consumer.
    total_tokens_limit: int = 0
    memory_mode: Literal["sliding_window", "token_budget"] = "sliding_window"
    # Window size used when memory_mode == "sliding_window".
    memory_window_size: int = 100
    # Backend for the memory tool; "local" looks like on-disk storage — TODO confirm.
    memory_tool_backend: Optional[str] = "local"

    # extra="forbid": unknown AGENT_* variables raise instead of being ignored.
    model_config = SettingsConfigDict(env_prefix="AGENT_", extra="forbid")
19 |
20 |
class ModelConfig(BaseSettings):
    """LLM provider/model settings, overridable via MODEL_* env vars."""

    provider: str = "openai"
    model: str = "gpt-4o"
    temperature: float = 0.2
    top_p: float = 0.95
    max_context_length: int = 30_000
    llm_api_key: Optional[str] = None

    @field_validator("llm_api_key", mode="before")
    def set_api_key(cls, v):
        """Fall back to the LLM_API_KEY environment variable when unset."""
        if v:
            return v
        return os.getenv("LLM_API_KEY")

    @field_validator("temperature")
    def temperature_range(cls, v):
        """Reject temperatures outside the supported [0, 2] range."""
        if not (0 <= v <= 2):
            raise ValueError("temperature must be between 0 and 2")
        return v

    model_config = SettingsConfigDict(env_prefix="MODEL_", extra="forbid")
40 |
41 |
class StorageConfig(BaseSettings):
    """Memory/event store backend selection, overridable via STORAGE_* env vars."""

    memory_store_type: Literal["redis", "in_memory", "database"] = "redis"
    event_store_type: Literal["redis", "in_memory"] = "in_memory"
    redis_url: str = "redis://localhost:6379/0"
    redis_max_connections: int = 10  # connection-pool upper bound

    model_config = SettingsConfigDict(env_prefix="STORAGE_", extra="forbid")
49 |
50 |
class SecurityConfig(BaseSettings):
    """Rate limiting, auditing, and command-filtering settings (SECURITY_* env vars)."""

    enable_rate_limiting: bool = True
    rate_limit_requests: int = 10  # allowed requests per window
    rate_limit_window: int = 60  # window length in seconds
    audit_log_file: str = "./logs/audit.log"
    max_command_length: int = 10_000
    # Regexes matched against incoming commands; matching commands are blocked.
    blocked_patterns: List[str] = Field(
        default_factory=lambda: [
            r".*\$\(.*rm.*\).*",
            r".*;\s*rm\s+.*",
            r".*&&\s*sudo.*",
        ]
    )

    @field_validator("rate_limit_requests", "max_command_length")
    @classmethod  # explicit, matching validator style used elsewhere in the project
    def positive_int(cls, v):
        """Both limits must be strictly positive to be meaningful."""
        if v <= 0:
            raise ValueError("Must be positive")
        return v

    model_config = SettingsConfigDict(env_prefix="SECURITY_", extra="forbid")
72 |
73 |
class ObservabilityConfig(BaseSettings):
    """Logging and metrics settings, overridable via OBSERVABILITY_* env vars."""

    log_level: str = "INFO"
    log_format: str = "json"
    log_file: str = "./logs/agent.log"
    log_max_bytes: int = 10_000_000  # rotate after ~10 MB
    log_backup_count: int = 5  # rotated files kept
    enable_metrics: bool = True
    metrics_port: int = 8000

    @field_validator("log_level")
    @classmethod  # explicit, matching validator style used elsewhere in the project
    def valid_log_level(cls, v):
        """Accept any casing of the standard logging levels.

        Note: the value is stored as provided (not upper-cased) — consumers
        should normalize before passing to logging.
        """
        if v.upper() not in {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}:
            raise ValueError("Invalid log level")
        return v

    @field_validator("log_format")
    @classmethod
    def valid_log_format(cls, v):
        """Only 'json' and 'text' formats are supported."""
        if v not in {"json", "text"}:
            raise ValueError("log_format must be 'json' or 'text'")
        return v

    model_config = SettingsConfigDict(env_prefix="OBSERVABILITY_", extra="forbid")
96 |
97 |
class CodingConfig(BaseSettings):
    """Workspace and sandbox resource limits, overridable via CODING_* env vars."""

    workspace_root: str = "./user_workspaces"
    sandbox_timeout_seconds: int = 60
    sandbox_memory_mb: int = 512
    sandbox_disk_mb: int = 100

    @field_validator("workspace_root")
    @classmethod  # explicit, matching validator style used elsewhere in the project
    def workspace_must_be_valid(cls, v):
        """Allow non-existent paths (created later), but reject non-directory files."""
        path = Path(v)
        if path.exists() and not path.is_dir():
            raise ValueError(f"Workspace root {v} exists but is not a directory")
        return v

    model_config = SettingsConfigDict(env_prefix="CODING_", extra="forbid")
112 |
113 |
class ProductionConfig(BaseModel):
    """Aggregated application config; each section reads its own env-var prefix."""

    agent: AgentConfig = Field(default_factory=AgentConfig)
    model: ModelConfig = Field(default_factory=ModelConfig)
    storage: StorageConfig = Field(default_factory=StorageConfig)
    security: SecurityConfig = Field(default_factory=SecurityConfig)
    observability: ObservabilityConfig = Field(default_factory=ObservabilityConfig)
    coding: CodingConfig = Field(default_factory=CodingConfig)
121 |
122 |
def load_config() -> ProductionConfig:
    """Load variables from a local .env file (if present) and build the full config."""
    # Lazy import keeps python-dotenv optional until config is actually loaded.
    from dotenv import load_dotenv

    load_dotenv()
    return ProductionConfig()
128 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/agents/types.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import Any, Optional
3 | from uuid import UUID, uuid4
4 | from pydantic import BaseModel, Field, field_validator, model_validator
5 | import json
6 | from omnicoreagent.core.utils import RobustLoopDetector
7 |
8 |
class AgentConfig(BaseModel):
    """Validated runtime configuration for a single agent."""

    agent_name: str
    request_limit: int = Field(default=0, description="0 = unlimited (production mode)")
    total_tokens_limit: int = Field(
        default=0, description="0 = unlimited (production mode)"
    )
    max_steps: int = Field(gt=0, le=1000)
    tool_call_timeout: int = Field(gt=1, le=1000)
    enable_advanced_tool_use: bool = Field(
        default=False, description="enable_advanced_tool_use"
    )

    # default_factory makes the per-instance copy of this mutable default
    # explicit (pydantic deep-copies literal defaults, so behavior is the same).
    memory_config: dict = Field(
        default_factory=lambda: {"mode": "sliding_window", "value": 10000}
    )

    memory_tool_backend: str | None = Field(
        default=None,
        description="Backend for memory tool. Options: 'local', 's3', 'db'",
    )

    enable_agent_skills: bool = Field(
        default=False,
        description="Enable Agent Skills feature for specialized capabilities",
    )

    @field_validator("memory_tool_backend")
    @classmethod
    def validate_backend(cls, v):
        """Restrict memory_tool_backend to the supported set (or None)."""
        if v is None:
            return v
        allowed = {"local", "s3", "db"}
        if v not in allowed:
            raise ValueError(
                f"Invalid memory_tool_backend '{v}'. Must be one of {allowed}."
            )
        return v

    @field_validator("request_limit", "total_tokens_limit", mode="before")
    @classmethod
    def convert_none_to_zero(cls, v):
        """Treat None limits as 0 (unlimited)."""
        return 0 if v is None else v
49 |
50 |
class AgentState(str, Enum):
    """Lifecycle states of an agent's run loop."""

    IDLE = "idle"
    RUNNING = "running"
    TOOL_CALLING = "tool_calling"
    OBSERVING = "observing"
    FINISHED = "finished"
    ERROR = "error"
    STUCK = "stuck"  # loop detector flagged repeated behavior
59 |
60 |
class ToolFunction(BaseModel):
    """Function portion of a tool call: a name plus serialized arguments."""

    name: str
    arguments: str  # serialized (presumably JSON) arguments — confirm with caller
64 |
65 |
class ToolCall(BaseModel):
    """A single tool invocation request with an auto-generated unique id."""

    id: str = Field(default_factory=lambda: str(uuid4()))
    type: str = "function"
    function: ToolFunction
70 |
71 |
class ToolCallMetadata(BaseModel):
    """Metadata attached to a message describing any tool calls it carries."""

    has_tool_calls: bool = False
    # default_factory makes the fresh-list-per-instance explicit (pydantic
    # copies literal mutable defaults anyway, so behavior is unchanged).
    tool_calls: list[ToolCall] = Field(default_factory=list)
    tool_call_id: UUID | None = None
    agent_name: str | None = None
77 |
78 |
class Message(BaseModel):
    """A single chat message; non-string content is coerced to a JSON string."""

    role: str
    content: str
    tool_call_id: Optional[str] = None
    tool_calls: Optional[str] = None
    metadata: Optional[ToolCallMetadata] = None
    timestamp: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def ensure_content_is_string(cls, values):
        """Serialize non-string content to JSON (falling back to str())."""
        # mode="before" validators can receive non-dict input (e.g. an existing
        # model instance); only coerce when we actually have a mapping.
        if isinstance(values, dict):
            c = values.get("content")
            if not isinstance(c, str):
                try:
                    values["content"] = json.dumps(c, ensure_ascii=False)
                except Exception:
                    # Best-effort: anything non-serializable is stringified.
                    values["content"] = str(c)
        return values
96 |
97 |
class ParsedResponse(BaseModel):
    """Outcome of parsing an LLM response; all fields are optional flags/payloads."""

    action: bool | None = None
    data: str | None = None
    error: str | None = None
    answer: str | None = None
    tool_calls: bool | None = None
    agent_calls: bool | None = None
105 |
106 |
class ToolCallResult(BaseModel):
    """A resolved tool call bundled with the executor that will run it."""

    tool_executor: Any
    tool_name: str
    tool_args: dict
111 |
112 |
class ToolError(BaseModel):
    """Failed tool execution: the observation text plus the offending call."""

    observation: str
    tool_name: str
    tool_args: dict | None = None
117 |
118 |
class ToolData(BaseModel):
    """Parsed tool-call intent extracted from an LLM response."""

    action: bool  # whether the response requested a tool action at all
    tool_name: str | None = None
    tool_args: dict | None = None
    error: str | None = None
124 |
125 |
class ToolCallRecord(BaseModel):
    """History entry: one tool call (args kept as a string) and its observation."""

    tool_name: str
    tool_args: str
    observation: str
130 |
131 |
class ToolParameter(BaseModel):
    """Declared parameter of a registered tool."""

    type: str
    description: str
135 |
136 |
class ToolRegistryEntry(BaseModel):
    """Registry record describing one tool and its parameters."""

    name: str
    description: str
    # default_factory makes the fresh-list-per-instance explicit (behavior
    # identical to the literal default, which pydantic copies per instance).
    parameters: list[ToolParameter] = Field(default_factory=list)
141 |
142 |
class ToolExecutorConfig(BaseModel):
    """Bundle of the handler plus the lookup tables used when executing tools."""

    handler: Any
    tool_data: dict[str, Any]
    available_tools: dict[str, Any]
147 |
148 |
class LoopDetectorConfig(BaseModel):
    """Tuning knobs for repeated-action (loop) detection."""

    max_repeats: int = 3
    similarity_threshold: float = 0.9  # presumably a 0-1 similarity score — confirm in RobustLoopDetector
152 |
153 |
class SessionState(BaseModel):
    """Snapshot of one conversation session's in-flight agent state."""

    messages: list[Message]
    state: AgentState
    loop_detector: Any  # presumably a RobustLoopDetector (see module import); Any keeps pydantic happy
    assistant_with_tool_calls: dict | None
    pending_tool_responses: list[dict]
160 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/workflow/parallel_agent.py:
--------------------------------------------------------------------------------
1 | from omnicoreagent.omni_agent.agent import OmniAgent
2 | from typing import List, Optional, Dict
3 | from omnicoreagent.core.utils import logger
4 | import asyncio
5 | import uuid
6 |
7 |
class ParallelAgent:
    """Runs a list of OmniAgents in parallel, each with its own optional task, sharing a session ID if provided."""

    DEFAULT_TASK = "Please follow your system instructions and process accordingly."

    def __init__(self, sub_agents: List[OmniAgent], max_retries: int = 3):
        """Args:
        sub_agents: Agents to run concurrently (must be non-empty).
        max_retries: Attempts per agent before recording a failure result.
        """
        if not sub_agents:
            raise ValueError("ParallelAgent requires at least one sub-agent")
        self.sub_agents = sub_agents
        self.max_retries = max_retries
        self._initialized = False

    async def initialize(self):
        """Connect MCP servers for all sub-agents (idempotent)."""
        if self._initialized:
            return
        # Fixed copy-pasted "RouterAgent" wording in the original log line.
        logger.info("ParallelAgent: Initializing MCP servers for sub-agents")
        for agent in self.sub_agents:
            if getattr(agent, "mcp_tools", None):
                try:
                    await agent.connect_mcp_servers()
                    logger.info(f"{agent.name}: MCP servers connected")
                except Exception as exc:
                    # Best-effort: a failed connection should not block the others.
                    logger.warning(f"{agent.name}: MCP connection failed: {exc}")
        self._initialized = True

    async def run(
        self,
        agent_tasks: Optional[Dict[str, Optional[str]]] = None,
        session_id: Optional[str] = None,
    ) -> dict:
        """Run every sub-agent concurrently and return results keyed by agent name.

        Args:
            agent_tasks: Optional {agent_name: task} mapping; agents without an
                entry (or mapped to None) receive DEFAULT_TASK.
            session_id: Shared session ID; auto-generated when omitted.

        Raises:
            RuntimeError: If initialize() was not called first.
        """
        if not self._initialized:
            raise RuntimeError(
                "ParallelAgent must be initialized: call `await .initialize()` before using it"
            )
        if not session_id:
            session_id = str(uuid.uuid4())

        tasks = []
        for idx, agent_service in enumerate(self.sub_agents, start=1):
            agent_name = getattr(agent_service, "name", f"Agent_{idx}")
            query = (agent_tasks or {}).get(agent_name) or self.DEFAULT_TASK
            tasks.append(
                asyncio.create_task(
                    self._run_single_agent(agent_service, query, session_id, idx)
                )
            )

        results = await asyncio.gather(*tasks)
        # _run_single_agent guarantees the "agent_name" key, so this cannot KeyError.
        return {res["agent_name"]: res for res in results}

    async def _run_single_agent(
        self, agent_service: OmniAgent, query: str, session_id: str, idx: int
    ) -> dict:
        """Runs an agent with retry logic; always returns a dict containing 'agent_name'."""
        agent_name = getattr(agent_service, "name", f"Agent_{idx}")
        final_output = {}
        retry_count = 0

        while retry_count < self.max_retries:
            try:
                final_output = await asyncio.shield(
                    agent_service.run(query=query, session_id=session_id)
                )
                break

            except Exception as exc:
                retry_count += 1
                logger.warning(
                    f"{agent_name}: Attempt {retry_count}/{self.max_retries} failed: {exc}"
                )
                if retry_count >= self.max_retries:
                    logger.error(f"{agent_name}: Max retries reached")
                    final_output = {
                        "response": query,
                        "session_id": session_id,
                        "failed_agent": agent_name,
                        "error": str(exc),
                    }
                    break

        # BUGFIX: run() indexes results by "agent_name", but neither the agent's
        # own result nor the failure dict was guaranteed to contain that key.
        # Seed it here; a result that already carries "agent_name" wins.
        return {"agent_name": agent_name, **(final_output or {})}

    async def __call__(
        self,
        agent_tasks: Optional[Dict[str, Optional[str]]] = None,
        session_id: Optional[str] = None,
    ):
        """Convenience entrypoint: auto-initialize, run, then shut down if we initialized."""
        auto_init = not self._initialized
        try:
            if auto_init:
                await self.initialize()
            return await self.run(agent_tasks=agent_tasks, session_id=session_id)
        finally:
            if auto_init:
                await self.shutdown()

    async def shutdown(self):
        """Clean up MCP connections for every sub-agent (best-effort).

        NOTE(review): _initialized is left True after shutdown, matching the
        original behavior — confirm whether re-initialization should be allowed.
        """
        for agent in self.sub_agents:
            if getattr(agent, "mcp_tools", None):
                try:
                    await agent.cleanup()
                    logger.info(f"{agent.name}: MCP cleanup successful")
                except Exception as exc:
                    logger.warning(f"{agent.name}: MCP cleanup failed: {exc}")
114 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to MCPOmni Connect
2 |
3 | First off, thank you for considering contributing to MCPOmni Connect! It's people like you that make MCPOmni Connect such a great tool. 👏
4 |
5 | ## 📋 Table of Contents
6 |
7 | - [Code of Conduct](#code-of-conduct)
8 | - [Getting Started](#getting-started)
9 | - [Development Setup](#development-setup)
10 | - [Making Changes](#making-changes)
11 | - [Submitting Changes](#submitting-changes)
12 | - [Style Guidelines](#style-guidelines)
13 | - [Adding New Features](#adding-new-features)
14 | - [Bug Reports](#bug-reports)
15 | - [Community](#community)
16 |
17 | ## 📜 Code of Conduct
18 |
This project and everyone participating in it are governed by our Code of Conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to [abioladedayo1993@gmail.com].
20 |
21 | ## 🚀 Getting Started
22 |
23 | 1. Fork the repository
24 | 2. Clone your fork:
25 | ```bash
26 | git clone https://github.com/your-username/mcp_omni_connect.git
27 | cd mcp_omni_connect
28 | ```
29 | 3. Create a virtual environment:
30 | ```bash
31 | uv venv
32 | source .venv/bin/activate
33 | ```
34 | 4. Install dependencies:
35 | ```bash
36 | uv sync
37 | ```
38 |
39 | ## 💻 Development Setup
40 |
41 | 1. **Environment Configuration**
42 | ```bash
43 | cp .env.example .env
44 | # Edit .env with your settings
45 | ```
46 |
47 | 2. **Install Development Dependencies**
48 | ```bash
49 | pip install -e ".[dev]"
50 | ```
51 |
52 | 3. **Pre-commit Hooks**
53 | ```bash
54 | pre-commit install
55 | ```
56 |
57 | ## 🔄 Making Changes
58 |
59 | 1. Create a new branch:
60 | ```bash
61 | git checkout -b feature/your-feature-name
62 | ```
63 |
64 | 2. Make your changes:
65 | - Write meaningful commit messages
66 | - Keep commits atomic and focused
67 | - Add tests for new functionality
68 | - Update documentation as needed
69 |
70 | 3. Run tests:
71 | ```bash
72 | pytest
73 | ```
74 |
75 | ## 📝 Style Guidelines
76 |
77 | ### Python Code Style
78 | - Follow PEP 8 guidelines
79 | - Use type hints
80 | - Maximum line length: 88 characters (Black formatter)
81 | - Use docstrings for functions and classes
82 |
83 | ### Commit Messages
84 | ```
85 | type(scope): Brief description
86 |
87 | Detailed description of what changed and why.
88 | ```
89 |
90 | Types:
91 | - feat: New feature
92 | - fix: Bug fix
93 | - docs: Documentation changes
94 | - style: Code style changes
95 | - refactor: Code refactoring
96 | - test: Adding/modifying tests
97 | - chore: Maintenance tasks
98 |
99 | ## 🌟 Adding New Features
100 |
101 | 1. **Transport Layer**
102 | - Follow the existing transport interface
103 | - Add appropriate tests
104 | - Document new transport methods
105 |
106 | 2. **Server Integration**
107 | - Implement server configuration validation
108 | - Add error handling
109 | - Document server requirements
110 |
111 | 3. **Prompt Management**
112 | - Follow the prompt interface
113 | - Add validation for new prompt types
114 | - Update prompt documentation
115 |
116 | ## 🐛 Bug Reports
117 |
118 | When filing an issue, please include:
119 |
120 | 1. **Description**
121 | - Clear and descriptive title
122 | - Detailed description of the issue
123 |
124 | 2. **Environment**
125 | - Python version
126 | - Operating system
127 | - Package versions
128 |
129 | 3. **Steps to Reproduce**
130 | - Detailed step-by-step guide
131 | - Example code if applicable
132 | - Expected vs actual behavior
133 |
134 | 4. **Additional Context**
135 | - Log files
136 | - Screenshots
137 | - Related issues
138 |
139 | ## 🔍 Pull Request Process
140 |
141 | 1. **Before Submitting**
142 | - Update documentation
143 | - Add/update tests
144 | - Run the test suite
145 | - Update CHANGELOG.md
146 |
147 | 2. **PR Template**
148 | ```markdown
149 | ## Description
150 | Brief description of changes
151 |
152 | ## Type of Change
153 | - [ ] Bug fix
154 | - [ ] New feature
155 | - [ ] Breaking change
156 | - [ ] Documentation update
157 |
158 | ## Testing
159 | Describe testing done
160 |
161 | ## Checklist
162 | - [ ] Tests added/updated
163 | - [ ] Documentation updated
164 | - [ ] CHANGELOG.md updated
165 | ```
166 |
167 | ## 👥 Community
168 |
169 | - Join our [Discord server](https://discord.gg/abiorh)
170 | - Follow us on [Twitter](https://twitter.com/abiorhmangana)
171 |
172 | ## 📚 Additional Resources
173 |
- Project Documentation (in progress)
175 | - [Development Guide](https://dev.your-project.com)
176 |
177 | ## ❓ Questions?
178 |
179 | Feel free to reach out:
180 | - Open an issue
181 | - Email: abioladedayo1993@gmail.com
182 | - Discord: [Your Discord handle]
183 |
184 | ---
185 |
186 | Thank you for contributing to MCPOmni Connect! 🎉
187 |
--------------------------------------------------------------------------------
/docs/development/contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Thank you for your interest in contributing to MCPOmni Connect! This project thrives on community contributions.
4 |
5 | ## Quick Links
6 |
7 | - **Main Contributing Guide**: See [CONTRIBUTING.md](../../CONTRIBUTING.md) in the project root for complete guidelines
8 | - **Issue Tracker**: [GitHub Issues](https://github.com/Abiorh001/mcp_omni_connect/issues)
9 | - **Discussions**: [GitHub Discussions](https://github.com/Abiorh001/mcp_omni_connect/discussions)
10 |
11 | ## Ways to Contribute
12 |
13 | ### 🐛 Bug Reports
14 | Found a bug? Help us fix it:
15 |
16 | 1. Check [existing issues](https://github.com/Abiorh001/mcp_omni_connect/issues) first
17 | 2. Create a [new issue](https://github.com/Abiorh001/mcp_omni_connect/issues/new) with:
18 | - Clear description of the problem
19 | - Steps to reproduce
20 | - Expected vs actual behavior
21 | - Environment details (OS, Python version, etc.)
22 |
23 | ### 💡 Feature Requests
24 | Have an idea for improvement?
25 |
26 | 1. Check [discussions](https://github.com/Abiorh001/mcp_omni_connect/discussions) for similar ideas
27 | 2. Open a new discussion or issue describing:
28 | - The problem your feature would solve
29 | - Proposed solution
30 | - Alternative solutions considered
31 |
32 | ### 📝 Documentation
33 | Help improve our documentation:
34 |
35 | - Fix typos or unclear instructions
36 | - Add examples or use cases
37 | - Improve API documentation
38 | - Translate documentation (future)
39 |
40 | ### 🔧 Code Contributions
41 | Ready to code? Follow the [development setup guide](../../CONTRIBUTING.md#development-setup).
42 |
43 | ## Documentation Contributions
44 |
45 | ### Working with MkDocs
46 |
47 | This documentation site uses MkDocs with Material theme:
48 |
49 | ```bash
50 | # Install documentation dependencies
51 | pip install -r docs/requirements.txt
52 |
53 | # Serve documentation locally
54 | mkdocs serve
55 |
56 | # Build documentation
57 | mkdocs build
58 | ```
59 |
60 | ### Documentation Structure
61 |
62 | ```
63 | docs/
64 | ├── index.md # Homepage
65 | ├── getting-started/ # Installation and quick start
66 | ├── configuration/ # Configuration guides
67 | ├── user-guide/ # Usage instructions
68 | ├── features/ # Feature deep-dives
69 | ├── advanced/ # Advanced topics
70 | └── development/ # Development guides
71 | ```
72 |
73 | ### Writing Guidelines
74 |
75 | !!! tip "Documentation Style"
76 | - Use clear, concise language
77 | - Include practical examples
78 | - Add code snippets where helpful
79 | - Use admonitions (tips, warnings, etc.) for important information
80 | - Test all code examples before committing
81 |
82 | ## Development Process
83 |
84 | ### 1. Fork and Clone
85 |
86 | ```bash
87 | git clone https://github.com/YOUR_USERNAME/mcp_omni_connect.git
88 | cd mcp_omni_connect
89 | ```
90 |
91 | ### 2. Set Up Development Environment
92 |
93 | ```bash
94 | # Install UV (if not already installed)
95 | curl -LsSf https://astral.sh/uv/install.sh | sh
96 |
97 | # Install dependencies
98 | uv sync
99 |
100 | # Install pre-commit hooks
101 | pre-commit install
102 | ```
103 |
104 | ### 3. Create Feature Branch
105 |
106 | ```bash
107 | git checkout -b feature/your-feature-name
108 | ```
109 |
110 | ### 4. Make Changes
111 |
112 | - Write code following the project style
113 | - Add tests for new functionality
114 | - Update documentation as needed
115 | - Ensure all tests pass
116 |
117 | ### 5. Submit Pull Request
118 |
119 | 1. Push your branch to your fork
120 | 2. Create a pull request with:
121 | - Clear description of changes
122 | - Link to related issues
123 | - Screenshots (if UI changes)
124 |
125 | ## Code Standards
126 |
127 | ### Python Code
128 | - Follow PEP 8 style guidelines
129 | - Use type hints where appropriate
130 | - Write docstrings for public functions
131 | - Add unit tests for new features
132 |
133 | ### Documentation
134 | - Use Markdown formatting
135 | - Follow the existing structure
136 | - Include code examples
137 | - Test all examples before committing
138 |
139 | ## Testing
140 |
141 | ### Run Tests Locally
142 |
143 | ```bash
144 | # Run all tests
145 | pytest tests/ -v
146 |
147 | # Run specific test file
148 | pytest tests/test_specific.py -v
149 |
150 | # Run with coverage
151 | pytest tests/ --cov=src --cov-report=term-missing
152 | ```
153 |
154 | ### Documentation Testing
155 |
156 | ```bash
157 | # Test documentation build
158 | mkdocs build --strict
159 |
160 | # Serve and test locally
161 | mkdocs serve
162 | ```
163 |
164 | ## Getting Help
165 |
166 | Need help contributing?
167 |
168 | - **Technical Questions**: [GitHub Discussions](https://github.com/Abiorh001/mcp_omni_connect/discussions)
169 | - **Bug Reports**: [GitHub Issues](https://github.com/Abiorh001/mcp_omni_connect/issues)
170 | - **Email**: abiolaadedayo1993@gmail.com
171 |
172 | ## Recognition
173 |
174 | Contributors are recognized in:
175 |
176 | - [CHANGELOG.md](../changelog.md) for their contributions
177 | - GitHub contributors page
178 | - Special thanks in release notes
179 |
180 | Thank you for helping make MCPOmni Connect better! 🚀
181 |
--------------------------------------------------------------------------------
/examples/workflow_agents/parallel_agent.py:
--------------------------------------------------------------------------------
1 | from omnicoreagent import (
2 | OmniAgent,
3 | MemoryRouter,
4 | EventRouter,
5 | ToolRegistry,
6 | ParallelAgent,
7 | logger,
8 | )
9 | from typing import Optional, Dict
10 |
11 | # low level import
12 | # from omnicoreagent.omni_agent.workflow.parallel_agent import ParallelAgent
13 | import asyncio
14 | import uuid
15 |
16 |
17 | # Example tool: Google Search
def build_tool_registry_google_search() -> ToolRegistry:
    """Create a registry exposing a single simulated `google_search` tool."""
    tool_registry = ToolRegistry()

    @tool_registry.register_tool("google_search")
    def google_search(query: str) -> str:
        """Simulated Google Search tool"""
        return f"Search results for '{query}'"

    return tool_registry
27 |
28 |
# --- Researcher Agents ---
google_search_tool = build_tool_registry_google_search()
# NOTE: the Tavily API key is intentionally left blank here — supply your own
# key after `tavilyApiKey=` (or via environment) before running.
GENERAL_MCP_TOOLS = [
    {
        "name": "tavily-remote-mcp",
        "transport_type": "streamable_http",
        "url": "https://mcp.tavily.com/mcp/?tavilyApiKey=",
    }
]

# Researcher 1: Renewable Energy
renewable_energy_agent = OmniAgent(
    name="RenewableEnergyResearcher",
    system_instruction="""
    You are an AI Research Assistant specializing in energy.
    Research the latest advancements in 'renewable energy sources'.
    Use the Google Search tool provided.
    Summarize your key findings concisely (1-2 sentences).
    Output *only* the summary.
    """,
    model_config={"provider": "openai", "model": "gpt-4.1", "temperature": 0.3},
    agent_config={"max_steps": 15, "tool_call_timeout": 60},
    # local_tools=google_search_tool,
    mcp_tools=GENERAL_MCP_TOOLS,
    memory_router=MemoryRouter("in_memory"),
    event_router=EventRouter("in_memory"),
    debug=True,
)

# Researcher 2: Electric Vehicles
ev_agent = OmniAgent(
    name="EVResearcher",
    system_instruction="""
    You are an AI Research Assistant specializing in transportation.
    Research the latest developments in 'electric vehicle technology'.
    Use the Google Search tool provided.
    Summarize your key findings concisely (1-2 sentences).
    Output *only* the summary.
    """,
    model_config={"provider": "openai", "model": "gpt-4.1", "temperature": 0.3},
    agent_config={"max_steps": 15, "tool_call_timeout": 60},
    # local_tools=google_search_tool,
    mcp_tools=GENERAL_MCP_TOOLS,
    memory_router=MemoryRouter("in_memory"),
    event_router=EventRouter("in_memory"),
    debug=True,
)

# Researcher 3: Carbon Capture
carbon_capture_agent = OmniAgent(
    name="CarbonCaptureResearcher",
    system_instruction="""
    You are an AI Research Assistant specializing in climate solutions.
    Research the current state of 'carbon capture methods'.
    Use the Google Search tool provided.
    Summarize your key findings concisely (1-2 sentences).
    Output *only* the summary.
    """,
    model_config={"provider": "openai", "model": "gpt-4.1", "temperature": 0.3},
    agent_config={"max_steps": 15, "tool_call_timeout": 60},
    # local_tools=google_search_tool,
    mcp_tools=GENERAL_MCP_TOOLS,
    memory_router=MemoryRouter("in_memory"),
    event_router=EventRouter("in_memory"),
    debug=True,
)

# --- Parallel Researcher Agent Workflow ---
# All three researchers run concurrently and share one session.
researcher_parallel_agent = ParallelAgent(
    sub_agents=[renewable_energy_agent, ev_agent, carbon_capture_agent]
)
100 |
101 | # async def main():
102 | # result = await researcher_parallel_agent()
103 | # print("Async ParallelAgent result:", result)
104 |
105 |
106 | # if __name__ == "__main__":
107 | # asyncio.run(main())
async def run_parallel_researchers(
    agent_tasks: Optional[Dict[str, Optional[str]]] = None,
    session_id: Optional[str] = None,
) -> dict:
    """
    Run all researcher agents in parallel.

    agent_tasks: Optional dict {agent_name: task_string | None}. If None, default task is used.
    session_id: Shared session ID (optional, auto-generated if None)
    """
    try:
        # Developer-managed lifecycle: connect MCP servers before running.
        await researcher_parallel_agent.initialize()
        session_id = session_id or str(uuid.uuid4())

        logger.info(f"Running Parallel Researchers with session_id: {session_id}")
        return await researcher_parallel_agent.run(
            agent_tasks=agent_tasks, session_id=session_id
        )
    finally:
        # Cleanup must happen in the same event loop that initialized.
        await researcher_parallel_agent.shutdown()
132 |
133 |
if __name__ == "__main__":
    # Example usage
    tasks = {
        "RenewableEnergyResearcher": "Summarize recent renewable energy innovations",
        "EVResearcher": None, # Will use default internal task
        "CarbonCaptureResearcher": "Provide key findings on carbon capture technologies",
    }

    # asyncio.run creates and closes a single event loop for the whole workflow.
    result = asyncio.run(run_parallel_researchers(agent_tasks=tasks))
    print("Parallel Researcher Results:", result)
144 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/memory_store/in_memory.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional
2 | import threading
3 | from omnicoreagent.core.memory_store.base import AbstractMemoryStore
4 | from omnicoreagent.core.utils import logger, utc_now_str
5 | import copy
6 | import os
7 | import json
8 |
9 |
class InMemoryStore(AbstractMemoryStore):
    """Thread-safe, process-local memory store keyed by session ID.

    Messages use the same dict shape as the database-backed store so the
    two backends stay interchangeable.
    """

    def __init__(self) -> None:
        """Initialize empty session storage and the (unset) pruning configuration."""
        # session_id -> ordered list of message dicts
        self.sessions_history: dict[str, list[dict[str, Any]]] = {}
        # {"mode": "sliding_window" | "token_budget", "value": int | None}
        self.memory_config: dict[str, Any] = {}
        # RLock so a public method may safely re-enter from the same thread.
        self._lock = threading.RLock()

    def set_memory_config(self, mode: str, value: Optional[int] = None) -> None:
        """Set global memory strategy.

        Args:
            mode: Memory mode ('sliding_window' or 'token_budget')
            value: Window size (messages) or token budget; None disables pruning

        Raises:
            ValueError: If mode is not a supported strategy.
        """
        valid_modes = {"sliding_window", "token_budget"}
        if mode.lower() not in valid_modes:
            raise ValueError(
                f"Invalid memory mode: {mode}. Must be one of {valid_modes}."
            )

        self.memory_config = {
            "mode": mode,
            "value": value,
        }

    async def store_message(
        self,
        role: str,
        content: str,
        metadata: dict,
        session_id: str,
    ) -> None:
        """Store a message in memory under its session."""
        metadata_copy = dict(metadata)  # shallow copy: don't mutate the caller's dict

        # Normalize agent names so filtering in get_messages matches reliably.
        if "agent_name" in metadata_copy and isinstance(
            metadata_copy["agent_name"], str
        ):
            metadata_copy["agent_name"] = metadata_copy["agent_name"].strip()

        message = {
            "role": role,
            "content": content,
            "session_id": session_id,
            "timestamp": utc_now_str(),
            "msg_metadata": metadata_copy,
        }

        with self._lock:
            self.sessions_history.setdefault(session_id, []).append(message)

    async def get_messages(
        self, session_id: str = None, agent_name: str = None
    ) -> list[dict[str, Any]]:
        """Return deep copies of a session's messages after pruning and filtering."""
        session_id = session_id or "default_session"

        with self._lock:
            if session_id not in self.sessions_history:
                self.sessions_history[session_id] = []
            messages = list(self.sessions_history[session_id])

            mode = self.memory_config.get("mode", "token_budget")
            value = self.memory_config.get("value")
            if mode.lower() == "sliding_window":
                # BUGFIX: guard value=None, which made messages[-None:] raise
                # TypeError in the original.
                if value is not None:
                    messages = messages[-value:]

            elif mode.lower() == "token_budget":
                # Rough token estimate: whitespace-delimited word count.
                total_tokens = sum(
                    len(str(msg["content"]).split()) for msg in messages
                )
                # Drop oldest messages until under budget; decrement instead of
                # recounting the whole list each iteration (was O(n^2)).
                while value is not None and total_tokens > value and messages:
                    dropped = messages.pop(0)
                    total_tokens -= len(str(dropped["content"]).split())

            if agent_name:
                agent_name_norm = agent_name.strip()
                filtered = [
                    msg
                    for msg in messages
                    if (msg.get("msg_metadata", {}).get("agent_name") or "").strip()
                    == agent_name_norm
                ]
            else:
                filtered = messages
            # Deep copies so callers cannot mutate stored history.
            return [copy.deepcopy(m) for m in filtered]

    async def clear_memory(
        self, session_id: str = None, agent_name: str = None
    ) -> None:
        """Clear memory for a session or all memory.

        Args:
            session_id: Session ID to clear (if None, clear all)
            agent_name: Optional agent name to filter by
        """
        try:
            # BUGFIX: the original omitted the lock that every other method takes.
            with self._lock:
                if session_id and session_id in self.sessions_history:
                    if agent_name:
                        self.sessions_history[session_id] = [
                            msg
                            for msg in self.sessions_history[session_id]
                            if msg.get("msg_metadata", {}).get("agent_name")
                            != agent_name
                        ]
                    else:
                        del self.sessions_history[session_id]
                elif agent_name:
                    # Strip the agent's messages from every session and drop
                    # sessions left empty. Loop variable renamed so it no
                    # longer shadows the session_id parameter.
                    for sid in list(self.sessions_history.keys()):
                        self.sessions_history[sid] = [
                            msg
                            for msg in self.sessions_history[sid]
                            if msg.get("msg_metadata", {}).get("agent_name")
                            != agent_name
                        ]
                        if not self.sessions_history[sid]:
                            del self.sessions_history[sid]
                else:
                    self.sessions_history = {}

        except Exception as e:
            logger.error(f"Failed to clear memory: {e}")
140 |
--------------------------------------------------------------------------------
/src/omnicoreagent/omni_agent/background_agent/scheduler_backend.py:
--------------------------------------------------------------------------------
1 | """
2 | APScheduler backend for background task scheduling.
3 | """
4 |
5 | from apscheduler.schedulers.asyncio import AsyncIOScheduler
6 | import asyncio
7 | from apscheduler.triggers.interval import IntervalTrigger
8 | from apscheduler.triggers.cron import CronTrigger
9 | from typing import Any, Callable, Dict, Union, Optional
10 | from omnicoreagent.core.utils import logger
11 | from .base import BackgroundTaskScheduler
12 |
13 |
class APSchedulerBackend(BackgroundTaskScheduler):
    """APScheduler-based background task scheduler.

    Thin wrapper around ``AsyncIOScheduler`` that keys every job by its
    agent ID so tasks can be added, removed, paused, resumed, and
    inspected uniformly.
    """

    def __init__(self):
        self.scheduler = AsyncIOScheduler()
        # Tracked separately so is_running() is cheap and deterministic.
        self._running = False

    def schedule_task(
        self, agent_id: str, interval: Union[int, str], task_fn: Callable, **kwargs
    ):
        """Schedule a task to run at specified intervals.

        Args:
            agent_id: Unique identifier for the agent (also used as job ID)
            interval: Interval in seconds (int) or cron expression (str)
            task_fn: Async function to execute
            **kwargs: Additional keyword arguments passed to the task function

        Raises:
            ValueError: If task_fn is not async or interval has a bad type.
        """
        if not asyncio.iscoroutinefunction(task_fn):
            raise ValueError("task_fn must be an async function for AsyncIOScheduler")
        try:
            if isinstance(interval, int):
                trigger = IntervalTrigger(seconds=interval)
            elif isinstance(interval, str):
                trigger = CronTrigger.from_crontab(interval)
            else:
                raise ValueError(f"Invalid interval type: {type(interval)}")

            # max_instances/coalesce prevent overlapping or piled-up runs
            # when a task outlives its interval.
            self.scheduler.add_job(
                func=task_fn,
                trigger=trigger,
                id=agent_id,
                replace_existing=True,
                kwargs=kwargs,
                max_instances=1,
                coalesce=True,
            )
            logger.info(
                f"Scheduled task for agent {agent_id} with interval: {interval}"
            )
        except Exception as e:
            logger.error(f"Failed to schedule task for agent {agent_id}: {e}")
            raise

    def remove_task(self, agent_id: str):
        """Remove a scheduled task; logs a warning if none exists."""
        try:
            if self.scheduler.get_job(agent_id):
                self.scheduler.remove_job(agent_id)
                logger.info(f"Removed scheduled task for agent: {agent_id}")
            else:
                logger.warning(f"No scheduled task found for agent: {agent_id}")
        except Exception as e:
            logger.error(f"Failed to remove task for agent {agent_id}: {e}")
            raise

    def start(self):
        """Start the scheduler (no-op if already started)."""
        if not self._running:
            self.scheduler.start()
            self._running = True
            logger.info("APScheduler backend started")

    def shutdown(self):
        """Shutdown the scheduler (no-op if not running)."""
        if self._running:
            self.scheduler.shutdown()
            self._running = False
            logger.info("APScheduler backend shutdown")

    def is_running(self) -> bool:
        """Check if the scheduler is running."""
        return self._running

    def is_task_scheduled(self, agent_id: str) -> bool:
        """Check if a task is scheduled for the given agent ID."""
        try:
            job = self.scheduler.get_job(agent_id)
            return job is not None
        except Exception as e:
            logger.error(
                f"Failed to check if task is scheduled for agent {agent_id}: {e}"
            )
            return False

    def get_next_run_time(self, agent_id: str) -> Optional[str]:
        """Get the next run time (ISO 8601) for a scheduled task, or None."""
        try:
            job = self.scheduler.get_job(agent_id)
            if job and job.next_run_time:
                return job.next_run_time.isoformat()
            return None
        except Exception as e:
            logger.error(f"Failed to get next run time for agent {agent_id}: {e}")
            return None

    def get_job_status(self, agent_id: str) -> Dict[str, Any]:
        """Get the status of a scheduled job.

        Returns an empty dict when no job exists or on lookup failure.
        """
        try:
            job = self.scheduler.get_job(agent_id)
            if job:
                return {
                    "id": job.id,
                    "next_run_time": job.next_run_time,
                    "trigger": str(job.trigger),
                    # APScheduler 3.x Job has no ``active`` attribute; a
                    # paused job reports next_run_time=None, so use that
                    # as the activity signal instead of job.active (which
                    # raised AttributeError).
                    "active": job.next_run_time is not None,
                }
            return {}
        except Exception as e:
            logger.error(f"Failed to get job status for agent {agent_id}: {e}")
            return {}

    def pause_job(self, agent_id: str):
        """Pause a scheduled job."""
        try:
            self.scheduler.pause_job(agent_id)
            logger.info(f"Paused job for agent: {agent_id}")
        except Exception as e:
            logger.error(f"Failed to pause job for agent {agent_id}: {e}")
            raise

    def resume_job(self, agent_id: str):
        """Resume a paused job."""
        try:
            self.scheduler.resume_job(agent_id)
            logger.info(f"Resumed job for agent: {agent_id}")
        except Exception as e:
            logger.error(f"Failed to resume job for agent {agent_id}: {e}")
            raise
139 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/database/mongodb.py:
--------------------------------------------------------------------------------
1 | from motor.motor_asyncio import AsyncIOMotorClient
2 | from pymongo import errors, IndexModel
3 | from datetime import datetime
4 | from typing import Any, Optional
5 |
6 | from omnicoreagent.core.memory_store.base import AbstractMemoryStore
7 | from omnicoreagent.core.utils import logger, utc_now_str
8 |
9 |
class MongoDb(AbstractMemoryStore):
    """MongoDB-backed memory store using the async Motor driver.

    The connection is created lazily: every public method calls
    ``_ensure_connected()`` before touching the collection, so constructing
    the store never performs I/O.
    """

    def __init__(self, uri: str, db_name: str, collection: str):
        self.uri = uri
        self.db_name = db_name
        self.collection_name = collection
        self.client: AsyncIOMotorClient | None = None
        self.db = None
        self.collection = None
        self._initialized = False
        # Default trimming policy: token budget with no limit (no trimming).
        self.memory_config = {"mode": "token_budget", "value": None}

    async def _ensure_connected(self):
        """Ensure MongoDB connection is established (idempotent)."""
        if self._initialized:
            return

        try:
            collection_name = self.collection_name
            self.client = AsyncIOMotorClient(self.uri)
            # Fail fast with a clear error if the server is unreachable.
            await self.client.admin.command("ping")

            self.db = self.client[self.db_name]
            if collection_name is None:
                logger.warning("No collection name provided, using default name")
                collection_name = f"{self.db_name}_collection_name"
            self.collection = self.db[collection_name]
            logger.debug(f"Using collection: {collection_name}")

            # Indexes covering the query patterns used by get_messages()
            # and clear_memory().
            message_indexes = [
                IndexModel([("session_id", 1), ("msg_metadata.agent_name", 1)]),
                IndexModel([("session_id", 1)]),
                IndexModel([("msg_metadata.agent_name", 1)]),
                IndexModel([("timestamp", 1)]),
            ]
            await self.collection.create_indexes(message_indexes)

            self._initialized = True
            logger.debug("Connected to MongoDB")

        except errors.ConnectionFailure as e:
            logger.error(f"Failed to connect to MongoDB: {e}")
            raise RuntimeError(f"Could not connect to MongoDB at {self.uri}.")

    def set_memory_config(self, mode: str, value: int = None) -> None:
        """Set the message-trimming policy applied by get_messages().

        Args:
            mode: "sliding_window" (keep last ``value`` messages) or
                "token_budget" (drop oldest until under ``value`` tokens).
            value: Limit for the chosen mode; None disables trimming.

        Raises:
            ValueError: If ``mode`` is not a recognized trimming mode.
        """
        valid_modes = {"sliding_window", "token_budget"}
        if mode.lower() not in valid_modes:
            raise ValueError(
                f"Invalid memory mode: {mode}. Must be one of {valid_modes}."
            )
        self.memory_config = {"mode": mode, "value": value}

    async def store_message(
        self,
        role: str,
        content: str,
        metadata: dict | None = None,
        session_id: str = None,
    ) -> None:
        """Persist a single message document.

        Failures are logged rather than raised so a storage hiccup does not
        abort the caller's conversation flow.
        """
        try:
            await self._ensure_connected()
            if metadata is None:
                metadata = {}
            message = {
                "role": role,
                "content": content,
                "msg_metadata": metadata,
                "session_id": session_id,
                "timestamp": utc_now_str(),
            }
            await self.collection.insert_one(message)
        except Exception as e:
            logger.error(f"Failed to store message: {e}")

    async def get_messages(self, session_id: str = None, agent_name: str = None):
        """Fetch messages, optionally filtered by session and/or agent.

        Results are sorted by timestamp ascending and trimmed according to
        the configured memory mode. Returns [] on any failure.
        """
        try:
            await self._ensure_connected()
            query = {}
            if session_id:
                query["session_id"] = session_id
            if agent_name:
                query["msg_metadata.agent_name"] = agent_name

            cursor = self.collection.find(query, {"_id": 0}).sort("timestamp", 1)
            # NOTE(review): silently caps a fetch at 1000 documents; older
            # messages beyond that are never returned — confirm intended.
            messages = await cursor.to_list(length=1000)

            result = [
                {
                    "role": m["role"],
                    "content": m["content"],
                    "session_id": m.get("session_id"),
                    # Normalize datetime timestamps to epoch floats; string
                    # timestamps are passed through unchanged.
                    "timestamp": (
                        m["timestamp"].timestamp()
                        if isinstance(m["timestamp"], datetime)
                        else m["timestamp"]
                    ),
                    "msg_metadata": m.get("msg_metadata"),
                }
                for m in messages
            ]

            mode = self.memory_config.get("mode", "token_budget")
            value = self.memory_config.get("value")
            if mode.lower() == "sliding_window" and value is not None:
                result = result[-value:]
            if mode.lower() == "token_budget" and value is not None:
                # Approximate tokens by whitespace word count. Track the
                # total incrementally instead of re-summing every message
                # after each pop (the original was O(n^2) on long histories).
                token_counts = [len(str(msg["content"]).split()) for msg in result]
                total_tokens = sum(token_counts)
                while total_tokens > value and result:
                    result.pop(0)
                    total_tokens -= token_counts.pop(0)

        except Exception as e:
            logger.error(f"Failed to retrieve messages: {e}")
            return []

        return result

    async def clear_memory(
        self, session_id: str = None, agent_name: str = None
    ) -> None:
        """Delete messages matching the given session and/or agent.

        With no arguments, deletes every stored message.
        """
        try:
            await self._ensure_connected()
            query = {}
            if session_id:
                query["session_id"] = session_id
            if agent_name:
                query["msg_metadata.agent_name"] = agent_name
            await self.collection.delete_many(query)
        except Exception as e:
            logger.error(f"Failed to clear memory: {e}")
141 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/events/base.py:
--------------------------------------------------------------------------------
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from enum import Enum
from typing import Any, AsyncIterator, Dict, List, Optional, Type, Union
from uuid import uuid4

from pydantic import BaseModel, Field
8 |
9 |
class EventType(str, Enum):
    """Kinds of events an agent can emit; values are the wire-format strings.

    Each member maps to exactly one payload model via EVENT_PAYLOAD_MAP.
    """

    USER_MESSAGE = "user_message"
    AGENT_MESSAGE = "agent_message"
    TOOL_CALL_STARTED = "tool_call_started"
    TOOL_CALL_RESULT = "tool_call_result"
    TOOL_CALL_ERROR = "tool_call_error"
    FINAL_ANSWER = "final_answer"
    AGENT_THOUGHT = "agent_thought"
    SUB_AGENT_CALL_STARTED = "sub_agent_call_started"
    SUB_AGENT_CALL_RESULT = "sub_agent_call_result"
    SUB_AGENT_CALL_ERROR = "sub_agent_call_error"
    BACKGROUND_TASK_STARTED = "background_task_started"
    BACKGROUND_TASK_COMPLETED = "background_task_completed"
    BACKGROUND_TASK_ERROR = "background_task_error"
    BACKGROUND_AGENT_STATUS = "background_agent_status"
25 |
26 |
class UserMessagePayload(BaseModel):
    """Payload for EventType.USER_MESSAGE."""

    message: str


class AgentMessagePayload(BaseModel):
    """Payload for EventType.AGENT_MESSAGE."""

    message: str


class ToolCallStartedPayload(BaseModel):
    """Payload for EventType.TOOL_CALL_STARTED."""

    tool_name: str
    # Accepts either a raw argument string or an already-parsed mapping.
    tool_args: str | Dict[str, Any]
    tool_call_id: Optional[str] = None


class ToolCallResultPayload(BaseModel):
    """Payload for EventType.TOOL_CALL_RESULT."""

    tool_name: str
    tool_args: str | Dict[str, Any]
    tool_call_id: Optional[str] = None
    result: str


class ToolCallErrorPayload(BaseModel):
    """Payload for EventType.TOOL_CALL_ERROR."""

    tool_name: str
    error_message: str


class FinalAnswerPayload(BaseModel):
    """Payload for EventType.FINAL_ANSWER."""

    message: str


class AgentThoughtPayload(BaseModel):
    """Payload for EventType.AGENT_THOUGHT."""

    message: str


class SubAgentCallStartedPayload(BaseModel):
    """Payload for EventType.SUB_AGENT_CALL_STARTED."""

    agent_name: str
    session_id: str
    timestamp: str
    run_count: int
    kwargs: Dict[str, Any]


class SubAgentCallResultPayload(BaseModel):
    """Payload for EventType.SUB_AGENT_CALL_RESULT."""

    agent_name: str
    session_id: str
    timestamp: str
    run_count: int
    result: Any


class SubAgentCallErrorPayload(BaseModel):
    """Payload for EventType.SUB_AGENT_CALL_ERROR."""

    agent_name: str
    session_id: str
    timestamp: str
    error: str
    error_count: int


class BackgroundTaskStartedPayload(BaseModel):
    """Payload for EventType.BACKGROUND_TASK_STARTED."""

    agent_id: str
    session_id: str
    timestamp: str
    run_count: int
    kwargs: Dict[str, Any]


class BackgroundTaskCompletedPayload(BaseModel):
    """Payload for EventType.BACKGROUND_TASK_COMPLETED."""

    agent_id: str
    session_id: str
    timestamp: str
    run_count: int
    result: Any


class BackgroundTaskErrorPayload(BaseModel):
    """Payload for EventType.BACKGROUND_TASK_ERROR."""

    agent_id: str
    session_id: str
    timestamp: str
    error: str
    error_count: int


class BackgroundAgentStatusPayload(BaseModel):
    """Payload for EventType.BACKGROUND_AGENT_STATUS."""

    agent_id: str
    status: str
    timestamp: str
    session_id: Optional[str] = None
    last_run: Optional[str] = None
    run_count: Optional[int] = None
    error_count: Optional[int] = None
    error: Optional[str] = None


# Union of every payload model an Event may carry (see EVENT_PAYLOAD_MAP).
EventPayload = Union[
    UserMessagePayload,
    AgentMessagePayload,
    ToolCallStartedPayload,
    ToolCallResultPayload,
    ToolCallErrorPayload,
    FinalAnswerPayload,
    AgentThoughtPayload,
    SubAgentCallStartedPayload,
    SubAgentCallResultPayload,
    SubAgentCallErrorPayload,
    BackgroundTaskStartedPayload,
    BackgroundTaskCompletedPayload,
    BackgroundTaskErrorPayload,
    BackgroundAgentStatusPayload,
]
136 |
137 |
class Event(BaseModel):
    """A single agent event with its typed payload.

    ``timestamp`` defaults to the current UTC time as a timezone-aware
    datetime: ``datetime.utcnow`` is deprecated (Python 3.12+) and yields
    naive datetimes that compare incorrectly against aware ones.
    """

    type: EventType
    payload: EventPayload
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    agent_name: str
    event_id: str = Field(default_factory=lambda: str(uuid4()))
144 |
145 |
# Authoritative mapping from each EventType to the payload model that
# validate_event() requires; every EventType member must have an entry.
EVENT_PAYLOAD_MAP: dict[EventType, Type[BaseModel]] = {
    EventType.USER_MESSAGE: UserMessagePayload,
    EventType.AGENT_MESSAGE: AgentMessagePayload,
    EventType.TOOL_CALL_STARTED: ToolCallStartedPayload,
    EventType.TOOL_CALL_RESULT: ToolCallResultPayload,
    EventType.TOOL_CALL_ERROR: ToolCallErrorPayload,
    EventType.FINAL_ANSWER: FinalAnswerPayload,
    EventType.AGENT_THOUGHT: AgentThoughtPayload,
    EventType.SUB_AGENT_CALL_STARTED: SubAgentCallStartedPayload,
    EventType.SUB_AGENT_CALL_RESULT: SubAgentCallResultPayload,
    EventType.SUB_AGENT_CALL_ERROR: SubAgentCallErrorPayload,
    EventType.BACKGROUND_TASK_STARTED: BackgroundTaskStartedPayload,
    EventType.BACKGROUND_TASK_COMPLETED: BackgroundTaskCompletedPayload,
    EventType.BACKGROUND_TASK_ERROR: BackgroundTaskErrorPayload,
    EventType.BACKGROUND_AGENT_STATUS: BackgroundAgentStatusPayload,
}
162 |
163 |
def validate_event(event: Event):
    """Ensure the event's payload instance matches its declared type.

    Raises:
        TypeError: If the payload is not an instance of the class
            registered for ``event.type`` in EVENT_PAYLOAD_MAP.
    """
    expected_type = EVENT_PAYLOAD_MAP[event.type]
    if isinstance(event.payload, expected_type):
        return
    raise TypeError(
        f"Payload mismatch: Expected {expected_type} for {event.type}, got {type(event.payload)}"
    )
170 |
171 |
class BaseEventStore(ABC):
    """Abstract interface for persisting and streaming per-session events."""

    @abstractmethod
    async def append(self, session_id: str, event: Event) -> None:
        """Record an event for a session.

        The base body validates the payload/type pairing; NOTE(review):
        subclasses presumably call super().append() to get validation —
        confirm against implementations.
        """
        validate_event(event)
        raise NotImplementedError("Subclasses must implement this method")

    @abstractmethod
    async def get_events(self, session_id: str) -> List[Event]:
        """Return the events recorded for a session."""
        raise NotImplementedError("Subclasses must implement this method")

    @abstractmethod
    async def stream(self, session_id: str) -> AsyncIterator[Event]:
        """Yield a session's events as an async iterator."""
        raise NotImplementedError("Subclasses must implement this method")
185 |
--------------------------------------------------------------------------------
/src/omnicoreagent/mcp_clients_connection/notifications.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from collections.abc import Callable
3 | from typing import Any
4 |
5 | from mcp.types import (
6 | ProgressNotification,
7 | PromptListChangedNotification,
8 | ResourceListChangedNotification,
9 | ResourceUpdatedNotification,
10 | ToolListChangedNotification,
11 | )
12 |
13 | from omnicoreagent.core.utils import logger
14 |
15 |
16 | async def handle_notifications(
17 | sessions: dict[str, dict[str, Any]],
18 | debug: bool = False,
19 | server_names: list[str] = None,
20 | available_tools: dict[str, Any] = None,
21 | available_resources: dict[str, Any] = None,
22 | available_prompts: dict[str, Any] = None,
23 | refresh_capabilities: Callable[[], Any] = None,
24 | ):
25 | """Handle incoming notifications from the server."""
26 | try:
27 | for server_name in sessions:
28 | async for message in sessions[server_name]["session"].incoming_messages:
29 | logger.debug(f"Received notification from {server_name}: {message}")
30 |
31 | async def refresh_capabilities_task():
32 | try:
33 | logger.info(f"Starting capability refresh for {server_name}")
34 |
35 | await refresh_capabilities(
36 | sessions=sessions,
37 | server_names=server_names,
38 | available_tools=available_tools,
39 | available_resources=available_resources,
40 | available_prompts=available_prompts,
41 | debug=debug,
42 | )
43 | logger.info(
44 | f"Successfully refreshed capabilities after notification from {server_name}"
45 | )
46 | for handler in logger.handlers:
47 | handler.flush()
48 | except Exception as e:
49 | logger.error(
50 | f"Failed to refresh capabilities after notification from {server_name}: {str(e)}"
51 | )
52 | for handler in logger.handlers:
53 | handler.flush()
54 |
55 | try:
56 | match message.root:
57 | case ResourceUpdatedNotification(params=params):
58 | logger.info(
59 | f"Resource updated: {params.uri} from {server_name}"
60 | )
61 | task = asyncio.create_task(refresh_capabilities_task())
62 | task.add_done_callback(
63 | lambda t: logger.debug(
64 | f"Capability refresh task completed for {server_name}"
65 | )
66 | )
67 |
68 | case ResourceListChangedNotification(params=params):
69 | logger.info(f"Resource list changed from {server_name}")
70 | task = asyncio.create_task(refresh_capabilities_task())
71 | task.add_done_callback(
72 | lambda t: logger.debug(
73 | f"Capability refresh task completed for {server_name}"
74 | )
75 | )
76 |
77 | case ToolListChangedNotification(params=params):
78 | logger.info(f"Tool list changed from {server_name}")
79 | task = asyncio.create_task(refresh_capabilities_task())
80 | task.add_done_callback(
81 | lambda t: logger.debug(
82 | f"Capability refresh task completed for {server_name}"
83 | )
84 | )
85 |
86 | case PromptListChangedNotification(params=params):
87 | logger.info(f"Prompt list changed from {server_name}")
88 | task = asyncio.create_task(refresh_capabilities_task())
89 | task.add_done_callback(
90 | lambda t: logger.debug(
91 | f"Capability refresh task completed for {server_name}"
92 | )
93 | )
94 |
95 | case ProgressNotification(params=params):
96 | progress_percentage = (
97 | (params.progress / params.total * 100)
98 | if params.total > 0
99 | else 0
100 | )
101 | logger.info(
102 | f"Progress from {server_name}: {params.progress}/{params.total} "
103 | f"({progress_percentage:.1f}%)"
104 | )
105 |
106 | case _:
107 | logger.warning(
108 | f"Unhandled notification type from {server_name}: {type(message.root).__name__}"
109 | )
110 | except Exception as e:
111 | logger.error(
112 | f"Error processing notification from {server_name}: {str(e)}"
113 | )
114 | continue
115 |
116 | except AttributeError:
117 | logger.warning(f"No notification received from {server_name}")
118 | except Exception as e:
119 | logger.error(f"Fatal error in notification handler: {str(e)}")
120 | finally:
121 | for handler in logger.handlers:
122 | handler.flush()
123 |
--------------------------------------------------------------------------------
/examples/deep_code_agent/code_agent_runner.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import time
3 | import uuid
4 | from pathlib import Path
5 | from typing import Dict, Any, Optional
6 |
7 | from tools.coding_tools import register_coding_tools
8 | from system_prompt import get_deep_coding_system_prompt
9 | from omnicoreagent import OmniAgent, ToolRegistry, MemoryRouter, EventRouter
10 | from observability_globals import metrics, audit, rate_limiter, CONFIG, log
11 | from sandbox.sandbox_executor import SandboxExecutor
12 |
13 |
class DeepCodingAgentRunner:
    """Owns one Deep Coding Agent session: routers, tools, sandbox, chat."""

    def __init__(self):
        self.cfg = CONFIG
        # Fresh ID per runner. The previous hard-coded debug value
        # ("abiorh001") made every run share one session's memory and
        # rate-limit bucket.
        self.session_id = str(uuid.uuid4())
        self.agent: Optional[OmniAgent] = None
        self.memory_router: Optional[MemoryRouter] = None
        self.event_router: Optional[EventRouter] = None
        self.connected = False

        # Create session workspace (for sandbox code)
        session_ws = Path(self.cfg.coding.workspace_root) / self.session_id
        session_ws.mkdir(parents=True, exist_ok=True)

        self.sandbox_executor = SandboxExecutor(
            workspace_root=str(self.cfg.coding.workspace_root),
            timeout=self.cfg.coding.sandbox_timeout_seconds,
            memory_mb=self.cfg.coding.sandbox_memory_mb,
        )

        metrics.session_start()
        log.info(f"Deep Coding Agent session started: {self.session_id}")

    async def initialize(self):
        """Build the agent, wire routers/tools, and connect MCP servers.

        Idempotent: returns immediately when already connected.
        """
        if self.connected:
            return

        log.info("Initializing Deep Coding Agent")

        # Routers
        self.memory_router = MemoryRouter(self.cfg.storage.memory_store_type)
        self.event_router = EventRouter(self.cfg.storage.event_store_type)

        # Start metrics server if enabled
        if self.cfg.observability.enable_metrics:
            if metrics.start_server(self.cfg.observability.metrics_port):
                log.info(
                    f"Prometheus metrics server started on :{self.cfg.observability.metrics_port}"
                )
            else:
                log.debug("Metrics server already running")

        # Tool Registry
        tool_registry = ToolRegistry()
        register_coding_tools(tool_registry=tool_registry, runner_instance=self)

        # Build agent config
        agent_config = {
            "agent_name": self.cfg.agent.name,
            "max_steps": self.cfg.agent.max_steps,
            "tool_call_timeout": self.cfg.agent.tool_call_timeout,
            "request_limit": self.cfg.agent.request_limit,
            "total_tokens_limit": self.cfg.agent.total_tokens_limit,
            "memory_config": {
                "mode": self.cfg.agent.memory_mode,
                "value": self.cfg.agent.memory_window_size,
            },
            "memory_tool_backend": self.cfg.agent.memory_tool_backend,
        }

        self.agent = OmniAgent(
            name=self.cfg.agent.name,
            system_instruction=get_deep_coding_system_prompt(
                session_id=self.session_id
            ),
            model_config={
                "provider": self.cfg.model.provider,
                "model": self.cfg.model.model,
                "temperature": self.cfg.model.temperature,
                "top_p": self.cfg.model.top_p,
                "max_context_length": self.cfg.model.max_context_length,
            },
            local_tools=tool_registry,
            agent_config=agent_config,
            memory_router=self.memory_router,
            event_router=self.event_router,
            debug=True,
        )

        if not self.agent.is_event_store_available():
            log.warning("Event store not available")
        await self.agent.connect_mcp_servers()
        self.connected = True
        log.info("Deep Coding Agent is ready")
        metrics.set_health(True)

    async def handle_chat(self, query: str) -> Optional[Dict[str, Any]]:
        """Run one user query through the agent.

        Returns a status dict, or None on timeout/internal error. The
        finally block always records duration and outcome in metrics.
        """
        if not self.connected:
            await self.initialize()

        start_time = time.time()
        status = "success"

        try:
            # Rate limiting (per session)
            if self.cfg.security.enable_rate_limiting:
                if not rate_limiter.allow(self.session_id):
                    # Record the real outcome; previously this path was
                    # logged/metered as "success" by the finally block.
                    status = "rate_limited"
                    metrics.record_rate_limit(self.session_id)
                    audit.query(query, self.session_id, 0)
                    return {
                        "status": "error",
                        "response": "Rate limit exceeded! Try again later.",
                        "session_id": self.session_id,
                    }

            result = await self.agent.run(query=query, session_id=self.session_id)
            response = result.get("response", "")
            audit.query(query, self.session_id, len(response))
            return {
                "status": "success",
                "response": response,
                "session_id": self.session_id,
                "workspace_path": str(
                    Path(self.cfg.coding.workspace_root) / self.session_id
                ),
            }

        except asyncio.TimeoutError:
            status = "timeout"
            log.error(f"Query timeout: {query}")
            return None

        except Exception as e:
            status = "error"
            log.error(f"Agent error: {e}", exc_info=True)
            return None

        finally:
            duration = time.time() - start_time
            metrics.record_llm_query(duration, status)
            log.info(f"LLM query completed in {duration:.2f}s with status: {status}")
145 |
--------------------------------------------------------------------------------
/tests/test_main.py:
--------------------------------------------------------------------------------
1 | import json
2 | from unittest.mock import AsyncMock, Mock, patch
3 |
4 | import pytest
5 |
6 | from omnicoreagent.mcp_omni_connect.main import async_main, check_config_exists
7 |
8 |
@pytest.fixture
def mock_config_path(tmp_path):
    """Path to a temporary servers_config.json inside pytest's tmp dir."""
    config_file = tmp_path / "servers_config.json"
    return config_file
13 |
14 |
def test_check_config_exists_new(mock_config_path):
    """Test creating a new config file when it doesn't exist"""
    # Patch in the module the function was actually imported from
    # (omnicoreagent.mcp_omni_connect.main, per the import at the top of
    # this file); the stale "mcpomni_connect.main" target patched the
    # wrong module.
    with patch(
        "omnicoreagent.mcp_omni_connect.main.Path.cwd",
        return_value=mock_config_path.parent,
    ):
        config_path = check_config_exists()

    assert config_path == mock_config_path
    assert config_path.exists()

    # Verify default config contents
    with open(config_path) as f:
        config = json.load(f)
        assert "LLM" in config
        assert "mcpServers" in config
        assert config["LLM"]["model"] == "qwen/qwq-32b:free"
        assert config["LLM"]["temperature"] == 0.5
        assert config["LLM"]["max_tokens"] == 5000
        assert config["LLM"]["top_p"] == 0
        assert "server_name1" in config["mcpServers"]
33 |
34 |
def test_check_config_exists_existing(mock_config_path):
    """Test when config file already exists"""
    # Create existing config with custom values
    existing_config = {
        "LLM": {
            "model": "qwen/qwq-32b:free",
            "temperature": 0.8,
            "max_tokens": 1000,
            "top_p": 0.1,
            "max_input_tokens": 1000,
            "provider": "openrouter",
        },
        "mcpServers": {
            "custom_server": {
                "transport_type": "stdio",
                "command": "custom-command",
                "args": ["arg1", "arg2"],
                "env": {"KEY": "value"},
            }
        },
    }
    with open(mock_config_path, "w") as f:
        json.dump(existing_config, f)

    # Patch target must match the import source of check_config_exists
    # (omnicoreagent.mcp_omni_connect.main), not the stale
    # "mcpomni_connect.main" path.
    with patch(
        "omnicoreagent.mcp_omni_connect.main.Path.cwd",
        return_value=mock_config_path.parent,
    ):
        config_path = check_config_exists()

    assert config_path == mock_config_path
    assert config_path.exists()

    # Verify existing config was not modified
    with open(config_path) as f:
        config = json.load(f)
        assert config == existing_config
69 |
70 |
@pytest.mark.asyncio
async def test_async_main_success():
    """Test successful async_main execution"""
    mock_config = Mock()
    mock_client = AsyncMock()  # Use AsyncMock for async methods
    mock_llm_connection = Mock()
    mock_cli = Mock()

    # All patch targets point at omnicoreagent.mcp_omni_connect.main —
    # the module async_main is imported from — instead of the stale
    # "mcpomni_connect.main" path.
    with (
        patch(
            "omnicoreagent.mcp_omni_connect.main.check_config_exists"
        ) as mock_check_config,
        patch(
            "omnicoreagent.mcp_omni_connect.main.Configuration",
            return_value=mock_config,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.MCPClient",
            return_value=mock_client,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.LLMConnection",
            return_value=mock_llm_connection,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.MCPClientCLI",
            return_value=mock_cli,
        ),
    ):
        await async_main()

    mock_check_config.assert_called_once()
    mock_client.connect_to_servers.assert_called_once()
    mock_cli.chat_loop.assert_called_once()
    mock_client.cleanup.assert_called_once()  # Ensure cleanup is called
95 |
96 |
@pytest.mark.asyncio
async def test_async_main_keyboard_interrupt():
    """Test async_main handling of KeyboardInterrupt"""
    mock_config = Mock()
    mock_client = AsyncMock()  # Use AsyncMock
    mock_llm_connection = Mock()
    mock_cli = Mock()
    mock_cli.chat_loop.side_effect = KeyboardInterrupt()

    # Patch in omnicoreagent.mcp_omni_connect.main, matching this file's
    # imports (the old "mcpomni_connect.main" target was stale).
    with (
        patch(
            "omnicoreagent.mcp_omni_connect.main.check_config_exists"
        ) as mock_check_config,
        patch(
            "omnicoreagent.mcp_omni_connect.main.Configuration",
            return_value=mock_config,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.MCPClient",
            return_value=mock_client,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.LLMConnection",
            return_value=mock_llm_connection,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.MCPClientCLI",
            return_value=mock_cli,
        ),
    ):
        await async_main()

    mock_check_config.assert_called_once()
    mock_client.connect_to_servers.assert_called_once()
    mock_cli.chat_loop.assert_called_once()
    mock_client.cleanup.assert_called_once()
122 |
123 |
@pytest.mark.asyncio
async def test_async_main_error():
    """Test async_main handling of general exceptions"""
    mock_config = Mock()
    mock_client = AsyncMock()  # Use AsyncMock
    mock_llm_connection = Mock()
    mock_cli = Mock()
    mock_cli.chat_loop.side_effect = Exception("Test error")

    # Patch in omnicoreagent.mcp_omni_connect.main, matching this file's
    # imports (the old "mcpomni_connect.main" target was stale).
    with (
        patch(
            "omnicoreagent.mcp_omni_connect.main.check_config_exists"
        ) as mock_check_config,
        patch(
            "omnicoreagent.mcp_omni_connect.main.Configuration",
            return_value=mock_config,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.MCPClient",
            return_value=mock_client,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.LLMConnection",
            return_value=mock_llm_connection,
        ),
        patch(
            "omnicoreagent.mcp_omni_connect.main.MCPClientCLI",
            return_value=mock_cli,
        ),
    ):
        await async_main()

    mock_check_config.assert_called_once()
    mock_client.connect_to_servers.assert_called_once()
    mock_cli.chat_loop.assert_called_once()
    mock_client.cleanup.assert_called_once()
149 |
150 |
@pytest.mark.OpenAIIntegration
def test_main():
    """Test main function"""
    from omnicoreagent.mcp_omni_connect.main import main

    # Patch async_main where main() looks it up — in
    # omnicoreagent.mcp_omni_connect.main, consistent with the local
    # import above (the old "mcpomni_connect.main" target was stale).
    with patch(
        "omnicoreagent.mcp_omni_connect.main.async_main", new_callable=AsyncMock
    ) as mock_async_main:
        main()
        mock_async_main.assert_called_once()
161 |
--------------------------------------------------------------------------------
/docs/getting-started/quick-start.md:
--------------------------------------------------------------------------------
1 | # Quick Start
2 |
3 | This guide will get you up and running with MCPOmni Connect in under 5 minutes.
4 |
5 | ## Step 1: Basic Configuration
6 |
7 | Create the two required configuration files:
8 |
9 | ### Create `.env` file
10 |
11 | ```bash
12 | # Create .env file with your LLM API key
13 | echo "LLM_API_KEY=your_api_key_here" > .env
14 | ```
15 |
16 | !!! tip "Supported API Keys"
17 | You can use API keys from OpenAI, Anthropic, Google, Groq, or any other [supported LLM provider](../configuration/llm-providers.md).
18 |
19 | ### Create `servers_config.json`
20 |
21 | ```bash
22 | cat > servers_config.json << 'EOF'
23 | {
24 | "AgentConfig": {
25 | "tool_call_timeout": 30,
26 | "max_steps": 15,
27 | "request_limit": 1000,
28 | "total_tokens_limit": 100000
29 | },
30 | "LLM": {
31 | "provider": "openai",
32 | "model": "gpt-4o-mini",
33 | "temperature": 0.5,
34 | "max_tokens": 5000,
35 | "top_p": 0.7
36 | },
37 | "mcpServers": {}
38 | }
39 | EOF
40 | ```
41 |
42 | ## Step 2: Start MCPOmni Connect
43 |
44 | ```bash
45 | mcpomni_connect
46 | ```
47 |
48 | You should see the MCPOmni Connect CLI start up:
49 |
50 | ```
51 | 🚀 MCPOmni Connect - Universal Gateway to MCP Servers
52 | Connected to 0 MCP servers
53 | Mode: CHAT (type /mode:auto for autonomous mode)
54 |
55 | >
56 | ```
57 |
58 | ## Step 3: Test Basic Functionality
59 |
60 | Try these commands to verify everything is working:
61 |
62 | ### Check Available Commands
63 | ```bash
64 | /help
65 | ```
66 |
67 | ### Test LLM Connection
68 | ```bash
69 | Hello! Can you tell me about yourself?
70 | ```
71 |
72 | The AI should respond, confirming your LLM configuration is working.
73 |
74 | ## Step 4: Add Your First MCP Server
75 |
76 | Let's add a simple MCP server to demonstrate connectivity:
77 |
78 | ### Option A: File System Server (Local)
79 |
80 | Edit your `servers_config.json` to add a file system server:
81 |
82 | ```json
83 | {
84 | "AgentConfig": {
85 | "tool_call_timeout": 30,
86 | "max_steps": 15,
87 | "request_limit": 1000,
88 | "total_tokens_limit": 100000
89 | },
90 | "LLM": {
91 | "provider": "openai",
92 | "model": "gpt-4o-mini",
93 | "temperature": 0.5,
94 | "max_tokens": 5000,
95 | "top_p": 0.7
96 | },
97 | "mcpServers": {
98 | "filesystem": {
99 | "transport_type": "stdio",
100 | "command": "uvx",
101 | "args": ["mcp-server-filesystem", "/tmp"]
102 | }
103 | }
104 | }
105 | ```
106 |
107 | ### Option B: Remote HTTP Server
108 |
109 | ```json
110 | {
111 | "mcpServers": {
112 | "remote-server": {
113 | "transport_type": "streamable_http",
114 | "url": "http://your-server.com:8080/mcp",
115 | "headers": {
116 | "Authorization": "Bearer your-token"
117 | },
118 | "timeout": 60
119 | }
120 | }
121 | }
122 | ```
123 |
124 | ### Restart and Test
125 |
126 | ```bash
127 | # Restart MCPOmni Connect
128 | mcpomni_connect
129 | ```
130 |
131 | Now check available tools:
132 | ```bash
133 | /tools
134 | ```
135 |
136 | You should see tools from your connected MCP server!
137 |
138 | ## Step 5: Try Different Operation Modes
139 |
140 | ### Chat Mode (Default)
141 | ```bash
142 | Can you list the files in the current directory?
143 | ```
144 | *The AI will ask for approval before executing tools*
145 |
146 | ### Autonomous Mode
147 | ```bash
148 | /mode:auto
149 | Can you analyze the files in the current directory and create a summary?
150 | ```
151 | *The AI will execute tasks independently*
152 |
153 | ### Switch Back to Chat Mode
154 | ```bash
155 | /mode:chat
156 | ```
157 |
158 | ## Common First Tasks
159 |
160 | ### Explore Available Capabilities
161 | ```bash
162 | /tools # List all available tools
163 | /prompts # Show available prompts
164 | /resources # Display available resources
165 | ```
166 |
167 | ### Memory Management
168 | ```bash
169 | /memory # Toggle Redis memory persistence
170 | ```
171 |
172 | ### Debug Mode
173 | ```bash
174 | /debug # Enable detailed logging for troubleshooting
175 | ```
176 |
177 | ## Next Steps
178 |
179 | Now that you have MCPOmni Connect running:
180 |
181 | 1. **[Configure additional LLM providers](../configuration/llm-providers.md)** - Try different AI models
182 | 2. **[Add more MCP servers](../configuration/configuration-guide.md)** - Connect to databases, APIs, and tools
183 | 3. **[Explore advanced features](../features/agent-system.md)** - Learn about ReAct agents and orchestration
184 | 4. **[Set up authentication](../configuration/authentication.md)** - Configure OAuth and secure connections
185 |
186 | ## Troubleshooting Quick Start
187 |
188 | !!! failure "Connection Failed"
189 | If you see "Failed to connect to server":
190 |
191 | 1. Check your `servers_config.json` syntax
192 | 2. Verify the MCP server is actually running
193 | 3. See the [troubleshooting guide](../configuration/troubleshooting.md)
194 |
195 | !!! failure "API Key Error"
196 | If you see "Invalid API key":
197 |
198 | 1. Verify your `.env` file contains the correct key
199 | 2. Check you're using the right provider in `servers_config.json`
200 | 3. Ensure the API key has proper permissions
201 |
202 | !!! failure "Command Not Found"
203 | If `mcpomni_connect` command isn't found:
204 |
205 | 1. Try `python -m mcpomni_connect`
206 | 2. Check your PATH includes the installation directory
207 | 3. Reinstall with `pip install --user mcpomni-connect`
208 |
209 | !!! tip "Getting Help"
210 | - Join our [GitHub Discussions](https://github.com/Abiorh001/mcp_omni_connect/discussions)
211 | - Check [existing issues](https://github.com/Abiorh001/mcp_omni_connect/issues)
212 | - Read the [full configuration guide](../configuration/configuration-guide.md)
213 |
214 | ---
215 |
216 | **Congratulations!** 🎉 You now have MCPOmni Connect running. Ready to explore more advanced features?
217 |
218 | **Next**: [Configuration Guide →](../configuration/configuration-guide.md)
219 |
--------------------------------------------------------------------------------
/examples/workflow_agents/sequential_agent.py:
--------------------------------------------------------------------------------
1 | from omnicoreagent import (
2 | OmniAgent,
3 | MemoryRouter,
4 | EventRouter,
5 | SequentialAgent,
6 | ToolRegistry,
7 | logger,
8 | )
9 | from typing import Optional
10 |
11 | # using low level import
12 | # from omnicoreagent.omni_agent.workflow.sequential_agent import SequentialAgent
13 | import asyncio
14 |
15 |
def build_tool_registry_system_monitor_agent() -> ToolRegistry:
    """Build a ToolRegistry exposing a single `system_info` tool."""
    tool_registry = ToolRegistry()

    @tool_registry.register_tool("system_info")
    def system_info() -> str:
        """Get basic system information of the server"""
        import platform
        import time

        report_lines = [
            "System Information:",
            f"• OS: {platform.system()} {platform.release()}",
            f"• Architecture: {platform.machine()}",
            f"• Python Version: {platform.python_version()}",
            f"• Current Time: {time.strftime('%Y-%m-%d %H:%M:%S')}",
        ]
        return "\n".join(report_lines)

    return tool_registry
33 |
34 |
# Agent 1 in the chain: reports live system facts via the local `system_info` tool.
system_monitor_agent = OmniAgent(
    name="SystemMonitorAgent",
    system_instruction=(
        "You are a System Monitor Agent. Your responsibility is to retrieve the current "
        "system information using the `system_info` tool. Only use the tool to get the data; do not guess. "
        "Include OS, architecture, Python version, and current time."
    ),
    model_config={"provider": "openai", "model": "gpt-4.1", "temperature": 0.3},
    agent_config={"max_steps": 15, "tool_call_timeout": 60},
    # Optional semantic-memory embeddings — left disabled in this example.
    # embedding_config={"provider": "voyage", "model": "voyage-3.5", "dimensions": 1024, "encoding_format": "base64"},
    local_tools=build_tool_registry_system_monitor_agent(),
    memory_router=MemoryRouter("in_memory"),  # conversation state kept in-process
    event_router=EventRouter("in_memory"),  # event stream kept in-process
    debug=True,
)
50 |
51 |
def build_tool_registry_text_formatter_agent() -> ToolRegistry:
    """Build a ToolRegistry exposing a single `format_text` tool."""
    tool_registry = ToolRegistry()

    @tool_registry.register_tool("format_text")
    def format_text(text: str, style: str = "normal") -> str:
        """Format text in various styles"""
        # Dispatch table; any unrecognized style falls back to the identity.
        transforms = {
            "uppercase": str.upper,
            "lowercase": str.lower,
            "title": str.title,
            "reverse": lambda s: s[::-1],
        }
        transform = transforms.get(style, lambda s: s)
        return transform(text)

    return tool_registry
69 |
70 |
# Agent 2 in the chain: uppercases whatever text the previous agent produced.
text_formatter_agent = OmniAgent(
    name="TextFormatterAgent",
    system_instruction=(
        "You are a Text Formatting Agent. Your task is to take the input string "
        "and format it to uppercase. Do not add any extra text or explanation."
    ),
    model_config={"provider": "openai", "model": "gpt-4.1", "temperature": 0.3},
    agent_config={"max_steps": 15, "tool_call_timeout": 60},
    # Optional semantic-memory embeddings — left disabled in this example.
    # embedding_config={"provider": "voyage", "model": "voyage-3.5", "dimensions": 1024, "encoding_format": "base64"},
    local_tools=build_tool_registry_text_formatter_agent(),
    memory_router=MemoryRouter("in_memory"),  # conversation state kept in-process
    event_router=EventRouter("in_memory"),  # event stream kept in-process
    debug=True,
)
85 |
# MCP server spec: the official filesystem server (run via npx), scoped to two
# local directories. NOTE(review): these paths are machine-specific — adjust
# them to directories that exist on your machine before running.
FILE_SYSTEM_MCP_TOOLS = [
    {
        "name": "filesystem",
        "command": "npx",
        "args": [
            "-y",
            "@modelcontextprotocol/server-filesystem",
            "/home/abiorh/Desktop",
            "/home/abiorh/ai/",
        ],
    },
]
# Agent 3 in the chain: persists the formatted text to disk via the filesystem
# MCP server (no local tools — all file operations go through MCP).
file_system_agent = OmniAgent(
    name="FileSystemAgent",
    system_instruction=(
        """You are a File System Agent. Your task is to append the input string to 'system_status.md' in /home/abiorh/ai.
- If the file exists, append the content; if not, create it.
- After successfully writing the content, **do not call any more tools**.
- Immediately return a final agent response in the built-in output format.
- Do not add extra explanations, thoughts, or repeat the task."""
    ),
    model_config={"provider": "openai", "model": "gpt-4.1", "temperature": 0.3},
    agent_config={"max_steps": 15, "tool_call_timeout": 60},
    # embedding_config={"provider": "voyage", "model": "voyage-3.5", "dimensions": 1024, "encoding_format": "base64"},
    # local_tools=build_tool_registry_text_formatter_agent(),
    mcp_tools=FILE_SYSTEM_MCP_TOOLS,
    memory_router=MemoryRouter("in_memory"),  # conversation state kept in-process
    event_router=EventRouter("in_memory"),  # event stream kept in-process
    debug=True,
)
116 |
117 |
# Chain the three agents: SystemMonitor -> TextFormatter -> FileSystem.
# Presumably each sub-agent's output becomes the next one's task input —
# confirm against the SequentialAgent workflow documentation.
sequential_agent = SequentialAgent(
    sub_agents=[system_monitor_agent, text_formatter_agent, file_system_agent]
)
121 |
122 |
123 | # async def main():
124 | # result = await sequential_agent()
125 | # print("Async SequentialAgent result:", result)
126 |
127 | # if __name__ == "__main__":
128 | # asyncio.run(main())
129 |
130 |
async def run_sequential_agent(
    initial_task: Optional[str] = None, session_id: Optional[str] = None
) -> dict:
    """Run the module-level SequentialAgent through one complete lifecycle.

    Args:
        initial_task: Task handed to the first sub-agent in the chain, or
            ``None`` to let the workflow use its default.
        session_id: Session identifier used for memory continuity across the
            sub-agents, or ``None`` for a fresh session.

    Returns:
        The final output produced by the last sub-agent.
    """
    try:
        # Developer-managed lifecycle: initialize() must be called explicitly
        # before run(), and shutdown() must always run afterwards — hence the
        # try/finally, so resources are released even if run() raises.
        await sequential_agent.initialize()
        logger.info(f"Running Sequential Agent with initial task: {initial_task}")
        final_output = await sequential_agent.run(
            initial_task=initial_task, session_id=session_id
        )
        logger.info(f"Final output from Sequential Agent: {final_output}")
        return final_output
    finally:
        await sequential_agent.shutdown()
145 |
146 |
if __name__ == "__main__":
    # Demo entry point: run the three-agent pipeline once with a sample task.
    test_task = "what is the system status of my computer"
    session_id = "test_session_001"

    result = asyncio.run(
        run_sequential_agent(initial_task=test_task, session_id=session_id)
    )
    print("Sequential Agent Result:", result)
    # test_task = "what is the system status of my computer"
    # session_id = "test_session_001"

    # result = asyncio.run(run_sequential_agent())
    # print("Sequential Agent Result:", result)
160 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # MCPOmni Connect Documentation
2 |
3 | This directory contains the MkDocs-based documentation for MCPOmni Connect.
4 |
5 | ## 🚀 Quick Start
6 |
7 | ### View Documentation Locally
8 |
9 | ```bash
10 | # Install dependencies
11 | ./docs.sh install
12 |
13 | # Start development server
14 | ./docs.sh serve
15 | ```
16 |
17 | Visit: http://127.0.0.1:8080
18 |
19 | ### Build Documentation
20 |
21 | ```bash
22 | # Build static site
23 | ./docs.sh build
24 |
25 | # Output will be in ./site/
26 | ```
27 |
28 | ## 📁 Documentation Structure
29 |
30 | ```
31 | docs/
32 | ├── index.md # Homepage
33 | ├── getting-started/ # Installation & Quick Start
34 | │ ├── installation.md
35 | │ └── quick-start.md
36 | ├── configuration/ # Configuration Guides
37 | │ ├── configuration-guide.md
38 | │ ├── transport-types.md
39 | │ ├── authentication.md
40 | │ ├── llm-providers.md
41 | │ └── troubleshooting.md
42 | ├── user-guide/ # Usage Instructions
43 | │ ├── basic-usage.md
44 | │ ├── operation-modes.md
45 | │ ├── commands.md
46 | │ ├── memory-management.md
47 | │ └── prompt-management.md
48 | ├── features/ # Feature Deep-dives
49 | │ ├── agent-system.md
50 | │ ├── tool-orchestration.md
51 | │ ├── resource-management.md
52 | │ └── token-management.md
53 | ├── advanced/ # Advanced Topics
54 | │ ├── architecture.md
55 | │ ├── api-reference.md
56 | │ └── examples.md
57 | ├── development/ # Development Guides
58 | │ ├── contributing.md
59 | │ └── testing.md
60 | └── changelog.md # Version History
61 | ```
62 |
63 | ## ✨ Features
64 |
65 | - **Material Design**: Modern, responsive theme
66 | - **Search**: Full-text search across all documentation
67 | - **Code Highlighting**: Syntax highlighting for all languages
68 | - **Mermaid Diagrams**: Architecture and workflow diagrams
69 | - **Tabbed Content**: Organized content with tabs
70 | - **Admonitions**: Tips, warnings, and info boxes
71 | - **Git Integration**: Last modified dates from git history
72 |
73 | ## 🛠️ Available Commands
74 |
75 | Use the `docs.sh` script for common tasks:
76 |
77 | ```bash
78 | ./docs.sh serve # Start development server
79 | ./docs.sh build # Build static documentation
80 | ./docs.sh install # Install dependencies
81 | ./docs.sh clean # Clean build artifacts
82 | ./docs.sh deploy # Deploy to GitHub Pages
83 | ./docs.sh help # Show help
84 | ```
85 |
86 | ## 📝 Writing Documentation
87 |
88 | ### Markdown Guidelines
89 |
90 | - Use clear, concise language
91 | - Include practical examples
92 | - Add code snippets where helpful
93 | - Use proper heading hierarchy (H1 → H2 → H3)
94 | - Include cross-references with relative links
95 |
96 | ### Code Examples
97 |
98 | Use fenced code blocks with language specification:
99 |
100 | ```bash
101 | # Shell commands
102 | mcpomni_connect --help
103 | ```
104 |
105 | ```json
106 | {
107 | "LLM": {
108 | "provider": "openai",
109 | "model": "gpt-4o-mini"
110 | }
111 | }
112 | ```
113 |
114 | ```python
115 | # Python code
116 | import mcpomni_connect
117 | ```
118 |
119 | ### Admonitions
120 |
121 | Use admonitions for important information:
122 |
123 | ```markdown
124 | !!! tip "Helpful Tip"
125 | This is a helpful tip for users.
126 |
127 | !!! warning "Important Warning"
128 | This is something users should be careful about.
129 |
130 | !!! failure "Common Error"
131 | This describes a common error and its solution.
132 | ```
133 |
134 | ### Tabbed Content
135 |
136 | Organize related content with tabs:
137 |
138 | ```markdown
139 | === "Option A"
140 | Content for option A
141 |
142 | === "Option B"
143 | Content for option B
144 | ```
145 |
146 | ## 🎯 Content Guidelines
147 |
148 | ### Target Audience
149 |
150 | - **Beginners**: Clear installation and setup instructions
151 | - **Intermediate Users**: Comprehensive configuration guides
152 | - **Advanced Users**: Deep technical details and architecture
153 | - **Developers**: Contributing guidelines and API reference
154 |
155 | ### Content Principles
156 |
157 | 1. **Clarity**: Write for understanding, not to impress
158 | 2. **Examples**: Every concept should have a practical example
159 | 3. **Completeness**: Cover edge cases and gotchas
160 | 4. **Currency**: Keep information up-to-date with releases
161 | 5. **Accessibility**: Use inclusive language and clear structure
162 |
163 | ## 🔄 Deployment
164 |
165 | ### GitHub Pages (Automated)
166 |
167 | Documentation automatically deploys to GitHub Pages on:
168 | - Push to `main` branch
169 | - Manual trigger via GitHub Actions
170 |
171 | ### Manual Deployment
172 |
173 | ```bash
174 | # Deploy to GitHub Pages
175 | ./docs.sh deploy
176 | ```
177 |
178 | ### Local Preview
179 |
180 | Always preview changes locally before committing:
181 |
182 | ```bash
183 | ./docs.sh serve
184 | # Visit http://127.0.0.1:8080
185 | ```
186 |
187 | ## 🐛 Troubleshooting
188 |
189 | ### Build Errors
190 |
191 | ```bash
192 | # Check for broken links
193 | ./docs.sh build
194 |
195 | # Common issues:
196 | # - Missing referenced files
197 | # - Broken internal links
198 | # - Invalid Markdown syntax
199 | ```
200 |
201 | ### Missing Dependencies
202 |
203 | ```bash
204 | # Reinstall documentation dependencies
205 | ./docs.sh install
206 |
207 | # Or manually:
208 | uv sync --group docs
209 | ```
210 |
211 | ### Port Conflicts
212 |
213 | If port 8080 is busy:
214 |
215 | ```bash
216 | # Use different port
217 | uv run mkdocs serve --dev-addr=127.0.0.1:8090
218 | ```
219 |
220 | ## 📚 Resources
221 |
222 | - **MkDocs**: [Official Documentation](https://www.mkdocs.org/)
223 | - **Material Theme**: [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/)
224 | - **Markdown Guide**: [CommonMark Spec](https://commonmark.org/)
225 | - **Mermaid Diagrams**: [Mermaid Documentation](https://mermaid.js.org/)
226 |
227 | ## 🤝 Contributing
228 |
229 | 1. **Fork** the repository
230 | 2. **Create** a feature branch
231 | 3. **Write** your documentation
232 | 4. **Test** locally with `./docs.sh serve`
233 | 5. **Submit** a pull request
234 |
235 | See [Contributing Guide](development/contributing.md) for detailed instructions.
236 |
237 | ---
238 |
239 | **Questions?** Open an issue or start a discussion on GitHub!
240 |
--------------------------------------------------------------------------------
/src/omnicoreagent/core/tools/local_tools_registry.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | import asyncio
3 | from collections.abc import Callable
4 | from typing import Any, Dict, List
5 |
6 |
class Tool:
    """A locally-registered tool: a callable paired with the name,
    description, and JSON input schema agents use to discover and invoke it.
    """

    def __init__(
        self,
        name: str,
        description: str,
        inputSchema: dict[str, Any],
        function: Callable,
    ):
        self.name = name
        self.description = description
        self.inputSchema = inputSchema
        self.function = function
        # Detected once at construction so execute() knows whether to await.
        self.is_async = asyncio.iscoroutinefunction(function)

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-dict view of the tool (includes the raw callable)."""
        return {
            "name": self.name,
            "description": self.description,
            "inputSchema": self.inputSchema,
            "function": self.function,
        }

    async def execute(self, parameters: Dict[str, Any]) -> Any:
        """Execute the tool with extracted parameters.

        Binds ``parameters`` to the callable's signature, filling in the
        declared defaults for any missing optional arguments.

        Raises:
            ValueError: if a required parameter is absent from ``parameters``.
        """
        sig = inspect.signature(self.function)
        func_params = {}

        for param_name, param in sig.parameters.items():
            if param_name in parameters:
                func_params[param_name] = parameters[param_name]
            elif param.default is not inspect.Parameter.empty:
                func_params[param_name] = param.default
            else:
                raise ValueError(f"Missing required parameter: {param_name}")

        if self.is_async:
            return await self.function(**func_params)
        else:
            return self.function(**func_params)

    def __repr__(self):
        # Bug fix: this previously returned an empty f-string (f""), which
        # hid the tool's identity in logs, debuggers, and container reprs.
        return f"<Tool name={self.name!r} async={self.is_async}>"
49 |
50 |
51 | class ToolRegistry:
52 | """Registry for local tools that can be executed by agents."""
53 |
54 | def __init__(self):
55 | self.tools = {}
56 | self.tool_descriptions = {}
57 | self.tool_schemas = {}
58 |
59 | def __str__(self):
60 | """Return a readable string representation of the ToolRegistry."""
61 | tool_count = len(self.tools)
62 | tool_names = list(self.tools.keys())
63 | return f"ToolRegistry({tool_count} tools: {', '.join(tool_names[:3])}{'...' if tool_count > 3 else ''})"
64 |
65 | def __repr__(self):
66 | """Return a detailed representation of the ToolRegistry."""
67 | return self.__str__()
68 |
69 | def register_tool(
70 | self,
71 | name: str | None = None,
72 | inputSchema: dict[str, Any] | None = None,
73 | description: str = "",
74 | ):
75 | def decorator(func: Callable):
76 | tool_name = name or func.__name__.lower()
77 |
78 | final_description = description or (
79 | func.__doc__ or "No description provided."
80 | )
81 |
82 | final_schema = inputSchema or self._infer_schema(func)
83 |
84 | tool = Tool(
85 | name=tool_name,
86 | description=final_description.strip(),
87 | inputSchema=final_schema,
88 | function=func,
89 | )
90 | self.tools[tool_name] = tool
91 | return func
92 |
93 | return decorator
94 |
95 | def get_tool(self, name: str) -> Tool | None:
96 | return self.tools.get(name.lower())
97 |
98 | def list_tools(self) -> list[Tool]:
99 | return list(self.tools.values())
100 |
101 | def get_available_tools(self) -> List[Dict[str, Any]]:
102 | """Get list of available tools for OmniAgent"""
103 | tools = []
104 | for tool in self.list_tools():
105 | tools.append(
106 | {
107 | "name": tool.name,
108 | "description": tool.description,
109 | "inputSchema": tool.inputSchema,
110 | "type": "local",
111 | }
112 | )
113 | return tools
114 |
115 | def get_tool_schemas(self) -> Dict[str, Dict[str, Any]]:
116 | """Get all tool schemas for MCP integration"""
117 | schemas = {}
118 | for tool in self.tools.values():
119 | schemas[tool.name] = {
120 | "name": tool.name,
121 | "description": tool.description,
122 | "inputSchema": tool.inputSchema,
123 | }
124 | return schemas
125 |
126 | async def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any:
127 | """Execute a tool by name with parameters"""
128 | tool = self.get_tool(tool_name)
129 | if not tool:
130 | raise ValueError(f"Tool '{tool_name}' not found")
131 |
132 | return await tool.execute(parameters)
133 |
134 | def _infer_schema(self, func: Callable) -> dict[str, Any]:
135 | sig = inspect.signature(func)
136 | props = {}
137 | required = []
138 |
139 | docstring = func.__doc__ or ""
140 | doc_lines = [line.strip() for line in docstring.split("\n") if ":" in line]
141 |
142 | param_docs = {}
143 | for line in doc_lines:
144 | parts = line.split(":", 1)
145 | if len(parts) == 2:
146 | param_docs[parts[0].strip()] = parts[1].strip()
147 |
148 | for param_name, param in sig.parameters.items():
149 | if param_name == "self":
150 | continue
151 |
152 | param_type = (
153 | param.annotation
154 | if param.annotation is not inspect.Parameter.empty
155 | else str
156 | )
157 | schema = {"type": self._map_type(param_type)}
158 |
159 | if param_name in param_docs:
160 | schema["description"] = param_docs[param_name]
161 |
162 | props[param_name] = schema
163 |
164 | if param.default is inspect.Parameter.empty:
165 | required.append(param_name)
166 |
167 | return {
168 | "type": "object",
169 | "properties": props,
170 | "required": required,
171 | "additionalProperties": False,
172 | }
173 |
174 | def _map_type(self, typ: Any) -> str:
175 | type_map = {
176 | int: "integer",
177 | float: "number",
178 | str: "string",
179 | bool: "boolean",
180 | list: "array",
181 | dict: "object",
182 | }
183 | return type_map.get(typ, "string")
184 |
--------------------------------------------------------------------------------