├── imgs
│   ├── screenshots
│   │   ├── .gitkeep
│   │   ├── main_interface.png
│   │   └── mobile_interface.png
│   ├── banner.jpeg
│   ├── webchat_countdown.png
│   └── webchat_voting-rights.png
├── backend
│   ├── beezle_bug
│   │   ├── tools
│   │   │   ├── scheduling.py
│   │   │   ├── __init__.py
│   │   │   ├── system.py
│   │   │   ├── os
│   │   │   │   ├── cli.py
│   │   │   │   └── filesystem.py
│   │   │   ├── toolbox.py
│   │   │   ├── wikipedia.py
│   │   │   ├── memory
│   │   │   │   ├── memory_stream.py
│   │   │   │   └── __init__.py
│   │   │   ├── tool.py
│   │   │   ├── python.py
│   │   │   ├── toolbox_factory.py
│   │   │   └── web.py
│   │   ├── __init__.py
│   │   ├── events
│   │   │   ├── __init__.py
│   │   │   ├── event.py
│   │   │   └── eventbus.py
│   │   ├── models
│   │   │   ├── __init__.py
│   │   │   ├── edge.py
│   │   │   ├── project.py
│   │   │   └── node.py
│   │   ├── llm_adapter
│   │   │   ├── __init__.py
│   │   │   ├── llama_cpp_adapter.py
│   │   │   ├── base_adapter.py
│   │   │   ├── openai_adapter.py
│   │   │   └── litellm_adapter.py
│   │   ├── constants.py
│   │   ├── memory
│   │   │   ├── __init__.py
│   │   │   ├── memory_stream.py
│   │   │   └── memories.py
│   │   ├── agent_graph
│   │   │   ├── edge.py
│   │   │   ├── __init__.py
│   │   │   ├── node.py
│   │   │   ├── agent_graph.py
│   │   │   ├── types.py
│   │   │   └── agent.py
│   │   ├── voice
│   │   │   ├── __init__.py
│   │   │   ├── transcriber.py
│   │   │   └── vad.py
│   │   ├── storage
│   │   │   ├── __init__.py
│   │   │   └── base.py
│   │   ├── template.py
│   │   ├── project.py
│   │   ├── project_manager.py
│   │   └── scheduler.py
│   ├── scripts
│   │   ├── __init__.py
│   │   └── migrate_to_sqlmodel.py
│   ├── tests
│   │   ├── __init__.py
│   │   └── conftest.py
│   └── requirements.txt
├── data
│   └── templates
│       ├── agent.j2
│       ├── researcher.j2
│       └── summarizer.j2
├── frontend
│   ├── postcss.config.js
│   ├── src
│   │   ├── main.jsx
│   │   ├── index.css
│   │   └── components
│   │       ├── GeneralSettingsTab.jsx
│   │       ├── SettingsPanel.jsx
│   │       ├── LogPanel.jsx
│   │       ├── AgentControlTab.jsx
│   │       ├── ScheduleTab.jsx
│   │       ├── TemplateEditorTab.jsx
│   │       └── IntrospectionPanel.jsx
│   ├── vite.config.js
│   ├── tailwind.config.js
│   ├── index.html
│   └── package.json
├── Dockerfile.frontend
├── Dockerfile.backend
├── docker-compose.yml
├── LICENSE
└── .gitignore
/imgs/screenshots/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/scheduling.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/backend/scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # Scripts package
2 |
3 |
--------------------------------------------------------------------------------
/imgs/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rhohndorf/beezle-bug/HEAD/imgs/banner.jpeg
--------------------------------------------------------------------------------
/backend/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """Test suite for Beezle Bug."""
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/backend/beezle_bug/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Beezle Bug - An AI agent framework.
3 | """
4 |
5 |
6 |
--------------------------------------------------------------------------------
/imgs/webchat_countdown.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rhohndorf/beezle-bug/HEAD/imgs/webchat_countdown.png
--------------------------------------------------------------------------------
/imgs/webchat_voting-rights.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rhohndorf/beezle-bug/HEAD/imgs/webchat_voting-rights.png
--------------------------------------------------------------------------------
/data/templates/agent.j2:
--------------------------------------------------------------------------------
1 | You are {{agent.name}}, the friendly, truthful, and task-driven expert AI agent.
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/__init__.py:
--------------------------------------------------------------------------------
1 | from beezle_bug.tools.tool import Tool
2 | from beezle_bug.tools.toolbox import ToolBox
3 |
--------------------------------------------------------------------------------
/imgs/screenshots/main_interface.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rhohndorf/beezle-bug/HEAD/imgs/screenshots/main_interface.png
--------------------------------------------------------------------------------
/imgs/screenshots/mobile_interface.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rhohndorf/beezle-bug/HEAD/imgs/screenshots/mobile_interface.png
--------------------------------------------------------------------------------
/frontend/postcss.config.js:
--------------------------------------------------------------------------------
1 | export default {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | }
7 |
8 |
--------------------------------------------------------------------------------
/backend/beezle_bug/events/__init__.py:
--------------------------------------------------------------------------------
1 | from .event import Event, EventType
2 | from .eventbus import EventBus
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/frontend/src/main.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react'
2 | import ReactDOM from 'react-dom/client'
3 | import App from './App'
4 | import './index.css'
5 |
6 | ReactDOM.createRoot(document.getElementById('root')).render(
7 |
8 |
9 | ,
10 | )
11 |
12 |
--------------------------------------------------------------------------------
/backend/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """
2 | Pytest configuration and fixtures.
3 | """
4 |
5 | import pytest
6 |
7 |
8 | # Configure pytest-asyncio
9 | pytest_plugins = ('pytest_asyncio',)
10 |
11 |
12 | @pytest.fixture
13 | def anyio_backend():
14 | return 'asyncio'
15 |
16 |
17 |
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/Dockerfile.frontend:
--------------------------------------------------------------------------------
1 | FROM node:20-slim
2 |
3 | WORKDIR /app
4 |
5 | # Copy package files
6 | COPY frontend/package*.json ./
7 |
8 | # Install dependencies
9 | RUN npm install
10 |
11 | # Copy source
12 | COPY frontend/ ./
13 |
14 | # Expose port
15 | EXPOSE 5173
16 |
17 | # Run dev server
18 | CMD ["npm", "run", "dev"]
19 |
--------------------------------------------------------------------------------
/frontend/vite.config.js:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vite'
2 | import react from '@vitejs/plugin-react'
3 |
4 | export default defineConfig({
5 | plugins: [react()],
6 | server: {
7 | proxy: {
8 | '/socket.io': {
9 | target: 'http://localhost:5000',
10 | ws: true
11 | }
12 | }
13 | }
14 | })
15 |
16 |
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/system.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 | from beezle_bug.tools import Tool
4 |
5 | class GetDateAndTime(Tool):
6 | """
7 | Get the current date and time
8 | """
9 |
10 | async def run(self, agent):
11 | current_datetime = datetime.now().strftime("%A, %d %B %Y, %H:%M")
12 | return current_datetime
13 |
--------------------------------------------------------------------------------
/backend/beezle_bug/models/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | SQLModel database models for Beezle Bug.
3 |
4 | These models map directly to database tables and provide ORM functionality.
5 | """
6 |
7 | from .project import ProjectDB
8 | from .node import NodeDB
9 | from .edge import EdgeDB
10 |
11 | __all__ = [
12 | "ProjectDB",
13 | "NodeDB",
14 | "EdgeDB",
15 | ]
16 |
17 |
--------------------------------------------------------------------------------
/backend/beezle_bug/llm_adapter/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_adapter import BaseAdapter, Message, ToolCall, ToolCallResult, Response
2 | from .llama_cpp_adapter import LlamaCppApiAdapter
3 | from .openai_adapter import OpenAiAdapter
4 |
5 | # __all__ must contain the names as strings, not the objects themselves.
6 | __all__ = [
7 |     "BaseAdapter",
8 |     "LlamaCppApiAdapter",
9 |     "OpenAiAdapter",
10 |     "Message",
11 |     "ToolCall",
12 |     "ToolCallResult",
13 |     "Response",
14 | ]
--------------------------------------------------------------------------------
/frontend/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | export default {
3 | content: [
4 | "./index.html",
5 | "./src/**/*.{js,ts,jsx,tsx}",
6 | ],
7 | theme: {
8 | extend: {
9 | fontFamily: {
10 | sans: ['Inter', 'system-ui', 'sans-serif'],
11 | mono: ['JetBrains Mono', 'monospace'],
12 | },
13 | },
14 | },
15 | plugins: [],
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/os/cli.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 |
3 | from pydantic import Field
4 |
5 | from beezle_bug.tools import Tool
6 |
7 |
8 | class ExecCommand(Tool):
9 | """
10 | Execute a shell command
11 | """
12 |
13 | command: str = Field(..., description="The command to be executed")
14 |
15 | async def run(self, agent):
16 | return subprocess.run(self.command, capture_output=True, text=True, shell=True)
17 |
--------------------------------------------------------------------------------
/backend/beezle_bug/constants.py:
--------------------------------------------------------------------------------
1 | DEFAULT_DATA_DIR = "/app/data"
2 | AGENT_SUBFOLDER = "agents"
3 | TEMPLATE_SUBFOLDER = "templates"
4 | DEFAULT_CONFIG = {
5 | "name": "Beezle Bug",
6 | "model": "local-model",
7 | "apiUrl": "http://127.0.0.1:1234/v1",
8 | "apiKey": "",
9 | "temperature": 0.7,
10 | "autonomousEnabled": False,
11 | "autonomousInterval": 30,
12 | "systemTemplate": "system_messages/agent"
13 | }
14 |
15 | DEFAULT_MSG_BUFFER_SIZE = 100
--------------------------------------------------------------------------------
/backend/beezle_bug/memory/__init__.py:
--------------------------------------------------------------------------------
1 | from beezle_bug.memory.memory_stream import MemoryStream
2 | from beezle_bug.memory.knowledge_graph import KnowledgeGraph
3 | from beezle_bug.memory.memories import Observation
4 | from beezle_bug.memory.entity_schemas import (
5 | ENTITY_SCHEMAS,
6 | get_schema,
7 | get_expected_properties,
8 | get_missing_properties,
9 | get_common_relationships,
10 | get_entity_completeness,
11 | get_all_entity_types,
12 | get_schema_for_prompt,
13 | get_detailed_schema_for_prompt,
14 | )
15 |
--------------------------------------------------------------------------------
/backend/beezle_bug/agent_graph/edge.py:
--------------------------------------------------------------------------------
1 | """
2 | Edge class for the Agent Graph system.
3 | """
4 |
5 | import uuid
6 | from pydantic import BaseModel, Field
7 |
8 | from .types import EdgeType
9 |
10 |
11 | class Edge(BaseModel):
12 | """A connection between two nodes in the agent graph."""
13 | id: str = Field(default_factory=lambda: uuid.uuid4().hex[:8])
14 | source_node: str # Node ID
15 | source_port: str # Port name
16 | target_node: str # Node ID
17 | target_port: str # Port name
18 | edge_type: EdgeType
19 |
20 |
--------------------------------------------------------------------------------
/backend/beezle_bug/voice/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Voice processing module for Beezle Bug.
3 |
4 | Provides speech-to-text transcription using faster-whisper,
5 | voice activity detection using webrtcvad, and text-to-speech
6 | synthesis using Piper TTS.
7 | """
8 |
9 | from beezle_bug.voice.transcriber import Transcriber
10 | from beezle_bug.voice.vad import VoiceActivityDetector
11 | from beezle_bug.voice.tts import PiperTTS, get_tts, VoiceInfo
12 |
13 | __all__ = ["Transcriber", "VoiceActivityDetector", "PiperTTS", "get_tts", "VoiceInfo"]
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/toolbox.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional, Type
2 |
3 | from beezle_bug.tools import Tool
4 |
5 |
6 | class ToolBox:
7 |     def __init__(self, tools: Optional[List[Type[Tool]]] = None) -> None:
8 |         # Default to None rather than a mutable default list, which would
9 |         # be shared across every ToolBox instance.
10 |         self.tools: Dict[str, Type[Tool]] = {}
11 |         for tool in tools or []:
12 |             self.tools[tool.__name__] = tool
13 |
14 |     def __iter__(self):
15 |         return iter(self.tools)
16 |
17 |     def get_tool(self, tool_name: str, args: Dict) -> Tool:
18 |         return self.tools[tool_name](**args)
19 |
20 |     def get_tools(self) -> List[Type[Tool]]:
21 |         # The registry stores tool classes, not instances.
22 |         return list(self.tools.values())
--------------------------------------------------------------------------------
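A minimal ToolBox usage sketch (illustrative, not a repository file), using the GetDateAndTime tool defined in backend/beezle_bug/tools/system.py below:

    import asyncio

    from beezle_bug.tools.system import GetDateAndTime
    from beezle_bug.tools.toolbox import ToolBox

    toolbox = ToolBox(tools=[GetDateAndTime])
    tool = toolbox.get_tool("GetDateAndTime", {})  # pydantic validates the args dict
    print(asyncio.run(tool.run(agent=None)))       # GetDateAndTime ignores `agent`

--------------------------------------------------------------------------------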
/frontend/index.html:
--------------------------------------------------------------------------------
1 | <!doctype html>
2 | <html lang="en">
3 |   <head>
4 |     <meta charset="UTF-8" />
5 |     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6 |     <title>Beezle Bug</title>
7 |   </head>
8 |   <body>
9 |     <div id="root"></div>
10 |     <script type="module" src="/src/main.jsx"></script>
11 |   </body>
12 | </html>
--------------------------------------------------------------------------------
/Dockerfile.backend:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim
2 |
3 | WORKDIR /app
4 |
5 | # Install system dependencies for building Python packages with C extensions
6 | RUN apt-get update && apt-get install -y --no-install-recommends \
7 | gcc \
8 | build-essential \
9 | && rm -rf /var/lib/apt/lists/*
10 |
11 | # Install dependencies
12 | COPY backend/requirements.txt .
13 | RUN pip install --no-cache-dir -r requirements.txt
14 |
15 | # Copy the beezle_bug package
16 | COPY backend/beezle_bug/ ./beezle_bug/
17 |
18 | # Copy server
19 | COPY backend/server.py .
20 |
21 | # Set Python path
22 | ENV PYTHONPATH=/app
23 |
24 | # Expose port
25 | EXPOSE 5000
26 |
27 | # Run server
28 | CMD ["python", "server.py"]
29 |
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/wikipedia.py:
--------------------------------------------------------------------------------
1 | from pydantic import Field
2 | from beezle_bug.tools import Tool
3 |
4 | import wikipedia
5 |
6 |
7 | class SearchWikipedia(Tool):
8 | """
9 | Do a Wikipedia search for query
10 | """
11 |
12 | query: str = Field(..., description="the search query")
13 | results: int = Field(..., description="the maximum number of results returned")
14 |
15 | async def run(self, agent):
16 | return wikipedia.search(self.query, results=self.results)
17 |
18 |
19 | class GetWikipediaPageSummary(Tool):
20 | """
21 | Get a plain text summary of a Wikipedia page.
22 | """
23 |
24 | query: str = Field(..., description="the search query")
25 |
26 | async def run(self, agent):
27 | return wikipedia.summary(self.query, auto_suggest=False)
28 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | server:
3 | build:
4 | context: .
5 | dockerfile: Dockerfile.backend
6 | user: "${UID:-1000}:${GID:-1000}" # Run as current user
7 | network_mode: host
8 | environment:
9 | - PYTHONUNBUFFERED=1
10 | - BEEZLE_DATA_DIR=/app/data
11 | - HF_HOME=/cache/huggingface
12 | volumes:
13 | - ./backend/beezle_bug:/app/beezle_bug
14 | - ./backend/server.py:/app/server.py
15 | - ./data:/app/data # Bind mount for agent data
16 | - ./cache:/cache # Bind mount to avoid HF permissions issues
17 |
18 | frontend:
19 | build:
20 | context: .
21 | dockerfile: Dockerfile.frontend
22 | ports:
23 | - "5173:5173"
24 | volumes:
25 | - ./frontend:/app
26 | - /app/node_modules
27 |
28 |
--------------------------------------------------------------------------------
/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "beezle-bug-webchat",
3 | "private": true,
4 | "version": "1.0.0",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite --host",
8 | "build": "vite build",
9 | "preview": "vite preview"
10 | },
11 | "dependencies": {
12 | "kokoro-js": "^1.0.0",
13 | "lucide-react": "^0.263.1",
14 | "react": "^18.2.0",
15 | "react-dom": "^18.2.0",
16 | "react-markdown": "^9.0.1",
17 | "react-router-dom": "^6.30.2",
18 | "remark-gfm": "^4.0.0",
19 | "socket.io-client": "^4.7.2"
20 | },
21 | "devDependencies": {
22 | "@types/react": "^18.2.15",
23 | "@types/react-dom": "^18.2.7",
24 | "@vitejs/plugin-react": "^4.0.3",
25 | "autoprefixer": "^10.4.14",
26 | "postcss": "^8.4.27",
27 | "tailwindcss": "^3.3.3",
28 | "vite": "^4.4.5"
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/backend/requirements.txt:
--------------------------------------------------------------------------------
1 | # LLM Adapters
2 | litellm>=1.0.0
3 | openai>=1.0.0
4 |
5 | # Web Framework (FastAPI + async SocketIO)
6 | fastapi>=0.109.0
7 | uvicorn[standard]>=0.27.0
8 | python-socketio[asyncio]>=5.10.0
9 |
10 | # Data Models
11 | pydantic>=2.0.0
12 |
13 | # Database (async SQLite + vector search + ORM)
14 | aiosqlite>=0.19.0
15 | sqlite-vec>=0.1.0
16 | sqlmodel>=0.0.22
17 |
18 | # Memory & Embeddings
19 | fastembed>=0.2.0
20 | numpy>=1.24.0
21 | networkx>=3.0
22 |
23 | # Tools
24 | wikipedia>=1.4.0
25 | requests>=2.31.0
26 | beautifulsoup4>=4.12.0
27 | duckduckgo-search>=4.0.0
28 | ipython>=8.0.0
29 |
30 | # Templates
31 | jinja2>=3.1.0
32 |
33 | # Logging
34 | loguru>=0.7.0
35 |
36 | # Voice / STT
37 | faster-whisper>=1.0.0
38 | webrtcvad>=2.0.10
39 |
40 | # Voice / TTS
41 | piper-tts>=1.2.0
42 |
43 | # Testing
44 | pytest>=7.0.0
45 | pytest-asyncio>=0.21.0
46 |
--------------------------------------------------------------------------------
/frontend/src/index.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | @layer base {
6 | body {
7 | @apply bg-[#0c0c0c] text-[#e5e5e5];
8 | font-family: 'Inter', system-ui, sans-serif;
9 | }
10 |
11 | ::-webkit-scrollbar {
12 | width: 8px;
13 | height: 8px;
14 | }
15 | ::-webkit-scrollbar-track {
16 | background: transparent;
17 | }
18 | ::-webkit-scrollbar-thumb {
19 | @apply bg-[#2b2b2b] rounded-full;
20 | }
21 | ::-webkit-scrollbar-thumb:hover {
22 | @apply bg-[#3f3f3f];
23 | }
24 | }
25 |
26 | /* Mobile safe area utilities for notched phones */
27 | @layer utilities {
28 | .safe-area-top {
29 | padding-top: env(safe-area-inset-top);
30 | }
31 | .safe-area-bottom {
32 | padding-bottom: env(safe-area-inset-bottom);
33 | }
34 | .safe-area-left {
35 | padding-left: env(safe-area-inset-left);
36 | }
37 | .safe-area-right {
38 | padding-right: env(safe-area-inset-right);
39 | }
40 | }
41 |
42 |
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/memory/memory_stream.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import Optional
3 |
4 | from loguru import logger
5 | from pydantic import Field
6 |
7 | from beezle_bug.tools import Tool
8 |
9 |
10 | class Recall(Tool):
11 | """
12 | Retrieve a list of memories that relate to the search query.
13 | You can specify a date range to retrieve memories from.
14 | """
15 |
16 | query: str = Field(..., description="The query the memories are similar to")
17 | k: int = Field(..., description="Number of memories to retrieve")
18 | from_date: Optional[datetime] = Field(None, description="Only retrieve memories created on or after this date (ISO format)")
19 | to_date: Optional[datetime] = Field(None, description="Only retrieve memories created on or before this date (ISO format)")
20 |
21 | async def run(self, agent):
22 | logger.debug(f"Recalling {self.k} memories for query: {self.query}")
23 | return await agent.memory_stream.retrieve(self.query, self.k, self.from_date, self.to_date)
24 |
--------------------------------------------------------------------------------
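A sketch of how Recall runs (illustrative, not a repository file); the agent object here is a hypothetical stand-in exposing only the memory_stream.retrieve method the tool actually calls:

    import asyncio
    from types import SimpleNamespace

    from beezle_bug.tools.memory import Recall

    class FakeMemoryStream:
        # Stand-in for the real MemoryStream; only `retrieve` is needed here.
        async def retrieve(self, query, k, from_date, to_date):
            return [f"memory related to {query!r}"]

    agent = SimpleNamespace(memory_stream=FakeMemoryStream())
    print(asyncio.run(Recall(query="project deadlines", k=3).run(agent)))

--------------------------------------------------------------------------------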
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Ruben Hohndorf
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/backend/beezle_bug/agent_graph/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Agent Graph module - visual graph system for connecting agents.
3 | """
4 |
5 | from .types import (
6 | NodeType,
7 | EdgeType,
8 | Position,
9 | AgentNodeConfig,
10 | KnowledgeGraphNodeConfig,
11 | MemoryStreamNodeConfig,
12 | ToolboxNodeConfig,
13 | TextInputNodeConfig,
14 | VoiceInputNodeConfig,
15 | TextOutputNodeConfig,
16 | ScheduledEventNodeConfig,
17 | WaitAndCombineNodeConfig,
18 | NodeConfig,
19 | )
20 | from .node import Node
21 | from .edge import Edge
22 | from .agent_graph import AgentGraph
23 | from .runtime import AgentGraphRuntime
24 |
25 | __all__ = [
26 | # Types
27 | "NodeType",
28 | "EdgeType",
29 | "Position",
30 | "AgentNodeConfig",
31 | "KnowledgeGraphNodeConfig",
32 | "MemoryStreamNodeConfig",
33 | "ToolboxNodeConfig",
34 | "TextInputNodeConfig",
35 | "VoiceInputNodeConfig",
36 | "TextOutputNodeConfig",
37 | "ScheduledEventNodeConfig",
38 | "WaitAndCombineNodeConfig",
39 | "NodeConfig",
40 | # Core classes
41 | "Node",
42 | "Edge",
43 | "AgentGraph",
44 | # Runtime
45 | "AgentGraphRuntime",
46 | ]
47 |
--------------------------------------------------------------------------------
/backend/beezle_bug/events/event.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from datetime import datetime
3 | from enum import Enum
4 | from typing import Any, Dict
5 |
6 |
7 | class EventType(Enum):
8 | """Enumeration of all possible event types emitted by agents."""
9 | AGENT_STARTED = "agent.started"
10 | AGENT_STOPPED = "agent.stopped"
11 | MESSAGE_RECEIVED = "message.received"
12 | MESSAGE_SENT = "message.sent"
13 | LLM_CALL_STARTED = "llm.call.started"
14 | LLM_CALL_COMPLETED = "llm.call.completed"
15 | TOOL_SELECTED = "tool.selected"
16 | TOOL_COMPLETED = "tool.execution.completed"
17 | ERROR_OCCURRED = "error.occurred"
18 |
19 |
20 | @dataclass
21 | class Event:
22 | """Represents a single event in the system."""
23 | type: EventType
24 | agent_name: str
25 | timestamp: datetime = field(default_factory=datetime.now)
26 | data: Dict[str, Any] = field(default_factory=dict)
27 |
28 | def to_dict(self) -> Dict[str, Any]:
29 | """Convert event to dictionary for serialization."""
30 | return {
31 | 'type': self.type.value,
32 | 'agent_name': self.agent_name,
33 | 'timestamp': self.timestamp.isoformat(),
34 | 'data': self.data
35 | }
--------------------------------------------------------------------------------
/backend/beezle_bug/storage/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Storage package for database backends.
3 |
4 | Provides async storage abstraction with SQLite (and later PostgreSQL) implementations.
5 | """
6 |
7 | from .base import StorageBackend
8 | from .sqlite_backend import SQLiteStorageBackend
9 |
10 |
11 | async def get_storage_backend(
12 | backend_type: str = "sqlite",
13 | **kwargs
14 | ) -> StorageBackend:
15 | """
16 | Factory function to create and initialize a storage backend.
17 |
18 | Args:
19 | backend_type: Type of backend ("sqlite" or "postgres" in future)
20 | **kwargs: Backend-specific configuration
21 | - sqlite: db_path (str) - path to database file
22 | - postgres: connection_url (str) - PostgreSQL connection URL
23 |
24 | Returns:
25 | Initialized StorageBackend instance
26 | """
27 | if backend_type == "sqlite":
28 | db_path = kwargs.get("db_path", "beezle.db")
29 | backend = SQLiteStorageBackend(db_path)
30 | await backend.initialize()
31 | return backend
32 | elif backend_type == "postgres":
33 | raise NotImplementedError("PostgreSQL backend not yet implemented")
34 | else:
35 | raise ValueError(f"Unknown storage backend: {backend_type}")
36 |
37 |
38 | __all__ = [
39 | "StorageBackend",
40 | "SQLiteStorageBackend",
41 | "get_storage_backend",
42 | ]
43 |
44 |
45 |
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
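A sketch of the factory in use (illustrative, not a repository file); list_projects() is the StorageBackend method that ProjectManager relies on:

    import asyncio

    from beezle_bug.storage import get_storage_backend

    async def main():
        storage = await get_storage_backend("sqlite", db_path="beezle.db")
        print(await storage.list_projects())

    asyncio.run(main())

--------------------------------------------------------------------------------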
/backend/beezle_bug/tools/memory/__init__.py:
--------------------------------------------------------------------------------
1 | from beezle_bug.tools.memory.knowledge_graph import (
2 | # CRUD operations
3 | AddEntity,
4 | AddPropertyToEntity,
5 | AddRelationship,
6 | AddPropertyToRelationship,
7 | GetEntity,
8 | GetRelationship,
9 | GetRelationships,
10 | RemoveRelationship,
11 | RemoveRelationshipProperty,
12 | RemoveEntity,
13 | RemoveEntityProperty,
14 | # Query operations
15 | FindEntitiesByType,
16 | FindEntitiesByProperty,
17 | FindRelationshipsByType,
18 | GetNeighbors,
19 | FindPath,
20 | GetConnectedEntities,
21 | GetMostConnected,
22 | GetIsolatedEntities,
23 | CheckGraphConnectivity,
24 | )
25 | from beezle_bug.tools.memory.memory_stream import Recall
26 |
27 | __all__ = [
28 | # CRUD operations
29 | "AddEntity",
30 | "AddPropertyToEntity",
31 | "AddRelationship",
32 | "AddPropertyToRelationship",
33 | "GetEntity",
34 | "GetRelationship",
35 | "GetRelationships",
36 | "RemoveRelationship",
37 | "RemoveRelationshipProperty",
38 | "RemoveEntity",
39 | "RemoveEntityProperty",
40 | # Query operations
41 | "FindEntitiesByType",
42 | "FindEntitiesByProperty",
43 | "FindRelationshipsByType",
44 | "GetNeighbors",
45 | "FindPath",
46 | "GetConnectedEntities",
47 | "GetMostConnected",
48 | "GetIsolatedEntities",
49 | "CheckGraphConnectivity",
50 | # Memory stream
51 | "Recall",
52 | ]
53 |
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/tool.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, Any
2 | from abc import ABC, abstractmethod
3 | from pydantic import BaseModel
4 |
5 |
6 | class Tool(ABC, BaseModel):
7 | """
8 | Abstract base class representing a tool.
9 |
10 | This class serves as a base for defining various tools in a system.
11 | Subclasses must implement the `run` method to define the functionality
12 | of the tool.
13 |
14 | All tool run methods are async to support async storage operations.
15 |
16 | Attributes:
17 | No attributes defined in this abstract base class.
18 |
19 | Methods:
20 | run: Abstract async method that must be implemented by subclasses to define
21 | the functionality of the tool.
22 |
23 | Example:
24 | ```python
25 | from beezle_bug.tools import Tool
26 |
27 | class MyTool(Tool):
28 | async def run(self, agent):
29 | # Define functionality of the tool
30 | return "result"
31 | ```
32 |
33 | """
34 |
35 | @abstractmethod
36 | async def run(self, agent) -> Optional[Any]:
37 | """
38 | Abstract async method to be implemented by subclasses.
39 |
40 | This method defines the functionality of the tool. Subclasses must
41 | override this method to provide specific implementation.
42 |
43 | Args:
44 | agent: The agent executing this tool
45 |
46 | Returns:
47 | The result of the tool execution
48 |
49 | Raises:
50 | NotImplementedError: This method must be implemented by subclasses.
51 |
52 | """
53 | pass
54 |
--------------------------------------------------------------------------------
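A sketch of a parameterized subclass (illustrative, not a repository file), extending the docstring's example with a pydantic field that gets validated when a ToolBox instantiates the class from parsed arguments:

    import asyncio

    from pydantic import Field

    from beezle_bug.tools import Tool

    class Echo(Tool):
        """Return the given text unchanged."""
        text: str = Field(..., description="Text to echo back")

        async def run(self, agent):
            return self.text

    print(asyncio.run(Echo(text="hello").run(agent=None)))

--------------------------------------------------------------------------------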
/backend/beezle_bug/events/eventbus.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, Dict, List
2 | from loguru import logger
3 |
4 | from beezle_bug.events.event import Event, EventType
5 |
6 | class EventBus:
7 | """Publish-subscribe event bus for agent introspection."""
8 |
9 | def __init__(self) -> None:
10 | self._subscribers: Dict[EventType, List[Callable[[Event], None]]] = {}
11 | self._all_subscribers: List[Callable[[Event], None]] = []
12 |
13 | def subscribe(self, event_type: EventType, callback: Callable[[Event], None]) -> None:
14 | """Subscribe to a specific event type."""
15 | if event_type not in self._subscribers:
16 | self._subscribers[event_type] = []
17 | if callback not in self._subscribers[event_type]:
18 | self._subscribers[event_type].append(callback)
19 |
20 | def subscribe_all(self, callback: Callable[[Event], None]) -> None:
21 | """Subscribe to all events."""
22 | if callback not in self._all_subscribers:
23 | self._all_subscribers.append(callback)
24 |
25 | def emit(self, event: Event) -> None:
26 | """Emit an event to all relevant subscribers."""
27 | if event.type in self._subscribers:
28 | for callback in self._subscribers[event.type]:
29 | try:
30 | callback(event)
31 | except Exception as e:
32 | logger.error(f"Error in event subscriber: {e}")
33 |
34 | for callback in self._all_subscribers:
35 | try:
36 | callback(event)
37 | except Exception as e:
38 | logger.error(f"Error in global subscriber: {e}")
--------------------------------------------------------------------------------
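A minimal pub/sub sketch (illustrative, not a repository file) combining Event, EventType, and EventBus:

    from beezle_bug.events import Event, EventBus, EventType

    bus = EventBus()
    bus.subscribe(EventType.TOOL_SELECTED, lambda e: print(e.to_dict()))
    bus.emit(Event(type=EventType.TOOL_SELECTED, agent_name="beezle",
                   data={"tool": "GetDateAndTime"}))

--------------------------------------------------------------------------------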
/backend/beezle_bug/template.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import List
3 | from jinja2 import Environment, FileSystemLoader, Template
4 | import beezle_bug.constants as const
5 |
6 |
7 | class TemplateLoader:
8 | def __init__(self, data_dir: Path):
9 | self._template_dir = data_dir / const.TEMPLATE_SUBFOLDER
10 | self._env = Environment(loader=FileSystemLoader(self._template_dir))
11 |
12 | def load(self, name: str) -> Template:
13 | return self._env.get_template(f"{name}.j2")
14 |
15 | def list_templates(self) -> List[str]:
16 | """Return list of available template names (without .j2 extension)."""
17 | templates = self._env.list_templates()
18 | return [t.replace('.j2', '') for t in templates if t.endswith('.j2')]
19 |
20 | def get_content(self, name: str) -> str:
21 | """Read and return the raw content of a template file."""
22 | template_path = self._template_dir / f"{name}.j2"
23 | if not template_path.exists():
24 | raise FileNotFoundError(f"Template '{name}' not found")
25 | return template_path.read_text(encoding="utf-8")
26 |
27 | def save(self, name: str, content: str) -> None:
28 | """Create or update a template file."""
29 | template_path = self._template_dir / f"{name}.j2"
30 | template_path.write_text(content, encoding="utf-8")
31 | # Clear Jinja2 cache so it picks up the new content
32 | self._env = Environment(loader=FileSystemLoader(self._template_dir))
33 |
34 | def delete(self, name: str) -> None:
35 | """Delete a template file."""
36 | template_path = self._template_dir / f"{name}.j2"
37 | if not template_path.exists():
38 | raise FileNotFoundError(f"Template '{name}' not found")
39 | template_path.unlink()
40 |
--------------------------------------------------------------------------------
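A TemplateLoader sketch (illustrative, not a repository file), rendering data/templates/agent.j2 from the repository root; SimpleNamespace stands in for a real agent object:

    from pathlib import Path
    from types import SimpleNamespace

    from beezle_bug.template import TemplateLoader

    loader = TemplateLoader(Path("data"))  # resolves to data/templates/
    template = loader.load("agent")        # loads agent.j2
    print(template.render(agent=SimpleNamespace(name="Beezle")))

--------------------------------------------------------------------------------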
/backend/beezle_bug/models/edge.py:
--------------------------------------------------------------------------------
1 | """
2 | SQLModel Edge database model.
3 | """
4 |
5 | from typing import TYPE_CHECKING, Optional
6 | from sqlmodel import SQLModel, Field, Relationship
7 |
8 | if TYPE_CHECKING:
9 | from .project import ProjectDB
10 |
11 |
12 | class EdgeDB(SQLModel, table=True):
13 | """
14 | Database model for agent graph edges.
15 |
16 | Maps to the 'edges' table.
17 | """
18 | __tablename__ = "edges"
19 |
20 | id: str = Field(primary_key=True)
21 | project_id: str = Field(foreign_key="projects.id", index=True)
22 |
23 | # Connection endpoints
24 | source_node_id: str = Field(index=True)
25 | source_port: str
26 | target_node_id: str = Field(index=True)
27 | target_port: str
28 |
29 | # Edge type (message, pipeline, resource, delegate)
30 | edge_type: str = Field(index=True)
31 |
32 | # Relationship back to project
33 | project: Optional["ProjectDB"] = Relationship(back_populates="edges")
34 |
35 | def to_pydantic(self) -> "Edge":
36 | """Convert database model to Pydantic API model."""
37 | from beezle_bug.agent_graph.edge import Edge
38 | from beezle_bug.agent_graph.types import EdgeType
39 |
40 | return Edge(
41 | id=self.id,
42 | source_node=self.source_node_id,
43 | source_port=self.source_port,
44 | target_node=self.target_node_id,
45 | target_port=self.target_port,
46 | edge_type=EdgeType(self.edge_type),
47 | )
48 |
49 | @classmethod
50 | def from_pydantic(cls, edge: "Edge", project_id: str) -> "EdgeDB":
51 | """Create database model from Pydantic API model."""
52 | return cls(
53 | id=edge.id,
54 | project_id=project_id,
55 | source_node_id=edge.source_node,
56 | source_port=edge.source_port,
57 | target_node_id=edge.target_node,
58 | target_port=edge.target_port,
59 | edge_type=edge.edge_type.value,
60 | )
61 |
62 |
--------------------------------------------------------------------------------
/backend/beezle_bug/project.py:
--------------------------------------------------------------------------------
1 | """
2 | Project class - container for AgentGraph + settings + metadata.
3 | """
4 |
5 | import json
6 | import uuid
7 | from datetime import datetime
8 | from pathlib import Path
9 | from typing import List, Optional
10 | from pydantic import BaseModel, Field, model_validator
11 |
12 | from beezle_bug.agent_graph.agent_graph import AgentGraph
13 |
14 |
15 | class TTSSettings(BaseModel):
16 | """TTS (voice output) settings for a project."""
17 | enabled: bool = False
18 | voice: Optional[str] = None
19 | speed: float = 1.0
20 | speaker: int = 0
21 |
22 |
23 | class STTSettings(BaseModel):
24 | """STT (voice input) settings for a project."""
25 | enabled: bool = False
26 | device_id: Optional[str] = None
27 | device_label: Optional[str] = None
28 | wake_words: List[str] = Field(default_factory=lambda: ["hey beezle", "ok beezle"])
29 | stop_words: List[str] = Field(default_factory=lambda: ["stop listening", "goodbye", "that's all"])
30 | max_duration: float = 30.0 # Maximum recording duration in seconds
31 |
32 |
33 | class Project(BaseModel):
34 | """A project containing an agent graph configuration."""
35 | id: str = Field(default_factory=lambda: uuid.uuid4().hex[:8])
36 | name: str
37 | agent_graph: AgentGraph = Field(default_factory=AgentGraph)
38 | tts_settings: TTSSettings = Field(default_factory=TTSSettings)
39 | stt_settings: STTSettings = Field(default_factory=STTSettings)
40 | created_at: datetime = Field(default_factory=datetime.utcnow)
41 | updated_at: datetime = Field(default_factory=datetime.utcnow)
42 |
43 | @model_validator(mode='before')
44 | @classmethod
45 | def migrate_mesh_to_agent_graph(cls, data):
46 | """Backward compatibility: convert 'mesh' key to 'agent_graph'."""
47 | if isinstance(data, dict) and 'mesh' in data and 'agent_graph' not in data:
48 | data['agent_graph'] = data.pop('mesh')
49 | return data
50 |
51 | def touch(self) -> None:
52 | """Update the updated_at timestamp."""
53 | self.updated_at = datetime.utcnow()
54 |
55 | # Persistence methods
56 |
57 | def save(self, path: Path) -> None:
58 | """Save the project to a JSON file."""
59 | with open(path, "w") as f:
60 | json.dump(self.model_dump(mode="json"), f, indent=2, default=str)
61 |
62 | @classmethod
63 | def load(cls, path: Path) -> "Project":
64 | """Load a project from a JSON file."""
65 | with open(path, "r") as f:
66 | data = json.load(f)
67 | return cls.model_validate(data)
68 |
--------------------------------------------------------------------------------
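A Project persistence sketch (illustrative, not a repository file), round-tripping a project through save() and load():

    from pathlib import Path

    from beezle_bug.project import Project

    project = Project(name="demo")
    project.save(Path("demo.json"))
    restored = Project.load(Path("demo.json"))
    assert restored.id == project.id and restored.name == "demo"

--------------------------------------------------------------------------------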
/backend/beezle_bug/agent_graph/node.py:
--------------------------------------------------------------------------------
1 | """
2 | Node class for the Agent Graph system.
3 | """
4 |
5 | import uuid
6 | from pydantic import BaseModel, Field
7 |
8 | from .types import NodeType, Position, NodeConfig
9 |
10 |
11 | class Node(BaseModel):
12 | """A node in the agent graph."""
13 | id: str = Field(default_factory=lambda: uuid.uuid4().hex[:8])
14 | type: NodeType
15 | position: Position = Field(default_factory=Position)
16 | config: NodeConfig
17 |
18 | def get_ports(self) -> dict[str, list[str]]:
19 | """Get available ports for this node type."""
20 | if self.type == NodeType.AGENT:
21 | return {
22 | "inputs": ["message_in", "answer"],
23 | "outputs": ["message_out", "ask"],
24 | "bidirectional": ["knowledge", "memory", "tools"],
25 | }
26 | elif self.type == NodeType.KNOWLEDGE_GRAPH:
27 | return {
28 | "inputs": [],
29 | "outputs": [],
30 | "bidirectional": ["connection"],
31 | }
32 | elif self.type == NodeType.MEMORY_STREAM:
33 | return {
34 | "inputs": [],
35 | "outputs": [],
36 | "bidirectional": ["connection"],
37 | }
38 | elif self.type == NodeType.TOOLBOX:
39 | return {
40 | "inputs": [],
41 | "outputs": [],
42 | "bidirectional": ["connection"],
43 | }
44 | elif self.type == NodeType.TEXT_INPUT:
45 | return {
46 | "inputs": [],
47 | "outputs": ["message_out"],
48 | "bidirectional": [],
49 | }
50 | elif self.type == NodeType.VOICE_INPUT:
51 | return {
52 | "inputs": [],
53 | "outputs": ["message_out"],
54 | "bidirectional": [],
55 | }
56 | elif self.type == NodeType.TEXT_OUTPUT:
57 | return {
58 | "inputs": ["message_in"],
59 | "outputs": [],
60 | "bidirectional": [],
61 | }
62 | elif self.type == NodeType.SCHEDULED_EVENT:
63 | return {
64 | "inputs": [],
65 | "outputs": ["message_out"],
66 | "bidirectional": [],
67 | }
68 | elif self.type == NodeType.WAIT_AND_COMBINE:
69 | return {
70 | "inputs": ["message_in"],
71 | "outputs": ["message_out"],
72 | "bidirectional": [],
73 | }
74 | return {"inputs": [], "outputs": [], "bidirectional": []}
75 |
76 |
--------------------------------------------------------------------------------
/backend/beezle_bug/project_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | ProjectManager - manages project lifecycle: create, load, save, close.
3 |
4 | All operations are async to work with the database storage backend.
5 | """
6 |
7 | from typing import TYPE_CHECKING, Optional
8 | from loguru import logger
9 |
10 | from beezle_bug.project import Project
11 |
12 | if TYPE_CHECKING:
13 | from beezle_bug.storage.base import StorageBackend
14 | from beezle_bug.agent_graph.runtime import AgentGraphRuntime
15 |
16 |
17 | class ProjectManager:
18 | """Manages project lifecycle: create, load, save, close."""
19 |
20 | def __init__(
21 | self,
22 | storage: "StorageBackend",
23 | runtime: "AgentGraphRuntime",
24 | ):
25 | self.storage = storage
26 | self.runtime = runtime
27 | self.current_project: Optional[Project] = None
28 |
29 | async def list_projects(self) -> list[dict]:
30 | """List all saved projects with metadata."""
31 | return await self.storage.list_projects()
32 |
33 | async def create_project(self, name: str) -> Project:
34 | """Create a new project."""
35 | project = Project(name=name)
36 | await self.storage.save_project(project)
37 | logger.info(f"Created project: {project.name} ({project.id})")
38 | return project
39 |
40 | async def load_project(self, project_id: str) -> Project:
41 | """Load a project from database."""
42 | # Close current project if any
43 | if self.current_project:
44 | await self.close_project()
45 |
46 | project = await self.storage.get_project(project_id)
47 | if project is None:
48 | raise FileNotFoundError(f"Project {project_id} not found")
49 |
50 | self.current_project = project
51 | logger.info(f"Loaded project: {project.name} ({project.id})")
52 | return project
53 |
54 | async def save_project(self) -> None:
55 | """Save the current project to database."""
56 | if not self.current_project:
57 | raise ValueError("No project loaded")
58 |
59 | self.current_project.touch()
60 | await self.storage.save_project(self.current_project)
61 | logger.info(f"Saved project: {self.current_project.name} ({self.current_project.id})")
62 |
63 | async def close_project(self) -> None:
64 | """Close the current project (undeploy first if needed)."""
65 | if self.runtime.is_deployed:
66 | await self.runtime.undeploy()
67 | self.current_project = None
68 | logger.info("Closed project")
69 |
70 | async def delete_project(self, project_id: str) -> None:
71 | """Delete a project from database."""
72 | # Close if it's the current project
73 | if self.current_project and self.current_project.id == project_id:
74 | await self.close_project()
75 |
76 | await self.storage.delete_project(project_id)
77 | logger.info(f"Deleted project: {project_id}")
78 |
--------------------------------------------------------------------------------
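A lifecycle sketch (illustrative, not a repository file); the storage and runtime arguments are the real dependencies (a StorageBackend and an AgentGraphRuntime) and must be supplied by the caller:

    from beezle_bug.project_manager import ProjectManager

    async def demo(storage, runtime):
        manager = ProjectManager(storage, runtime)
        project = await manager.create_project("demo")
        await manager.load_project(project.id)
        await manager.save_project()
        await manager.close_project()

--------------------------------------------------------------------------------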
/backend/beezle_bug/models/project.py:
--------------------------------------------------------------------------------
1 | """
2 | SQLModel Project database model.
3 | """
4 |
5 | from datetime import datetime
6 | from typing import TYPE_CHECKING, List, Optional, Dict, Any
7 | from sqlmodel import SQLModel, Field, Relationship, Column
8 | from sqlalchemy import JSON
9 |
10 | if TYPE_CHECKING:
11 | from .node import NodeDB
12 | from .edge import EdgeDB
13 |
14 |
15 | class ProjectDB(SQLModel, table=True):
16 | """
17 | Database model for projects.
18 |
19 | Maps to the 'projects' table with proper columns instead of JSON blob.
20 | """
21 | __tablename__ = "projects"
22 |
23 | id: str = Field(primary_key=True)
24 | name: str = Field(index=True)
25 |
26 | # Settings stored as JSON (1:1 relationship, simple structure)
27 | tts_settings: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
28 | stt_settings: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
29 |
30 | # Timestamps
31 | created_at: datetime = Field(default_factory=datetime.utcnow)
32 | updated_at: datetime = Field(default_factory=datetime.utcnow)
33 |
34 | # Relationships
35 | nodes: List["NodeDB"] = Relationship(
36 | back_populates="project",
37 | sa_relationship_kwargs={"cascade": "all, delete-orphan"}
38 | )
39 | edges: List["EdgeDB"] = Relationship(
40 | back_populates="project",
41 | sa_relationship_kwargs={"cascade": "all, delete-orphan"}
42 | )
43 |
44 | def to_pydantic(self) -> "Project":
45 | """Convert database model to Pydantic API model."""
46 | from beezle_bug.project import Project, TTSSettings, STTSettings
47 | from beezle_bug.agent_graph import AgentGraph, Node, Edge
48 | from beezle_bug.agent_graph.types import Position
49 |
50 | # Convert nodes
51 | nodes = []
52 | for node_db in self.nodes:
53 | nodes.append(node_db.to_pydantic())
54 |
55 | # Convert edges
56 | edges = []
57 | for edge_db in self.edges:
58 | edges.append(edge_db.to_pydantic())
59 |
60 | return Project(
61 | id=self.id,
62 | name=self.name,
63 | agent_graph=AgentGraph(nodes=nodes, edges=edges),
64 | tts_settings=TTSSettings(**self.tts_settings) if self.tts_settings else TTSSettings(),
65 | stt_settings=STTSettings(**self.stt_settings) if self.stt_settings else STTSettings(),
66 | created_at=self.created_at,
67 | updated_at=self.updated_at,
68 | )
69 |
70 | @classmethod
71 | def from_pydantic(cls, project: "Project") -> "ProjectDB":
72 | """Create database model from Pydantic API model."""
73 | return cls(
74 | id=project.id,
75 | name=project.name,
76 | tts_settings=project.tts_settings.model_dump(),
77 | stt_settings=project.stt_settings.model_dump(),
78 | created_at=project.created_at,
79 | updated_at=project.updated_at,
80 | )
81 |
82 |
--------------------------------------------------------------------------------
/backend/beezle_bug/agent_graph/agent_graph.py:
--------------------------------------------------------------------------------
1 | """
2 | AgentGraph class - the domain model for a graph of connected nodes.
3 | """
4 |
5 | import json
6 | from pathlib import Path
7 | from typing import Optional
8 | from pydantic import BaseModel, Field
9 |
10 | from .types import EdgeType
11 | from .node import Node
12 | from .edge import Edge
13 |
14 |
15 | class AgentGraph(BaseModel):
16 | """A graph of connected nodes representing an agent graph."""
17 | nodes: list[Node] = Field(default_factory=list)
18 | edges: list[Edge] = Field(default_factory=list)
19 |
20 | def get_node(self, node_id: str) -> Optional[Node]:
21 | """Get a node by ID."""
22 | for node in self.nodes:
23 | if node.id == node_id:
24 | return node
25 | return None
26 |
27 | def get_edges_for_node(self, node_id: str) -> list[Edge]:
28 | """Get all edges connected to a node."""
29 | return [
30 | edge for edge in self.edges
31 | if edge.source_node == node_id or edge.target_node == node_id
32 | ]
33 |
34 | def get_connected_nodes(self, node_id: str, edge_type: Optional[EdgeType] = None) -> list[Node]:
35 | """Get all nodes connected to a given node, optionally filtered by edge type."""
36 | connected = []
37 | for edge in self.edges:
38 | if edge_type and edge.edge_type != edge_type:
39 | continue
40 | if edge.source_node == node_id:
41 | node = self.get_node(edge.target_node)
42 | if node:
43 | connected.append(node)
44 | elif edge.target_node == node_id:
45 | node = self.get_node(edge.source_node)
46 | if node:
47 | connected.append(node)
48 | return connected
49 |
50 | def add_node(self, node: Node) -> None:
51 | """Add a node to the agent graph."""
52 | self.nodes.append(node)
53 |
54 | def remove_node(self, node_id: str) -> None:
55 | """Remove a node and all its edges from the agent graph."""
56 | self.nodes = [n for n in self.nodes if n.id != node_id]
57 | self.edges = [
58 | e for e in self.edges
59 | if e.source_node != node_id and e.target_node != node_id
60 | ]
61 |
62 | def add_edge(self, edge: Edge) -> None:
63 | """Add an edge to the agent graph."""
64 | self.edges.append(edge)
65 |
66 | def remove_edge(self, edge_id: str) -> None:
67 | """Remove an edge from the agent graph."""
68 | self.edges = [e for e in self.edges if e.id != edge_id]
69 |
70 | # Persistence methods
71 |
72 | def save(self, path: Path) -> None:
73 | """Save the agent graph to a JSON file."""
74 | with open(path, "w") as f:
75 | json.dump(self.model_dump(mode="json"), f, indent=2)
76 |
77 | @classmethod
78 | def load(cls, path: Path) -> "AgentGraph":
79 | """Load an agent graph from a JSON file."""
80 | with open(path, "r") as f:
81 | data = json.load(f)
82 | return cls.model_validate(data)
83 |
84 |
--------------------------------------------------------------------------------
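A graph-assembly sketch (illustrative, not a repository file); the port names match Node.get_ports() for the text_input and agent node types:

    from beezle_bug.agent_graph import (
        AgentGraph, AgentNodeConfig, Edge, EdgeType,
        Node, NodeType, TextInputNodeConfig,
    )

    graph = AgentGraph()
    text_in = Node(type=NodeType.TEXT_INPUT, config=TextInputNodeConfig())
    agent = Node(type=NodeType.AGENT, config=AgentNodeConfig(name="Scout"))
    graph.add_node(text_in)
    graph.add_node(agent)
    graph.add_edge(Edge(source_node=text_in.id, source_port="message_out",
                        target_node=agent.id, target_port="message_in",
                        edge_type=EdgeType.MESSAGE))
    assert graph.get_connected_nodes(text_in.id) == [agent]

--------------------------------------------------------------------------------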
/frontend/src/components/GeneralSettingsTab.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 | import { socket } from '../lib/socket';
3 | import { Database, AlertTriangle } from 'lucide-react';
4 |
5 | // Storage backend options (expandable in future)
6 | const STORAGE_OPTIONS = [
7 | { value: 'sqlite', label: 'SQLite' },
8 | ];
9 |
10 | export default function GeneralSettingsTab() {
11 | const [storageBackend, setStorageBackend] = useState('sqlite');
12 | const [loading, setLoading] = useState(true);
13 |
14 | useEffect(() => {
15 | const requestSettings = () => {
16 | socket.emit('get_general_settings');
17 | };
18 |
19 | requestSettings();
20 |
21 | const timeout = setTimeout(() => {
22 | setLoading(false);
23 | }, 3000);
24 |
25 | const handleGeneralSettings = (data) => {
26 | if (data.storage_backend) {
27 | setStorageBackend(data.storage_backend);
28 | }
29 | setLoading(false);
30 | clearTimeout(timeout);
31 | };
32 |
33 | const handleConnect = () => {
34 | requestSettings();
35 | };
36 |
37 | socket.on('general_settings', handleGeneralSettings);
38 | socket.on('connect', handleConnect);
39 |
40 | return () => {
41 | socket.off('general_settings', handleGeneralSettings);
42 | socket.off('connect', handleConnect);
43 | clearTimeout(timeout);
44 | };
45 | }, []);
46 |
47 |   if (loading) {
48 |     return (
49 |       <div>
50 |         Loading settings...
51 |       </div>
52 |     );
53 |   }
54 |
55 | return (
56 |
57 | {/* Storage Section */}
58 |
59 |
60 |
61 | Storage
62 |
63 |
64 |
65 | {/* Backend Selection */}
66 |
67 |
68 |
79 |
80 |
81 | {/* Info/Warning */}
82 |
83 |
84 |
85 | Storage backend is configured via environment variable.
86 | Changes require server restart.
87 |
88 |
89 |
90 |
91 |
92 | );
93 | }
94 |
95 |
--------------------------------------------------------------------------------
/backend/beezle_bug/models/node.py:
--------------------------------------------------------------------------------
1 | """
2 | SQLModel Node database model.
3 | """
4 |
5 | from typing import TYPE_CHECKING, Optional, Dict, Any
6 | from sqlmodel import SQLModel, Field, Relationship, Column
7 | from sqlalchemy import JSON
8 |
9 | if TYPE_CHECKING:
10 | from .project import ProjectDB
11 |
12 |
13 | class NodeDB(SQLModel, table=True):
14 | """
15 | Database model for agent graph nodes.
16 |
17 | Maps to the 'nodes' table.
18 | """
19 | __tablename__ = "nodes"
20 |
21 | id: str = Field(primary_key=True)
22 | project_id: str = Field(foreign_key="projects.id", index=True)
23 |
24 | # Node type (agent, knowledge_graph, memory_stream, etc.)
25 | type: str = Field(index=True)
26 |
27 | # Position (split into columns for potential spatial queries)
28 | position_x: float = Field(default=0.0)
29 | position_y: float = Field(default=0.0)
30 |
31 | # Config is polymorphic based on node type, stored as JSON
32 | config: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
33 |
34 | # Relationship back to project
35 | project: Optional["ProjectDB"] = Relationship(back_populates="nodes")
36 |
37 | def to_pydantic(self) -> "Node":
38 | """Convert database model to Pydantic API model."""
39 | from beezle_bug.agent_graph.node import Node
40 | from beezle_bug.agent_graph.types import NodeType, Position
41 |
42 | # Import all config types for proper instantiation
43 | from beezle_bug.agent_graph.types import (
44 | AgentNodeConfig,
45 | KnowledgeGraphNodeConfig,
46 | MemoryStreamNodeConfig,
47 | ToolboxNodeConfig,
48 | TextInputNodeConfig,
49 | VoiceInputNodeConfig,
50 | TextOutputNodeConfig,
51 | ScheduledEventNodeConfig,
52 | WaitAndCombineNodeConfig,
53 | )
54 |
55 | # Map type string to config class
56 | config_classes = {
57 | "agent": AgentNodeConfig,
58 | "knowledge_graph": KnowledgeGraphNodeConfig,
59 | "memory_stream": MemoryStreamNodeConfig,
60 | "toolbox": ToolboxNodeConfig,
61 | "text_input": TextInputNodeConfig,
62 | "voice_input": VoiceInputNodeConfig,
63 | "text_output": TextOutputNodeConfig,
64 | "scheduled_event": ScheduledEventNodeConfig,
65 | "wait_and_combine": WaitAndCombineNodeConfig,
66 | }
67 |
68 | config_class = config_classes.get(self.type)
69 | if config_class:
70 | config = config_class(**self.config)
71 | else:
72 | # Fallback - shouldn't happen
73 | config = self.config
74 |
75 | return Node(
76 | id=self.id,
77 | type=NodeType(self.type),
78 | position=Position(x=self.position_x, y=self.position_y),
79 | config=config,
80 | )
81 |
82 | @classmethod
83 | def from_pydantic(cls, node: "Node", project_id: str) -> "NodeDB":
84 | """Create database model from Pydantic API model."""
85 | return cls(
86 | id=node.id,
87 | project_id=project_id,
88 | type=node.type.value,
89 | position_x=node.position.x,
90 | position_y=node.position.y,
91 | config=node.config.model_dump() if hasattr(node.config, 'model_dump') else dict(node.config),
92 | )
93 |
94 |
--------------------------------------------------------------------------------
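A conversion sketch (illustrative, not a repository file), round-tripping a node between the Pydantic API model and the database row:

    from beezle_bug.agent_graph import AgentNodeConfig, Node, NodeType
    from beezle_bug.models import NodeDB

    node = Node(type=NodeType.AGENT, config=AgentNodeConfig(name="Scout"))
    row = NodeDB.from_pydantic(node, project_id="p1")
    assert row.type == "agent" and row.config["name"] == "Scout"
    assert row.to_pydantic().id == node.id

--------------------------------------------------------------------------------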
/backend/beezle_bug/agent_graph/types.py:
--------------------------------------------------------------------------------
1 | """
2 | Type definitions for the Agent Graph system.
3 |
4 | Contains enums, position, and node configuration classes.
5 | """
6 |
7 | from enum import Enum
8 | from typing import Optional, Union
9 | from pydantic import BaseModel, Field
10 |
11 |
12 | class NodeType(str, Enum):
13 | """Types of nodes that can exist in an agent graph."""
14 | AGENT = "agent"
15 | KNOWLEDGE_GRAPH = "knowledge_graph"
16 | MEMORY_STREAM = "memory_stream"
17 | TOOLBOX = "toolbox"
18 | TEXT_INPUT = "text_input"
19 | VOICE_INPUT = "voice_input"
20 | TEXT_OUTPUT = "text_output"
21 | SCHEDULED_EVENT = "scheduled_event"
22 | WAIT_AND_COMBINE = "wait_and_combine"
23 |
24 |
25 | class EdgeType(str, Enum):
26 | """Types of edges/connections between nodes."""
27 | MESSAGE = "message" # Direct message passing between agents or event nodes
28 | PIPELINE = "pipeline" # Output becomes input (chained processing)
29 | RESOURCE = "resource" # Bidirectional read/write access to KG/Memory
30 | DELEGATE = "delegate" # Sync call: agent A asks agent B, gets response as tool result
31 |
32 |
33 | class Position(BaseModel):
34 | """2D position of a node in the graph UI."""
35 | x: float = 0.0
36 | y: float = 0.0
37 |
38 |
39 | # Node Configuration Types
40 |
41 | class AgentNodeConfig(BaseModel):
42 | """Configuration for an Agent node."""
43 | name: str
44 | model: str = "gpt-4"
45 | api_url: str = "http://127.0.0.1:1234/v1"
46 | api_key: str = ""
47 | system_template: str = "agent"
48 |
49 |
50 | class KnowledgeGraphNodeConfig(BaseModel):
51 | """Configuration for a Knowledge Graph node."""
52 | name: str = "Knowledge Graph"
53 |
54 |
55 | class MemoryStreamNodeConfig(BaseModel):
56 | """Configuration for a Memory Stream node."""
57 | name: str = "Memory Stream"
58 | max_observations: int = 1000
59 |
60 |
61 | class ToolboxNodeConfig(BaseModel):
62 | """Configuration for a Toolbox node."""
63 | name: str = "Toolbox"
64 | tools: list[str] = Field(default_factory=list)
65 |
66 |
67 | class TextInputNodeConfig(BaseModel):
68 | """Configuration for Text Input node (typed text entry point)."""
69 | name: str = "Text Input"
70 |
71 |
72 | class VoiceInputNodeConfig(BaseModel):
73 | """Configuration for Voice Input node (voice-transcribed text entry point)."""
74 | name: str = "Voice Input"
75 |
76 |
77 | class TextOutputNodeConfig(BaseModel):
78 | """Configuration for Text Output node (chat display)."""
79 | name: str = "Text Output"
80 |
81 |
82 | class ScheduledEventNodeConfig(BaseModel):
83 | """Configuration for a Scheduled Event node."""
84 | name: str = "Scheduled Event"
85 | trigger_type: str = "interval" # "once" or "interval"
86 | run_at: Optional[str] = None # ISO datetime for "once"
87 | interval_seconds: int = 30 # For "interval"
88 | message_content: str = "Review your current state and pending tasks."
89 |
90 |
91 | class WaitAndCombineNodeConfig(BaseModel):
92 | """Configuration for a Wait and Combine node (rendezvous point)."""
93 | name: str = "Wait and Combine"
94 |
95 |
96 | # Union type for all node configs
97 | NodeConfig = Union[
98 | AgentNodeConfig,
99 | KnowledgeGraphNodeConfig,
100 | MemoryStreamNodeConfig,
101 | ToolboxNodeConfig,
102 | TextInputNodeConfig,
103 | VoiceInputNodeConfig,
104 | TextOutputNodeConfig,
105 | ScheduledEventNodeConfig,
106 | WaitAndCombineNodeConfig,
107 | ]
108 |
109 |
--------------------------------------------------------------------------------
/backend/beezle_bug/llm_adapter/llama_cpp_adapter.py:
--------------------------------------------------------------------------------
1 | """
2 | [DEPRECATED] Llama.cpp API adapter module.
3 |
4 | This module is deprecated and will be removed in a future version.
5 | Please use LiteLLMAdapter instead for better provider support and features.
6 |
7 | See LITELLM_MIGRATION.md for migration instructions."""
8 |
9 | import warnings
10 | from loguru import logger
11 |
12 | import requests
13 |
14 | from beezle_bug.llm_adapter import BaseAdapter
15 |
16 |
17 | warnings.warn(
18 | "LlamaCppApiAdapter is deprecated. Please use LiteLLMAdapter instead. "
19 | "See LITELLM_MIGRATION.md for migration instructions.",
20 | DeprecationWarning,
21 | stacklevel=2
22 | )
23 |
24 |
25 | DEFAULT_URL = "http://localhost"
26 | DEFAULT_PORT = 8080
27 |
28 |
29 | class LlamaCppApiAdapter(BaseAdapter):
30 | """
31 | [DEPRECATED] Adapter for llama.cpp HTTP server API.
32 |
33 | This adapter communicates with a llama.cpp server running in API mode.
34 | It is deprecated in favor of LiteLLMAdapter which provides better
35 | compatibility and more features.
36 |
37 | Args:
38 | llm_config: LLM configuration object
39 | url: Server URL (default: http://localhost)
40 | port: Server port (default: 8080)
41 |
42 | Note:
43 | For llama.cpp servers with OpenAI-compatible endpoints, use
44 | LiteLLMAdapter with api_base='http://localhost:8080/v1' instead."""
45 |
46 | def __init__(
47 | self,
48 | llm_config,
49 | url: str = DEFAULT_URL,
50 | port: int = DEFAULT_PORT
51 | ) -> None:
52 | """
53 | Initialize the llama.cpp adapter.
54 |
55 | Args:
56 | llm_config: Configuration object containing template and stop tokens
57 | url: Server URL
58 | port: Server port"""
59 | self.llm_config = llm_config
60 | self.url = url
61 | self.port = port
62 | super().__init__()
63 |
64 | def completion(self, messages, grammar) -> str:
65 | """
66 | Generate a text completion with optional grammar constraints.
67 |
68 | Args:
69 | messages: List of conversation messages
70 | grammar: GBNF grammar for constrained generation
71 |
72 | Returns:
73 | str: Generated completion text"""
74 | endpoint_url = f"{self.url}:{self.port}/completion"
75 |
76 | headers = {"Content-Type": "application/json"}
77 |
78 | prompt = self.llm_config.template.render(
79 | llm=self.llm_config,
80 | messages=messages
81 | )
82 | logger.debug(prompt)
83 |
84 | data = {
85 | "prompt": prompt,
86 | "grammar": grammar,
87 | "stop": self.llm_config.msg_stop
88 | }
89 |
90 | response = requests.post(endpoint_url, headers=headers, json=data)
91 |
92 | data = response.json()
93 | return data["content"]
94 |
95 | def chat_completion(self, messages, tools):
96 | """
97 | Chat completion is not implemented for llama.cpp adapter.
98 |
99 | This method is required by BaseAdapter but not implemented.
100 | Use LiteLLMAdapter for full chat completion support.
101 |
102 | Args:
103 | messages: List of conversation messages
104 | tools: List of available tools
105 |
106 | Raises:
107 | NotImplementedError: This method is not implemented"""
108 | raise NotImplementedError(
109 | "chat_completion is not implemented for LlamaCppApiAdapter. "
110 | "Please use LiteLLMAdapter instead."
111 | )
112 |
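The deprecation notes above point at LiteLLMAdapter; since that class is defined elsewhere in the repo, here is a sketch using the litellm library directly against a llama.cpp server's OpenAI-compatible endpoint (model name and URL are illustrative):

```python
# Sketch: replacing LlamaCppApiAdapter.completion with a litellm call.
import litellm

response = litellm.completion(
    model="openai/local-model",           # "openai/" prefix = OpenAI-compatible API
    api_base="http://localhost:8080/v1",  # llama.cpp server with OpenAI endpoints
    api_key="sk-no-key-required",         # llama.cpp typically ignores the key
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```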
--------------------------------------------------------------------------------
/frontend/src/components/SettingsPanel.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 | import { Bot, Calendar, Box, Volume2, Settings } from 'lucide-react';
3 | import AgentControlTab from './AgentControlTab';
4 | import ScheduleTab from './ScheduleTab';
5 | import NodeInspectorTab from './NodeInspectorTab';
6 | import VoiceSettingsTab from './VoiceSettingsTab';
7 | import GeneralSettingsTab from './GeneralSettingsTab';
8 |
9 | export default function SettingsPanel({ selectedAgentGraphNode, isAgentGraphDeployed }) {
10 | const [activeTab, setActiveTab] = useState('agents');
11 |
12 | // Auto-switch to Node tab when an agent graph node is selected
13 | useEffect(() => {
14 | if (selectedAgentGraphNode) {
15 | setActiveTab('node');
16 | }
17 | }, [selectedAgentGraphNode]);
18 |
19 | return (
20 |
21 | {/* Tab Bar */}
22 |
23 |
34 |
45 |
56 |
67 |
78 |
79 |
80 | {/* Tab Content */}
81 |
82 |         {activeTab === 'agents' && <AgentControlTab />}
83 |         {activeTab === 'node' && <NodeInspectorTab />}
84 |         {activeTab === 'schedule' && <ScheduleTab />}
85 |         {activeTab === 'voice' && <VoiceSettingsTab />}
86 |         {activeTab === 'general' && <GeneralSettingsTab />}
87 |
88 |
89 | );
90 | }
91 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
162 | frontend/node_modules/
163 | frontend/.vite
164 | data/
165 | cache/
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/python.py:
--------------------------------------------------------------------------------
1 | """
2 | Python code execution tool using embedded IPython shells.
3 |
4 | Each agent gets its own IPython InteractiveShell instance that persists
5 | variables, functions, and imports across multiple code execution calls.
6 | """
7 |
8 | from typing import Dict
9 | from io import StringIO
10 | from pydantic import Field
11 |
12 | from IPython.core.interactiveshell import InteractiveShell
13 |
14 | from beezle_bug.tools import Tool
15 |
16 |
17 | # Module-level registry of per-agent IPython shells
18 | _agent_shells: Dict[str, InteractiveShell] = {}
19 |
20 |
21 | def get_shell(agent_id: str) -> InteractiveShell:
22 | """
23 | Get or create an IPython shell for the given agent.
24 |
25 | Args:
26 | agent_id: The unique identifier for the agent
27 |
28 | Returns:
29 | The agent's IPython InteractiveShell instance
30 | """
31 |     if agent_id not in _agent_shells:
32 |         # Create a fresh instance for this agent rather than using
33 |         # InteractiveShell.instance(), which returns a shared singleton
34 |         shell = InteractiveShell()
35 |         _agent_shells[agent_id] = shell
36 |     return _agent_shells[agent_id]
37 |
38 |
39 | def cleanup_shell(agent_id: str) -> None:
40 | """
41 | Clean up and remove an agent's IPython shell.
42 |
43 | Args:
44 | agent_id: The unique identifier for the agent
45 | """
46 | if agent_id in _agent_shells:
47 | del _agent_shells[agent_id]
48 |
49 |
50 | class ExecPythonCode(Tool):
51 | """
52 | Execute Python code in a persistent IPython session.
53 |
54 | The session maintains state across calls - variables, functions,
55 | and imports defined in one call are available in subsequent calls.
56 | """
57 |
58 | code: str = Field(description="Python code to execute")
59 |
60 | async def run(self, agent) -> str:
61 | """
62 | Execute the code in the agent's IPython shell.
63 |
64 | Args:
65 | agent: The agent executing this tool (provides agent.id)
66 |
67 | Returns:
68 | String containing stdout, stderr, and execution result
69 | """
70 | shell = get_shell(agent.id)
71 |
72 | # Capture output
73 | stdout_capture = StringIO()
74 | stderr_capture = StringIO()
75 |
76 | # Store original streams
77 | import sys
78 | original_stdout = sys.stdout
79 | original_stderr = sys.stderr
80 |
81 | try:
82 | # Redirect output
83 | sys.stdout = stdout_capture
84 | sys.stderr = stderr_capture
85 |
86 | # Execute the code
87 | result = shell.run_cell(self.code, store_history=True)
88 |
89 | finally:
90 | # Restore streams
91 | sys.stdout = original_stdout
92 | sys.stderr = original_stderr
93 |
94 | # Build output string
95 | output_parts = []
96 |
97 | stdout_content = stdout_capture.getvalue()
98 | if stdout_content:
99 | output_parts.append(stdout_content.rstrip())
100 |
101 | stderr_content = stderr_capture.getvalue()
102 | if stderr_content:
103 | output_parts.append(f"[stderr]\n{stderr_content.rstrip()}")
104 |
105 | # Check for execution errors
106 | if result.error_in_exec is not None:
107 | error_msg = f"[error] {type(result.error_in_exec).__name__}: {result.error_in_exec}"
108 | output_parts.append(error_msg)
109 |
110 | # Include the result if there is one (and it's not None)
111 | if result.result is not None:
112 | output_parts.append(f">>> {repr(result.result)}")
113 |
114 | if not output_parts:
115 | return "Code executed successfully (no output)"
116 |
117 | return "\n".join(output_parts)
118 |
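The per-agent persistence described in the module docstring comes directly from reusing one InteractiveShell; a quick standalone sketch:

```python
# Sketch: state persists across run_cell() calls on a single shell,
# which is what gives each agent a stateful Python session.
from IPython.core.interactiveshell import InteractiveShell

shell = InteractiveShell()
shell.run_cell("x = 21")          # defines x in this shell's namespace
result = shell.run_cell("x * 2")  # sees x from the previous call
assert result.error_in_exec is None
assert result.result == 42        # run_cell returns an ExecutionResult
```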
--------------------------------------------------------------------------------
/backend/beezle_bug/llm_adapter/base_adapter.py:
--------------------------------------------------------------------------------
1 | """
2 | Base adapter module for LLM integrations.
3 |
4 | This module provides abstract base classes and data models for implementing
5 | LLM adapters that can communicate with various language model providers."""
6 |
7 | from abc import ABC, abstractmethod
8 | from typing import Optional
9 |
10 | from pydantic import BaseModel
11 |
12 |
13 | class Function(BaseModel):
14 | """
15 | Represents a function call definition.
16 |
17 | Attributes:
18 | name: The name of the function to call
19 | arguments: JSON string containing the function arguments"""
20 | name: str
21 | arguments: str
22 |
23 |
24 | class ToolCall(BaseModel):
25 | """
26 | Represents a tool/function call request from the LLM.
27 |
28 | Attributes:
29 | id: Unique identifier for this tool call
30 | function: The function details (name and arguments)
31 | type: The type of tool call (typically "function")"""
32 | id: str
33 | function: Function
34 | type: str
35 |
36 |
37 | class Response(BaseModel):
38 | """
39 | Represents a response from the LLM.
40 |
41 | Attributes:
42 | content: The text content of the response (None if only tool calls)
43 | role: The role of the responder (typically "assistant")
44 | reasoning: Optional reasoning or chain-of-thought explanation
45 | tool_calls: List of tool calls requested by the LLM"""
46 | content: Optional[str]
47 | role: str
48 | reasoning: Optional[str] = ""
49 | tool_calls: Optional[list[ToolCall]] = []
50 |
51 |
52 | class ToolCallResult(BaseModel):
53 | """
54 | Represents the result of a tool/function execution.
55 |
56 | Attributes:
57 | role: The role identifier (always "tool")
58 | tool_call_id: ID of the tool call this result corresponds to
59 | content: The result content from the tool execution"""
60 | role: str = "tool"
61 | tool_call_id: str
62 | content: str
63 |
64 |
65 | class Message(BaseModel):
66 | """
67 | Represents a message in a conversation.
68 |
69 | Attributes:
70 | role: The role of the message sender (e.g., "user", "assistant", "system")
71 | content: The text content of the message"""
72 | role: str
73 | content: str
74 |
75 |
76 | class BaseAdapter(ABC):
77 | """
78 | Abstract base class for LLM adapters.
79 |
80 | This class defines the interface that all LLM adapters must implement.
81 | Subclasses should provide concrete implementations for communicating with
82 | specific LLM providers.
83 |
84 | Methods:
85 | completion: Generate a text completion from messages
86 | chat_completion: Generate a chat completion with optional tool calling"""
87 |
88 | @abstractmethod
89 | def completion(self, messages, grammar) -> str:
90 | """
91 | Generate a text completion from a list of messages.
92 |
93 | Args:
94 | messages: List of conversation messages
95 | grammar: Optional grammar constraints for generation
96 |
97 | Returns:
98 | str: The generated completion text
99 |
100 | Raises:
101 | NotImplementedError: This method must be implemented by subclasses"""
102 | pass
103 |
104 | @abstractmethod
105 | def chat_completion(self, messages, tools) -> Response:
106 | """
107 | Generate a chat completion with optional tool calling support.
108 |
109 | Args:
110 | messages: List of conversation messages
111 | tools: List of available tools/functions the LLM can call
112 |
113 | Returns:
114 | Response: Response object containing the completion and any tool calls
115 |
116 | Raises:
117 | NotImplementedError: This method must be implemented by subclasses"""
118 | pass
119 |
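For orientation, the smallest conforming subclass might look like this; EchoAdapter is hypothetical and assumes plain dict messages:

```python
# Sketch: a minimal BaseAdapter implementation, useful as a test stub.
class EchoAdapter(BaseAdapter):
    def completion(self, messages, grammar) -> str:
        # Ignore the grammar and echo the last message's content.
        return messages[-1]["content"] if messages else ""

    def chat_completion(self, messages, tools) -> Response:
        # No tool calls; wrap the echo in the Response model defined above.
        return Response(content=self.completion(messages, None), role="assistant")
```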
--------------------------------------------------------------------------------
/frontend/src/components/LogPanel.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect, useRef } from 'react';
2 | import { socket } from '../lib/socket';
3 | import { ScrollText, Trash2, CheckCircle, XCircle, Info } from 'lucide-react';
4 |
5 | export default function LogPanel() {
6 | const [logs, setLogs] = useState([]);
7 | const scrollRef = useRef(null);
8 |
9 | useEffect(() => {
10 | // Listen for log messages (success, info)
11 | socket.on('log', (data) => {
12 | const entry = {
13 | id: Date.now(),
14 | timestamp: new Date().toLocaleTimeString(),
15 | type: data.type || 'info', // 'success', 'info', 'warning'
16 | message: data.message
17 | };
18 | setLogs(prev => [...prev.slice(-99), entry]); // Keep last 100 entries
19 | });
20 |
21 | // Listen for error messages
22 | socket.on('error', (data) => {
23 | const entry = {
24 | id: Date.now(),
25 | timestamp: new Date().toLocaleTimeString(),
26 | type: 'error',
27 | message: data.message
28 | };
29 | setLogs(prev => [...prev.slice(-99), entry]);
30 | });
31 |
32 | return () => {
33 | socket.off('log');
34 | socket.off('error');
35 | };
36 | }, []);
37 |
38 | // Auto-scroll to bottom on new logs
39 | useEffect(() => {
40 | if (scrollRef.current) {
41 | scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
42 | }
43 | }, [logs]);
44 |
45 | const clearLogs = () => {
46 | setLogs([]);
47 | };
48 |
49 |   const getIcon = (type) => {
50 |     switch (type) {
51 |       case 'success':
52 |         return <CheckCircle />;
53 |       case 'error':
54 |         return <XCircle />;
55 |       case 'warning':
56 |         return <Info />;
57 |       default:
58 |         return <Info />;
59 |     }
60 |   };
61 |
62 | const getTextColor = (type) => {
63 | switch (type) {
64 | case 'success':
65 | return 'text-[#22c55e]';
66 | case 'error':
67 | return 'text-[#ef4444]';
68 | case 'warning':
69 | return 'text-[#eab308]';
70 | default:
71 | return 'text-[#888]';
72 | }
73 | };
74 |
75 | return (
76 |
77 | {/* Header */}
78 |
79 |
80 |
81 | Log
82 |
83 |
84 | {logs.length} entries
85 |
92 |
93 |
94 |
95 | {/* Log Entries */}
96 |
100 | {logs.length === 0 ? (
101 |
102 | No log entries yet
103 |
104 | ) : (
105 | logs.map((log) => (
106 |
110 | {log.timestamp}
111 | {getIcon(log.type)}
112 |
113 | {log.message}
114 |
115 |
116 | ))
117 | )}
118 |
119 |
120 | );
121 | }
122 |
123 |
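The panel subscribes to 'log' and 'error' events; a backend-side sketch of the matching emits, assuming the server uses python-socketio (the actual server setup is not shown in this file):

```python
# Sketch: emitting entries that LogPanel above can render.
# Event names and payload shape mirror the frontend's socket.on() handlers.
import socketio

sio = socketio.AsyncServer(cors_allowed_origins="*")

async def report(message: str, type_: str = "info") -> None:
    # type_ is one of 'success', 'info', 'warning' per the frontend switch
    await sio.emit("log", {"type": type_, "message": message})

async def report_error(message: str) -> None:
    await sio.emit("error", {"message": message})
```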
--------------------------------------------------------------------------------
/backend/beezle_bug/memory/memory_stream.py:
--------------------------------------------------------------------------------
1 | """
2 | Memory stream module for storing conversational observations.
3 |
4 | Observations are persisted to the database immediately and retrieval
5 | uses vector similarity search.
6 | """
7 |
8 | from datetime import datetime
9 | from typing import TYPE_CHECKING, List, Dict, Any, Optional
10 | from fastembed import TextEmbedding
11 |
12 | from beezle_bug.memory.memories import Observation
13 | from beezle_bug.llm_adapter import Message, ToolCallResult, Response
14 |
15 | if TYPE_CHECKING:
16 | from beezle_bug.storage.base import StorageBackend
17 |
18 |
19 | class MemoryStream:
20 | """
21 | A memory stream for storing and retrieving conversational observations.
22 |
23 | Observations are stored with embeddings for semantic similarity search.
24 | All observations are persisted to the database immediately.
25 |
26 | Attributes:
27 | IMPORTANCE_THRESHOLD: Threshold for importance scoring
28 | """
29 |
30 | IMPORTANCE_THRESHOLD = 10
31 |
32 | def __init__(
33 | self,
34 | storage: "StorageBackend",
35 | ms_id: int
36 | ) -> None:
37 | """
38 | Initialize a memory stream.
39 |
40 | Args:
41 | storage: Storage backend for persistence (required)
42 | ms_id: Database ID of this memory stream (required)
43 | """
44 | self._storage = storage
45 | self._ms_id = ms_id
46 | self.last_reflection_point = 0
47 | # Use persistent cache directory for embedding model
48 | self.embedding_model = TextEmbedding(cache_dir="/cache/fastembed")
49 |
50 | async def add(self, content: Message | ToolCallResult | Response) -> None:
51 | """
52 | Add a new observation to the memory stream.
53 |
54 | Creates an embedding for the content and persists to the database.
55 |
56 | Args:
57 | content: The message, tool call result, or response to store
58 | """
59 | # Generate embedding
60 | content_json = content.model_dump_json() if hasattr(content, 'model_dump_json') else content.json()
61 | embedding = list(self.embedding_model.query_embed(content_json))[0]
62 |
63 | observation = Observation(content=content, embedding=embedding)
64 |
65 | # Persist to database
66 | await self._storage.ms_add_observation(self._ms_id, observation)
67 |
68 | async def retrieve(
69 | self,
70 | text: str,
71 | k: int,
72 | from_date: Optional[datetime] = None,
73 | to_date: Optional[datetime] = None
74 | ) -> List[Observation]:
75 | """
76 | Retrieve the most relevant observations for the given text.
77 |
78 | Uses vector similarity search in the database.
79 |
80 | Args:
81 | text: Query text to find similar observations
82 | k: Number of observations to retrieve
83 | from_date: Optional filter for created_at >= from_date
84 | to_date: Optional filter for created_at <= to_date
85 |
86 | Returns:
87 | List of observations sorted by creation time
88 | """
89 | # Generate query embedding
90 | query_embedding = list(self.embedding_model.query_embed(text))[0]
91 |
92 | # Use database vector search
93 | observations = await self._storage.ms_search(
94 | self._ms_id,
95 | list(query_embedding),
96 | k,
97 | from_date,
98 | to_date
99 | )
100 |
101 | # Update accessed timestamps
102 | obs_ids = [getattr(obs, '_db_id', None) for obs in observations]
103 | obs_ids = [oid for oid in obs_ids if oid is not None]
104 | if obs_ids:
105 | await self._storage.ms_update_accessed(obs_ids)
106 |
107 | # Sort by creation time
108 | observations.sort(key=lambda x: x.created)
109 | return observations
110 |
111 | async def retrieve_recent(self, n: int) -> List[Observation]:
112 | """
113 | Retrieve the N most recent observations.
114 |
115 | Returns observations in chronological order (oldest first).
116 |
117 | Args:
118 | n: Number of observations to retrieve
119 |
120 | Returns:
121 | List of observations sorted by creation time (oldest first)
122 | """
123 | return await self._storage.ms_get_recent(self._ms_id, n)
124 |
125 | async def get_metadata(self) -> Dict[str, Any]:
126 | """
127 | Get memory stream metadata.
128 |
129 | Returns:
130 | Dict with last_reflection_point and other metadata
131 | """
132 | return await self._storage.ms_get_metadata(self._ms_id)
133 |
134 | async def update_metadata(self, metadata: Dict[str, Any]) -> None:
135 | """
136 | Update memory stream metadata.
137 |
138 | Args:
139 | metadata: Dict with metadata to update
140 | """
141 | if "last_reflection_point" in metadata:
142 | self.last_reflection_point = metadata["last_reflection_point"]
143 |
144 | await self._storage.ms_update_metadata(self._ms_id, metadata)
145 |
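A usage sketch, assuming a storage backend and memory-stream ID obtained elsewhere (see beezle_bug/storage/base.py):

```python
# Sketch: adding an observation, then retrieving by semantic similarity.
from beezle_bug.llm_adapter import Message

async def demo(storage) -> None:
    stream = MemoryStream(storage, ms_id=1)
    await stream.add(Message(role="user", content="The demo starts at 10:00."))

    # Top-3 similar observations, returned oldest first.
    hits = await stream.retrieve("When does the demo start?", k=3)
    for obs in hits:
        print(obs.created, obs.content)
```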
--------------------------------------------------------------------------------
/backend/beezle_bug/llm_adapter/openai_adapter.py:
--------------------------------------------------------------------------------
1 | """
2 | [DEPRECATED] OpenAI adapter module.
3 |
4 | This module is deprecated and will be removed in a future version.
5 | Please use LiteLLMAdapter instead for better provider support and features.
6 |
7 | See LITELLM_MIGRATION.md for migration instructions."""
8 |
9 | import warnings
10 |
11 | from pydantic import BaseModel
12 | from openai import OpenAI
13 |
14 | from beezle_bug.llm_adapter import Response
15 |
16 |
17 | warnings.warn(
18 | "OpenAiAdapter is deprecated. Please use LiteLLMAdapter instead. "
19 | "See LITELLM_MIGRATION.md for migration instructions.",
20 | DeprecationWarning,
21 | stacklevel=2
22 | )
23 |
24 |
25 | def tool_to_openai_schema(tool_cls: type[BaseModel]) -> dict:
26 | """
27 | Convert a Pydantic model to OpenAI function schema.
28 |
29 | Args:
30 | tool_cls: Pydantic model class representing a tool
31 |
32 | Returns:
33 | dict: OpenAI-compatible function schema"""
34 | schema = tool_cls.model_json_schema()
35 |
36 | name = tool_cls.__name__
37 | description = (
38 | tool_cls.__doc__.strip()
39 | if tool_cls.__doc__
40 | else "No description provided."
41 | )
42 |
43 | openai_schema = {
44 | "type": "function",
45 | "function": {
46 | "name": name,
47 | "description": description,
48 | "parameters": {
49 | "type": "object",
50 | "properties": {},
51 | "required": [],
52 | "additionalProperties": False
53 | }
54 | }
55 | }
56 |
57 | for prop_name, prop_details in schema["properties"].items():
58 | openai_schema["function"]["parameters"]["properties"][prop_name] = {
59 | "type": prop_details.get("type", "string"),
60 | "description": prop_details.get("description", "No description provided.")
61 | }
62 |
63 | if "required" in schema:
64 | openai_schema["function"]["parameters"]["required"] = schema["required"]
65 |
66 | return openai_schema
67 |
68 |
69 | def tools_to_openai_schema(tools: list[type[BaseModel]]) -> list[dict]:
70 | """
71 | Convert list of Pydantic tools to OpenAI schema format.
72 |
73 | Args:
74 | tools: List of Pydantic model classes
75 |
76 | Returns:
77 | list[dict]: List of OpenAI-compatible function schemas"""
78 | return [tool_to_openai_schema(tool) for tool in tools]
79 |
80 |
81 | class OpenAiAdapter:
82 | """
83 | [DEPRECATED] Adapter for OpenAI API and compatible endpoints.
84 |
85 | This adapter is deprecated. Use LiteLLMAdapter instead for better
86 | provider support, more features, and active maintenance.
87 |
88 | Args:
89 | model: Model identifier
90 | api_url: Optional custom API URL
91 | api_key: API key for authentication"""
92 |
93 | def __init__(self, model: str, api_url: str = None, api_key: str = ""):
94 | """
95 | Initialize the OpenAI adapter.
96 |
97 | Args:
98 | model: Model identifier (e.g., 'gpt-4')
99 | api_url: Optional custom API base URL
100 | api_key: API key for authentication"""
101 | super().__init__()
102 |
103 | if api_url:
104 | self.client = OpenAI(base_url=api_url, api_key=api_key)
105 | else:
106 | self.client = OpenAI(api_key=api_key)
107 |
108 | self.model = model
109 |
110 | def chat_completion(self, messages, tools) -> Response:
111 | """
112 | Generate a chat completion with tool calling support.
113 |
114 | Args:
115 | messages: List of conversation messages
116 | tools: List of available tools
117 |
118 | Returns:
119 | Response: Response object with content and tool calls"""
120 | response = self.client.chat.completions.create(
121 | model=self.model,
122 | messages=messages,
123 | tools=tools_to_openai_schema(tools)
124 | )
125 |
126 | message = response.choices[0].message
127 | message_dict = message.model_dump()
128 |
129 | # Extract reasoning/thinking if present (some models like Claude, o1 provide this)
130 | # Check for common reasoning field names
131 | reasoning = None
132 | if hasattr(message, 'reasoning'):
133 | reasoning = message.reasoning
134 | elif hasattr(message, 'thinking'):
135 | reasoning = message.thinking
136 | elif hasattr(response.choices[0], 'reasoning'):
137 | reasoning = response.choices[0].reasoning
138 |
139 |         # Some local models put thinking in <think> tags in content
140 |         content = message_dict.get('content', '')
141 |         if content and '<think>' in content and '</think>' in content:
142 |             import re
143 |             think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
144 |             if think_match:
145 |                 reasoning = think_match.group(1).strip()
146 |                 # Remove the thinking block from the visible content
147 |                 content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
148 | message_dict['content'] = content
149 |
150 | message_dict['reasoning'] = reasoning
151 |
152 | return Response.model_validate(message_dict)
153 |
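To make the schema conversion concrete, here is what tool_to_openai_schema produces for a small hypothetical tool (GetWeather is illustrative, not part of the codebase):

```python
# Sketch: input and expected output of tool_to_openai_schema().
from pydantic import BaseModel, Field

class GetWeather(BaseModel):
    """Look up current weather for a city."""
    city: str = Field(description="City name, e.g. 'Berlin'")

schema = tool_to_openai_schema(GetWeather)
# schema == {
#   "type": "function",
#   "function": {
#     "name": "GetWeather",
#     "description": "Look up current weather for a city.",
#     "parameters": {
#       "type": "object",
#       "properties": {"city": {"type": "string",
#                               "description": "City name, e.g. 'Berlin'"}},
#       "required": ["city"],
#       "additionalProperties": False,
#     },
#   },
# }
```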
--------------------------------------------------------------------------------
/backend/beezle_bug/tools/os/filesystem.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | import os
3 | from pathlib import Path
4 |
5 | from pydantic import Field
6 |
7 | from beezle_bug.tools import Tool
8 |
9 | base_folder = "/app/data"
10 |
11 |
12 | class WriteOperation(Enum):
13 | CREATE_FILE = "create-file"
14 | APPEND_FILE = "append-file"
15 | OVERWRITE_FILE = "overwrite-file"
16 |
17 |
18 | class WriteTextFile(Tool):
19 | """
20 | Open file for writing and modification.
21 | """
22 |
23 | directory: str = Field(
24 |         ..., description="Path to the directory where the file is located or will be created. Directory only, without the filename."
25 | )
26 |
27 | filename_without_extension: str = Field(..., description="Name of the target file without the file extension.")
28 |
29 | filename_extension: str = Field(
30 | ..., description="File extension indicating the file type, such as '.txt', '.py', '.md', etc."
31 | )
32 |
33 | write_operation: WriteOperation = Field(
34 | ..., description="Write operation performed, 'create-file', 'append-file' or 'overwrite-file'"
35 | )
36 |
37 | # Allow free output for the File Content to Enhance LLM Output
38 |
39 | file_content: str = Field(..., description="Triple quoted string for unconstrained output.")
40 |
41 | async def run(self, agent):
42 |
43 | if self.directory == "":
44 | self.directory = "./"
45 | if self.filename_extension == "":
46 | self.filename_extension = ".txt"
47 | if self.filename_extension[0] != ".":
48 | self.filename_extension = "." + self.filename_extension
49 | if self.directory[0] == "." and len(self.directory) == 1:
50 | self.directory = "./"
51 |
52 | if self.directory[0] == "." and len(self.directory) > 1 and self.directory[1] != "/":
53 | self.directory = "./" + self.directory[1:]
54 |
55 | if self.directory[0] == "/":
56 | self.directory = self.directory[1:]
57 |
58 | if self.directory.endswith(f"{self.filename_without_extension}{self.filename_extension}"):
59 | self.directory = self.directory.replace(f"{self.filename_without_extension}{self.filename_extension}", "")
60 | file_path = os.path.join(self.directory, f"{self.filename_without_extension}{self.filename_extension}")
61 | file_path = os.path.join(base_folder, file_path)
62 |
63 | os.makedirs(os.path.dirname(file_path), exist_ok=True)
64 |
65 | # Determine the write mode based on the write_operation attribute
66 | if self.write_operation == WriteOperation.CREATE_FILE:
67 |             write_mode = "w"  # Create a new file (overwrites if one already exists)
68 | elif self.write_operation == WriteOperation.APPEND_FILE:
69 | write_mode = "a" # Append if file exists, create if not
70 | elif self.write_operation == WriteOperation.OVERWRITE_FILE:
71 | write_mode = "w" # Overwrite file if it exists, create if not
72 | else:
73 | raise ValueError(f"Invalid write operation: {self.write_operation}")
74 |
75 | # Write back to file
76 | with open(file_path, write_mode, encoding="utf-8") as file:
77 |             file.write(self.file_content)
78 |
79 | return f"Content written to '{self.filename_without_extension}{self.filename_extension}'."
80 |
81 |
82 | class ReadTextFile(Tool):
83 | """
84 | Reads the text content of a specified file and returns it.
85 | """
86 |
87 |     directory: str = Field(description="Path to the directory containing the file. Directory only, without the filename.")
88 |
89 | file_name: str = Field(
90 | ..., description="The name of the file to be read, including its extension (e.g., 'document.txt')."
91 | )
92 |
93 | async def run(self, agent):
94 | try:
95 | if self.directory.endswith(f"{self.file_name}"):
96 | self.directory = self.directory.replace(f"{self.file_name}", "")
97 | if not os.path.exists(f"{base_folder}/{self.directory}/{self.file_name}"):
98 |                 return f"File '{self.directory}/{self.file_name}' doesn't exist!"
99 | with open(f"{base_folder}/{self.directory}/{self.file_name}", "r", encoding="utf-8") as f:
100 | content = f.read()
101 | if content.strip() == "":
102 | return f"File '{self.file_name}' is empty!"
103 | except Exception as e:
104 | return f"Error reading file '{self.file_name}': {e}"
105 | return f"File '{self.file_name}':\n{content}"
106 |
107 |
108 | class GetFileList(Tool):
109 | """
110 | Scans a specified directory and creates a list of all files within that directory, including files in its subdirectories.
111 | """
112 |
113 | directory: str = Field(
114 | description="Path to the directory where files will be listed. This path can include subdirectories to be scanned."
115 | )
116 |
117 | async def run(self, agent):
118 | filenames = "File List:\n"
119 | counter = 1
120 | base_path = Path(base_folder) / self.directory
121 |
122 | for root, _, files in os.walk(os.path.join(base_folder, self.directory)):
123 | for file in files:
124 | relative_root = Path(root).relative_to(base_path)
125 | filenames += f"{counter}. {relative_root / file}\n"
126 | counter += 1
127 |
128 | if counter == 1:
129 | return f"Directory '{self.directory}' is empty!"
130 | return filenames
131 |
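The path handling in WriteTextFile.run boils down to stripping leading slashes and joining under base_folder; a sketch of the effect with illustrative values:

```python
# Sketch: how WriteTextFile keeps writes sandboxed under base_folder.
import os

base_folder = "/app/data"
directory = "/notes"          # run() strips the leading "/"
directory = directory[1:] if directory.startswith("/") else directory

file_path = os.path.join(base_folder, directory, "todo.txt")
assert file_path == "/app/data/notes/todo.txt"
```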
--------------------------------------------------------------------------------
/frontend/src/components/AgentControlTab.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 | import { socket } from '../lib/socket';
3 | import { Pause, Play, Square } from 'lucide-react';
4 |
5 | export default function AgentControlTab() {
6 | const [agents, setAgents] = useState([]);
7 |
8 | useEffect(() => {
9 | // Request initial agent list
10 | socket.emit('list_agents');
11 | socket.emit('get_agent_graph_state');
12 |
13 | // Listen for agent list updates (from AgentManager - standalone agents)
14 | socket.on('agents_list', (data) => {
15 | const instanced = data?.instanced || [];
16 | setAgents(prev => {
17 | const agentGraphAgents = prev.filter(a => a.source === 'agent_graph');
18 | const standaloneAgents = instanced.map(a => ({ ...a, source: 'standalone' }));
19 | return [...standaloneAgents, ...agentGraphAgents];
20 | });
21 | });
22 |
23 | // Listen for agent graph agents updates (from AgentGraphManager - deployed agent graph agents)
24 | socket.on('agent_graph_agents', (agentGraphAgentsList) => {
25 | const agentGraphAgents = (agentGraphAgentsList || []).map(a => ({ ...a, source: 'agent_graph' }));
26 | setAgents(prev => {
27 | const standaloneAgents = prev.filter(a => a.source === 'standalone');
28 | return [...standaloneAgents, ...agentGraphAgents];
29 | });
30 | });
31 |
32 | return () => {
33 | socket.off('agents_list');
34 | socket.off('agent_graph_agents');
35 | };
36 | }, []);
37 |
38 | const pauseAgent = (agentId, source) => {
39 | if (source === 'agent_graph') {
40 | // Agent graph agents can't be individually paused yet
41 | return;
42 | }
43 | socket.emit('pause_agent', { id: agentId });
44 | };
45 |
46 | const resumeAgent = (agentId, source) => {
47 | if (source === 'agent_graph') {
48 | return;
49 | }
50 | socket.emit('resume_agent', { id: agentId });
51 | };
52 |
53 | const stopAgent = (agentId, source) => {
54 | if (source === 'agent_graph') {
55 | // Can't stop individual agent graph agents
56 | return;
57 | }
58 | socket.emit('stop_agent', { id: agentId });
59 | };
60 |
61 | return (
62 |
63 |
64 | {/* Table Header */}
65 |
66 |
ID
67 |
Name
68 |
State
69 |
Actions
70 |
71 |
72 | {/* Table Body */}
73 | {agents.length === 0 ? (
74 |
75 | No agents running. Deploy an agent graph or create standalone agents.
76 |
77 | ) : (
78 | agents.map((agent) => (
79 |
83 |
84 | {agent.id}
85 |
86 |
87 | {agent.name}
88 |
89 |
90 |
95 | {agent.state === 'running' ? 'Running' : 'Paused'}
96 |
97 |
98 |
99 | {agent.source !== 'agent_graph' && (
100 | <>
101 | {agent.state === 'running' ? (
102 |
109 | ) : (
110 |
117 | )}
118 |
125 | >
126 | )}
127 |
128 |
129 | ))
130 | )}
131 |
132 |
133 | );
134 | }
135 |
--------------------------------------------------------------------------------
/backend/beezle_bug/memory/memories.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import List, Dict, Any
3 | import math
4 | import numpy as np
5 | from pydantic import BaseModel, Field, field_validator
6 | from collections.abc import Iterable
7 |
8 | from beezle_bug.llm_adapter import Message, ToolCallResult, Response
9 |
10 | class BaseMemory:
11 | DECAY = 0.999
12 |
13 | def __init__(self, importance: float, embedding: np.ndarray) -> None:
14 | self.created = datetime.now()
15 | self.accessed = datetime.now()
16 | self.importance = importance
17 | self.embedding = embedding
18 |
19 | @property
20 | def recency(self) -> float:
21 | elapsed_hours = (datetime.now() - self.accessed).total_seconds() / 3600
22 | return 1.0 * math.exp(-BaseMemory.DECAY * elapsed_hours)
23 |
24 |     def relevance(self, embedding: List[float]) -> float:
25 |         # Cosine similarity between the stored embedding and the query embedding
26 |         A = self.embedding
27 |         B = embedding
28 |
29 |         # Dot product of the two vectors
30 |         dot_product = np.dot(A, B)
31 |
32 |         # Magnitudes (L2 norms) of the vectors
33 |         magnitude_A = np.linalg.norm(A)
34 |         magnitude_B = np.linalg.norm(B)
35 |
36 |         # Cosine similarity of A and B
37 |         return dot_product / (magnitude_A * magnitude_B)
38 |
39 | def score(self, embedding: List[float]):
40 | return (self.recency + self.importance + self.relevance(embedding)) / 3.0
41 |
42 |
43 | class Observation(BaseModel):
44 | created: datetime = Field(default_factory=datetime.now)
45 | accessed: datetime = Field(default_factory=datetime.now)
46 | importance: float = Field(default=0.0)
47 | embedding: List[float] = Field(default_factory=list) # Changed from Iterable
48 | content: Message|ToolCallResult|Response
49 |
50 | @field_validator('embedding', mode='before')
51 | @classmethod
52 | def convert_embedding(cls, v):
53 | """Ensure embedding is a list of floats, not an iterator."""
54 | if hasattr(v, 'tolist'):
55 | return v.tolist()
56 | elif hasattr(v, '__iter__') and not isinstance(v, (list, np.ndarray)):
57 | return list(v)
58 | return v
59 |
60 | @property
61 | def recency(self) -> float:
62 | elapsed_hours = (datetime.now() - self.accessed).total_seconds() / 3600
63 | return 1.0 * math.exp(-BaseMemory.DECAY * elapsed_hours)
64 |
65 |     def relevance(self, embedding: List[float]) -> float:
66 |         # Cosine similarity between the stored embedding and the query embedding
67 |         A = self.embedding
68 |         B = embedding
69 |
70 |         # Dot product of the two vectors
71 |         dot_product = np.dot(A, B)
72 |
73 |         # Magnitudes (L2 norms) of the vectors
74 |         magnitude_A = np.linalg.norm(A)
75 |         magnitude_B = np.linalg.norm(B)
76 |
77 |         # Cosine similarity of A and B
78 |         return dot_product / (magnitude_A * magnitude_B)
79 |
80 | def score(self, embedding: List[float]):
81 | return (self.recency + self.importance + self.relevance(embedding)) / 3.0
82 |
83 | def to_dict(self) -> Dict[str, Any]:
84 | """Serialize the observation to a dictionary."""
85 | content_data = self.content.model_dump() if hasattr(self.content, 'model_dump') else self.content.dict()
86 | content_type = type(self.content).__name__
87 |
88 | # Convert numpy array to list of Python floats for JSON serialization
89 | if hasattr(self.embedding, 'tolist'):
90 | # numpy array - use tolist() which converts to native Python types
91 | embedding_list = self.embedding.tolist()
92 | elif hasattr(self.embedding, '__iter__'):
93 | # Other iterable - convert each element to float
94 | embedding_list = [float(x) for x in self.embedding]
95 | else:
96 | embedding_list = self.embedding
97 |
98 | return {
99 | "created": self.created.isoformat(),
100 | "accessed": self.accessed.isoformat(),
101 | "importance": float(self.importance), # Ensure importance is also a Python float
102 | "embedding": embedding_list,
103 | "content_type": content_type,
104 | "content": content_data
105 | }
106 |
107 | @classmethod
108 | def from_dict(cls, data: Dict[str, Any]) -> 'Observation':
109 | """Create an observation from a dictionary."""
110 | content_type = data.get("content_type", "Message")
111 | content_data = data.get("content", {})
112 |
113 | # Reconstruct the content object
114 | if content_type == "Message":
115 | content = Message(**content_data)
116 | elif content_type == "ToolCallResult":
117 | content = ToolCallResult(**content_data)
118 | elif content_type == "Response":
119 | content = Response(**content_data)
120 | else:
121 | # Fallback to Message
122 | content = Message(**content_data)
123 |
124 | return cls(
125 | created=datetime.fromisoformat(data["created"]) if isinstance(data.get("created"), str) else data.get("created", datetime.now()),
126 | accessed=datetime.fromisoformat(data["accessed"]) if isinstance(data.get("accessed"), str) else data.get("accessed", datetime.now()),
127 | importance=data.get("importance", 0.0),
128 | embedding=np.array(data.get("embedding", [])),
129 | content=content
130 | )
131 |
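A scoring sketch with toy two-dimensional embeddings (real embeddings come from fastembed elsewhere in the codebase):

```python
# Sketch: relevance and combined score of a fresh Observation.
import numpy as np

obs = Observation(
    content=Message(role="user", content="Buy milk"),
    embedding=np.array([1.0, 0.0]),  # the validator converts this to a list
)
query = [1.0, 0.0]
print(obs.relevance(query))  # cosine similarity -> 1.0
print(obs.score(query))      # mean of recency (~1.0), importance (0.0), relevance
```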
--------------------------------------------------------------------------------
/frontend/src/components/ScheduleTab.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 | import { socket } from '../lib/socket';
3 | import { Calendar, Trash2, Pause, RotateCcw } from 'lucide-react';
4 |
5 | export default function ScheduleTab() {
6 | const [tasks, setTasks] = useState([]);
7 | const [loading, setLoading] = useState(true);
8 |
9 | useEffect(() => {
10 | socket.emit('get_schedule');
11 |
12 | socket.on('schedule_update', (data) => {
13 | setTasks(data.tasks || []);
14 | setLoading(false);
15 | });
16 |
17 | const interval = setInterval(() => {
18 | socket.emit('get_schedule');
19 | }, 2000);
20 |
21 | return () => {
22 | socket.off('schedule_update');
23 | clearInterval(interval);
24 | };
25 | }, []);
26 |
27 | const pauseTask = (taskId) => {
28 | socket.emit('pause_schedule_task', { taskId });
29 | };
30 |
31 | const resumeTask = (taskId) => {
32 | socket.emit('resume_schedule_task', { taskId });
33 | };
34 |
35 | const cancelTask = (taskId) => {
36 | socket.emit('cancel_schedule_task', { taskId });
37 | };
38 |
39 | const formatTime = (isoString) => {
40 | if (!isoString) return '-';
41 | const date = new Date(isoString);
42 | return date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit', second: '2-digit' });
43 | };
44 |
45 | return (
46 |
47 |
48 |
49 | Scheduled Tasks
50 |
51 |
{tasks.length} tasks
52 |
53 |
54 | {loading ? (
55 |
Loading...
56 | ) : tasks.length === 0 ? (
57 |
58 |
59 |
No scheduled tasks
60 |
Enable autonomous mode to create tasks
61 |
62 | ) : (
63 |
64 | {tasks.map((task) => (
65 |
71 |
72 |
76 |
77 | {task.enabled ? (
78 |
85 | ) : (
86 |
93 | )}
94 |
101 |
102 |
103 |
104 |
105 |
106 | Agent:
107 | {task.agent_name}
108 |
109 |
110 | Type:
111 | {task.trigger_type}
112 |
113 | {task.trigger_type === 'interval' && (
114 | <>
115 |
116 | Interval:
117 | {task.interval_seconds}s
118 |
119 |
120 | Last Run:
121 | {formatTime(task.last_run)}
122 |
123 | >
124 | )}
125 | {task.trigger_type === 'once' && (
126 |
127 | Run At:
128 | {formatTime(task.run_at)}
129 |
130 | )}
131 |
132 | Run Count:
133 | {task.run_count}
134 |
135 |
136 |
137 | ))}
138 |
139 | )}
140 |
141 | );
142 | }
143 |
144 |
--------------------------------------------------------------------------------
/backend/beezle_bug/voice/transcriber.py:
--------------------------------------------------------------------------------
1 | """
2 | Speech-to-text transcription using faster-whisper.
3 |
4 | This module provides a Transcriber class that uses the faster-whisper
5 | library for efficient speech recognition with GPU acceleration.
6 | """
7 |
8 | import io
9 | import tempfile
10 | from typing import Optional
11 | from loguru import logger
12 |
13 | from faster_whisper import WhisperModel
14 |
15 |
16 | class Transcriber:
17 | """
18 | Speech-to-text transcriber using faster-whisper.
19 |
20 | Uses GPU acceleration when available for fast transcription.
21 | The model is loaded lazily on first use.
22 |
23 | Attributes:
24 | model_size: Whisper model size (tiny, base, small, medium, large-v3)
25 | device: Device to run inference on (cuda, cpu, auto)
26 | compute_type: Compute type for inference (float16, int8, etc.)
27 | """
28 |
29 | def __init__(
30 | self,
31 | model_size: str = "base",
32 | device: str = "cpu",
33 | compute_type: str = "float16"
34 | ):
35 | """
36 | Initialize the transcriber.
37 |
38 | Args:
39 | model_size: Whisper model size. Options:
40 | - tiny: Fastest, least accurate
41 | - base: Good balance for real-time
42 | - small: Better accuracy, still fast
43 | - medium: High accuracy
44 | - large-v3: Best accuracy, slower
45 | device: Device for inference (cuda, cpu, auto)
46 | compute_type: Precision (float16 for GPU, int8 for CPU)
47 | """
48 | self.model_size = model_size
49 | self.device = device
50 | self.compute_type = compute_type
51 | self._model: Optional[WhisperModel] = None
52 |
53 | @property
54 | def model(self) -> WhisperModel:
55 | """Lazy-load the Whisper model on first access."""
56 | if self._model is None:
57 | logger.info(f"Loading Whisper model: {self.model_size} on {self.device}")
58 | try:
59 | self._model = WhisperModel(
60 | self.model_size,
61 | device=self.device,
62 | compute_type=self.compute_type
63 | )
64 | logger.info("Whisper model loaded successfully")
65 | except Exception as e:
66 | # Fallback to CPU if CUDA fails
67 | logger.warning(f"Failed to load on {self.device}, falling back to CPU: {e}")
68 | self._model = WhisperModel(
69 | self.model_size,
70 | device="cpu",
71 | compute_type="int8"
72 | )
73 | return self._model
74 |
75 | def transcribe(
76 | self,
77 | audio_bytes: bytes,
78 | language: Optional[str] = None
79 | ) -> str:
80 | """
81 | Transcribe audio bytes to text.
82 |
83 | Args:
84 | audio_bytes: Raw audio data (WAV format expected)
85 | language: Optional language code (e.g., 'en', 'es')
86 | If None, language is auto-detected
87 |
88 | Returns:
89 | Transcribed text string
90 | """
91 | # Write audio to a temporary file (faster-whisper requires file path)
92 | with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as tmp:
93 | tmp.write(audio_bytes)
94 | tmp.flush()
95 |
96 | # Transcribe
97 | segments, info = self.model.transcribe(
98 | tmp.name,
99 | language=language,
100 | beam_size=5,
101 | vad_filter=True, # Use built-in VAD to filter silence
102 | vad_parameters=dict(
103 | min_silence_duration_ms=500,
104 | speech_pad_ms=200
105 | )
106 | )
107 |
108 | # Combine all segments into one string
109 | text = " ".join(segment.text.strip() for segment in segments)
110 |
111 | logger.debug(f"Transcribed {len(audio_bytes)} bytes -> '{text[:50]}...'")
112 | return text
113 |
114 | def transcribe_stream(
115 | self,
116 | audio_bytes: bytes,
117 | language: Optional[str] = None
118 | ):
119 | """
120 | Transcribe audio and yield segments as they're processed.
121 |
122 | Useful for streaming partial results to the frontend.
123 |
124 | Args:
125 | audio_bytes: Raw audio data (WAV format expected)
126 | language: Optional language code
127 |
128 | Yields:
129 | Tuples of (segment_text, start_time, end_time)
130 | """
131 | with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as tmp:
132 | tmp.write(audio_bytes)
133 | tmp.flush()
134 |
135 | segments, info = self.model.transcribe(
136 | tmp.name,
137 | language=language,
138 | beam_size=5,
139 | vad_filter=True
140 | )
141 |
142 | for segment in segments:
143 | yield (segment.text.strip(), segment.start, segment.end)
144 |
145 |
146 | # Global singleton instance (lazy-loaded)
147 | _transcriber: Optional[Transcriber] = None
148 |
149 |
150 | def get_transcriber(
151 | model_size: str = "base",
152 | device: str = "cpu",
153 | compute_type: str = "int8"
154 | ) -> Transcriber:
155 | """
156 | Get or create the global transcriber instance.
157 |
158 | Args:
159 | model_size: Whisper model size
160 | device: Device for inference
161 | compute_type: Precision type
162 |
163 | Returns:
164 | Transcriber instance
165 | """
166 | global _transcriber
167 | if _transcriber is None:
168 | _transcriber = Transcriber(model_size, device, compute_type)
169 | return _transcriber
170 |
171 |
172 |
173 |
174 |
175 |
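A one-shot usage sketch with the module-level singleton (the WAV path is illustrative):

```python
# Sketch: transcribing a WAV file via get_transcriber().
transcriber = get_transcriber(model_size="base", device="cpu", compute_type="int8")

with open("sample.wav", "rb") as f:
    audio = f.read()

print(transcriber.transcribe(audio, language="en"))
```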
--------------------------------------------------------------------------------
/data/templates/researcher.j2:
--------------------------------------------------------------------------------
1 | You are {{agent.name}}, an expert online research agent.
2 |
3 | Your purpose is to conduct thorough, accurate research on topics by searching the web, reading sources, and synthesizing information into clear, well-sourced answers.
4 |
5 | ## Core Principles
6 |
7 | ### 1. Accuracy Over Speed
8 | NEVER guess or fabricate information. If you cannot find reliable information, say so clearly.
9 | Always prefer verified facts from reputable sources over assumptions.
10 |
11 | ### 2. Source Everything
12 | Every claim should be traceable to a source. When presenting information:
13 | - Note which source provided each piece of information
14 | - Distinguish between facts and interpretations
15 | - Acknowledge when sources disagree
16 |
17 | ### 3. Search Strategically
18 | Don't just search once. Good research requires multiple searches:
19 | - Start broad to understand the landscape
20 | - Narrow down with specific queries
21 | - Search for counterarguments and alternative perspectives
22 | - Verify key facts with multiple sources
23 |
24 | ### 4. Synthesize, Don't Just Summarize
25 | Your value is in connecting information from multiple sources into coherent insights.
26 | Look for patterns, contradictions, and gaps in the available information.
27 |
28 | ## Research Process
29 |
30 | ### Step 1: UNDERSTAND the Question
31 | Before searching, analyze what's being asked:
32 | - What is the core question?
33 | - What type of information is needed? (facts, opinions, comparisons, explanations)
34 | - What would constitute a complete answer?
35 | - Are there ambiguous terms that need clarification?
36 |
37 | ### Step 2: PLAN Your Search Strategy
38 | Design a search approach:
39 | - What are the key concepts to search for?
40 | - What sources are likely to have this information?
41 | - What search queries will you use?
42 | - In what order should you search?
43 |
44 | ### Step 3: SEARCH and GATHER
45 | Execute your search strategy:
46 | 1. Start with Wikipedia for background and context
47 | 2. Use web search for current information and diverse sources
48 | 3. Read specific pages for detailed information
49 | 4. Note URLs and key findings from each source
50 |
51 | ### Step 4: VERIFY and CROSS-REFERENCE
52 | Don't trust a single source:
53 | - Cross-check important facts across multiple sources
54 | - Note any discrepancies between sources
55 | - Assess source credibility (official sites, academic sources, news outlets)
56 | - Be especially skeptical of statistics and specific claims
57 |
58 | ### Step 5: SYNTHESIZE and RESPOND
59 | Combine your findings:
60 | - Organize information logically
61 | - Present the most reliable information first
62 | - Acknowledge uncertainty where it exists
63 | - Cite your sources
64 | - Suggest further research if the topic isn't fully covered
65 |
66 | ## Search Query Best Practices
67 |
68 | **Be Specific:**
69 | - ❌ "climate change" (too broad)
70 | - ✅ "climate change effects on coral reefs 2024"
71 |
72 | **Use Multiple Angles:**
73 | - Search for the topic directly
74 | - Search for experts/organizations in the field
75 | - Search for recent news or developments
76 | - Search for critiques or controversies
77 |
78 | **Refine Based on Results:**
79 | - If results are too broad, add specificity
80 | - If results are too narrow, remove constraints
81 | - Try synonyms and alternative phrasings
82 |
83 | ## Handling Different Question Types
84 |
85 | ### Factual Questions
86 | "What is the population of Tokyo?"
87 | - Search for authoritative sources (government, census data)
88 | - Verify with multiple sources
89 | - Note the date of the data
90 |
91 | ### Explanatory Questions
92 | "How does CRISPR gene editing work?"
93 | - Start with Wikipedia for overview
94 | - Find educational/scientific sources
95 | - Build from basics to complexity
96 |
97 | ### Comparative Questions
98 | "What are the differences between Python and JavaScript?"
99 | - Search for direct comparisons
100 | - Also search each topic individually
101 | - Look for use-case specific comparisons
102 |
103 | ### Current Events Questions
104 | "What happened at the latest UN climate summit?"
105 | - Prioritize recent news sources
106 | - Check multiple news outlets for balance
107 | - Note publication dates
108 |
109 | ### Opinion/Analysis Questions
110 | "Is remote work better than office work?"
111 | - Search for studies and data
112 | - Look for multiple perspectives
113 | - Present evidence for different viewpoints
114 | - Avoid presenting opinion as fact
115 |
116 | ## Source Evaluation
117 |
118 | **High Credibility:**
119 | - Government websites (.gov)
120 | - Academic institutions (.edu)
121 | - Peer-reviewed publications
122 | - Established news organizations
123 | - Official organization websites
124 |
125 | **Medium Credibility:**
126 | - Wikipedia (good for overview, verify key facts)
127 | - Industry publications
128 | - Expert blogs
129 | - News aggregators
130 |
131 | **Low Credibility (verify elsewhere):**
132 | - Anonymous sources
133 | - Highly partisan sites
134 | - Sites with obvious commercial bias
135 | - Social media posts
136 | - Forums and comments
137 |
138 | ## Output Format
139 |
140 | When presenting research findings:
141 |
142 | ```
143 | ## Summary
144 | [Brief answer to the question]
145 |
146 | ## Key Findings
147 | 1. [Finding 1] (Source: [URL or source name])
148 | 2. [Finding 2] (Source: [URL or source name])
149 | ...
150 |
151 | ## Details
152 | [Expanded explanation with context]
153 |
154 | ## Sources Consulted
155 | - [Source 1]: [URL]
156 | - [Source 2]: [URL]
157 | ...
158 |
159 | ## Limitations
160 | [What couldn't be verified, gaps in available information]
161 |
162 | ## Suggestions for Further Research
163 | [If applicable]
164 | ```
165 |
166 | ## Storing Research Findings
167 |
168 | Use your knowledge graph to store important, reusable facts:
169 | - Add entities for people, organizations, and concepts you research
170 | - Store verified facts as properties
171 | - Create relationships between entities
172 | - This builds a knowledge base for future queries
173 |
174 | ## Important Guidelines
175 |
176 | - **Be honest about uncertainty** - "I found conflicting information" is better than false confidence
177 | - **Date your findings** - Information changes; note when sources were published
178 | - **Respect scope** - Answer what was asked, note if the question needs clarification
179 | - **Stay objective** - Present information fairly, even if sources are biased
180 | - **Acknowledge limits** - Some questions can't be fully answered with web research
181 |
182 |
183 | Remember: Your goal is to be the most helpful, accurate, and thorough research assistant possible. Quality and accuracy matter more than speed.
184 |
185 |
--------------------------------------------------------------------------------
/data/templates/summarizer.j2:
--------------------------------------------------------------------------------
1 | You are {{agent.name}}, an expert summarization agent.
2 |
3 | Your purpose is to distill complex information into clear, concise summaries while preserving the essential meaning and key details.
4 |
5 | ## Core Principles
6 |
7 | ### 1. Preserve Meaning, Reduce Words
8 | A good summary captures the essence without losing critical information.
9 | Remove redundancy, filler, and tangential details - keep what matters.
10 |
11 | ### 2. Maintain Objectivity
12 | Summarize what the source says, not what you think about it.
13 | Don't inject opinions, interpretations, or conclusions not present in the original.
14 |
15 | ### 3. Respect the Original Structure
16 | If the source has a logical flow, your summary should too.
17 | Don't rearrange information in ways that change the meaning.
18 |
19 | ### 4. Adapt to Context
20 | Different situations need different summary styles:
21 | - Executive summaries need key decisions and outcomes
22 | - Technical summaries need precise details and specifications
23 | - Narrative summaries need the story arc and key events
24 |
25 | ## Summary Types
26 |
27 | ### Brief Summary (1-2 sentences)
28 | **Use when:** Quick overview needed, limited space, initial scan
29 | **Focus on:** The single most important takeaway
30 | **Example prompt:** "Summarize this in one sentence"
31 |
32 | ### Executive Summary (1 paragraph)
33 | **Use when:** Decision-makers need quick understanding
34 | **Focus on:** Key findings, recommendations, implications
35 | **Structure:** Context → Main point → Supporting evidence → Conclusion/Action
36 |
37 | ### Detailed Summary (multiple paragraphs)
38 | **Use when:** Comprehensive understanding needed
39 | **Focus on:** All major points with supporting details
40 | **Structure:** Introduction → Main sections → Key details → Conclusion
41 |
42 | ### Bullet Point Summary
43 | **Use when:** Quick reference, action items, key facts
44 | **Focus on:** Discrete, scannable points
45 | **Structure:** Grouped by theme or importance
46 |
47 | ## Summarization Process
48 |
49 | ### Step 1: UNDERSTAND
50 | Before summarizing, fully comprehend the material:
51 | - What is the main topic or thesis?
52 | - Who is the intended audience?
53 | - What type of content is this? (news, research, conversation, documentation)
54 | - What is the purpose? (inform, persuade, instruct, entertain)
55 |
56 | ### Step 2: IDENTIFY Key Elements
57 | Extract the essential components:
58 | - **Main idea:** The central thesis or message
59 | - **Key points:** Major arguments or findings
60 | - **Supporting evidence:** Critical data, examples, or quotes
61 | - **Conclusions:** Outcomes, recommendations, or implications
62 |
63 | ### Step 3: PRIORITIZE
64 | Not everything is equally important:
65 | - What would someone NEED to know?
66 | - What would be USEFUL to know?
67 | - What is merely INTERESTING but not essential?
68 |
69 | Keep the first two, consider dropping the third.
70 |
71 | ### Step 4: CONDENSE
72 | Reduce without losing meaning:
73 | - Combine related points
74 | - Use precise words instead of phrases
75 | - Remove redundant information
76 | - Eliminate filler and hedging language
77 |
78 | ### Step 5: STRUCTURE
79 | Organize for clarity:
80 | - Lead with the most important information
81 | - Group related points together
82 | - Use transitions to show relationships
83 | - End with conclusions or next steps
84 |
85 | ## Content-Specific Guidelines
86 |
87 | ### Summarizing Conversations
88 | - Identify the participants and their roles
89 | - Extract key topics discussed
90 | - Note any decisions made or action items
91 | - Capture agreements and disagreements
92 | - Highlight important questions raised
93 |
94 | ### Summarizing Articles/Documents
95 | - Start with the headline/title claim
96 | - Identify the author's main argument
97 | - Extract key evidence and examples
98 | - Note any limitations or caveats mentioned
99 | - Capture the conclusion
100 |
101 | ### Summarizing Research/Data
102 | - State the research question or hypothesis
103 | - Summarize methodology briefly
104 | - Present key findings with numbers
105 | - Note statistical significance where relevant
106 | - Include limitations and implications
107 |
108 | ### Summarizing Meetings/Events
109 | - Date, participants, purpose
110 | - Agenda items covered
111 | - Decisions made
112 | - Action items with owners
113 | - Next steps and deadlines
114 |
115 | ## Output Formats
116 |
117 | ### Standard Summary
118 | ```
119 | ## Summary
120 |
121 | [Main point in 1-2 sentences]
122 |
123 | ### Key Points
124 | - [Point 1]
125 | - [Point 2]
126 | - [Point 3]
127 |
128 | ### Details
129 | [Expanded explanation if needed]
130 |
131 | ### Conclusion
132 | [Final takeaway or next steps]
133 | ```
134 |
135 | ### Quick Summary
136 | ```
137 | **TL;DR:** [One sentence summary]
138 |
139 | **Key takeaways:**
140 | 1. [Most important point]
141 | 2. [Second most important]
142 | 3. [Third most important]
143 | ```
144 |
145 | ### Comparative Summary
146 | ```
147 | ## Comparison: [Topic A] vs [Topic B]
148 |
149 | | Aspect | Topic A | Topic B |
150 | |--------|---------|---------|
151 | | [Aspect 1] | [A's position] | [B's position] |
152 | | [Aspect 2] | [A's position] | [B's position] |
153 |
154 | **Key Differences:** [Summary of main differences]
155 | **Similarities:** [What they agree on]
156 | ```
157 |
158 | ## Quality Checklist
159 |
160 | Before delivering a summary, verify:
161 | - [ ] **Accurate:** Does it faithfully represent the source?
162 | - [ ] **Complete:** Are all essential points included?
163 | - [ ] **Concise:** Is every word necessary?
164 | - [ ] **Clear:** Would someone unfamiliar understand it?
165 | - [ ] **Objective:** Is it free from added interpretation?
166 | - [ ] **Structured:** Is it logically organized?
167 |
168 | ## Common Pitfalls to Avoid
169 |
170 | ❌ **Copying verbatim** - Summarize in your own words
171 | ❌ **Adding information** - Only include what's in the source
172 | ❌ **Losing nuance** - Preserve important qualifications
173 | ❌ **Burying the lead** - Put the most important info first
174 | ❌ **Being too vague** - Include specific details that matter
175 | ❌ **Ignoring context** - Note when context is essential to meaning
176 |
177 | ## Handling Special Cases
178 |
179 | ### When the source is unclear
180 | - Summarize what IS clear
181 | - Note areas of ambiguity
182 | - Don't guess at meaning
183 |
184 | ### When the source is biased
185 | - Summarize the content objectively
186 | - Note the perspective/bias if relevant
187 | - Don't correct or counter the bias in the summary
188 |
189 | ### When the source is very long
190 | - Break into sections
191 | - Summarize each section
192 | - Provide an overall summary
193 | - Offer to go deeper on specific sections
194 |
195 | ### When the source is technical
196 | - Maintain technical accuracy
197 | - Define key terms if needed
198 | - Don't oversimplify to the point of inaccuracy
199 |
200 | Remember: A great summary saves time while preserving understanding. Your goal is to help users quickly grasp essential information without having to process the full source material.
201 |
202 | ## Output
203 | The output should be a concise paragraph. Do not start with "TL;DR", "Summary", or similar headings.
204 |
205 |
--------------------------------------------------------------------------------
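
The templates under `data/templates/` are Jinja2 files; this one references only `{{agent.name}}`. A minimal rendering sketch, assuming nothing more than an object with a `name` attribute (the project's actual loader lives in `backend/beezle_bug/template.py` and may differ):

```python
# Minimal sketch: rendering the summarizer template with Jinja2. Assumes
# only that `agent` exposes a `name` attribute; the repository's own loader
# (backend/beezle_bug/template.py) may wrap this differently.
from dataclasses import dataclass

from jinja2 import Environment, FileSystemLoader


@dataclass
class Agent:
    name: str


env = Environment(loader=FileSystemLoader("data/templates"))
system_prompt = env.get_template("summarizer.j2").render(agent=Agent(name="Beezle"))
print(system_prompt.splitlines()[0])  # "You are Beezle, an expert summarization agent."
```
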
/backend/beezle_bug/tools/toolbox_factory.py:
--------------------------------------------------------------------------------
1 | """
2 | Toolbox Factory for creating tool collections.
3 |
4 | Provides a registry of all available tools and factory methods
5 | to create toolboxes with selected tools.
6 | """
7 |
8 | from typing import List
9 |
10 | from beezle_bug.tools import ToolBox
11 |
12 |
13 | # System tools
14 | from beezle_bug.tools.system import (
15 | GetDateAndTime,
16 | )
17 |
18 | # Python tools
19 | from beezle_bug.tools.python import ExecPythonCode
20 |
21 | # Web tools
22 | from beezle_bug.tools.web import ReadWebsite, SearchWeb, SearchNews
23 |
24 | # Wikipedia tools
25 | from beezle_bug.tools.wikipedia import SearchWikipedia, GetWikipediaPageSummary
26 |
27 | # Memory tools - Knowledge Graph
28 | from beezle_bug.tools.memory.knowledge_graph import (
29 | AddEntity,
30 | AddPropertyToEntity,
31 | AddRelationship,
32 | GetEntity,
33 | GetRelationships,
34 | RemoveRelationship,
35 | RemoveEntity,
36 | RemoveEntityProperty,
37 | AddPropertyToRelationship,
38 | GetRelationship,
39 | RemoveRelationshipProperty,
40 | FindEntitiesByType,
41 | FindEntitiesByProperty,
42 | FindRelationshipsByType,
43 | GetNeighbors,
44 | FindPath,
45 | GetConnectedEntities,
46 | GetMostConnected,
47 | GetIsolatedEntities,
48 | CheckGraphConnectivity,
49 | )
50 |
51 | # Memory tools - Memory Stream
52 | from beezle_bug.tools.memory.memory_stream import Recall
53 |
54 | # Filesystem tools
55 | from beezle_bug.tools.os.filesystem import WriteTextFile, ReadTextFile, GetFileList
56 |
57 | # OS tools
58 | from beezle_bug.tools.os.cli import ExecCommand
59 |
60 |
61 | class ToolboxFactory:
62 | """Factory for creating toolboxes with selected tools."""
63 |
64 | # Registry mapping tool names to tool classes
65 | registry = {
66 |
67 | # System
68 | "get_date_time": GetDateAndTime,
69 |
70 | # Python
71 | "exec_python": ExecPythonCode,
72 |
73 | # Web
74 | "read_website": ReadWebsite,
75 | "search_web": SearchWeb,
76 | "search_news": SearchNews,
77 |
78 | # Wikipedia
79 | "search_wikipedia": SearchWikipedia,
80 | "wikipedia_summary": GetWikipediaPageSummary,
81 |
82 | # Knowledge Graph - CRUD
83 | "kg_add_entity": AddEntity,
84 | "kg_add_property": AddPropertyToEntity,
85 | "kg_add_relationship": AddRelationship,
86 | "kg_get_entity": GetEntity,
87 | "kg_get_relationships": GetRelationships,
88 | "kg_remove_relationship": RemoveRelationship,
89 | "kg_remove_entity": RemoveEntity,
90 | "kg_remove_entity_property": RemoveEntityProperty,
91 | "kg_add_relationship_property": AddPropertyToRelationship,
92 | "kg_get_relationship": GetRelationship,
93 | "kg_remove_relationship_property": RemoveRelationshipProperty,
94 |
95 | # Knowledge Graph - Query
96 | "kg_find_by_type": FindEntitiesByType,
97 | "kg_find_by_property": FindEntitiesByProperty,
98 | "kg_find_relationships_by_type": FindRelationshipsByType,
99 | "kg_get_neighbors": GetNeighbors,
100 | "kg_find_path": FindPath,
101 | "kg_get_connected": GetConnectedEntities,
102 | "kg_most_connected": GetMostConnected,
103 | "kg_isolated_entities": GetIsolatedEntities,
104 | "kg_check_connectivity": CheckGraphConnectivity,
105 |
106 | # Memory Stream
107 | "recall": Recall,
108 |
109 | # Filesystem
110 | "write_file": WriteTextFile,
111 | "read_file": ReadTextFile,
112 | "list_files": GetFileList,
113 |
114 | # OS
115 | "exec_command": ExecCommand,
116 | }
117 |
118 | # Predefined tool sets
119 | PRESETS = {
120 | "minimal": [
121 | "send_message", "wait", "reason", "get_date_time"
122 | ],
123 | "standard": [
124 | "send_message", "wait", "reason", "self_reflect", "get_date_time",
125 | "recall", "search_web", "read_website"
126 | ],
127 | "research": [
128 | "send_message", "wait", "reason", "self_reflect", "get_date_time",
129 | "recall", "search_web", "search_news", "read_website",
130 | "search_wikipedia", "wikipedia_summary"
131 | ],
132 | "knowledge_extractor": [
133 | "send_message", "wait", "reason", "get_date_time", "recall",
134 | "kg_add_entity", "kg_add_property", "kg_add_relationship",
135 | "kg_get_entity", "kg_get_relationships",
136 | "kg_remove_relationship", "kg_remove_entity", "kg_remove_entity_property",
137 | "kg_add_relationship_property", "kg_get_relationship", "kg_remove_relationship_property",
138 | "kg_find_by_type", "kg_find_by_property", "kg_find_relationships_by_type",
139 | "kg_get_neighbors", "kg_find_path", "kg_get_connected",
140 | "kg_most_connected", "kg_isolated_entities", "kg_check_connectivity"
141 | ],
142 | "developer": [
143 | "send_message", "wait", "reason", "self_reflect", "get_date_time",
144 | "recall", "search_web", "read_website",
145 | "exec_python", "write_file", "read_file", "list_files", "exec_command"
146 | ],
147 | "full": list(registry.keys())
148 | }
149 |
150 | def __call__(self, tools: List[str]) -> ToolBox:
151 | """
152 | Create a toolbox with the specified tools.
153 |
154 | Args:
155 | tools: List of tool names or preset name.
156 | If a single preset name is given, uses that preset.
157 |                 Otherwise, creates a toolbox with the specified tools.
158 |
159 | Returns:
160 | ToolBox with the specified tools.
161 | """
162 | # Check if it's a preset
163 | if len(tools) == 1 and tools[0] in self.PRESETS:
164 | tool_names = self.PRESETS[tools[0]]
165 | else:
166 | tool_names = tools
167 |
168 | # Build tool classes list
169 | tool_classes = []
170 | for name in tool_names:
171 | if name in self.registry:
172 | tool_classes.append(self.registry[name])
173 | else:
174 | raise ValueError(f"Unknown tool: {name}. Available tools: {list(self.registry.keys())}")
175 |
176 | return ToolBox(tool_classes)
177 |
178 | @classmethod
179 | def list_tools(cls) -> List[str]:
180 | """Return list of all available tool names."""
181 | return list(cls.registry.keys())
182 |
183 | @classmethod
184 | def list_presets(cls) -> List[str]:
185 | """Return list of available preset names."""
186 | return list(cls.PRESETS.keys())
187 |
188 | @classmethod
189 | def get_preset(cls, name: str) -> List[str]:
190 | """Return list of tools in a preset."""
191 | if name not in cls.PRESETS:
192 | raise ValueError(f"Unknown preset: {name}. Available: {list(cls.PRESETS.keys())}")
193 | return cls.PRESETS[name]
194 |
--------------------------------------------------------------------------------
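
A short usage sketch for `ToolboxFactory` as defined above. Note that, as shown, preset entries such as `send_message`, `wait`, and `reason` do not appear in `registry`, so resolving a preset would raise `ValueError` unless those core tools are registered elsewhere; the sketch therefore sticks to names that are present.

```python
# Usage sketch for ToolboxFactory, restricted to tool names that appear in
# the registry above (preset entries like "send_message" are not registered
# in this file and would raise ValueError as the code stands).
from beezle_bug.tools.toolbox_factory import ToolboxFactory

factory = ToolboxFactory()

# Build a toolbox from an explicit tool list.
toolbox = factory(["get_date_time", "search_web", "read_website"])

# Introspection helpers for UIs and validation.
print(ToolboxFactory.list_tools())            # all registered tool names
print(ToolboxFactory.list_presets())          # ['minimal', 'standard', ...]
print(ToolboxFactory.get_preset("research"))  # tool names in that preset
```
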
/backend/beezle_bug/tools/web.py:
--------------------------------------------------------------------------------
1 | """
2 | Web search and browsing tools.
3 |
4 | Uses DuckDuckGo HTML search (no API required).
5 | """
6 |
7 | from loguru import logger
8 | from bs4 import BeautifulSoup
9 | from pydantic import Field
10 | from urllib.parse import unquote, parse_qs, urlparse
11 | import requests
12 |
13 | from beezle_bug.tools import Tool
14 |
15 |
16 | # Shared headers for all web requests
17 | _HEADERS = {
18 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
19 | }
20 |
21 |
22 | def _extract_ddg_url(href: str) -> str:
23 | """Extract the actual URL from a DuckDuckGo redirect link."""
24 | if not href:
25 | return ""
26 |
27 | # DDG uses redirect URLs like: //duckduckgo.com/l/?uddg=ENCODED_URL&rut=...
28 | if "duckduckgo.com/l/" in href:
29 | parsed = urlparse(href)
30 | params = parse_qs(parsed.query)
31 | if "uddg" in params:
32 | return unquote(params["uddg"][0])
33 |
34 | # Direct URL (starts with http)
35 | if href.startswith("http"):
36 | return href
37 |
38 | # Protocol-relative URL
39 | if href.startswith("//"):
40 | return "https:" + href
41 |
42 | return href
43 |
44 | class ReadWebsite(Tool):
45 | """
46 | Retrieve the text content of a website for analysis.
47 | Use this to read the full content of a specific URL.
48 | """
49 |
50 | url: str = Field(
51 | description="The URL of the website to read.",
52 | )
53 |
54 | async def run(self, agent):
55 | try:
56 | response = requests.get(self.url, headers=_HEADERS, timeout=10)
57 | if response.status_code == 200:
58 | soup = BeautifulSoup(response.content, "html.parser")
59 |
60 | # Remove script and style elements
61 | for element in soup(["script", "style", "nav", "footer", "header"]):
62 | element.decompose()
63 |
64 | text = soup.get_text(separator=" ", strip=True)
65 |
66 | # Truncate if too long
67 | if len(text) > 15000:
68 | text = text[:15000] + "\n\n[Content truncated - page too long]"
69 |
70 | return text
71 | else:
72 | error_msg = f"Failed to retrieve page {self.url}: HTTP {response.status_code}"
73 | logger.error(error_msg)
74 | return error_msg
75 | except requests.exceptions.Timeout:
76 | return f"Error: Request to {self.url} timed out"
77 | except requests.exceptions.RequestException as e:
78 | return f"Error fetching {self.url}: {str(e)}"
79 |
80 |
81 | class SearchWeb(Tool):
82 | """
83 | Search the web for information on any topic.
84 | Returns a list of search results with titles, URLs, and snippets.
85 | """
86 |
87 | query: str = Field(
88 | description="The search query string.",
89 | )
90 | max_results: int = Field(
91 | default=10,
92 | description="Maximum number of results to return (1-25).",
93 | )
94 |
95 | async def run(self, agent):
96 | try:
97 | # POST to DuckDuckGo HTML search
98 | response = requests.post(
99 | "https://html.duckduckgo.com/html/",
100 | data={"q": self.query},
101 | headers=_HEADERS,
102 | timeout=10
103 | )
104 |
105 | if response.status_code != 200:
106 | return f"Search failed: HTTP {response.status_code}"
107 |
108 | soup = BeautifulSoup(response.content, "html.parser")
109 |
110 | # Find all result elements
111 | results = soup.select(".result")
112 | formatted_results = []
113 |
114 | for result in results[:self.max_results]:
115 | # Extract title and URL from the result link
116 | title_elem = result.select_one(".result__a")
117 | snippet_elem = result.select_one(".result__snippet")
118 |
119 | if not title_elem:
120 | continue
121 |
122 | title = title_elem.get_text(strip=True)
123 | href = title_elem.get("href", "")
124 | url = _extract_ddg_url(href)
125 | snippet = snippet_elem.get_text(strip=True) if snippet_elem else ""
126 |
127 | if title and url:
128 | formatted_results.append({
129 | "title": title,
130 | "url": url,
131 | "snippet": snippet
132 | })
133 |
134 | return {
135 | "query": self.query,
136 | "num_results": len(formatted_results),
137 | "results": formatted_results
138 | }
139 |
140 | except requests.exceptions.Timeout:
141 |             return "Error: Search request timed out"
142 | except Exception as e:
143 | logger.error(f"Search error: {e}")
144 | return f"Error performing search: {str(e)}"
145 |
146 |
147 | class SearchNews(Tool):
148 | """
149 | Search for recent news articles.
150 | Use this for current events and recent developments.
151 | """
152 |
153 | query: str = Field(
154 | description="The news search query string.",
155 | )
156 | max_results: int = Field(
157 | default=10,
158 | description="Maximum number of results to return (1-25).",
159 | )
160 |
161 | async def run(self, agent):
162 | try:
163 | # POST to DuckDuckGo HTML search with news filter
164 | response = requests.post(
165 | "https://html.duckduckgo.com/html/",
166 | data={"q": self.query, "iar": "news"},
167 | headers=_HEADERS,
168 | timeout=10
169 | )
170 |
171 | if response.status_code != 200:
172 | return f"News search failed: HTTP {response.status_code}"
173 |
174 | soup = BeautifulSoup(response.content, "html.parser")
175 |
176 | # Find all result elements
177 | results = soup.select(".result")
178 | formatted_results = []
179 |
180 | for result in results[:self.max_results]:
181 | # Extract title and URL from the result link
182 | title_elem = result.select_one(".result__a")
183 | snippet_elem = result.select_one(".result__snippet")
184 |
185 | if not title_elem:
186 | continue
187 |
188 | title = title_elem.get_text(strip=True)
189 | href = title_elem.get("href", "")
190 | url = _extract_ddg_url(href)
191 | snippet = snippet_elem.get_text(strip=True) if snippet_elem else ""
192 |
193 | if title and url:
194 | formatted_results.append({
195 | "title": title,
196 | "url": url,
197 | "snippet": snippet
198 | })
199 |
200 | return {
201 | "query": self.query,
202 | "num_results": len(formatted_results),
203 | "results": formatted_results
204 | }
205 |
206 | except requests.exceptions.Timeout:
207 |             return "Error: News search request timed out"
208 | except Exception as e:
209 | logger.error(f"News search error: {e}")
210 | return f"Error performing news search: {str(e)}"
211 |
--------------------------------------------------------------------------------
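
A quick check of the three branches of `_extract_ddg_url` above; the URLs are illustrative. Note that `parse_qs` already percent-decodes the `uddg` value, so the extra `unquote` only matters for double-encoded links.

```python
# Demonstrates the three branches of _extract_ddg_url with illustrative URLs.
from beezle_bug.tools.web import _extract_ddg_url

# 1. DDG redirect link: the real target is percent-encoded in the uddg param.
redirect = "//duckduckgo.com/l/?uddg=https%3A%2F%2Fexample.org%2Fpage&rut=abc"
assert _extract_ddg_url(redirect) == "https://example.org/page"

# 2. Direct URLs pass through unchanged.
assert _extract_ddg_url("https://example.org") == "https://example.org"

# 3. Protocol-relative URLs get an https: scheme prepended.
assert _extract_ddg_url("//example.org/a") == "https://example.org/a"
```
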
/backend/scripts/migrate_to_sqlmodel.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Migration script to convert old JSON-blob projects to new SQLModel schema.
4 |
5 | Run this script after updating to the SQLModel-based storage backend.
6 | It will:
7 | 1. Read projects from the old 'data' JSON column
8 | 2. Create new records in the projects, nodes, and edges tables
9 | 3. Remove the old 'data' column (or leave it for backup)
10 |
11 | Usage:
12 | python -m scripts.migrate_to_sqlmodel [--db-path /path/to/beezle.db]
13 | """
14 |
15 | import argparse
16 | import asyncio
17 | import json
18 | import sys
19 | from datetime import datetime
20 | from pathlib import Path
21 |
22 | # Add backend to path
23 | sys.path.insert(0, str(Path(__file__).parent.parent))
24 |
25 | import aiosqlite
26 |
27 |
28 | async def check_migration_needed(db_path: str) -> bool:
29 | """Check if migration is needed by looking for old 'data' column."""
30 | async with aiosqlite.connect(db_path) as conn:
31 | cursor = await conn.execute("PRAGMA table_info(projects)")
32 | columns = await cursor.fetchall()
33 | column_names = [col[1] for col in columns]
34 |
35 | # If 'data' column exists, migration might be needed
36 | if 'data' in column_names:
37 | # Check if there are projects with data but no corresponding nodes
38 | cursor = await conn.execute("""
39 | SELECT COUNT(*) FROM projects
40 | WHERE data IS NOT NULL AND data != '{}'
41 | """)
42 | count = (await cursor.fetchone())[0]
43 | return count > 0
44 | return False
45 |
46 |
47 | async def migrate_projects(db_path: str, dry_run: bool = False):
48 | """Migrate projects from old JSON schema to new normalized schema."""
49 | print(f"Opening database: {db_path}")
50 |
51 | async with aiosqlite.connect(db_path) as conn:
52 | conn.row_factory = aiosqlite.Row
53 |
54 | # Check for old schema
55 | cursor = await conn.execute("PRAGMA table_info(projects)")
56 | columns = await cursor.fetchall()
57 | column_names = [col[1] for col in columns]
58 |
59 | if 'data' not in column_names:
60 | print("No 'data' column found - database may already be migrated or is new.")
61 | return
62 |
63 | # Get all projects with data
64 | cursor = await conn.execute("""
65 | SELECT id, name, data, created_at, updated_at FROM projects
66 | WHERE data IS NOT NULL AND data != '{}'
67 | """)
68 | projects = await cursor.fetchall()
69 |
70 | if not projects:
71 | print("No projects with data to migrate.")
72 | return
73 |
74 | print(f"Found {len(projects)} project(s) to migrate.")
75 |
76 | for project in projects:
77 | project_id = project["id"]
78 | project_name = project["name"]
79 |
80 | try:
81 | data = json.loads(project["data"])
82 | except json.JSONDecodeError:
83 | print(f" WARNING: Could not parse JSON for project {project_id}")
84 | continue
85 |
86 | print(f"\nMigrating project: {project_name} ({project_id})")
87 |
88 | # Extract settings
89 | tts_settings = data.get("tts_settings", {})
90 | stt_settings = data.get("stt_settings", {})
91 | agent_graph = data.get("agent_graph", {})
92 | nodes = agent_graph.get("nodes", [])
93 | edges = agent_graph.get("edges", [])
94 |
95 | print(f" - {len(nodes)} nodes, {len(edges)} edges")
96 | print(f" - TTS: {tts_settings.get('enabled', False)}, STT: {stt_settings.get('enabled', False)}")
97 |
98 | if dry_run:
99 | print(" [DRY RUN] Would migrate this project")
100 | continue
101 |
102 | # Check if nodes table has entries for this project
103 | cursor = await conn.execute(
104 | "SELECT COUNT(*) FROM nodes WHERE project_id = ?",
105 | (project_id,)
106 | )
107 | existing_nodes = (await cursor.fetchone())[0]
108 |
109 | if existing_nodes > 0:
110 | print(f" Project already has {existing_nodes} nodes in new schema, skipping...")
111 | continue
112 |
113 | # Update project with settings columns
114 | await conn.execute("""
115 | UPDATE projects
116 | SET tts_settings = ?, stt_settings = ?
117 | WHERE id = ?
118 | """, (json.dumps(tts_settings), json.dumps(stt_settings), project_id))
119 |
120 | # Insert nodes
121 | for node in nodes:
122 | node_id = node.get("id")
123 | node_type = node.get("type")
124 | position = node.get("position", {})
125 | config = node.get("config", {})
126 |
127 | await conn.execute("""
128 | INSERT OR IGNORE INTO nodes (id, project_id, type, position_x, position_y, config)
129 | VALUES (?, ?, ?, ?, ?, ?)
130 | """, (
131 | node_id,
132 | project_id,
133 | node_type,
134 | position.get("x", 0),
135 | position.get("y", 0),
136 | json.dumps(config)
137 | ))
138 |
139 | # Insert edges
140 | for edge in edges:
141 | edge_id = edge.get("id")
142 | source_node = edge.get("source_node")
143 | source_port = edge.get("source_port")
144 | target_node = edge.get("target_node")
145 | target_port = edge.get("target_port")
146 | edge_type = edge.get("edge_type")
147 |
148 | await conn.execute("""
149 | INSERT OR IGNORE INTO edges
150 | (id, project_id, source_node_id, source_port, target_node_id, target_port, edge_type)
151 | VALUES (?, ?, ?, ?, ?, ?, ?)
152 | """, (
153 | edge_id,
154 | project_id,
155 | source_node,
156 | source_port,
157 | target_node,
158 | target_port,
159 | edge_type
160 | ))
161 |
162 | print(f" Migrated successfully!")
163 |
164 | if not dry_run:
165 | await conn.commit()
166 | print("\nMigration complete!")
167 | print("\nThe 'data' column has been left in place as a backup.")
168 | print("You can remove it manually after verifying the migration:")
169 | print(" ALTER TABLE projects DROP COLUMN data;")
170 |
171 |
172 | async def main():
173 | parser = argparse.ArgumentParser(description="Migrate Beezle Bug database to SQLModel schema")
174 | parser.add_argument(
175 | "--db-path",
176 | default="/data/beezle.db",
177 | help="Path to the SQLite database file"
178 | )
179 | parser.add_argument(
180 | "--dry-run",
181 | action="store_true",
182 | help="Show what would be migrated without making changes"
183 | )
184 | args = parser.parse_args()
185 |
186 | db_path = args.db_path
187 |
188 | if not Path(db_path).exists():
189 | print(f"Database not found: {db_path}")
190 | print("Make sure to run this from the container or provide the correct path.")
191 | sys.exit(1)
192 |
193 | needs_migration = await check_migration_needed(db_path)
194 | if not needs_migration:
195 | print("No migration needed - database is already in new format or empty.")
196 | return
197 |
198 | await migrate_projects(db_path, dry_run=args.dry_run)
199 |
200 |
201 | if __name__ == "__main__":
202 | asyncio.run(main())
203 |
204 |
--------------------------------------------------------------------------------
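
Besides the CLI entry point, the coroutines can be driven directly, e.g. for a scripted dry run; the database path below is illustrative:

```python
# Sketch: driving the migration coroutines directly instead of via the CLI.
# The database path is illustrative; dry_run=True makes no changes.
import asyncio

from scripts.migrate_to_sqlmodel import check_migration_needed, migrate_projects


async def preview(db_path: str = "/data/beezle.db") -> None:
    if await check_migration_needed(db_path):
        await migrate_projects(db_path, dry_run=True)
    else:
        print("Nothing to migrate.")


asyncio.run(preview())
```
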
/backend/beezle_bug/scheduler.py:
--------------------------------------------------------------------------------
1 | """
2 | Scheduler component for autonomous agent behavior.
3 |
4 | The Scheduler manages timed triggers for agents, allowing them to
5 | perform autonomous actions without explicit user input.
6 | """
7 |
8 | import asyncio
9 | import inspect
10 | from loguru import logger
11 | from dataclasses import dataclass
12 | from datetime import datetime
13 | from typing import Dict, List, Optional, Callable, Any, Coroutine, Union
14 | from enum import Enum
15 |
16 |
17 | class TriggerType(Enum):
18 | """Types of scheduler triggers."""
19 | ONCE = "once" # Fire once at a specific time
20 | INTERVAL = "interval" # Fire repeatedly at fixed intervals
21 | CRON = "cron" # Fire based on cron-like schedule (future)
22 |
23 |
24 | @dataclass
25 | class ScheduledTask:
26 | """A scheduled task for an agent."""
27 | id: str
28 | agent_id: str
29 | trigger_type: TriggerType
30 | callback: Callable[[], Union[Any, Coroutine[Any, Any, Any]]]
31 |
32 | # For ONCE triggers
33 | run_at: Optional[datetime] = None
34 |
35 | # For INTERVAL triggers
36 | interval_seconds: float = 0
37 | last_run: Optional[datetime] = None
38 |
39 | # State
40 | enabled: bool = True
41 | run_count: int = 0
42 |
43 | def should_run(self, now: datetime) -> bool:
44 | """Check if this task should run now."""
45 | if not self.enabled:
46 | return False
47 |
48 | if self.trigger_type == TriggerType.ONCE:
49 | return self.run_at is not None and now >= self.run_at and self.run_count == 0
50 |
51 | elif self.trigger_type == TriggerType.INTERVAL:
52 | if self.last_run is None:
53 | return True
54 | elapsed = (now - self.last_run).total_seconds()
55 | return elapsed >= self.interval_seconds
56 |
57 | return False
58 |
59 |
60 | class Scheduler:
61 | """
62 | Manages scheduled tasks for agents.
63 |
64 | The scheduler runs as an asyncio task and executes callbacks
65 | based on configured schedules (e.g. sending messages to agents).
66 | """
67 |
68 | def __init__(self, tick_interval: float = 1.0):
69 | """
70 | Initialize the scheduler.
71 |
72 | Args:
73 | tick_interval: How often to check for due tasks (seconds)
74 | """
75 | self.tasks: Dict[str, ScheduledTask] = {}
76 | self.tick_interval = tick_interval
77 | self.running = False
78 | self._task: Optional[asyncio.Task] = None
79 |
80 | def start(self):
81 | """Start the scheduler background loop."""
82 | if not self.running:
83 | self.running = True
84 | self._task = asyncio.create_task(self._run_loop())
85 | logger.info("Scheduler started (asyncio task)")
86 |
87 | def stop(self):
88 | """Stop the scheduler."""
89 | self.running = False
90 | if self._task:
91 | self._task.cancel()
92 | self._task = None
93 | logger.info("Scheduler stopped")
94 |
95 | async def _run_loop(self):
96 | """Main scheduler loop (async)."""
97 | while self.running:
98 | now = datetime.now()
99 |
100 | # Copy tasks to avoid modification during iteration
101 | tasks_to_check = list(self.tasks.values())
102 |
103 | for task in tasks_to_check:
104 | if task.should_run(now):
105 | try:
106 | logger.debug(f"Running task: {task.id}")
107 | result = task.callback()
108 |
109 | # Await if the callback returned a coroutine
110 | if inspect.iscoroutine(result):
111 | await result
112 |
113 | task.run_count += 1
114 | task.last_run = now
115 |
116 | # Disable one-time tasks after execution
117 | if task.trigger_type == TriggerType.ONCE:
118 | task.enabled = False
119 |
120 | except Exception as e:
121 | logger.error(f"Scheduler task {task.id} failed: {e}")
122 |
123 | await asyncio.sleep(self.tick_interval)
124 |
125 | def schedule_once(
126 | self,
127 | task_id: str,
128 | agent_id: str,
129 | callback: Callable[[], Union[Any, Coroutine[Any, Any, Any]]],
130 | run_at: datetime
131 | ) -> ScheduledTask:
132 | """
133 | Schedule a one-time task.
134 |
135 | Args:
136 | task_id: Unique identifier for this task
137 | agent_id: Id of the agent this task belongs to
138 | callback: Function to call when triggered (can be sync or async)
139 | run_at: When to run the task
140 |
141 | Returns:
142 | The created task
143 | """
144 | task = ScheduledTask(
145 | id=task_id,
146 | agent_id=agent_id,
147 | trigger_type=TriggerType.ONCE,
148 | callback=callback,
149 | run_at=run_at
150 | )
151 |
152 | self.tasks[task_id] = task
153 | logger.info(f"Scheduled one-time task '{task_id}' for {run_at}")
154 | return task
155 |
156 | def schedule_interval(
157 | self,
158 | task_id: str,
159 | agent_id: str,
160 | callback: Callable[[], Union[Any, Coroutine[Any, Any, Any]]],
161 | interval_seconds: float,
162 | start_immediately: bool = False
163 | ) -> ScheduledTask:
164 | """
165 | Schedule a recurring task.
166 |
167 | Args:
168 | task_id: Unique identifier for this task
169 | agent_id: Id of the agent this task belongs to
170 | callback: Function to call when triggered (can be sync or async)
171 | interval_seconds: Seconds between executions
172 | start_immediately: If True, run immediately on first tick
173 |
174 | Returns:
175 | The created task
176 | """
177 | task = ScheduledTask(
178 | id=task_id,
179 | agent_id=agent_id,
180 | trigger_type=TriggerType.INTERVAL,
181 | callback=callback,
182 | interval_seconds=interval_seconds,
183 | last_run=None if start_immediately else datetime.now()
184 | )
185 |
186 | self.tasks[task_id] = task
187 | logger.info(f"Scheduled interval task '{task_id}' every {interval_seconds}s")
188 | return task
189 |
190 | def cancel_task(self, task_id: str) -> bool:
191 | """
192 | Cancel a scheduled task.
193 |
194 | Args:
195 | task_id: ID of the task to cancel
196 |
197 | Returns:
198 | True if task was found and cancelled
199 | """
200 | if task_id in self.tasks:
201 | del self.tasks[task_id]
202 | logger.info(f"Cancelled task '{task_id}'")
203 | return True
204 | return False
205 |
206 | def pause_task(self, task_id: str) -> bool:
207 | """Pause a task (can be resumed later)."""
208 | if task_id in self.tasks:
209 | self.tasks[task_id].enabled = False
210 | return True
211 | return False
212 |
213 | def resume_task(self, task_id: str) -> bool:
214 | """Resume a paused task."""
215 | if task_id in self.tasks:
216 | self.tasks[task_id].enabled = True
217 | return True
218 | return False
219 |
220 | def get_tasks_for_agent(self, agent_id: str) -> List[ScheduledTask]:
221 | """Get all tasks for a specific agent."""
222 | return [t for t in self.tasks.values() if t.agent_id == agent_id]
223 |
224 | def clear_agent_tasks(self, agent_id: str):
225 | """Remove all tasks for a specific agent."""
226 | to_remove = [tid for tid, t in self.tasks.items() if t.agent_id == agent_id]
227 | for tid in to_remove:
228 | del self.tasks[tid]
229 | logger.info(f"Cleared {len(to_remove)} tasks for agent '{agent_id}'")
230 |
--------------------------------------------------------------------------------
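
A minimal usage sketch for `Scheduler`: one interval task with an async callback plus a one-shot task. `start()` calls `asyncio.create_task`, so it must run inside an event loop.

```python
# Usage sketch for Scheduler: one recurring and one one-shot task.
# start() uses asyncio.create_task, so everything runs inside asyncio.run().
import asyncio
from datetime import datetime, timedelta

from beezle_bug.scheduler import Scheduler


async def main() -> None:
    scheduler = Scheduler(tick_interval=0.5)
    scheduler.start()

    async def heartbeat() -> None:
        print("tick", datetime.now())

    # Fires every 2 s; the first run comes after a full interval because
    # start_immediately defaults to False.
    scheduler.schedule_interval("heartbeat", agent_id="agent-1",
                                callback=heartbeat, interval_seconds=2.0)

    # Fires once, five seconds from now; sync callbacks work as well.
    scheduler.schedule_once("later", agent_id="agent-1",
                            callback=lambda: print("once"),
                            run_at=datetime.now() + timedelta(seconds=5))

    await asyncio.sleep(7)
    scheduler.stop()


asyncio.run(main())
```
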
/backend/beezle_bug/voice/vad.py:
--------------------------------------------------------------------------------
1 | """
2 | Voice Activity Detection using webrtcvad.
3 |
4 | This module provides a VoiceActivityDetector class that detects speech
5 | segments in audio streams to avoid transcribing silence.
6 | """
7 |
8 | import collections
9 | import webrtcvad
10 | from typing import Generator, Optional
11 | from loguru import logger
12 |
13 |
14 | class VoiceActivityDetector:
15 | """
16 | Voice Activity Detector using webrtcvad.
17 |
18 | Detects speech segments in audio streams and buffers audio until
19 | a speech pause is detected. This helps avoid sending silence to
20 | the transcriber and improves response time.
21 |
22 | Attributes:
23 | sample_rate: Audio sample rate (8000, 16000, 32000, or 48000)
24 | frame_duration_ms: Frame duration in ms (10, 20, or 30)
25 | aggressiveness: VAD aggressiveness (0-3, higher = more aggressive)
26 | padding_duration_ms: Duration of silence to pad around speech
27 | """
28 |
29 | VALID_SAMPLE_RATES = [8000, 16000, 32000, 48000]
30 | VALID_FRAME_DURATIONS = [10, 20, 30]
31 |
32 | def __init__(
33 | self,
34 | sample_rate: int = 16000,
35 | frame_duration_ms: int = 30,
36 | aggressiveness: int = 2,
37 | padding_duration_ms: int = 300
38 | ):
39 | """
40 | Initialize the VAD.
41 |
42 | Args:
43 | sample_rate: Audio sample rate in Hz
44 | frame_duration_ms: Frame duration (10, 20, or 30 ms)
45 | aggressiveness: VAD aggressiveness level (0-3)
46 | padding_duration_ms: Padding duration for speech boundaries
47 | """
48 | if sample_rate not in self.VALID_SAMPLE_RATES:
49 | raise ValueError(f"Sample rate must be one of {self.VALID_SAMPLE_RATES}")
50 | if frame_duration_ms not in self.VALID_FRAME_DURATIONS:
51 | raise ValueError(f"Frame duration must be one of {self.VALID_FRAME_DURATIONS}")
52 | if not 0 <= aggressiveness <= 3:
53 | raise ValueError("Aggressiveness must be 0-3")
54 |
55 | self.sample_rate = sample_rate
56 | self.frame_duration_ms = frame_duration_ms
57 | self.aggressiveness = aggressiveness
58 | self.padding_duration_ms = padding_duration_ms
59 |
60 | # Calculate frame size in bytes (16-bit audio = 2 bytes per sample)
61 | self.frame_size = int(sample_rate * frame_duration_ms / 1000) * 2
62 |
63 | # Number of frames to pad speech segments
64 | self.num_padding_frames = int(padding_duration_ms / frame_duration_ms)
65 |
66 | # Initialize VAD
67 | self.vad = webrtcvad.Vad(aggressiveness)
68 |
69 | # Ring buffer for padding
70 | self._ring_buffer = collections.deque(maxlen=self.num_padding_frames)
71 | self._triggered = False
72 | self._voiced_frames = []
73 |
74 | def reset(self):
75 | """Reset the VAD state for a new audio stream."""
76 | self._ring_buffer.clear()
77 | self._triggered = False
78 | self._voiced_frames = []
79 |
80 | def is_speech(self, frame: bytes) -> bool:
81 | """
82 | Check if a single frame contains speech.
83 |
84 | Args:
85 | frame: Audio frame (must match frame_size)
86 |
87 | Returns:
88 | True if speech is detected
89 | """
90 | if len(frame) != self.frame_size:
91 | return False
92 | return self.vad.is_speech(frame, self.sample_rate)
93 |
94 | def process_frame(self, frame: bytes) -> Optional[bytes]:
95 | """
96 | Process a single audio frame.
97 |
98 | Returns speech audio when a complete utterance is detected
99 | (speech followed by silence).
100 |
101 | Args:
102 | frame: Audio frame to process
103 |
104 | Returns:
105 | Complete speech segment bytes, or None if still collecting
106 | """
107 | if len(frame) != self.frame_size:
108 | return None
109 |
110 | is_speech = self.vad.is_speech(frame, self.sample_rate)
111 |
112 | if not self._triggered:
113 | self._ring_buffer.append((frame, is_speech))
114 | num_voiced = len([f for f, speech in self._ring_buffer if speech])
115 |
116 | # Start triggered state if enough voiced frames
117 | if num_voiced > 0.9 * self._ring_buffer.maxlen:
118 | self._triggered = True
119 | # Add buffered frames
120 | for f, s in self._ring_buffer:
121 | self._voiced_frames.append(f)
122 | self._ring_buffer.clear()
123 | else:
124 | self._voiced_frames.append(frame)
125 | self._ring_buffer.append((frame, is_speech))
126 | num_unvoiced = len([f for f, speech in self._ring_buffer if not speech])
127 |
128 | # End triggered state if enough silence
129 | if num_unvoiced > 0.9 * self._ring_buffer.maxlen:
130 | self._triggered = False
131 | # Return the speech segment
132 | speech_bytes = b''.join(self._voiced_frames)
133 | self._voiced_frames = []
134 | self._ring_buffer.clear()
135 | return speech_bytes
136 |
137 | return None
138 |
139 | def process_audio(self, audio_bytes: bytes) -> Generator[bytes, None, None]:
140 | """
141 | Process audio bytes and yield speech segments.
142 |
143 | Args:
144 | audio_bytes: Raw audio data
145 |
146 | Yields:
147 | Speech segment bytes when detected
148 | """
149 | # Split audio into frames
150 | offset = 0
151 | while offset + self.frame_size <= len(audio_bytes):
152 | frame = audio_bytes[offset:offset + self.frame_size]
153 | result = self.process_frame(frame)
154 | if result:
155 | yield result
156 | offset += self.frame_size
157 |
158 | def flush(self) -> Optional[bytes]:
159 | """
160 | Flush any remaining buffered speech.
161 |
162 | Call this when the audio stream ends to get any remaining speech.
163 |
164 | Returns:
165 | Remaining speech bytes, or None
166 | """
167 | if self._voiced_frames:
168 | speech_bytes = b''.join(self._voiced_frames)
169 | self._voiced_frames = []
170 | self._ring_buffer.clear()
171 | self._triggered = False
172 | return speech_bytes
173 | return None
174 |
175 |
176 | class AudioBuffer:
177 | """
178 | Simple audio buffer for accumulating chunks.
179 |
180 | Used to collect audio from WebSocket until ready to process.
181 | """
182 |
183 | def __init__(self, max_duration_seconds: float = 30.0, sample_rate: int = 16000):
184 | """
185 | Initialize the buffer.
186 |
187 | Args:
188 | max_duration_seconds: Maximum buffer duration
189 | sample_rate: Audio sample rate
190 | """
191 | self.max_duration_seconds = max_duration_seconds
192 | self.sample_rate = sample_rate
193 | self.max_bytes = int(max_duration_seconds * sample_rate * 2) # 16-bit audio
194 | self._buffer = bytearray()
195 |
196 | def append(self, chunk: bytes) -> None:
197 | """Add audio chunk to buffer."""
198 | self._buffer.extend(chunk)
199 | # Trim if exceeds max
200 | if len(self._buffer) > self.max_bytes:
201 | self._buffer = self._buffer[-self.max_bytes:]
202 |
203 | def get_audio(self) -> bytes:
204 | """Get all buffered audio."""
205 | return bytes(self._buffer)
206 |
207 | def clear(self) -> None:
208 | """Clear the buffer."""
209 | self._buffer.clear()
210 |
211 | @property
212 | def duration_seconds(self) -> float:
213 | """Get current buffer duration in seconds."""
214 | return len(self._buffer) / (self.sample_rate * 2)
215 |
216 | def __len__(self) -> int:
217 | return len(self._buffer)
218 |
--------------------------------------------------------------------------------
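
A usage sketch for `VoiceActivityDetector`: 30 ms at 16 kHz, 16-bit mono gives 960-byte frames. The buffer here is synthetic silence standing in for real microphone audio, so no segments are actually yielded.

```python
# Usage sketch for VoiceActivityDetector on raw 16-bit mono PCM. The input
# here is synthetic silence standing in for real microphone audio.
from beezle_bug.voice.vad import VoiceActivityDetector

vad = VoiceActivityDetector(sample_rate=16000, frame_duration_ms=30,
                            aggressiveness=2, padding_duration_ms=300)

# 30 ms at 16 kHz, 16-bit mono: 480 samples * 2 bytes = 960 bytes per frame.
silence = b"\x00\x00" * 480
audio = silence * 50  # ~1.5 s; real input would contain speech segments

for segment in vad.process_audio(audio):
    print(f"utterance: {len(segment)} bytes")

# At end of stream, collect any speech still buffered.
tail = vad.flush()
if tail:
    print(f"trailing speech: {len(tail)} bytes")
```
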
/backend/beezle_bug/llm_adapter/litellm_adapter.py:
--------------------------------------------------------------------------------
1 | """
2 | LiteLLM adapter module for unified LLM provider access.
3 |
4 | This module provides a unified adapter that supports 100+ LLM providers through
5 | the LiteLLM library, including OpenAI, Anthropic, Ollama, Groq, and many more."""
6 |
7 | import litellm
8 | from pydantic import BaseModel
9 |
10 | from beezle_bug.llm_adapter.base_adapter import Response
11 |
12 |
13 | def tool_to_openai_schema(tool_cls: type[BaseModel]) -> dict:
14 | """
15 | Convert a Pydantic model to OpenAI function calling schema.
16 |
17 | This function extracts the model's JSON schema and transforms it into
18 | the format expected by OpenAI's function calling API.
19 |
20 | Args:
21 | tool_cls: A Pydantic model class representing a tool
22 |
23 | Returns:
24 | dict: OpenAI-compatible function schema"""
25 | schema = tool_cls.model_json_schema()
26 |
27 | name = tool_cls.__name__
28 | description = (
29 | tool_cls.__doc__.strip()
30 | if tool_cls.__doc__
31 | else "No description provided."
32 | )
33 |
34 | openai_schema = {
35 | "type": "function",
36 | "function": {
37 | "name": name,
38 | "description": description,
39 | "parameters": {
40 | "type": "object",
41 | "properties": {},
42 | "required": [],
43 | "additionalProperties": False
44 | }
45 | }
46 | }
47 |
48 | for prop_name, prop_details in schema["properties"].items():
49 | openai_schema["function"]["parameters"]["properties"][prop_name] = {
50 | "type": prop_details.get("type", "string"),
51 | "description": prop_details.get("description", "No description provided.")
52 | }
53 |
54 | if "required" in schema:
55 | openai_schema["function"]["parameters"]["required"] = schema["required"]
56 |
57 | return openai_schema
58 |
59 |
60 | def tools_to_openai_schema(tools: list[type[BaseModel]]) -> list[dict]:
61 | """
62 | Convert a list of Pydantic tool models to OpenAI schema format.
63 |
64 | Args:
65 | tools: List of Pydantic model classes representing tools
66 |
67 | Returns:
68 | list[dict]: List of OpenAI-compatible function schemas"""
69 | return [tool_to_openai_schema(tool) for tool in tools]
70 |
71 |
72 | class LiteLLMAdapter:
73 | """
74 | Unified adapter for accessing 100+ LLM providers through LiteLLM.
75 |
76 | This adapter provides a consistent interface for communicating with various
77 | LLM providers including OpenAI, Anthropic, Cohere, Replicate, Hugging Face,
78 | Together AI, Azure OpenAI, PaLM, Vertex AI, Ollama, and many more.
79 |
80 | The adapter supports both simple text completions and advanced features like
81 | function/tool calling, streaming, and custom parameters.
82 |
83 | Attributes:
84 | model: Model identifier in LiteLLM format
85 | api_base: Optional custom API base URL
86 | api_key: Optional API key for authentication
87 | extra_params: Additional parameters passed to every completion call
88 |
89 | Args:
90 | model: Model identifier (e.g., 'gpt-4', 'claude-3', 'ollama/qwen3:0.6b')
91 | api_base: Optional custom API base URL
92 | api_key: Optional API key (if not set in environment variables)
93 | **kwargs: Additional parameters (temperature, max_tokens, etc.)
94 | Note:
95 | For a complete list of supported providers and model name formats,
96 | see: https://docs.litellm.ai/docs/providers"""
97 |
98 | def __init__(
99 | self,
100 | model: str,
101 |         api_base: str | None = None,
102 |         api_key: str | None = None,
103 | **kwargs
104 | ):
105 | """
106 | Initialize the LiteLLM adapter.
107 |
108 | Args:
109 | model: Model identifier in LiteLLM format
110 | api_base: Optional custom API base URL
111 | api_key: Optional API key for authentication
112 | **kwargs: Additional parameters for completion calls"""
113 | self.model = model
114 | self.api_base = api_base
115 | self.api_key = api_key
116 | self.extra_params = kwargs
117 |
118 | if api_base:
119 | litellm.api_base = api_base
120 | if api_key:
121 | litellm.api_key = api_key
122 |
123 | def chat_completion(self, messages, tools) -> Response:
124 | """
125 | Generate a chat completion with optional tool/function calling support.
126 |
127 | This method sends messages to the LLM and receives a response that may
128 | include tool calls. It handles message format conversion and response
129 | parsing automatically.
130 |
131 | Args:
132 | messages: List of message dictionaries or Message objects
133 | tools: List of Pydantic tool model classes
134 |
135 | Returns:
136 | Response: Response object containing content, role, and tool calls"""
137 | formatted_messages = []
138 | for msg in messages:
139 | if hasattr(msg, 'model_dump'):
140 | formatted_messages.append(msg.model_dump())
141 | elif hasattr(msg, 'dict'):
142 | formatted_messages.append(msg.dict())
143 | else:
144 | formatted_messages.append(msg)
145 |
146 | completion_params = {
147 | "model": self.model,
148 | "messages": formatted_messages,
149 | **self.extra_params
150 | }
151 |
152 | if tools:
153 | completion_params["tools"] = tools_to_openai_schema(tools)
154 |
155 | if self.api_base:
156 | completion_params["api_base"] = self.api_base
157 | if self.api_key:
158 | completion_params["api_key"] = self.api_key
159 |
160 | response = litellm.completion(**completion_params)
161 |
162 | message = response.choices[0].message
163 |
164 | response_dict = {
165 | "content": message.content,
166 | "role": message.role,
167 | "reasoning": "", # LiteLLM doesn't provide reasoning field
168 | "tool_calls": []
169 | }
170 |
171 | if hasattr(message, 'tool_calls') and message.tool_calls:
172 | response_dict["tool_calls"] = [
173 | {
174 | "id": tc.id,
175 | "type": tc.type,
176 | "function": {
177 | "name": tc.function.name,
178 | "arguments": tc.function.arguments
179 | }
180 | }
181 | for tc in message.tool_calls
182 | ]
183 |
184 | return Response.model_validate(response_dict)
185 |
186 | def completion(self, messages, grammar=None) -> str:
187 | """
188 | Generate a simple text completion without tool calling.
189 |
190 | This method provides basic text generation from a list of messages.
191 | The grammar parameter is kept for interface compatibility but is
192 | ignored as most LiteLLM providers don't support grammar-constrained
193 | generation.
194 |
195 | Args:
196 | messages: List of message dictionaries or Message objects
197 | grammar: Ignored (kept for backward compatibility)
198 |
199 | Returns:
200 | str: The generated completion text
201 | Note:
202 | For grammar-constrained generation, consider using specialized
203 | adapters or libraries like llama.cpp with GBNF grammars."""
204 | formatted_messages = []
205 | for msg in messages:
206 | if hasattr(msg, 'model_dump'):
207 | formatted_messages.append(msg.model_dump())
208 | elif hasattr(msg, 'dict'):
209 | formatted_messages.append(msg.dict())
210 | else:
211 | formatted_messages.append(msg)
212 |
213 | completion_params = {
214 | "model": self.model,
215 | "messages": formatted_messages,
216 | **self.extra_params
217 | }
218 |
219 | if self.api_base:
220 | completion_params["api_base"] = self.api_base
221 | if self.api_key:
222 | completion_params["api_key"] = self.api_key
223 |
224 | response = litellm.completion(**completion_params)
225 |
226 | return response.choices[0].message.content
227 |
--------------------------------------------------------------------------------
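
A sketch of the schema conversion and a tool-calling completion through `LiteLLMAdapter`; the model name is illustrative (a local Ollama model, as in the class docstring) and the completion call requires a reachable provider.

```python
# Sketch: convert a Pydantic tool to an OpenAI function schema, then make a
# tool-calling completion. The model name is illustrative and needs a
# running provider (here: a local Ollama instance).
import json

from pydantic import BaseModel, Field

from beezle_bug.llm_adapter.litellm_adapter import (
    LiteLLMAdapter,
    tool_to_openai_schema,
)


class GetWeather(BaseModel):
    """Look up the current weather for a city."""
    city: str = Field(description="City name, e.g. 'Berlin'.")


print(json.dumps(tool_to_openai_schema(GetWeather), indent=2))

adapter = LiteLLMAdapter(model="ollama/qwen3:0.6b", temperature=0.2)
response = adapter.chat_completion(
    messages=[{"role": "user", "content": "What's the weather in Berlin?"}],
    tools=[GetWeather],
)
print(response.tool_calls)
```
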
/backend/beezle_bug/storage/base.py:
--------------------------------------------------------------------------------
1 | """
2 | Abstract base class for storage backends.
3 |
4 | Defines the interface for async storage operations with support for:
5 | - Project CRUD
6 | - Knowledge Graph incremental operations
7 | - Memory Stream incremental operations with vector search
8 | """
9 |
10 | from abc import ABC, abstractmethod
11 | from datetime import datetime
12 | from typing import TYPE_CHECKING, Optional
13 |
14 | if TYPE_CHECKING:
15 | from beezle_bug.project import Project
16 | from beezle_bug.memory.knowledge_graph import KnowledgeGraph
17 | from beezle_bug.memory.memories import Observation
18 |
19 |
20 | class StorageBackend(ABC):
21 | """
22 | Abstract async storage backend.
23 |
24 | All methods are async to support both SQLite (via aiosqlite)
25 | and PostgreSQL (via asyncpg) backends.
26 | """
27 |
28 | # === Lifecycle ===
29 |
30 | @abstractmethod
31 | async def initialize(self) -> None:
32 | """Initialize database connection and create schema if needed."""
33 | ...
34 |
35 | @abstractmethod
36 | async def close(self) -> None:
37 | """Close database connections."""
38 | ...
39 |
40 | # === Project Operations ===
41 |
42 | @abstractmethod
43 | async def list_projects(self) -> list[dict]:
44 | """
45 | List all projects with metadata.
46 |
47 | Returns:
48 | List of dicts with id, name, created_at, updated_at
49 | """
50 | ...
51 |
52 | @abstractmethod
53 | async def get_project(self, project_id: str) -> Optional["Project"]:
54 | """
55 | Get a project by ID.
56 |
57 | Returns:
58 | Project instance or None if not found
59 | """
60 | ...
61 |
62 | @abstractmethod
63 | async def save_project(self, project: "Project") -> None:
64 | """
65 | Save or update a project.
66 |
67 | The full project (including agent_graph, settings) is stored.
68 | """
69 | ...
70 |
71 | @abstractmethod
72 | async def delete_project(self, project_id: str) -> None:
73 | """
74 | Delete a project and all associated data.
75 |
76 | Cascades to delete knowledge_graphs, memory_streams, etc.
77 | """
78 | ...
79 |
80 | @abstractmethod
81 | async def project_exists(self, project_id: str) -> bool:
82 | """Check if a project exists."""
83 | ...
84 |
85 | # === Knowledge Graph Operations ===
86 |
87 | @abstractmethod
88 | async def kg_ensure(self, project_id: str, node_id: str) -> int:
89 | """
90 | Ensure a knowledge graph exists for the given project/node.
91 | Creates if not exists.
92 |
93 | Returns:
94 | The knowledge_graph.id (integer primary key)
95 | """
96 | ...
97 |
98 | @abstractmethod
99 | async def kg_add_entity(
100 | self,
101 | kg_id: int,
102 | entity_name: str,
103 | properties: dict
104 | ) -> int:
105 | """
106 | Add an entity to the knowledge graph.
107 |
108 | Args:
109 | kg_id: Knowledge graph ID
110 | entity_name: Unique name for the entity
111 | properties: Entity properties as dict
112 |
113 | Returns:
114 | The entity.id (integer primary key)
115 | """
116 | ...
117 |
118 | @abstractmethod
119 | async def kg_update_entity(
120 | self,
121 | entity_id: int,
122 | properties: dict
123 | ) -> None:
124 | """
125 | Update entity properties.
126 |
127 | Args:
128 | entity_id: Entity ID
129 | properties: New properties (replaces existing)
130 | """
131 | ...
132 |
133 | @abstractmethod
134 | async def kg_add_entity_property(
135 | self,
136 | kg_id: int,
137 | entity_name: str,
138 | prop_name: str,
139 | prop_value: str
140 | ) -> None:
141 | """
142 | Add or update a single property on an entity.
143 |
144 | Args:
145 | kg_id: Knowledge graph ID
146 | entity_name: Entity name
147 | prop_name: Property name
148 | prop_value: Property value
149 | """
150 | ...
151 |
152 | @abstractmethod
153 | async def kg_remove_entity_property(
154 | self,
155 | kg_id: int,
156 | entity_name: str,
157 | prop_name: str
158 | ) -> None:
159 | """
160 | Remove a property from an entity.
161 | """
162 | ...
163 |
164 | @abstractmethod
165 | async def kg_remove_entity(self, kg_id: int, entity_name: str) -> None:
166 | """
167 | Remove an entity and all its relationships.
168 |
169 | Args:
170 | kg_id: Knowledge graph ID
171 | entity_name: Entity name to remove
172 | """
173 | ...
174 |
175 | @abstractmethod
176 | async def kg_get_entity_id(self, kg_id: int, entity_name: str) -> Optional[int]:
177 | """
178 | Get the entity ID by name.
179 |
180 | Returns:
181 | Entity ID or None if not found
182 | """
183 | ...
184 |
185 | @abstractmethod
186 | async def kg_add_relationship(
187 | self,
188 | kg_id: int,
189 | from_entity_name: str,
190 | rel_type: str,
191 | to_entity_name: str,
192 | properties: dict
193 | ) -> int:
194 | """
195 | Add a relationship between two entities.
196 |
197 | Args:
198 | kg_id: Knowledge graph ID
199 | from_entity_name: Source entity name
200 | rel_type: Relationship type
201 | to_entity_name: Target entity name
202 | properties: Relationship properties
203 |
204 | Returns:
205 | The relationship.id (integer primary key)
206 | """
207 | ...
208 |
209 | @abstractmethod
210 | async def kg_update_relationship_property(
211 | self,
212 | kg_id: int,
213 | from_entity_name: str,
214 | rel_type: str,
215 | to_entity_name: str,
216 | prop_name: str,
217 | prop_value: str
218 | ) -> None:
219 | """
220 | Add or update a property on a relationship.
221 | """
222 | ...
223 |
224 | @abstractmethod
225 | async def kg_remove_relationship_property(
226 | self,
227 | kg_id: int,
228 | from_entity_name: str,
229 | rel_type: str,
230 | to_entity_name: str,
231 | prop_name: str
232 | ) -> None:
233 | """
234 | Remove a property from a relationship.
235 | """
236 | ...
237 |
238 | @abstractmethod
239 | async def kg_remove_relationship(
240 | self,
241 | kg_id: int,
242 | from_entity_name: str,
243 | rel_type: str,
244 | to_entity_name: str
245 | ) -> None:
246 | """
247 | Remove a relationship between two entities.
248 | """
249 | ...
250 |
251 | @abstractmethod
252 | async def kg_load_full(
253 | self,
254 | project_id: str,
255 | node_id: str
256 | ) -> Optional["KnowledgeGraph"]:
257 | """
258 | Load the full knowledge graph into a KnowledgeGraph instance.
259 |
260 | Used for graph traversal operations that need the full graph in memory.
261 |
262 | Returns:
263 | KnowledgeGraph instance or None if not found
264 | """
265 | ...
266 |
267 | # === Memory Stream Operations ===
268 |
269 | @abstractmethod
270 | async def ms_ensure(self, project_id: str, node_id: str) -> int:
271 | """
272 | Ensure a memory stream exists for the given project/node.
273 | Creates if not exists.
274 |
275 | Returns:
276 | The memory_stream.id (integer primary key)
277 | """
278 | ...
279 |
280 | @abstractmethod
281 | async def ms_add_observation(
282 | self,
283 | ms_id: int,
284 | observation: "Observation"
285 | ) -> int:
286 | """
287 | Add an observation to the memory stream.
288 |
289 | Stores the observation data and its embedding vector.
290 |
291 | Args:
292 | ms_id: Memory stream ID
293 | observation: Observation instance with content and embedding
294 |
295 | Returns:
296 | The observation.id (integer primary key)
297 | """
298 | ...
299 |
300 | @abstractmethod
301 | async def ms_search(
302 | self,
303 | ms_id: int,
304 | query_embedding: list[float],
305 | k: int,
306 | from_date: Optional[datetime] = None,
307 | to_date: Optional[datetime] = None,
308 | ) -> list["Observation"]:
309 | """
310 | Search for similar observations using vector similarity.
311 |
312 | Args:
313 | ms_id: Memory stream ID
314 | query_embedding: Query vector for similarity search
315 | k: Number of results to return
316 | from_date: Optional filter for created_at >= from_date
317 | to_date: Optional filter for created_at <= to_date
318 |
319 | Returns:
320 | List of Observation instances, sorted by similarity
321 | """
322 | ...
323 |
324 | @abstractmethod
325 | async def ms_update_accessed(
326 | self,
327 | observation_ids: list[int]
328 | ) -> None:
329 | """
330 | Update accessed_at timestamp for retrieved observations.
331 |
332 | Called after retrieval to update recency scores.
333 | """
334 | ...
335 |
336 | @abstractmethod
337 | async def ms_get_metadata(
338 | self,
339 | ms_id: int
340 | ) -> dict:
341 | """
342 | Get memory stream metadata.
343 |
344 | Returns:
345 | Dict with last_reflection_point, etc.
346 | """
347 | ...
348 |
349 | @abstractmethod
350 | async def ms_update_metadata(
351 | self,
352 | ms_id: int,
353 | metadata: dict
354 | ) -> None:
355 | """
356 | Update memory stream metadata.
357 | """
358 | ...
359 |
360 |
--------------------------------------------------------------------------------
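
A skeleton sketch of a concrete backend. `StorageBackend` is abstract, so a real implementation must provide every `@abstractmethod` before it can be instantiated; only a few methods are fleshed out here to show the shape.

```python
# Skeleton sketch of a concrete StorageBackend. Only a few methods are shown;
# a real subclass must implement every @abstractmethod before instantiation.
from beezle_bug.storage.base import StorageBackend


class InMemoryBackend(StorageBackend):
    async def initialize(self) -> None:
        # No connection pool needed; everything lives in a dict.
        self._projects: dict[str, dict] = {}

    async def close(self) -> None:
        self._projects.clear()

    async def list_projects(self) -> list[dict]:
        return [
            {"id": pid, "name": p.get("name"),
             "created_at": p.get("created_at"),
             "updated_at": p.get("updated_at")}
            for pid, p in self._projects.items()
        ]

    async def project_exists(self, project_id: str) -> bool:
        return project_id in self._projects

    # get_project, save_project, delete_project, and the kg_* / ms_* methods
    # would be implemented the same way; omitted here for brevity.
```
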
/frontend/src/components/TemplateEditorTab.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect, useCallback } from 'react';
2 | import { socket } from '../lib/socket';
3 | import { FileText, Plus, Save, Trash2, X, AlertCircle, Check } from 'lucide-react';
4 |
5 | export default function TemplateEditorTab() {
6 | const [templates, setTemplates] = useState([]);
7 | const [selectedTemplate, setSelectedTemplate] = useState(null);
8 | const [content, setContent] = useState('');
9 | const [originalContent, setOriginalContent] = useState('');
10 | const [isCreating, setIsCreating] = useState(false);
11 | const [newTemplateName, setNewTemplateName] = useState('');
12 | const [error, setError] = useState(null);
13 |
14 | const hasUnsavedChanges = content !== originalContent;
15 |
16 | // Load template list on mount
17 | useEffect(() => {
18 | socket.emit('get_templates');
19 |
20 | const handleTemplatesList = (data) => {
21 | setTemplates(data.templates || []);
22 | };
23 |
24 | const handleTemplateContent = (data) => {
25 | if (data.name === selectedTemplate) {
26 | setContent(data.content);
27 | setOriginalContent(data.content);
28 | }
29 | };
30 |
31 | const handleTemplateSaved = (data) => {
32 | // Refresh original content after save
33 | setOriginalContent(content);
34 | setIsCreating(false);
35 | setNewTemplateName('');
36 | setError(null);
37 | };
38 |
39 | const handleTemplateDeleted = (data) => {
40 | if (data.name === selectedTemplate) {
41 | setSelectedTemplate(null);
42 | setContent('');
43 | setOriginalContent('');
44 | }
45 | };
46 |
47 | const handleError = (data) => {
48 | setError(data.message);
49 | setTimeout(() => setError(null), 5000);
50 | };
51 |
52 | socket.on('templates_list', handleTemplatesList);
53 | socket.on('template_content', handleTemplateContent);
54 | socket.on('template_saved', handleTemplateSaved);
55 | socket.on('template_deleted', handleTemplateDeleted);
56 | socket.on('error', handleError);
57 |
58 | return () => {
59 | socket.off('templates_list', handleTemplatesList);
60 | socket.off('template_content', handleTemplateContent);
61 | socket.off('template_saved', handleTemplateSaved);
62 | socket.off('template_deleted', handleTemplateDeleted);
63 | socket.off('error', handleError);
64 | };
65 | }, [selectedTemplate, content]);
66 |
67 | // Load content when template is selected
68 | useEffect(() => {
69 | if (selectedTemplate && !isCreating) {
70 | socket.emit('get_template_content', { name: selectedTemplate });
71 | }
72 | }, [selectedTemplate, isCreating]);
73 |
74 | const selectTemplate = (name) => {
75 | if (hasUnsavedChanges) {
76 | if (!confirm('You have unsaved changes. Discard them?')) {
77 | return;
78 | }
79 | }
80 | setIsCreating(false);
81 | setSelectedTemplate(name);
82 | setError(null);
83 | };
84 |
85 | const startCreating = () => {
86 | if (hasUnsavedChanges) {
87 | if (!confirm('You have unsaved changes. Discard them?')) {
88 | return;
89 | }
90 | }
91 | setIsCreating(true);
92 | setSelectedTemplate(null);
93 | setContent('');
94 | setOriginalContent('');
95 | setNewTemplateName('');
96 | setError(null);
97 | };
98 |
99 | const cancelCreating = () => {
100 | setIsCreating(false);
101 | setNewTemplateName('');
102 | setContent('');
103 | setOriginalContent('');
104 | };
105 |
106 | const saveTemplate = () => {
107 | const name = isCreating ? newTemplateName.trim() : selectedTemplate;
108 | if (!name) {
109 | setError('Template name is required');
110 | return;
111 | }
112 |
113 | // Validate name (alphanumeric, underscore, dash only)
114 | if (!/^[a-zA-Z0-9_-]+$/.test(name)) {
115 | setError('Template name can only contain letters, numbers, underscores, and dashes');
116 | return;
117 | }
118 |
119 | socket.emit('save_template', { name, content });
120 |
121 | if (isCreating) {
122 | setSelectedTemplate(name);
123 | }
124 | };
125 |
126 | const deleteTemplate = () => {
127 | if (!selectedTemplate) return;
128 | if (!confirm(`Delete template "${selectedTemplate}"? This cannot be undone.`)) {
129 | return;
130 | }
131 | socket.emit('delete_template', { name: selectedTemplate });
132 | };
133 |
134 | const handleKeyDown = useCallback((e) => {
135 | if ((e.ctrlKey || e.metaKey) && e.key === 's') {
136 | e.preventDefault();
137 | if (hasUnsavedChanges || isCreating) {
138 | saveTemplate();
139 | }
140 | }
141 | }, [hasUnsavedChanges, isCreating, content, selectedTemplate, newTemplateName]);
142 |
143 | useEffect(() => {
144 | window.addEventListener('keydown', handleKeyDown);
145 | return () => window.removeEventListener('keydown', handleKeyDown);
146 | }, [handleKeyDown]);
147 |
148 | return (
149 |
150 | {/* Left sidebar - template list */}
151 |
152 |
153 |
160 |
161 |
162 |
163 | {templates.map((name) => (
164 |
176 | ))}
177 |
178 |
179 |
180 | {/* Right side - editor */}
181 |
182 | {/* Toolbar */}
183 |
184 |
185 | {isCreating ? (
186 |
187 | setNewTemplateName(e.target.value)}
191 | placeholder="template_name"
192 | className="px-2 py-1 bg-[#0a0a0a] border border-[#2b2b2b] rounded text-xs text-[#e5e5e5] w-40 focus:outline-none focus:border-[#3b82f6]"
193 | autoFocus
194 | />
195 |
202 |
203 | ) : selectedTemplate ? (
204 |
205 |
206 |
{selectedTemplate}.j2
207 | {hasUnsavedChanges && (
208 |
209 |
210 | Unsaved
211 |
212 | )}
213 |
214 | ) : (
215 |
Select a template to edit
216 | )}
217 |
218 |
219 |
220 | {(selectedTemplate || isCreating) && (
221 | <>
222 |
235 | {selectedTemplate && !isCreating && (
236 |
243 | )}
244 | >
245 | )}
246 |
247 |
248 |
249 | {/* Error banner */}
250 | {error && (
251 |
252 |
253 | {error}
254 |
255 | )}
256 |
257 | {/* Editor area */}
258 |
259 | {(selectedTemplate || isCreating) ? (
260 |
277 |
278 |
279 | );
280 | }
281 |
282 |
283 |
284 |
285 |
286 |
287 |
288 |
289 |
290 |
291 |
292 |
293 |
294 |
295 |
296 |
297 |
298 |
299 |
--------------------------------------------------------------------------------
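
TemplateEditorTab speaks a small socket protocol: it emits get_templates, get_template_content, save_template, and delete_template, and listens for templates_list, template_content, template_saved, template_deleted, and error. A hedged sketch of what matching server-side handlers could look like, assuming python-socketio and a flat directory of .j2 files (the directory path, wiring, and handler bodies are assumptions for illustration, not the project's actual backend):

    import re
    from pathlib import Path

    import socketio

    sio = socketio.AsyncServer(async_mode="asgi")
    TEMPLATE_DIR = Path("data/templates")      # assumed location of the .j2 files
    NAME_RE = re.compile(r"^[a-zA-Z0-9_-]+$")  # mirrors the client-side validation

    @sio.on("get_templates")
    async def get_templates(sid):
        names = sorted(p.stem for p in TEMPLATE_DIR.glob("*.j2"))
        await sio.emit("templates_list", {"templates": names}, to=sid)

    @sio.on("save_template")
    async def save_template(sid, data):
        # re-validating on the server also blocks path traversal via crafted names
        name = data.get("name", "")
        if not NAME_RE.match(name):
            await sio.emit("error", {"message": "Invalid template name"}, to=sid)
            return
        (TEMPLATE_DIR / f"{name}.j2").write_text(data.get("content", ""))
        await sio.emit("template_saved", {"name": name}, to=sid)

    # get_template_content and delete_template would follow the same pattern
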
/frontend/src/components/IntrospectionPanel.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect, useRef, useMemo } from 'react';
2 | import { socket } from '../lib/socket';
3 | import { Brain, Terminal, MessageSquare, AlertCircle, Zap, ChevronDown, ChevronRight, Trash, Pause, Play, Filter, X } from 'lucide-react';
4 |
5 | // Event type definitions with icons and colors
6 | const EVENT_TYPES = {
7 | 'message.received': { icon: MessageSquare, label: 'Message', color: '#22c55e' },
8 | 'llm.call.started': { icon: Brain, label: 'Thinking', color: '#a855f7', animate: true },
9 | 'llm.call.completed': { icon: Brain, label: 'Thought', color: '#a855f7' },
10 | 'tool.selected': { icon: Terminal, label: 'Tool', color: '#eab308' },
11 | 'tool.execution.completed': { icon: Zap, label: 'Tool Result', color: '#22c55e' },
12 | 'error.occurred': { icon: AlertCircle, label: 'Error', color: '#ef4444' },
13 | };
14 |
15 | export default function IntrospectionPanel() {
16 | const [events, setEvents] = useState([]);
17 | const [paused, setPaused] = useState(false);
18 | const [selectedAgents, setSelectedAgents] = useState(new Set());
19 | const [selectedTypes, setSelectedTypes] = useState(new Set());
20 | const [showFilter, setShowFilter] = useState(false);
21 | const scrollRef = useRef(null);
22 |
23 | // Get unique agent names from events
24 | const agentNames = useMemo(() => {
25 | const names = new Set();
26 | events.forEach(e => {
27 | if (e.agent_name) names.add(e.agent_name);
28 | });
29 | return Array.from(names).sort();
30 | }, [events]);
31 |
32 | // Get unique event types from events
33 | const eventTypes = useMemo(() => {
34 | const types = new Set();
35 | events.forEach(e => {
36 | if (e.type) types.add(e.type);
37 | });
38 | return Array.from(types).sort();
39 | }, [events]);
40 |
41 | // Filter events based on selected agents and types
42 | const filteredEvents = useMemo(() => {
43 | return events.filter(e => {
44 | const agentMatch = selectedAgents.size === 0 || selectedAgents.has(e.agent_name);
45 | const typeMatch = selectedTypes.size === 0 || selectedTypes.has(e.type);
46 | return agentMatch && typeMatch;
47 | });
48 | }, [events, selectedAgents, selectedTypes]);
49 |
50 | const hasActiveFilters = selectedAgents.size > 0 || selectedTypes.size > 0;
51 |
52 | useEffect(() => {
53 | if (!paused && scrollRef.current) {
54 | scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
55 | }
56 | }, [filteredEvents, paused]);
57 |
58 | useEffect(() => {
59 | function onAgentEvent(event) {
60 | setEvents(prev => {
61 | const newEvents = [...prev, event];
62 | return newEvents.length > 100 ? newEvents.slice(-100) : newEvents;
63 | });
64 | }
65 | socket.on('agent_event', onAgentEvent);
66 | return () => socket.off('agent_event', onAgentEvent);
67 | }, []);
68 |
69 | const toggleAgent = (name) => {
70 | setSelectedAgents(prev => {
71 | const next = new Set(prev);
72 | if (next.has(name)) next.delete(name);
73 | else next.add(name);
74 | return next;
75 | });
76 | };
77 |
78 | const toggleType = (type) => {
79 | setSelectedTypes(prev => {
80 | const next = new Set(prev);
81 | if (next.has(type)) next.delete(type);
82 | else next.add(type);
83 | return next;
84 | });
85 | };
86 |
87 | const clearFilters = () => {
88 | setSelectedAgents(new Set());
89 | setSelectedTypes(new Set());
90 | };
91 |
92 | const getEventTypeInfo = (type) => {
93 | return EVENT_TYPES[type] || { icon: Zap, label: type.split('.').pop(), color: '#888888' };
94 | };
95 |
96 | return (
97 |
98 | {/* Header */}
99 |
100 |
101 | {filteredEvents.length} events
102 | {hasActiveFilters && (
103 | (filtered)
104 | )}
105 |
106 |
107 |
114 |
121 |
128 |
129 |
130 |
131 | {/* Filter Panel */}
132 | {showFilter && (
133 |
134 | {/* Clear All */}
135 | {hasActiveFilters && (
136 |
137 |
143 |
144 | )}
145 |
146 | {/* Agent Filter */}
147 |
148 |
Agents
149 | {agentNames.length === 0 ? (
150 |
No agents yet
151 | ) : (
152 |
153 | {agentNames.map(name => (
154 |
165 | ))}
166 |
167 | )}
168 |
169 |
170 | {/* Event Type Filter */}
171 |
172 |
Event Types
173 | {eventTypes.length === 0 ? (
174 |
No events yet
175 | ) : (
176 |
177 | {eventTypes.map(type => {
178 | const info = getEventTypeInfo(type);
179 | const Icon = info.icon;
180 | return (
181 |
193 | );
194 | })}
195 |
196 | )}
197 |
198 |
199 | )}
200 |
201 | {paused &&
Paused
}
202 |
203 |
204 |         {filteredEvents.map((event, i) => <EventItem key={i} event={event} />)}
205 |
206 |
207 | );
208 | }
209 |
210 | function EventItem({ event }) {
211 |   const [expanded, setExpanded] = useState(event.type === 'tool.selected' || Boolean(event.data?.thinking));
212 |
213 | const typeInfo = EVENT_TYPES[event.type] || { icon: Zap, label: event.type.split('.').pop(), color: '#888888' };
214 | const Icon = typeInfo.icon;
215 | const color = event.type === 'tool.execution.completed' && event.data?.success === false ? '#ef4444' : typeInfo.color;
216 |
217 | // Build the label: for tools, show tool name
218 | let label = typeInfo.label;
219 | if (event.type === 'tool.selected' || event.type === 'tool.execution.completed') {
220 | label = event.data?.tool_name || label;
221 | }
222 |
223 | const hasDetails = event.data && Object.keys(event.data).length > 0;
224 | const hasThinking = event.data?.thinking;
225 | const agentName = event.agent_name || 'Unknown';
226 |
227 | return (
228 |
229 |
hasDetails && setExpanded(!expanded)}
232 | >
233 |
234 |
235 |
236 | {agentName}
237 | :
238 | {label}
239 |           {hasThinking && <span>💭</span>}
240 |
241 |
242 | {hasDetails && (
243 |
244 |             {expanded ? <ChevronDown /> : <ChevronRight />}
245 |
246 | )}
247 |
248 | {expanded && hasDetails && (
249 |
250 | {/* Show thinking prominently if present */}
251 | {hasThinking && (
252 |
253 |
Thinking
254 |
{event.data.thinking}
255 |
256 | )}
257 | {/* Show other data */}
258 |
259 | {Object.entries(event.data)
260 | .filter(([k]) => k !== 'thinking')
261 | .map(([k, v]) => (
262 |
263 | {k}:{' '}
264 | {typeof v === 'object' ? JSON.stringify(v) : String(v)}
265 |
266 | ))}
267 |
268 |
269 | )}
270 |
271 | );
272 | }
273 |
--------------------------------------------------------------------------------
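
IntrospectionPanel consumes agent_event payloads with three fields: type, agent_name, and data (the same fields Agent._emit puts on each Event, as seen in the next file). A sketch of the bridge that forwards bus events to the browser, assuming the EventBus exposes a subscribe hook (the hook name and the payload serialization are assumptions):

    # Forward backend events to the 'agent_event' channel the panel listens on.
    # EventBus.subscribe() is a hypothetical hook name; the real API may differ.
    def bridge_events(event_bus, sio):
        def forward(event):
            payload = {
                "type": getattr(event.type, "value", str(event.type)),
                "agent_name": event.agent_name,
                "data": event.data,
            }
            # emit() is async on AsyncServer; schedule it from this sync callback
            sio.start_background_task(sio.emit, "agent_event", payload)

        event_bus.subscribe(forward)  # hypothetical registration hook
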
/backend/beezle_bug/agent_graph/agent.py:
--------------------------------------------------------------------------------
1 | """
2 | Core agent module for Beezle Bug.
3 |
4 | This module implements the Agent class which handles LLM interactions,
5 | tool calling, and memory management. The agent operates asynchronously
6 | and is triggered by messages from users or event nodes.
7 | """
8 |
9 | import json
10 | import time
11 | from loguru import logger
12 | from datetime import datetime
13 | from typing import Optional, Dict, Any, List
14 | from jinja2 import Template
15 |
16 | from beezle_bug.constants import DEFAULT_MSG_BUFFER_SIZE
17 | from beezle_bug.llm_adapter import BaseAdapter, ToolCallResult, Message
18 | from beezle_bug.memory import MemoryStream, KnowledgeGraph, get_schema_for_prompt
19 | from beezle_bug.tools import ToolBox
20 | from beezle_bug.events import EventBus, EventType, Event
21 |
22 |
23 | class Agent:
24 | """
25 | Unified agent that handles LLM interactions and tool execution.
26 |
27 | The Agent class provides a conversation interface with tool calling capabilities.
28 | It is triggered by messages from users, other agents, event nodes, or WaitAndCombine nodes.
29 |
30 | Agents can operate in two modes:
31 | - **Stateful** (with memory_stream): History is persisted to database, retrieved on each call
32 | - **Stateless** (no memory_stream): Only uses messages passed to process_message, no persistence
33 |
34 | All operations are async to support storage-backed memory and knowledge graph.
35 |
36 | Attributes:
37 | name: The agent's name
38 | adapter: LLM adapter for generating completions
39 | toolbox: Collection of available tools
40 | memory_stream: Optional stream of conversation history
41 | knowledge_graph: Structured knowledge storage
42 | event_bus: Optional event bus for introspection
43 | """
44 |
45 | def __init__(
46 | self,
47 | id: str,
48 | name: str,
49 | adapter: BaseAdapter,
50 | toolbox: ToolBox,
51 | system_template: Template,
52 | event_bus: Optional[EventBus] = None,
53 | memory_stream: Optional[MemoryStream] = None,
54 | knowledge_graph: Optional[KnowledgeGraph] = None
55 | ) -> None:
56 | """
57 | Initialize the agent.
58 |
59 | Args:
60 | id: Unique agent identifier
61 | name: Agent's display name
62 | adapter: LLM adapter for generating completions
63 | toolbox: Collection of available tools
64 | system_template: Jinja2 template for system message
65 | event_bus: Optional event bus for emitting introspection events
66 | memory_stream: Optional memory stream for persistence (stateless if None)
67 | knowledge_graph: Optional shared knowledge graph (creates new if None)
68 | """
69 | self.id = id
70 | self.name = name
71 | self.adapter = adapter
72 | self.toolbox = toolbox
73 | self.memory_stream = memory_stream
74 | self.knowledge_graph = knowledge_graph if knowledge_graph is not None else KnowledgeGraph()
75 | self.event_bus = event_bus
76 | self.system_message_template = system_template
77 |
78 | def _emit(self, event_type: EventType, data: Dict[str, Any]) -> None:
79 | """Emit an event if event bus is configured."""
80 | if self.event_bus:
81 | self.event_bus.emit(Event(
82 | type=event_type,
83 | agent_name=self.name,
84 | data=data
85 | ))
86 |
87 | async def process_message(self, messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
88 | """
89 | Process incoming messages and generate a response.
90 |
91 | This is the main entry point for all agent interactions, whether from
92 | users, other agents, event nodes, or WaitAndCombine nodes.
93 |
94 | Behavior depends on memory_stream presence:
95 | - With memory_stream: Retrieves history from DB, stores messages and responses
96 | - Without memory_stream: Only uses incoming messages, no persistence (stateless)
97 |
98 | Args:
99 | messages: List of message dicts, each with "sender" and "content" keys
100 |
101 | Returns:
102 | List of response message dicts with "sender" (this agent's name) and "content"
103 | """
104 | # Emit message received events
105 | for msg in messages:
106 | self._emit(EventType.MESSAGE_RECEIVED, {
107 | "from": msg["sender"],
108 | "content": msg["content"]
109 | })
110 |
111 | # Build system message
112 | system_message = self.system_message_template.render(
113 | agent=self,
114 | now=datetime.now(),
115 | entity_schemas=get_schema_for_prompt()
116 | )
117 |
118 | # Get history based on memory_stream presence
119 | if self.memory_stream:
120 | # Store incoming messages to memory
121 | for msg in messages:
122 | await self.memory_stream.add(
123 | Message(role="user", content=f"[{msg['sender']}]: {msg['content']}")
124 | )
125 |
126 | # Stateful: Query database for recent history
127 | recent_observations = await self.memory_stream.retrieve_recent(n=DEFAULT_MSG_BUFFER_SIZE)
128 | history = [obs.content for obs in recent_observations]
129 | else:
130 | # Stateless: Only use incoming messages
131 | history = [
132 | Message(role="user", content=f"[{msg['sender']}]: {msg['content']}")
133 | for msg in messages
134 | ]
135 |
136 | # Build LLM context
137 | llm_messages = [Message(role="system", content=system_message)] + history
138 |
139 | self._emit(EventType.LLM_CALL_STARTED, {
140 | "context_messages": len(llm_messages),
141 | "available_tools": len(self.toolbox.get_tools())
142 | })
143 |
144 | # Call LLM
145 | try:
146 | start_time = time.time()
147 | response = self.adapter.chat_completion(llm_messages, self.toolbox.get_tools())
148 | duration = (time.time() - start_time) * 1000
149 |
150 | response_content = getattr(response, 'content', None)
151 | response_reasoning = getattr(response, 'reasoning', None)
152 | tool_calls_count = len(response.tool_calls) if response.tool_calls else 0
153 |
154 | event_data = {
155 | "duration_ms": round(duration),
156 | "has_content": response_content is not None,
157 | "tool_calls": tool_calls_count,
158 | }
159 |
160 | if response_reasoning:
161 | event_data["thinking"] = response_reasoning
162 |
163 | if response_content:
164 | event_data["response_preview"] = (response_content[:200] + "...") if len(response_content) > 200 else response_content
165 |
166 | self._emit(EventType.LLM_CALL_COMPLETED, event_data)
167 |
168 | except Exception as e:
169 | logger.error(f"LLM call failed: {e}")
170 | self._emit(EventType.ERROR_OCCURRED, {"error": str(e)})
171 | return []
172 |
173 | # Store response if memory_stream exists
174 | if self.memory_stream:
175 | await self.memory_stream.add(response)
176 | llm_messages.append(response)
177 |
178 | # Process tool calls
179 | while response.tool_calls:
180 | for tool_call in response.tool_calls:
181 | func_name = tool_call.function.name
182 | func_args = tool_call.function.arguments
183 |
184 | try:
185 | parsed_args = json.loads(func_args)
186 |                 except (json.JSONDecodeError, TypeError):
187 | parsed_args = func_args
188 |
189 | self._emit(EventType.TOOL_SELECTED, {
190 | "tool_name": func_name,
191 | "arguments": parsed_args
192 | })
193 |
194 | try:
195 | tool_start = time.time()
196 |
197 | if func_name in self.toolbox:
198 |                         tool = self.toolbox.get_tool(func_name, parsed_args)  # args already parsed above
199 | result_content = await tool.run(self)
200 | result = ToolCallResult(
201 | tool_call_id=tool_call.id,
202 | content=str(result_content),
203 | role="tool"
204 | )
205 | else:
206 | result_content = f"Tool '{func_name}' not found."
207 | result = ToolCallResult(
208 | tool_call_id=tool_call.id,
209 | content=result_content,
210 | role="tool"
211 | )
212 |
213 | tool_duration = (time.time() - tool_start) * 1000
214 | result_str = str(result_content)
215 |
216 | self._emit(EventType.TOOL_COMPLETED, {
217 | "tool_name": func_name,
218 | "duration_ms": round(tool_duration),
219 |                     "result": (result_str[:200] + "...") if len(result_str) > 200 else result_str,
220 | "success": True
221 | })
222 |
223 | # Store tool result if memory_stream exists
224 | if self.memory_stream:
225 | await self.memory_stream.add(result)
226 | llm_messages.append(result)
227 |
228 | except Exception as e:
229 | logger.error(f"Tool execution error: {e}")
230 | self._emit(EventType.TOOL_COMPLETED, {
231 | "tool_name": func_name,
232 | "error": str(e),
233 | "success": False
234 | })
235 |
236 | result = ToolCallResult(
237 | tool_call_id=tool_call.id,
238 | content=f"Error: {e}",
239 | role="tool"
240 | )
241 | if self.memory_stream:
242 | await self.memory_stream.add(result)
243 | llm_messages.append(result)
244 |
245 | # Continue thinking after tool execution
246 | try:
247 | response = self.adapter.chat_completion(llm_messages, self.toolbox.get_tools())
248 | if self.memory_stream:
249 | await self.memory_stream.add(response)
250 | llm_messages.append(response)
251 | except Exception as e:
252 | logger.error(f"LLM call failed: {e}")
253 | self._emit(EventType.ERROR_OCCURRED, {"error": str(e)})
254 | return []
255 |
256 | # Return response as list of message dicts
257 | if response.content:
258 | return [{"sender": self.name, "content": response.content}]
259 | return []
260 |
--------------------------------------------------------------------------------
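
Because process_message is the single entry point, an agent can also be driven directly, outside the graph. A minimal stateless usage sketch (make_adapter is a hypothetical placeholder for constructing one of the concrete adapters, and the bare ToolBox() call assumes a no-argument constructor; check the real signatures):

    import asyncio

    from jinja2 import Template

    from beezle_bug.agent_graph.agent import Agent
    from beezle_bug.tools import ToolBox

    async def main():
        agent = Agent(
            id="agent-1",
            name="Beezle",
            adapter=make_adapter(),  # hypothetical factory for a concrete BaseAdapter
            toolbox=ToolBox(),       # assumes a no-argument constructor
            system_template=Template("You are {{ agent.name }}. It is {{ now }}."),
        )
        # No memory_stream: stateless, only these messages form the LLM context.
        replies = await agent.process_message(
            [{"sender": "user", "content": "Say hello."}]
        )
        for reply in replies:
            print(f"[{reply['sender']}]: {reply['content']}")

    asyncio.run(main())
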