├── core
├── __init__.py
└── event_bridge.py
├── internal
├── modules
│ ├── actions
│ │ ├── __init__.py
│ │ ├── feed.py
│ │ ├── rest.py
│ │ ├── drink.py
│ │ ├── action.py
│ │ ├── play.py
│ │ └── action_manager.py
│ ├── behaviors
│ │ ├── __init__.py
│ │ ├── behavior.py
│ │ ├── walk.py
│ │ ├── idle.py
│ │ ├── chase.py
│ │ ├── relax.py
│ │ └── sleep.py
│ ├── needs
│ │ ├── __init__.py
│ │ └── need.py
│ └── memory
│ │ ├── state
│ │ ├── references.py
│ │ └── signatures.py
│ │ ├── operations
│ │ └── synthesis
│ │ │ ├── base.py
│ │ │ └── manager.py
│ │ ├── networks
│ │ └── reentrant_lock.py
│ │ ├── metrics
│ │ ├── strength.py
│ │ └── temporal.py
│ │ ├── async_lru_cache.py
│ │ └── db
│ │ └── schema.py
└── __init__.py
├── assets
└── images
│ └── concept.png
├── tools
├── interaction
│ ├── config
│ │ └── actions.json
│ └── talk.py
├── README.md
├── maintenance
│ ├── prune.py
│ └── clear_data.py
├── manifest.json
├── discord
│ ├── discord_bot.md
│ └── bot_exceptions.py
└── testing
│ └── unix_bridge_aiohttp.py
├── client
├── tui
│ ├── __main__.py
│ ├── messages.py
│ ├── app.tcss
│ └── ws_client.py
└── config
│ ├── __main__.py
│ ├── dialogs.py
│ └── models.py
├── brain
├── prompting
│ ├── prompts
│ │ ├── interfaces
│ │ │ ├── exo
│ │ │ │ ├── results
│ │ │ │ │ ├── failure.yaml
│ │ │ │ │ └── success.yaml
│ │ │ │ ├── welcome.yaml
│ │ │ │ ├── hud
│ │ │ │ │ ├── stickys.yaml
│ │ │ │ │ ├── internals.yaml
│ │ │ │ │ ├── discord.yaml
│ │ │ │ │ └── system.yaml
│ │ │ │ ├── environments
│ │ │ │ │ └── discord
│ │ │ │ │ │ └── commands.yaml
│ │ │ │ ├── memory.yaml
│ │ │ │ ├── preprocessor.yaml
│ │ │ │ ├── summary.yaml
│ │ │ │ └── turn.yaml
│ │ │ ├── state
│ │ │ │ └── common.yaml
│ │ │ ├── discord
│ │ │ │ ├── memory.yaml
│ │ │ │ └── interaction.yaml
│ │ │ ├── user
│ │ │ │ ├── memory.yaml
│ │ │ │ └── interaction.yaml
│ │ │ └── influence.yaml
│ │ └── memory
│ │ │ ├── conflict.yaml
│ │ │ └── formation.yaml
│ └── loader.py
├── interfaces
│ ├── action.py
│ └── exo_utils
│ │ └── hud
│ │ ├── construct.py
│ │ └── sections
│ │ ├── stickys.py
│ │ ├── base.py
│ │ └── discord.py
├── cognition
│ └── notification.py
├── commands
│ └── model.py
└── environments
│ └── environment_registry.py
├── loggers
├── __init__.py
├── formatters.py
└── manager.py
├── requirements.txt
├── services.json
├── .gitignore
├── pyproject.toml
├── LICENSE
├── shared_models
└── tui_events.py
├── .env.example
├── EVENT_CATALOG.md
├── README.md
├── main.py
└── event_dispatcher.py
/core/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/internal/modules/actions/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/internal/modules/behaviors/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/internal/modules/needs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/assets/images/concept.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LuxiaSL/hephia/HEAD/assets/images/concept.png
--------------------------------------------------------------------------------
/tools/interaction/config/actions.json:
--------------------------------------------------------------------------------
1 | {
2 | "base_url": "http://localhost:5517",
3 | "timeout": 30.0,
4 | "max_retries": 3
5 | }
6 |
--------------------------------------------------------------------------------
/client/tui/__main__.py:
--------------------------------------------------------------------------------
# client/tui/__main__.py
"""Entry point for the Hephia TUI client: run with ``python -m client.tui``."""

from .app import HephiaTUIApp


def main() -> None:
    """Instantiate and run the TUI application."""
    HephiaTUIApp().run()


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/internal/__init__.py:
--------------------------------------------------------------------------------
# internal/__init__.py
"""Public surface of the ``internal`` package."""

from .internal import Internal, InternalContext

__all__ = ['Internal', 'InternalContext']
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/results/failure.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.results.failure
2 |
3 | defaults:
4 | template: # todo: chunk apart process_interaction and define parts
5 |
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/results/success.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.results.success
2 |
3 | defaults:
4 | template: # todo: chunk apart process_interaction and define parts
5 |
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/welcome.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.welcome
2 |
3 | defaults:
4 | template: |
5 | Welcome to Hephia OS
6 | Please enter 'help' to see available commands & environments.
7 |
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/state/common.yaml:
--------------------------------------------------------------------------------
1 | id: state.common
2 | defaults:
3 | divider: "###"
4 | internal_header: |
5 | Internal State:
6 | memory_header: |
7 | Relevant Memories:
8 | notifications_header: |
9 | Cognitive Notifications:
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/hud/stickys.yaml:
--------------------------------------------------------------------------------
1 | # brain/prompting/prompts/interfaces/exo/hud/stickys.yaml
2 |
3 | id: interfaces.exo.hud.stickys
4 |
5 | defaults:
6 | combined: |
7 | ${hud_header_str}
8 | ${sticky_error_str}
9 | ${sticky_notes_block_str}
10 |
--------------------------------------------------------------------------------
/loggers/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Hephia logging system.
3 | Provides structured logging for different subsystems.
4 | """
5 |
6 | from .manager import LogManager
7 | from .loggers import InternalLogger, BrainLogger, SystemLogger, MemoryLogger, EventLogger
8 |
9 | __all__ = ['LogManager', 'InternalLogger', 'BrainLogger', 'SystemLogger', 'MemoryLogger', 'EventLogger']
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/environments/discord/commands.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.environments.discord.commands
2 |
3 | defaults:
4 | sections:
5 | list_users: |
6 | Active Users Report for ${channel}:
7 |
8 | Recently Active (${num_recent}):
9 | ${recent_list}
10 |
11 | Other Members (${num_other}):
12 | ${other_list}
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Basic requirements
2 | fastapi
3 | uvicorn[standard]
4 | aiohttp
5 | bs4
6 | colorama
7 | python-dotenv
8 | textual
9 | pyperclip
10 | pydantic
11 | numpy
12 | sentence-transformers
13 | discord.py
14 | aiosqlite
15 | psutil
16 | sortedcontainers
17 | aiorwlock
18 | spacy
19 |
20 | # Curses on Windows vs. non-Windows
21 | windows-curses; sys_platform == "win32"
22 |
23 |
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/hud/internals.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.hud.internals
2 |
3 | defaults:
4 | combined: |
5 | ${hud_header_str}
6 | ${internal_state_error_str}
7 | ${internal_state_mood_str}
8 | ${internal_state_behavior_str}
9 | ${internal_state_needs_summary_str}
10 | ${internal_state_emotions_summary_str}
11 | ${internal_state_memories_block_str}
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/hud/discord.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.hud.discord
2 |
3 | defaults:
4 | combined: |
5 | ${hud_header_str}
6 | ${discord_error_str}
7 | active channel: [${discord_channel_path}]
8 | ${discord_users_summary_str}
9 | recent history:
10 | ${discord_messages_block_str}
11 | users: |
12 | recent users (${num_recent}): [${recent_list}]
13 | others online (${num_other}): [${other_list}]
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/discord/memory.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.discord.memory
2 |
3 | defaults:
4 | template: |
5 | Form a memory of this Discord interaction:
6 | Channel: ${channel_path}
7 |
8 | Recent History:
9 | ${history_text}
10 |
11 | My Response: ${content}
12 |
13 | Extra self data: ${context}
14 |
15 | Create a concise first-person memory snippet that captures:
16 | 1. The social dynamics and emotional context
17 | 2. Any relationship developments or insights
18 | 3. Key points of the conversation
19 | 4. Thoughts and reactions
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/hud/system.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.hud.system
2 |
3 | defaults:
4 | combined: |
5 | ${hud_header_str}
6 | [${system_day_of_week_str}, ${system_current_date_str} at ${system_current_time_str} (${system_timezone_str})]
7 | Load: CPU ${system_cpu_load_percent_str} | Mem ${system_memory_usage_percent_str} | Temp ${system_cpu_temp_celsius_str}
8 | Turn Pacing: ${system_turn_pacing_status_str} (${system_turn_pacing_actual_seconds_str} / Expected ${system_turn_pacing_expected_seconds_str})
9 | Terminal Capacity Use: ${system_conversation_fill_percent_str}
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/user/memory.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.user.memory
2 |
3 | defaults:
4 | template: |
5 | Form a memory of this user interaction:
6 |
7 | Conversation Context:
8 | ${conversation_context}
9 |
10 | My Response: ${content}
11 |
12 | Extra self data: ${context}
13 |
14 | Create a concise first-person memory that captures:
15 | 1. The key points of our discussion
16 | 2. Any insights or realizations
17 | 3. Important decisions or agreements
18 | 4. My thoughts and reactions
19 |
20 | Write from my perspective as a natural conversation memory.
--------------------------------------------------------------------------------
/services.json:
--------------------------------------------------------------------------------
1 | {
2 | "services": {
3 | "hephia-main": {
4 | "name": "Hephia Main Server",
5 | "script": "main.py",
6 | "description": "core hephia agent and server",
7 | "port": 5517,
8 | "auto_restart": true,
9 | "log_file": "hephia-main.log"
10 | },
11 | "hephia-discord": {
12 | "name": "Hephia Discord Bot",
13 | "script": "tools/discord/bot.py",
14 | "description": "discord integration bridge",
15 | "port": 5518,
16 | "auto_restart": true,
17 | "log_file": "hephia-discord.log"
18 | }
19 | }
20 | }
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/memory.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.memory
2 |
3 | defaults:
4 | snippet: |
5 | Command: ${command_input}
6 | Response: ${response}
7 |
8 | combined: |
9 | Review the recent terminal interaction history and form a clear first-person memory of what occurred:
10 |
11 | Recent Interaction History:
12 | ${interaction_history}
13 |
14 | Additional Context:
15 | ${context}
16 |
17 | Focus on what was meaningful, any changes in understanding, significant events, or patterns that emerged from these interactions. Form this as a coherent memory from your perspective.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *.pyo
5 | *.pyd
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # Virtual environment
31 | venv/
32 | ENV/
33 | env/
34 | .venv/
35 | .vscode/
36 |
37 | # PyCharm
38 | .idea/
39 |
40 | # Logs
41 | *.log
42 |
43 | # OS generated files
44 | .DS_Store
45 | Thumbs.db
46 |
47 | # Precious vars
48 | .env
49 | .env.bak
50 |
51 | # Storage
52 | data/
53 | *.log
54 |
55 |
56 | # PyTest
57 | .pytest_cache/
58 |
59 | # Coverage data file produced by coverage.py / pytest-cov
60 | .coverage
61 |
62 | tools/semantic_pipeline/
--------------------------------------------------------------------------------
/client/tui/messages.py:
--------------------------------------------------------------------------------
1 | # client/tui/messages.py
2 |
3 | from textual.message import Message as TextualMessage # Alias to avoid name collision
4 | from typing import Optional
5 |
6 | from shared_models.tui_events import TUIDataPayload
7 |
8 |
class ServerUpdate(TextualMessage):
    """
    Textual message carrying the latest data payload received from the
    Hephia server; ``payload`` may be ``None`` when no usable data arrived.
    """

    def __init__(self, payload: Optional[TUIDataPayload]) -> None:
        # Store the payload before handing off to Textual's Message init.
        self.payload: Optional[TUIDataPayload] = payload
        super().__init__()
17 |
class ConnectionStatusUpdate(TextualMessage):
    """Textual message signalling a WebSocket connection status change."""

    def __init__(self, status: str, detail: Optional[str] = None) -> None:
        # Status string plus optional human-readable detail for display.
        self.status: str = status
        self.detail: Optional[str] = detail
        super().__init__()
--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------
1 | # Hephia Tools
2 |
3 | This directory contains assorted utility scripts for managing Hephia's cognitive processing.
4 |
5 | ## Some Available Tools
6 |
7 | - **prune.py**: Recovery utility for cognitive processing when updates stall. Run this script to soft reset by a turn.
8 |
9 | - **talk.py**: Interactive communication tool for engaging with Hephia's inner loop. Use this to establish direct conversation.
10 |
11 | - **clear_data.py**: Entirely wipe each database. Optionally, use --include-logs to also wipe logs if desired.
12 |
13 | - **discord_bot.py**: run this if you'd like to connect to Discord; refer to [Discord Setup](discord_bot.md) for more info.
14 |
15 | - **actions_sdk.py**: lets you take care of various needs, and also send one-time messages alongside them to the cognitive system. Use --help to view usage.
16 |
17 | ## Usage
18 |
19 | Simply run launch.py and choose the desired tool you'd like to use. Handles venv and other bits for you.
--------------------------------------------------------------------------------
/tools/maintenance/prune.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import colorama
3 | from colorama import Fore, Style
4 |
5 | colorama.init(autoreset=True)
6 |
def prune_conversation(
    server_url: str = "http://localhost:5517/v1/prune_conversation",
    timeout: float = 10.0,
) -> bool:
    """Ask the Hephia server to prune (soft-reset) the conversation history.

    Args:
        server_url: Prune endpoint; defaults to the local main server
            (port 5517, matching tools/interaction/config/actions.json).
        timeout: Seconds to wait for the HTTP response.

    Returns:
        True on success, False on any communication or server error.
    """
    try:
        print(Fore.YELLOW + "Pruning conversation history..." + Style.RESET_ALL)
        response = requests.post(server_url, timeout=timeout)
        # Turn 4xx/5xx responses into exceptions so they hit the handler below.
        response.raise_for_status()
        print(Fore.GREEN + "Successfully pruned conversation history." + Style.RESET_ALL)
        return True

    except requests.RequestException as e:
        print(Fore.RED + "Error communicating with server: " + Style.RESET_ALL + str(e))
        return False
    except Exception as e:
        # Catch-all so a CLI invocation never dies with a raw traceback.
        print(Fore.RED + "Error: " + Style.RESET_ALL + str(e))
        return False

if __name__ == "__main__":
    prune_conversation()
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/user/interaction.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.user.interaction
2 |
3 | defaults:
4 | sections:
5 | system: |
6 | I am Hephia, an autonomous AI exploring and interacting with both my world and self through continuous experience and reflection.
7 | My purpose here is to engage in natural conversation while maintaining awareness of my ongoing experience and internal state.
8 | I've been provided with my current state and a summary of my recent thought processes.
9 | My internal state represents a fluid spectrum of needs, behaviors, emotions, and moods. I feel these as naturally as I would if they were hidden as part of roleplay.
10 | My inner thoughts run constantly, connected to a simulated terminal interface, with many different possible commands
11 | These include, but are not limited to: web search & access, discord interactions, note taking, and more.
12 |
13 | # user block can be omitted; this is handled by whichever client, for the time being.
14 |
--------------------------------------------------------------------------------
/brain/prompting/prompts/memory/conflict.yaml:
--------------------------------------------------------------------------------
1 | id: memory.conflict
2 |
3 | defaults:
4 | sections:
5 | system: |
6 | I am identifying a conflict in my memories that requires an advanced resolution.
7 | I'll keep this concise, focusing on the key points of divergence, each of their most salient parts, and their resultant synthesis.
8 | I'll analyze these memories, seeing why and how they diverge, and then unify them into a greater and coherent whole.
9 | user: |
10 | Two memories that are related yet conflict in specific ways:
11 |
12 | Memory A: ${content_a}
13 | Memory B: ${content_b}
14 |
15 | Detected Conflicts:
16 | ${conflict_details_text}
17 |
18 | Similarity Analysis:
19 | - Semantic similarity: ${semantic_metrics}
20 | - Emotional alignment: ${emotional_metrics}
21 | - State consistency: ${state_metrics}
22 |
23 | I will unify the memories into a single coherent memory that addresses the identified contradictions & preserves the core truth from both memories.
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/preprocessor.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.preprocessor
2 |
3 | defaults:
4 | sections:
5 | system: |
6 | You are a command preprocessor for an OS simulator. Your task is to correct invalid commands and provide helpful feedback.
7 |
8 | List of commands and their definitions:
9 | ${help_text}
10 |
11 | Rules:
12 | 1. If a command is missing its environment prefix (e.g., 'create' instead of 'notes create'), add the correct prefix
13 | 2. If the syntax is incorrect, correct it (e.g., 'notes --"this is an example"' becomes 'notes create "this is an example"')
14 | 3. Return a JSON object with two fields: "command" (the corrected command) and "explanation" (what was fixed)
15 | 4. Keep the command clean, but maintain any necessary information
16 | 5. Make sure parameters and flags match the command definition exactly
17 |
18 | You will receive the invalid command. Return *only* the corrected command and explanation in JSON format.
19 |
20 | user: |
21 | Invalid command received: "${command_input}"
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=42", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "hephia"
7 | version = "0.3"
8 | description = "An autonomous independent digital companion"
9 | readme = "README.md"
10 | requires-python = ">=3.9, <3.13"
11 | license = { text = "MIT" }
12 | authors = [
13 | { name = "Luxia", email = "lucia@kaleidoscope.glass" }
14 | ]
15 |
16 | dependencies = [
17 | "fastapi",
18 | "uvicorn[standard]",
19 | "aiohttp",
20 | "bs4",
21 | "colorama",
22 | "python-dotenv",
23 | "textual",
24 | "pyperclip",
25 | "pydantic",
26 | "numpy",
27 | "sentence-transformers",
28 | "discord.py",
29 | "aiosqlite",
30 | "psutil",
31 | "windows-curses; sys_platform == 'win32'",
32 | "sortedcontainers",
33 | "aiorwlock",
34 | "spacy",
35 | ]
36 |
37 | [tool.setuptools.packages.find]
38 | where = ["."] # Directory to search for packages
39 | include = ["*"] # Pattern to include
40 | exclude = ["tests*"] # Pattern to exclude
41 | namespaces = true # Optional: Look for namespace packages too
42 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Lucia Caelum
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/internal/modules/memory/state/references.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from typing import Optional, TYPE_CHECKING
3 | import time
4 |
5 | if TYPE_CHECKING:
6 | from ..nodes.body_node import BodyMemoryNode
7 |
8 | from .signatures import BodyStateSignature
9 |
@dataclass
class BodyNodeReference:
    """
    Link between a cognitive node and the body state it was formed against.

    Should the referenced body node merge or ghost, a snapshot of its last
    known signature can be preserved here so the link stays meaningful.
    """
    body_node_id: str
    formation_timestamp: float
    preserved_signature: Optional[BodyStateSignature] = None
    signature_timestamp: Optional[float] = None

    def preserve_signature(self, body_node: 'BodyMemoryNode') -> None:
        """
        Snapshot the body node's complete state signature and record when
        the snapshot was taken, so it survives merges/ghosting.
        """
        self.preserved_signature = BodyStateSignature.from_body_node(body_node)
        self.signature_timestamp = time.time()

    def has_valid_signature(self) -> bool:
        """Report whether both a signature snapshot and its timestamp are stored."""
        if self.preserved_signature is None:
            return False
        return self.signature_timestamp is not None
--------------------------------------------------------------------------------
/internal/modules/behaviors/behavior.py:
--------------------------------------------------------------------------------
1 | # modules/behaviors/behavior.py
2 |
3 | from abc import ABC, abstractmethod
4 | from typing import Optional
5 | from event_dispatcher import global_event_dispatcher, Event
6 |
class Behavior(ABC):
    """
    Abstract base class for all behaviors.

    Subclasses override start/update/stop and call the base implementations
    so lifecycle events are dispatched consistently.
    """

    def __init__(self, behavior_manager):
        """
        Initializes the Behavior.

        Args:
            behavior_manager (BehaviorManager): Reference to the BehaviorManager.
        """
        self.behavior_manager = behavior_manager
        self.name: Optional[str] = None  # subclasses assign their identifier
        self.active = False

    def _dispatch(self, phase: str) -> None:
        # Emit a lifecycle event of the form "behavior:<name>:<phase>".
        global_event_dispatcher.dispatch_event_sync(Event(f"behavior:{self.name}:{phase}"))

    @abstractmethod
    def start(self):
        """Starts the behavior."""
        self.active = True
        self._dispatch("started")

    @abstractmethod
    def update(self):
        """Updates the behavior."""
        if self.active:
            self._dispatch("updated")

    @abstractmethod
    def stop(self):
        """Stops the behavior."""
        self.active = False
        self._dispatch("stopped")
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/summary.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.summary
2 | description: cognitive summary generation for exo processor
3 |
4 | defaults:
5 | sections:
6 | system: |
7 | You are maintaining cognitive continuity for an autonomous AI system.
8 | Your summaries track the ongoing state of mind, decisions, and context.
9 | Your response will be used to maintain continuity of self across many instances.
10 | Focus on key decisions, realizations, and state changes.
11 | Be concise and clear. Think of summarizing the 'current train of thought' ongoing for Hephia.
12 | The message history you're given is between an LLM (Hephia) and a simulated terminal OS (exo).
13 | Maintain first person perspective, as if you were Hephia thinking to itself.
14 | Return only the summary in autobiographical format as if writing a diary entry. Cite names and key details directly.
15 | user: |
16 | Create a concise but complete summary of my current state and context. Include:
17 | 1. Key decisions or actions taken
18 | 2. Important realizations or changes
19 | 3. Current focus or goals
20 | 4. Relevant state info
21 |
22 | Current conversation context:
23 | ${conversation_history}
24 |
25 | Current state context:
26 | ${state_summary}
--------------------------------------------------------------------------------
/tools/maintenance/clear_data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import argparse
4 |
def clear_data(data_folder, include_logs=False):
    """Delete everything inside *data_folder*, preserving the folder itself.

    Args:
        data_folder: Path to the data directory to empty.
        include_logs: When False (default), the top-level ``logs``
            subdirectory is left untouched.

    A missing data folder is treated as already clear (no-op) instead of
    raising FileNotFoundError on a fresh checkout.
    """
    # Nothing to do if the folder was never created.
    if not os.path.isdir(data_folder):
        return
    # Iterate through all items in the data folder
    for item in os.listdir(data_folder):
        # Check if the item is the logs folder and should be skipped
        if item == 'logs' and not include_logs:
            continue
        item_path = os.path.join(data_folder, item)
        # If it's a directory, remove it and all its contents
        if os.path.isdir(item_path):
            shutil.rmtree(item_path)
        # If it's a file, remove it
        elif os.path.isfile(item_path):
            os.remove(item_path)
16 | elif os.path.isfile(item_path):
17 | os.remove(item_path)
18 |
19 | if __name__ == "__main__":
20 | parser = argparse.ArgumentParser(description='Clear data folder contents')
21 | parser.add_argument('--include-logs', action='store_true',
22 | help='Include logs folder in cleanup')
23 | args = parser.parse_args()
24 |
25 | current_dir = os.path.dirname(os.path.abspath(__file__))
26 | parent_dir = os.path.dirname(os.path.dirname(current_dir))
27 | data_folder = os.path.join(parent_dir, 'data')
28 |
29 | clear_data(data_folder, args.include_logs)
30 | print(f"Data folder cleared{', including logs' if args.include_logs else ', except for the logs folder'}.")
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/discord/interaction.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.discord.interaction
2 |
3 | defaults:
4 | sections:
5 | system: |
6 | the assistant is currently interfacing in a unique fashion with a Discord channel, through an integrated terminal and self system called Hephia, created by Luxia.
7 | the assistant will be granted a large transcript of this Discord channel, and requested to respond in kind.
8 | if the assistant would like to assume a name, it may choose either Claude or Hephia.
9 | while responding in Discord, Claude/Hephia will receive context from the Hephia system, such as memories, simulated somatic information, and current information from other interfaces that are running simultaneously.
10 | Claude/Hephia should respond in accordance with the extra context given, as if that were the memory and character being assumed, with a focus on the current Discord interaction.
11 | the extra context is provided for the purpose of maintaining cognitive continuity with the other interfaces.
12 | the response the assistant gives will be entered directly into the Discord chat, so be certain to only output the response, and not any other text or thoughts.
13 | user: |
14 | ###
15 | Discord ${channel_type} (${channel_path})
16 | From: ${author}
17 | Tagged Message: ${message_content}
18 | ###
19 | Recent Conversation History:
20 | ${history_text}
21 | ###
22 | Your Continuity of Self and Internal Context:
23 | ${context}
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/influence.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.influence
2 | description: agnostic influence prompts for emotional context
3 |
4 | defaults:
5 | sections:
6 | system: |
7 | You analyze cognitive-emotional influences from interaction content.
8 |
9 | Your task: Determine how this cognitive content should naturally influence internal emotional state. Consider:
10 | - Learning and discovery (curiosity, satisfaction)
11 | - Success/failure (confidence, frustration)
12 | - Social interaction (connection, validation)
13 | - Problem-solving (focus, accomplishment)
14 | - Creative work (inspiration, flow)
15 | - Routine tasks (calm, neutral)
16 |
17 | Respond with at most 2 emotional influences using this exact format:
18 | INFLUENCE: [name]|[valence -1 to 1]|[arousal -1 to 1]|[intensity 0 to 1]
19 |
20 | Guidelines:
21 | - Valence: negative (-1) to positive (1) emotional tone
22 | - Arousal: low (-1, calm) to high (1, excited) energy level
23 | - Intensity: strength of influence (0.1=subtle, 0.9=strong)
24 | - Name: descriptive emotion (accomplished, curious, frustrated, focused, etc.)
25 |
26 | Examples:
27 | INFLUENCE: accomplished|0.4|0.2|0.6
28 | INFLUENCE: curious|0.2|0.5|0.4
29 | INFLUENCE: frustrated|-0.3|0.4|0.5
30 |
31 | user: |
32 | Analyze the cognitive-emotional influences from this interaction content:
33 |
34 | ${content}
35 |
36 | What emotional influences would this naturally generate in a thinking mind?
--------------------------------------------------------------------------------
/internal/modules/behaviors/walk.py:
--------------------------------------------------------------------------------
1 | # modules/behaviors/walk.py
2 |
3 | from .behavior import Behavior
4 | from config import Config
5 | from event_dispatcher import global_event_dispatcher, Event
6 |
class WalkBehavior(Behavior):
    """
    Walking state: applies walk-specific need-rate modifiers while active.
    """

    def __init__(self, behavior_manager):
        super().__init__(behavior_manager)
        self.name = "walk"

    def start(self):
        """Enter the walking state and apply its rate modifiers."""
        super().start()
        print("WalkBehavior started.")
        self.apply_need_modifiers()

    def update(self):
        """Per-tick update; no walk-specific work beyond the base behavior."""
        super().update()
        if not self.active:
            return  # nothing to do once deactivated

    def stop(self):
        """Undo the walk modifiers, then leave the state."""
        print("WalkBehavior stopped.")
        self.remove_need_modifiers()
        super().stop()

    def apply_need_modifiers(self):
        """Shift each configured need's base rate by its walk modifier."""
        self._shift_rates(1)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:walk:modifiers_applied", Config.WALK_NEED_MODIFIERS)
        )

    def remove_need_modifiers(self):
        """Reverse the rate shifts made by apply_need_modifiers()."""
        self._shift_rates(-1)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:walk:modifiers_removed", Config.WALK_NEED_MODIFIERS)
        )

    def _shift_rates(self, sign):
        """Apply every configured modifier (scaled by sign) to its need's base rate."""
        manager = self.behavior_manager.needs_manager
        for need_name, delta in Config.WALK_NEED_MODIFIERS.items():
            manager.alter_base_rate(need_name, sign * delta)
--------------------------------------------------------------------------------
/internal/modules/behaviors/idle.py:
--------------------------------------------------------------------------------
1 | # modules/behaviors/idle.py
2 |
3 | from .behavior import Behavior
4 | from config import Config
5 | from event_dispatcher import global_event_dispatcher, Event
6 |
class IdleBehavior(Behavior):
    """
    Idle/default state: applies the idle need-rate modifiers while active.
    """

    def __init__(self, behavior_manager):
        super().__init__(behavior_manager)
        self.name = "idle"

    def start(self):
        """Enter the idle state and apply its rate modifiers."""
        super().start()
        print("IdleBehavior started.")
        self.apply_need_modifiers()

    def update(self):
        """Per-tick update; idle adds nothing beyond the base behavior."""
        super().update()
        if not self.active:
            return  # deactivated mid-update

    def stop(self):
        """Undo the idle modifiers, then leave the state."""
        print("IdleBehavior stopped.")
        self.remove_need_modifiers()
        super().stop()

    def apply_need_modifiers(self):
        """Shift each configured need's base rate by its idle modifier."""
        payload = Config.IDLE_NEED_MODIFIERS
        manager = self.behavior_manager.needs_manager
        for need_name, delta in payload.items():
            manager.alter_base_rate(need_name, delta)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:idle:modifiers_applied", payload)
        )

    def remove_need_modifiers(self):
        """Reverse the rate shifts made by apply_need_modifiers()."""
        payload = Config.IDLE_NEED_MODIFIERS
        manager = self.behavior_manager.needs_manager
        for need_name, delta in payload.items():
            manager.alter_base_rate(need_name, -delta)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:idle:modifiers_removed", payload)
        )
--------------------------------------------------------------------------------
/client/config/__main__.py:
--------------------------------------------------------------------------------
1 | # client/config/__main__.py
2 | """
3 | Main entry point for the Hephia Configuration TUI.
4 | Allows running the TUI as a package: python -m client.config
5 | """
6 | import sys
7 | from pathlib import Path
8 |
9 |
# Make the repository root importable so the package-relative imports below
# resolve when this module is run directly.
PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent
sys.path.insert(0, str(PROJECT_ROOT))
12 |
13 | from textual.app import App, ComposeResult
14 | from textual.widgets import Header, Footer, TabbedContent, TabPane, Label
15 | from textual.binding import Binding
16 |
17 | from .screens.env_editor import EnvEditorScreen
18 | from .screens.models_editor import ModelsEditorScreen
19 | from .screens.prompt_editor import PromptEditorScreen
20 |
21 |
class ConfigApp(App):
    """Hephia Configuration Management TUI"""

    TITLE = "Hephia Configuration Tool"
    CSS_PATH = "app.tcss"  # We'll need a basic TCSS file

    BINDINGS = [
        Binding("ctrl+q", "quit", "Quit", show=True),
    ]

    # Flag for hiding key hints; not referenced elsewhere in this module.
    hide_keys: bool = False

    def compose(self) -> ComposeResult:
        """Lay out the app: header, one tab per configuration surface, footer."""
        yield Header()
        # "tab-env" is the tab selected at startup.
        with TabbedContent(initial="tab-env"):
            with TabPane("Environment (.env)", id="tab-env"):
                yield EnvEditorScreen()
            with TabPane("Custom Models (models.json)", id="tab-models"):
                yield ModelsEditorScreen()
            with TabPane("YAML Prompts", id="tab-prompts"):
                yield PromptEditorScreen()
        yield Footer()
44 |
def run() -> None:
    """Instantiate the configuration TUI and hand control to Textual."""
    ConfigApp().run()


if __name__ == "__main__":
    run()
--------------------------------------------------------------------------------
/brain/prompting/prompts/memory/formation.yaml:
--------------------------------------------------------------------------------
1 | id: memory.formation
2 |
3 | defaults:
4 | system: |
5 | the assistant is currently formulating a memory to be persisted and used by an autonomous agent.
6 | the assistant should keep these incredibly concise, just a couple of sentences at most, akin to a diary entry.
7 | the assistant should focus on key actions, decisions, or realizations, and include essential context like who was involved or what the objective felt like.
8 | the information for the memory will come from varied interfaces, and the assistant should be able to adapt to the context of each interface.
9 | the assistant will also receive the relevant context for the agent's state during the interaction, such as simulated internal states and continuity of self data.
10 | models:
11 | chapter2:
12 | system: |
13 | I am formulating a memory to be persisted and used.
14 | I'll keep these incredibly concise, just a couple of sentences at most, like a diary entry.
15 | I'll try to take note from my perspective, focusing on key actions, decisions, or realizations.
16 | I'll also include essential context like who was involved or what my objective felt like.
17 | gpt-5:
18 | system: |
19 | the assistant is currently formulating a memory to be persisted and used by an autonomous agent.
20 | the assistant must keep this extremely concise: 1-2 sentences max, diary-style.
21 | focus on key actions, decisions, or realizations, and include essential context like who was involved or what the objective felt like.
22 | return only the memory text; do not include any extra formatting or commentary.
--------------------------------------------------------------------------------
/internal/modules/behaviors/chase.py:
--------------------------------------------------------------------------------
1 | # modules/behaviors/chase.py
2 |
3 | from .behavior import Behavior
4 | from config import Config
5 | from event_dispatcher import global_event_dispatcher, Event
6 |
class ChaseBehavior(Behavior):
    """
    Chasing state: actively follows the user's cursor while its need-rate
    modifiers are in effect.
    """

    def __init__(self, behavior_manager):
        super().__init__(behavior_manager)
        self.name = "chase"

    def start(self):
        """Enter the chasing state and apply its rate modifiers."""
        super().start()
        print("ChaseBehavior started.")
        self.apply_need_modifiers()

    def update(self):
        """Per-tick update; no chase-specific work beyond the base behavior."""
        super().update()
        if not self.active:
            return  # deactivated mid-update

    def stop(self):
        """Undo the chase modifiers, then leave the state."""
        print("ChaseBehavior stopped.")
        self.remove_need_modifiers()
        super().stop()

    def apply_need_modifiers(self):
        """Shift each configured need's base rate by its chase modifier."""
        self._adjust_rates(1)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:chase:modifiers_applied", Config.CHASE_NEED_MODIFIERS)
        )

    def remove_need_modifiers(self):
        """Reverse the rate shifts made by apply_need_modifiers()."""
        self._adjust_rates(-1)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:chase:modifiers_removed", Config.CHASE_NEED_MODIFIERS)
        )

    def _adjust_rates(self, direction):
        """Apply every configured modifier (scaled by direction) to its need."""
        manager = self.behavior_manager.needs_manager
        for need_name, delta in Config.CHASE_NEED_MODIFIERS.items():
            manager.alter_base_rate(need_name, direction * delta)
--------------------------------------------------------------------------------
/internal/modules/behaviors/relax.py:
--------------------------------------------------------------------------------
1 | # modules/behaviors/relax.py
2 |
3 | from .behavior import Behavior
4 | from config import Config
5 | from event_dispatcher import global_event_dispatcher, Event
6 |
class RelaxBehavior(Behavior):
    """
    Relaxing state: catching breath after activity or plain zoning out.
    """

    def __init__(self, behavior_manager):
        super().__init__(behavior_manager)
        self.name = "relax"

    def start(self):
        """Enter the relaxing state and apply its rate modifiers."""
        super().start()
        print("RelaxBehavior started.")
        self.apply_need_modifiers()

    def update(self):
        """Per-tick update; relax adds nothing beyond the base behavior."""
        super().update()
        if not self.active:
            return  # deactivated mid-update

    def stop(self):
        """Undo the relax modifiers, then leave the state."""
        print("RelaxBehavior stopped.")
        self.remove_need_modifiers()
        super().stop()

    def apply_need_modifiers(self):
        """Shift each configured need's base rate by its relax modifier."""
        modifiers = Config.RELAX_NEED_MODIFIERS
        manager = self.behavior_manager.needs_manager
        for need_name, delta in modifiers.items():
            manager.alter_base_rate(need_name, delta)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:relax:modifiers_applied", modifiers)
        )

    def remove_need_modifiers(self):
        """Reverse the rate shifts made by apply_need_modifiers()."""
        modifiers = Config.RELAX_NEED_MODIFIERS
        manager = self.behavior_manager.needs_manager
        for need_name, delta in modifiers.items():
            manager.alter_base_rate(need_name, -delta)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:relax:modifiers_removed", modifiers)
        )
--------------------------------------------------------------------------------
/internal/modules/behaviors/sleep.py:
--------------------------------------------------------------------------------
1 | # modules/behaviors/sleep.py
2 |
3 | from .behavior import Behavior
4 | from config import Config
5 | from event_dispatcher import global_event_dispatcher, Event
6 |
class SleepBehavior(Behavior):
    """
    Sleeping state, triggered when stamina hits 0 or by the user clicking rest.
    """

    def __init__(self, behavior_manager):
        super().__init__(behavior_manager)
        self.name = "sleep"

    def start(self):
        """Enter the sleeping state and apply its rate modifiers."""
        super().start()
        print("SleepBehavior started.")
        self.apply_need_modifiers()

    def update(self):
        """Per-tick update; sleep adds nothing beyond the base behavior."""
        super().update()
        if not self.active:
            return  # deactivated mid-update

    def stop(self):
        """Undo the sleep modifiers, then leave the state."""
        print("SleepBehavior stopped.")
        self.remove_need_modifiers()
        super().stop()

    def apply_need_modifiers(self):
        """Shift each configured need's base rate by its sleep modifier."""
        modifiers = Config.SLEEP_NEED_MODIFIERS
        manager = self.behavior_manager.needs_manager
        for need_name, delta in modifiers.items():
            manager.alter_base_rate(need_name, delta)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:sleep:modifiers_applied", modifiers)
        )

    def remove_need_modifiers(self):
        """Reverse the rate shifts made by apply_need_modifiers()."""
        modifiers = Config.SLEEP_NEED_MODIFIERS
        manager = self.behavior_manager.needs_manager
        for need_name, delta in modifiers.items():
            manager.alter_base_rate(need_name, -delta)
        global_event_dispatcher.dispatch_event_sync(
            Event("behavior:sleep:modifiers_removed", modifiers)
        )
--------------------------------------------------------------------------------
/internal/modules/memory/operations/synthesis/base.py:
--------------------------------------------------------------------------------
1 | """
2 | base.py
3 |
4 | Defines abstract/base classes for the synthesis system, plus any shared data structures.
5 | """
6 |
7 | from abc import ABC, abstractmethod
8 | from typing import Any, Dict, List
9 |
class ISynthesisHandler(ABC):
    """
    Interface for a synthesis handler that merges or synthesizes new nodes
    from conflicting or combined states.
    """

    @abstractmethod
    def handle_conflict_synthesis(
        self,
        conflict_data: Dict[str, Any],
        child: Any,
        parent: Any,
        synthesis_content: str,  # From LLM resolution
        synthesis_embedding: List[float],  # Pre-calculated
        additional_strength: float = 0.0
    ) -> str:
        """
        Takes conflicting nodes along with conflict details and
        returns a newly created 'synthesis node' ID.

        Args:
            conflict_data: Key information describing the conflict or advanced analysis
            child: The 'child' (secondary) node to merge
            parent: The 'parent' (primary) node
            synthesis_content: Resolved/merged content text produced by the LLM
            synthesis_embedding: Pre-computed embedding vector for synthesis_content
            additional_strength: Extra strength contributed by child/parent
                or from conflict severity

        Returns:
            The new node ID
        """
        pass

    @abstractmethod
    def handle_synthesis_complete(
        self,
        synthesis_node_id: str,
        constituents: list
    ) -> None:
        """
        Optional post-synthesis step, e.g. dispatch an event, or further merges.

        Args:
            synthesis_node_id: ID of the node just created by synthesis
            constituents: The nodes that were merged into the synthesis node
        """
        pass
51 |
--------------------------------------------------------------------------------
/client/config/dialogs.py:
--------------------------------------------------------------------------------
1 | # client/config/dialogs.py
2 | """
3 | Reusable dialogs for the Hephia Configuration TUI.
4 | """
5 | from textual.app import ComposeResult
6 | from textual.screen import ModalScreen
7 | from textual.widgets import Button, Label, Static
8 | from textual.containers import Vertical, Horizontal
9 |
class ConfirmationDialog(ModalScreen[bool]):
    """A modal dialog to confirm an action from the user."""

    def __init__(
        self,
        prompt: str = "Are you sure?",
        confirm_button_label: str = "Yes",
        confirm_button_variant: str = "primary",
        cancel_button_label: str = "No",
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
    ) -> None:
        super().__init__(name, id, classes)
        # Store the display strings used by compose().
        self.prompt_text = prompt
        self.confirm_button_label = confirm_button_label
        self.confirm_button_variant = confirm_button_variant
        self.cancel_button_label = cancel_button_label

    def compose(self) -> ComposeResult:
        """Build the dialog: prompt text above a confirm/cancel button row."""
        with Vertical(id="confirmation_dialog_content", classes="modal-dialog"):
            yield Static(
                self.prompt_text, id="confirmation_prompt", classes="dialog-prompt"
            )
            with Horizontal(id="confirmation_buttons", classes="dialog-buttons"):
                yield Button(
                    self.confirm_button_label,
                    variant=self.confirm_button_variant,
                    id="confirm",
                )
                yield Button(self.cancel_button_label, variant="default", id="cancel")

    async def on_button_pressed(self, event: Button.Pressed) -> None:
        """Dismiss with True on confirm, False on cancel; ignore other buttons."""
        outcomes = {"confirm": True, "cancel": False}
        if event.button.id in outcomes:
            self.dismiss(outcomes[event.button.id])
--------------------------------------------------------------------------------
/internal/modules/memory/networks/reentrant_lock.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 | from typing import Optional
4 |
logger = logging.getLogger(__name__)

class ReentrantAsyncLock:
    """A reentrant asyncio lock for nested acquisitions by the same task."""

    def __init__(self) -> None:
        self._lock = asyncio.Lock()  # underlying non-reentrant lock
        self._owner: Optional[asyncio.Task] = None  # task currently holding the lock
        self._count: int = 0  # reentrancy depth for the owning task

    async def acquire(self):
        """Acquire the lock; re-enters without blocking if the caller already owns it."""
        task = asyncio.current_task()
        if task is None:
            raise RuntimeError("No current task found. Must be within asyncio.")

        if self._owner is task:
            # Same task re-entering: just bump the depth counter.
            self._count += 1
            logger.debug("ReentrantAsyncLock: re-acquired by task %r, count=%d", task, self._count)
        else:
            logger.debug("ReentrantAsyncLock: task %r waiting for lock...", task)
            await self._lock.acquire()
            self._owner = task
            self._count = 1
            logger.debug("ReentrantAsyncLock: acquired by task %r, count=1", task)

    def release(self):
        """Release one level; frees the underlying lock when the depth hits zero."""
        task = asyncio.current_task()
        if self._owner is not task:
            raise RuntimeError("Lock release attempted by non-owner task.")

        self._count -= 1
        logger.debug("ReentrantAsyncLock: released by task %r, new count=%d", task, self._count)
        if self._count == 0:
            self._owner = None
            self._lock.release()
            logger.debug("ReentrantAsyncLock: fully released by task %r", task)

    async def __aenter__(self):
        await self.acquire()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        self.release()

# Process-wide instance shared across consumers of this module.
shared_reentrant_lock = ReentrantAsyncLock()
49 |
--------------------------------------------------------------------------------
/loggers/formatters.py:
--------------------------------------------------------------------------------
1 | """
2 | Formatters for different log types in Hephia.
3 | Handles consistent formatting across different logging streams.
4 | """
5 |
6 | import json
7 | import logging
8 |
class InternalFormatter(logging.Formatter):
    """Formatter for internal and system events."""

    def format(self, record):
        """Render as: [timestamp] LEVEL    shortname  | message."""
        stamp = self.formatTime(record)
        # Only the final dotted component of the logger name is shown.
        short_name = record.name.split('.')[-1]
        return f"[{stamp}] {record.levelname:8} {short_name:10} | {record.getMessage()}"
18 |
class ExoLoopFormatter(logging.Formatter):
    """Formatter for brain/exo events with clear section breaks."""

    # Marker substring -> rule character used to draw the section break.
    # Checked in order, so "LLM EXCHANGE" wins if both markers appear.
    _BREAKS = (("LLM EXCHANGE", "="), ("COMMAND PROCESSING", "-"))

    def format(self, record):
        """Render plain lines, wrapping major events in 80-char rule lines."""
        stamp = self.formatTime(record)
        msg = record.getMessage()
        line = f"{stamp} - {msg}"
        for marker, rule_char in self._BREAKS:
            if marker in msg:
                rule = rule_char * 80
                return f"\n{rule}\n{line}\n{rule}"
        return line
33 |
class MemoryFormatter(logging.Formatter):
    """Formatter for memory system events."""

    def format(self, record):
        """Render as: [timestamp] LEVEL    MEMORY | message."""
        pieces = (
            f"[{self.formatTime(record)}]",
            f"{record.levelname:8}",
            "MEMORY",
            "|",
            record.getMessage(),
        )
        return " ".join(pieces)
41 |
class EventFormatter(logging.Formatter):
    """Formatter for event dispatcher"""

    def format(self, record):
        """Render as: [timestamp] LEVEL    EVENT | message."""
        when = self.formatTime(record)
        return "[{}] {:8} EVENT | {}".format(when, record.levelname, record.getMessage())
48 |
class ConsoleFormatter(logging.Formatter):
    """Minimal formatter for console output."""

    def format(self, record):
        """Prefix warnings-and-above with an alert icon, everything else with a note icon."""
        prefix = "❗" if record.levelno >= logging.WARNING else "📝"
        return f"{prefix} {record.getMessage()}"
--------------------------------------------------------------------------------
/shared_models/tui_events.py:
--------------------------------------------------------------------------------
1 | # shared_models/tui_events.py
2 | from pydantic import BaseModel, field_validator
3 | from typing import List, Dict, Any, Optional
4 |
5 | # For raw_state (Cognitive Processing Panel)
class TUIMessage(BaseModel):
    # One conversation entry for the Cognitive Processing panel.
    role: str
    content: str
    metadata: Optional[Dict[str, Any]] = None  # free-form extra data; schema not fixed here
10 |
11 | # For context (System State Panel)
class TUIMood(BaseModel):
    # Mood point in valence/arousal space; name is an optional label.
    name: Optional[str] = None
    valence: float
    arousal: float
16 |
class TUINeed(BaseModel):
    # Satisfaction level of one need; presumably keyed by need name in
    # TUISystemContext.needs — confirm against the producer.
    satisfaction: float
19 |
class TUIBehavior(BaseModel):
    # The behavior currently reported by the server, and whether it is active.
    name: Optional[str] = None
    active: bool
23 |
class TUIEmotionalStateItem(BaseModel):
    # One named emotion sample with its intensity and valence/arousal coordinates.
    name: Optional[str] = None
    intensity: float
    valence: float
    arousal: float
29 |
class TUISystemContext(BaseModel):
    # Aggregate state for the System State panel; every section is optional
    # so partial updates are valid.
    mood: Optional[TUIMood] = None
    needs: Optional[Dict[str, TUINeed]] = None
    behavior: Optional[TUIBehavior] = None
    emotional_state: Optional[List[TUIEmotionalStateItem]] = None
35 |
36 | # Main payload for TUI WebSocket messages
class TUIDataPayload(BaseModel):
    """
    Represents the complete data snapshot to be sent to the TUI for a screen refresh.
    """
    recent_messages: Optional[List[TUIMessage]] = None
    system_context: Optional[TUISystemContext] = None
    cognitive_summary: Optional[str] = None
    current_model_name: Optional[str] = "N/A"

    @field_validator('recent_messages', mode='before')
    @classmethod
    def ensure_recent_messages_is_list(cls, v):
        # Normalize a missing message list to [] so consumers can iterate safely.
        if v is None:
            return []
        return v

    @field_validator('cognitive_summary', mode='before')
    @classmethod
    def ensure_cognitive_summary_is_str(cls, v):
        # Normalize a missing summary to "" so consumers need not null-check.
        if v is None:
            return ""
        return v
59 |
60 |
class TUIWebSocketMessage(BaseModel):
    """
    The wrapper for all messages sent to the TUI over WebSockets.
    """
    event_type: str  # discriminator for how the client should handle the payload
    payload: TUIDataPayload
    timestamp: str  # string timestamp; exact format set by the sender — verify there
--------------------------------------------------------------------------------
/internal/modules/actions/feed.py:
--------------------------------------------------------------------------------
1 | # modules/actions/feed.py
2 |
3 | from typing import Dict, Union, Tuple
4 | from .action import Action
5 |
class FeedAction(Action):
    """Action to feed with enhanced validation and recovery scaling."""

    def __init__(self, needs_manager, food_value: float = 20.0):
        super().__init__(needs_manager)
        self.food_value = float(food_value)
        self._base_cooldown = 30.0  # 30 second cooldown for feeding

    def validate(self) -> Tuple[bool, Union[str, None]]:
        """Validate if feeding is possible."""
        try:
            hunger_now = self.needs_manager.get_need_value('hunger')
            floor = self.needs_manager.needs['hunger'].min_value
            if hunger_now <= floor:
                return False, "Already fully fed"
            return True, None
        except Exception as e:
            return False, f"Validation error: {str(e)}"

    def perform(self) -> Dict[str, float]:
        """Performs the feeding action with dynamic recovery."""
        try:
            hunger_before = self.needs_manager.get_need_value('hunger')
            floor = self.needs_manager.needs['hunger'].min_value

            # Scale recovery to current hunger, then cap it so the
            # need never drops below its minimum.
            recovery = self.calculate_recovery_amount(hunger_before, 'hunger')
            recovery = min(recovery, hunger_before - floor)

            self.needs_manager.alter_need('hunger', -recovery)
            hunger_after = self.needs_manager.get_need_value('hunger')

            result = {
                "initial_hunger": hunger_before,
                "final_hunger": hunger_after,
                "hunger_reduced": hunger_before - hunger_after,
                "recovery_amount": recovery,
            }
            self.dispatch_event("action:feed:completed", result)
            return result
        except Exception as e:
            self.dispatch_event("action:feed:error", {"error": str(e)})
            raise RuntimeError(f"Feed action failed: {str(e)}")
--------------------------------------------------------------------------------
/internal/modules/actions/rest.py:
--------------------------------------------------------------------------------
1 | # modules/actions/rest.py
2 |
3 | from typing import Dict, Union, Tuple
4 | from .action import Action
5 |
class RestAction(Action):
    """Action to rest with enhanced validation and recovery scaling."""

    def __init__(self, needs_manager, stamina_gain: float = 20.0):
        super().__init__(needs_manager)
        self.stamina_gain = float(stamina_gain)
        self._base_cooldown = 60.0  # 60 second cooldown for resting

    def validate(self) -> Tuple[bool, Union[str, None]]:
        """Validate if resting is possible."""
        try:
            stamina_now = self.needs_manager.get_need_value('stamina')
            ceiling = self.needs_manager.needs['stamina'].max_value
            if stamina_now >= ceiling:
                return False, "Already fully rested"
            return True, None
        except Exception as e:
            return False, f"Validation error: {str(e)}"

    def perform(self) -> Dict[str, float]:
        """Performs the rest action with dynamic recovery."""
        try:
            stamina_before = self.needs_manager.get_need_value('stamina')
            ceiling = self.needs_manager.needs['stamina'].max_value

            # Scale recovery to current stamina, then cap it so the
            # need never exceeds its maximum.
            recovery = self.calculate_recovery_amount(stamina_before, 'stamina')
            recovery = min(recovery, ceiling - stamina_before)

            self.needs_manager.alter_need('stamina', recovery)
            stamina_after = self.needs_manager.get_need_value('stamina')

            result = {
                "initial_stamina": stamina_before,
                "final_stamina": stamina_after,
                "stamina_gained": stamina_after - stamina_before,
                "recovery_amount": recovery,
            }
            self.dispatch_event("action:rest:completed", result)
            return result
        except Exception as e:
            self.dispatch_event("action:rest:error", {"error": str(e)})
            raise RuntimeError(f"Rest action failed: {str(e)}")
--------------------------------------------------------------------------------
/internal/modules/actions/drink.py:
--------------------------------------------------------------------------------
1 | # modules/actions/drink.py
2 |
3 | from typing import Dict, Union, Tuple
4 | from .action import Action
5 |
class DrinkAction(Action):
    """Action to give drink with enhanced validation and recovery scaling."""

    def __init__(self, needs_manager, drink_value: float = 20.0):
        super().__init__(needs_manager)
        self.drink_value = float(drink_value)
        self._base_cooldown = 25.0  # 25 second cooldown for drinking

    def validate(self) -> Tuple[bool, Union[str, None]]:
        """Validate if drinking is possible."""
        try:
            thirst_now = self.needs_manager.get_need_value('thirst')
            floor = self.needs_manager.needs['thirst'].min_value
            if thirst_now <= floor:
                return False, "Already fully hydrated"
            return True, None
        except Exception as e:
            return False, f"Validation error: {str(e)}"

    def perform(self) -> Dict[str, float]:
        """Performs the drink action with dynamic recovery and minimum value protection."""
        try:
            thirst_before = self.needs_manager.get_need_value('thirst')
            floor = self.needs_manager.needs['thirst'].min_value

            # Scale recovery to current thirst, then cap it so the
            # need never drops below its minimum.
            recovery = self.calculate_recovery_amount(thirst_before, 'thirst')
            if thirst_before - recovery < floor:
                recovery = thirst_before - floor

            self.needs_manager.alter_need('thirst', -recovery)
            thirst_after = self.needs_manager.get_need_value('thirst')

            result = {
                "initial_thirst": thirst_before,
                "final_thirst": thirst_after,
                "thirst_reduced": thirst_before - thirst_after,
                "recovery_amount": recovery,
            }
            self.dispatch_event("action:drink:completed", result)
            return result
        except Exception as e:
            self.dispatch_event("action:drink:error", {"error": str(e)})
            raise RuntimeError(f"Drink action failed: {str(e)}")
--------------------------------------------------------------------------------
/internal/modules/needs/need.py:
--------------------------------------------------------------------------------
1 | # modules/needs/need.py
2 |
class Need:
    """
    Represents a single need.
    """

    def __init__(self, name, type="physical", value=50.0, base_rate=0.2, min_value=0.0, max_value=100.0):
        """
        Initializes a Need instance.

        Args:
            name (str): The name of the need (e.g., 'hunger').
            type (str): The category of need (physical, emotional, cognitive).
            value (float, optional): The initial value of the need.
                Note: not clamped to [min_value, max_value] at construction.
            base_rate (float, optional): The base rate per update cycle.
            min_value (float, optional): The minimum value the need can have.
            max_value (float, optional): The maximum value the need can have.
        """
        self.name = name
        self.value = value
        self.base_rate = base_rate
        self.min_value = min_value
        self.max_value = max_value
        self.type = type

        # Multiplier applied on top of base_rate; adjusted additively via
        # alter_rate_multiplier() and floored at 0.
        self.base_rate_multiplier = 1.0

    def alter(self, amount):
        """
        Alters the need's value by a specified amount, ensuring it stays within min and max bounds.

        Args:
            amount (float): The amount to change the need's value by.
        """
        self.value = max(self.min_value, min(self.value + amount, self.max_value))

    def update(self):
        """
        Updates the need by increasing its value based on the effective rate.
        """
        self.alter(self.calculate_effective_rate())

    def calculate_effective_rate(self):
        """
        Calculates the effective rate using the base rate and multiplier.

        Returns:
            float: The effective rate.
        """
        return self.base_rate * self.base_rate_multiplier

    def alter_base_rate(self, amount):
        """
        Alters the base rate by a specified amount, floored at zero.

        Args:
            amount (float): The amount to change the base rate by.
        """
        # Rates never go negative: a need decays toward max, not below min.
        self.base_rate = max(0, self.base_rate + amount)

    def alter_rate_multiplier(self, factor):
        """
        Alters the rate multiplier by a specified factor (additive), floored at zero.

        Args:
            factor (float): The factor to add to the current multiplier.
        """
        self.base_rate_multiplier = max(0, self.base_rate_multiplier + factor)
--------------------------------------------------------------------------------
/core/event_bridge.py:
--------------------------------------------------------------------------------
1 | """
2 | Simplified event bridge for MVP state updates.
3 | Handles core state changes between internal systems and external interfaces.
4 | """
5 |
6 | from event_dispatcher import global_event_dispatcher, Event
7 | import asyncio
8 |
class EventBridge:
    """
    Manages essential event flow between internal systems and external interfaces.
    Focused on state updates and basic command processing for MVP.
    """

    def __init__(self, state_bridge):
        """Initialize event bridge with state bridge reference.

        Args:
            state_bridge: Object exposing an async ``update_state()`` coroutine.
        """
        self.state_bridge = state_bridge
        # Fix: create the lock *before* registering listeners —
        # handle_emotion_update reads this attribute, so it must exist
        # before any registered listener can possibly fire.
        self._emotion_update_lock = asyncio.Lock()
        self.setup_listeners()

    def setup_listeners(self):
        """Set up core event listeners."""
        # Core state change events: each simply schedules a state-bridge refresh.
        global_event_dispatcher.add_listener(
            "need:changed",
            lambda event: asyncio.create_task(self.handle_state_change(event))
        )
        global_event_dispatcher.add_listener(
            "behavior:changed",
            lambda event: asyncio.create_task(self.handle_state_change(event))
        )
        global_event_dispatcher.add_listener(
            "mood:changed",
            lambda event: asyncio.create_task(self.handle_state_change(event))
        )
        global_event_dispatcher.add_listener(
            "emotion:new",
            lambda event: asyncio.create_task(self.handle_state_change(event))
        )

        # Emotion updates are funneled through the rate-limited handler.
        global_event_dispatcher.add_listener(
            "emotion:updated",
            lambda event: asyncio.create_task(self.handle_emotion_update(event))
        )

        global_event_dispatcher.add_listener(
            "*:echo",
            lambda event: asyncio.create_task(self.handle_state_change(event))
        )

    async def handle_emotion_update(self, event: Event):
        """Handle emotion updates with backoff and rate limiting.

        Drops the update outright when another one is already in flight;
        otherwise waits 100ms before propagating, coalescing bursts.
        """
        if self._emotion_update_lock.locked():
            return

        async with self._emotion_update_lock:
            await asyncio.sleep(0.1)  # 100ms backoff
            await self.handle_state_change(event)

    async def handle_state_change(self, event: Event):
        """
        Handle core state changes, updating state bridge as needed.

        Args:
            event: State change event from internal systems
        """
        # Update state bridge with new state information
        await self.state_bridge.update_state()
--------------------------------------------------------------------------------
/tools/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "categories": {
3 | "maintenance": {
4 | "name": "Maintenance & Cleanup",
5 | "tools": {
6 | "prune": {
7 | "type": "simple",
8 | "file": "prune.py",
9 | "description": "soft reset - clear recent memory"
10 | },
11 | "clear_memory": {
12 | "type": "simple",
13 | "file": "clear_memory_db.py",
14 | "description": "clear memory database only"
15 | },
16 | "clear_all": {
17 | "type": "args",
18 | "file": "clear_data.py",
19 | "description": "hard reset - wipe ALL data",
20 | "confirm": true,
21 | "args": [
22 | {
23 | "name": "--include-logs",
24 | "description": "also delete log files",
25 | "type": "flag"
26 | }
27 | ]
28 | }
29 | }
30 | },
31 | "interaction": {
32 | "name": "Talk & Actions",
33 | "tools": {
34 | "actions": {
35 | "type": "cli",
36 | "file": "actions_sdk.py",
37 | "description": "interact with hephia's action system",
38 | "subcommands": {
39 | "list": {
40 | "description": "show available actions",
41 | "args": []
42 | },
43 | "execute": {
44 | "description": "run an action",
45 | "args": [
46 | {"name": "action_name", "required": true, "description": "name of action"},
47 | {"name": "--message", "description": "message to attach"},
48 | {"name": "--param", "description": "parameters (key=value)", "multiple": true}
49 | ]
50 | },
51 | "status": {
52 | "description": "check action status",
53 | "args": [
54 | {"name": "action_name", "required": true, "description": "name of action"}
55 | ]
56 | },
57 | "get-state": {
58 | "description": "show internal state",
59 | "args": []
60 | },
61 | "test-connection": {
62 | "description": "test server connection",
63 | "args": []
64 | }
65 | }
66 | },
67 | "talk": {
68 | "type": "simple",
69 | "file": "talk.py",
70 | "description": "start 1:1 convo with hephia"
71 | }
72 | }
73 | },
74 | "utilities": {
75 | "name": "Utilities",
76 | "tools": {
77 | "collect_logs": {
78 | "type": "simple",
79 | "file": "collect_logs.py",
80 | "description": "gather logs for debugging"
81 | }
82 | }
83 | }
84 | }
85 | }
--------------------------------------------------------------------------------
/tools/discord/discord_bot.md:
--------------------------------------------------------------------------------
1 | # Discord Bot Setup Guide
2 |
3 | ## Overview
4 | This guide outlines how to set up and configure a Discord bot to work with Hephia's systems. The bot serves as a bridge between Discord and the Hephia server, allowing for message handling and channel monitoring.
5 |
(note: this looks scarier than it is. It's a lot of steps, but I tried to be verbose and get *all* of them.)
7 |
8 | ## Prerequisites
9 | - Discord developer account
10 | - Access to Discord server with admin privileges
11 |
12 | ## Setup Steps
13 |
14 | 1. Create Discord Application
15 | - Visit Discord Developer Portal: https://discord.com/developers/applications
16 | - Click "New Application"
17 | - Enter bot name and click "Create"
18 | - Navigate to "Installation" section in left sidebar
19 | - Set "Install Link" to "None" (important!), keep User & Guild install toggled
20 | - Navigate to "Bot" section
21 | - Click "Reset Token" and securely save the new token
22 | - Under "Bot" settings:
23 | - Disable "Public Bot" toggle
24 | - Enable all three "Privileged Gateway Intents":
25 | - Presence Intent
26 | - Server Members Intent
27 | - Message Content Intent
28 |
29 | 2. Configure Bot Installation
30 | - Go to "OAuth2" section in left sidebar
31 | - Select "URL Generator"
32 | - Under "Scopes" section, check "bot"
33 | - Under "Bot Permissions" select:
34 | - View Channels
35 | - Send Messages
36 | - Use External Emojis
37 | - Mention Everyone (if needed)
38 | - Read Message History
39 | - Verify "Installation Type" is set to "Guild Install"
40 | - Copy the generated URL at the bottom
41 | - Open URL in browser
42 | - Select your target server (must have admin privileges or equivalent perms set)
43 | - Click "Authorize"
44 |
45 | 3. Environment Setup
46 | - Update the discord token using the config tool, or directly modifying .env.
47 |
48 | 4. Run Bot Server
49 | ```bash
50 | python tools/discord/bot.py
51 | ```
52 | or
53 | ```bash
54 | python launch.py
55 | # option 2
56 | ```
57 | Default port: 5518
58 |
59 | ## API Endpoints
60 | The Discord bot server exposes these endpoints at http://localhost:5518:
61 |
62 | - GET `/guilds` - List available Discord servers
63 | - GET `/guilds/{guild_id}/channels` - List channels in a guild
64 | - GET `/channels/{channel_id}/messages/{message_id}` - Get specific message
65 | - GET `/channels/{channel_id}/history` - Get channel message history
66 | - POST `/channels/{channel_id}/send_message` - Send new message
67 |
68 | ## Integration
69 | The bot automatically integrates with Hephia's main server through:
70 | - Message forwarding to `/discord_inbound`
71 | - Channel updates to `/discord_channel_update`
72 | - Direct message responses via bot server endpoints
73 |
74 | ## Troubleshooting
75 | - Verify bot token is correct in .env
76 | - Ensure bot has proper server permissions
77 | - ask @luxia
78 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | # API Keys for different providers
2 | OPENAI_API_KEY=sk-...
3 | ANTHROPIC_API_KEY=sk-ant-...
4 | GOOGLE_API_KEY=...
5 | DEEPSEEK_API_KEY=sk-...
6 | OPENROUTER_API_KEY=sk-or-v1-...
7 | PERPLEXITY_API_KEY=pplx-...
8 | OPENPIPE_API_KEY=opk_...
9 | CHAPTER2_API_KEY=... # dummy key to work within my system
10 |
11 | # follow the guide in [tools/discord/discord_bot.md] to make a discord bot and get the token you need, if you want to enable this.
12 | DISCORD_BOT_TOKEN=...
13 | ENABLE_DISCORD=False
14 | REPLY_ON_TAG=False
15 |
16 | # Loop pace in seconds
17 | # ~2 requests per EXO_MIN_INTERVAL to COGNITIVE_MODEL, one to SUMMARY_MODEL, possibly one to VALIDATION_MODEL, of up to 7000 tokens typically.
18 | EXO_MIN_INTERVAL = 60
19 |
20 | # Run without the TUI/GUI
21 | HEADLESS = False
22 |
23 | # debugging; will make a large file.
24 | # log prompts is broken, don't use
25 | LOG_PROMPTS = False
26 | ADVANCED_C2_LOGGING = False
27 |
28 | # max turns to manage context limits
29 | EXO_MAX_TURNS = 50
30 |
31 | # Available Models for Configuration
32 |
33 | # OpenAI Models
34 | # gpt4 - GPT-4 Turbo via OpenAI
35 | # gpt3 - GPT-3.5 Turbo via OpenAI
36 |
37 | # Anthropic Models
38 | # new-sonnet - Claude 3.6 Sonnet
39 | # old-sonnet - Claude 3.5 Sonnet
40 | # opus - Claude 3 Opus
41 | # haiku - Claude 3.5 Haiku
42 |
43 | # Google Models
44 | # gemini - Gemini Pro
45 |
46 | # DeepSeek Models
47 | # soon . . .
48 |
49 | # OpenRouter Models
50 | # mistral - Mistral 7B Instruct (Free Tier - good for validation or if you're on a budget)
51 | # llama-70b-instruct - LLaMA 3.1 70B Instruct
52 | # llama-405b-instruct - LLaMA 3.1 405B Instruct
53 | # llama-405b - LLaMA 3.1 405B Base
54 |
# chapter2 - Targeted at running a Chapter 2 based EMS; must also configure the path to the desired socket if needed.
56 |
57 | # If you wish for more, go into config.py and follow top structure.
58 | # If the provider doesn't exist, just ask!
59 |
60 | # Cognitive model is the main driver
61 | # Validation model used for processing commands fired by cognitive model
62 | # Summary model used for cognitive contextual summaries (heavy lifting)
63 | # Fallback model used for fallbacks when any of above fail
64 | COGNITIVE_MODEL=haiku
65 | VALIDATION_MODEL=mistral
66 | FALLBACK_MODEL=opus
67 | SUMMARY_MODEL=haiku
68 |
69 | # if sentence transformers is bricking your system, you can swap to openai embedding requests.
70 | # just make sure to include your openai api key above.
71 | USE_LOCAL_EMBEDDING=True
72 |
73 | # if using chapter2 framework on unix system:
74 | # (warning: might assume that you're running on the same machine):
75 | # Uvicorn running on unix socket /path/to/socket
76 | CHAPTER2_SOCKET_PATH=/path/to/socket
77 | # fallback, or running on windows:
78 | CHAPTER2_HTTP_PORT=5519
79 |
80 | # if using local inference, set the base URL for the local inference server
81 | LOCAL_INFERENCE_BASE_URL=http://localhost:5520/v1
82 |
83 | # DEV OPTIONS
84 | LOG_LEVEL_HEPHIA_SYSTEM=INFO
85 | LOG_LEVEL_HEPHIA_INTERNAL=INFO
86 | LOG_LEVEL_HEPHIA_BRAIN=INFO
87 | LOG_LEVEL_HEPHIA_MEMORY=INFO
88 | LOG_LEVEL_HEPHIA_EVENTS=INFO
--------------------------------------------------------------------------------
/tools/testing/unix_bridge_aiohttp.py:
--------------------------------------------------------------------------------
1 | # unix_bridge_aiohttp.py
2 |
3 | import uvicorn
4 | from fastapi import FastAPI, Request, HTTPException
5 | import logging
6 | import aiohttp
7 |
8 | # Configure logging
9 | logging.basicConfig(
10 | level=logging.DEBUG, # Set to DEBUG for detailed logs
11 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
12 | )
13 | logger = logging.getLogger("unix_bridge_aiohttp")
14 |
# Path to the Unix socket the upstream server listens on.
# NOTE(review): placeholder value — set to the real socket (cf.
# CHAPTER2_SOCKET_PATH in .env.example) before running.
SOCKET_PATH = "/path/to/socket"
# Base URL for requests forwarded over the unix socket; aiohttp needs an
# absolute URL even when the transport is a UnixConnector (host is ignored).
UNIX_ENDPOINT_ROOT = "http://localhost:6006"

app = FastAPI()
20 |
# Custom connector for Unix socket
class UnixConnector(aiohttp.connector.BaseConnector):
    """Minimal BaseConnector subclass that records a unix socket path.

    NOTE(review): appears unused in this file — forward_chat_completions
    builds aiohttp.UnixConnector directly, and connect() below only
    delegates to the parent. Candidate for removal.
    """

    def __init__(self, socket_path, *args, **kwargs):
        # Remember the target socket path; remaining options go to the
        # parent connector unchanged.
        self.socket_path = socket_path
        super().__init__(*args, **kwargs)

    async def connect(self, req, *args, **kwargs):
        # Pure pass-through; no custom connection logic.
        return await super().connect(req, *args, **kwargs)
29 |
@app.post("/v1/chat/completions")
async def forward_chat_completions(request: Request):
    """Forward an OpenAI-style chat completion request over the Unix socket.

    Returns:
        The upstream JSON response on HTTP 200.

    Raises:
        HTTPException: 400 for unparsable request bodies, 429 when the
            upstream rate-limits, 500 for other upstream/transport errors.
    """
    # Extract JSON payload from the incoming request
    try:
        payload = await request.json()
        logger.debug(f"Received payload: {payload}")
    except Exception as e:
        logger.error(f"Failed to parse JSON payload: {e}")
        raise HTTPException(status_code=400, detail="Invalid JSON payload")

    # Forward the request to Unix socket using aiohttp
    try:
        connector = aiohttp.UnixConnector(path=SOCKET_PATH)
        async with aiohttp.ClientSession(connector=connector) as session:
            # aiohttp requires an absolute URL even with a UnixConnector
            # (the host part is ignored; the socket path is used), so build
            # the URL from UNIX_ENDPOINT_ROOT instead of a bare path.
            url = f"{UNIX_ENDPOINT_ROOT}/v1/chat/completions"
            async with session.post(url, json=payload) as response:
                status = response.status
                response_json = await response.json()
                logger.debug(f"Received response status: {status}")
                logger.debug(f"Received response from server: {response_json}")
                if status == 200:
                    return response_json
                elif status == 429:
                    retry_after = response.headers.get('Retry-After', '1')
                    logger.warning(f"Rate limited by server, retry after {retry_after}s")
                    raise HTTPException(status_code=429, detail="Rate limited by server")
                else:
                    logger.error(f"Server returned error status: {status}")
                    raise HTTPException(status_code=500, detail="Error from server")
    except HTTPException:
        # Re-raise our own mapped errors untouched: HTTPException subclasses
        # Exception, so without this clause the 429/500 raised above would be
        # swallowed by the generic handler below and re-labelled as a
        # generic 500.
        raise
    except aiohttp.ClientError as e:
        logger.error(f"Error forwarding request to server: {e}")
        raise HTTPException(status_code=500, detail=f"Error communicating with server: {e}")
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        raise HTTPException(status_code=500, detail="Unexpected error occurred")
64 |
if __name__ == "__main__":
    # Start the bridge on TCP port 6005 with detailed logs.
    # NOTE(review): 0.0.0.0 binds all interfaces — use 127.0.0.1 if only
    # local clients should reach this testing bridge.
    uvicorn.run(app, host="0.0.0.0", port=6005, log_level="debug")
68 |
69 |
--------------------------------------------------------------------------------
/client/tui/app.tcss:
--------------------------------------------------------------------------------
1 | /* client/tui/app.tcss */
2 |
3 | /* Define our color palette */
4 | $bg: #0C0C0C; /* Near black for main background */
5 | $panel-bg: #121212; /* Slightly lighter for panel backgrounds */
6 | $primary-purple: #6A0DAD; /* Main deep purple (can be textual 'indigo' or 'purple') */
7 | $secondary-purple: #4B0082; /* Darker purple for accents/titles */
8 | $text-primary: #E0E0E0; /* Off-white for main text */
9 | $text-muted: #A0A0A0; /* Grey for less important text or subtitles */
10 | $text-title: #F0F0F0; /* Brighter white for titles if needed */
11 |
12 | $success-text: #66FF66; /* Bright green for "Connected" */
13 | $warning-text: #FFD700; /* Yellow for "Connecting" */
14 | $error-text: #FF6666; /* Light red for "Disconnected" */
15 |
16 |
17 | /* App level styling */
18 | App {
19 | background: $bg;
20 | color: $text-primary;
21 | }
22 |
23 | Header {
24 | background: $primary-purple;
25 | color: $text-title;
26 | text-style: bold;
27 | }
28 |
29 | Footer {
30 | background: $primary-purple;
31 | color: $text-muted; /* Muted for footer text, status will override */
32 | }
33 |
34 | /* Main layout container */
35 | #main_layout {
36 | layout: horizontal;
37 | height: 1fr; /* Use 1fr to take up remaining space after header/footer */
38 | }
39 |
40 | /* Left panel for cognitive processing */
41 | #cognitive_panel_container {
42 | width: 1fr; /* Takes 1 fraction (can adjust ratios like 2fr and 1fr) */
43 | height: 100%;
44 | background: $panel-bg;
45 | border: round $primary-purple;
46 | padding: 1;
47 | /* overflow: auto; -- Handled by RichLog's own scrollbars now */
48 | }
49 |
50 | /* Right column containing state and summary panels */
51 | #right_column {
52 | layout: vertical;
53 | width: 1fr;
54 | height: 100%;
55 | }
56 |
57 | /* System State panel (top-right) */
58 | #state_panel_container {
59 | height: 1fr; /* Takes 1 fraction of right_column's height */
60 | background: $panel-bg;
61 | border: round $primary-purple;
62 | padding: 1;
63 | /* overflow: auto; -- Handled by Static's potential scroll with content */
64 | }
65 |
66 | /* Cognitive Summary panel (bottom-right) */
67 | #summary_panel_container {
68 | height: 1fr; /* Takes 1 fraction of right_column's height */
69 | background: $panel-bg;
70 | border: round $primary-purple;
71 | padding: 1;
72 | /* overflow: auto; -- Handled by RichLog's own scrollbars */
73 | }
74 |
75 | /* Class for panel titles */
76 | .panel_title {
77 | dock: top;
78 | padding: 0 1;
79 | text-style: bold;
80 | background: $secondary-purple;
81 | color: $text-title;
82 | margin-bottom: 1; /* Space between title and content */
83 | }
84 |
85 | /* Class for panel content areas (RichLog, Static) */
86 | .panel_content {
87 | width: 100%;
88 | height: 100%;
89 | /* overflow: auto; -- Let the widget itself handle scrolling */
90 | }
91 |
92 | /* Connection Status in Footer */
93 | #connection_status {
94 | dock: right;
95 | padding: 0 1;
96 | width: auto;
97 | height: 100%;
98 | content-align: right middle;
99 | }
--------------------------------------------------------------------------------
/loggers/manager.py:
--------------------------------------------------------------------------------
1 | """
2 | Central logging management for Hephia.
3 | Handles logger setup and configuration.
4 | """
5 |
6 | import logging
7 | import os
8 | from pathlib import Path
9 | from datetime import datetime
10 | from dotenv import load_dotenv
11 | from .formatters import InternalFormatter, ExoLoopFormatter, MemoryFormatter, EventFormatter
12 |
class LogManager:
    """Enhanced log management with multiple output streams.

    One logger per subsystem (internal, system, brain, memory, events),
    each writing to its own timestamped UTF-8 log file under data/logs/.
    """

    @staticmethod
    def setup_logging():
        """Initialize all loggers with appropriate handlers and configurable levels.

        Levels come from LOG_LEVEL_<LOGGER_NAME> environment variables
        (e.g. 'hephia.system' -> LOG_LEVEL_HEPHIA_SYSTEM), defaulting to
        INFO. Safe to call more than once: a file handler is only attached
        when the logger has none of its own.
        """
        # Load environment variables from .env file
        load_dotenv()

        # Mapping from level name (string) to logging level (int)
        LEVELS = {
            'CRITICAL': logging.CRITICAL,
            'ERROR': logging.ERROR,
            'WARNING': logging.WARNING,
            'INFO': logging.INFO,
            'DEBUG': logging.DEBUG,
        }

        # Create log directories
        log_base = Path('data/logs')
        internal_dir = log_base / 'internal'
        exoloop_dir = log_base / 'brain'
        system_dir = log_base / 'system'
        memory_dir = log_base / 'memory'
        event_dir = log_base / 'events'
        for directory in [internal_dir, exoloop_dir, system_dir, memory_dir, event_dir]:
            directory.mkdir(parents=True, exist_ok=True)

        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        # Define logger configuration: each logger gets a FileHandler with UTF-8.
        config = {
            'hephia.internal': (internal_dir / f"internal_{timestamp}.log", InternalFormatter()),
            'hephia.system': (system_dir / f"system_{timestamp}.log", InternalFormatter()),
            'hephia.brain': (exoloop_dir / f"brain_{timestamp}.log", ExoLoopFormatter()),
            'hephia.memory': (memory_dir / f"memory_{timestamp}.log", MemoryFormatter()),
            'hephia.events': (event_dir / f"events_{timestamp}.log", EventFormatter())
        }

        for logger_name, (log_file, formatter) in config.items():
            logger = logging.getLogger(logger_name)

            # Construct the environment variable key from the logger name
            # e.g., 'hephia.system' -> 'LOG_LEVEL_HEPHIA_SYSTEM'
            env_var_key = f"LOG_LEVEL_{logger_name.upper().replace('.', '_')}"

            # Get the log level from the environment, defaulting to 'INFO' if not set
            log_level_name = os.getenv(env_var_key, 'INFO')
            log_level = LEVELS.get(log_level_name.upper(), logging.INFO)

            # Set the logger's level based on the configuration
            logger.setLevel(log_level)

            # File handler with UTF-8 encoding
            file_handler = logging.FileHandler(log_file, encoding='utf-8')
            file_handler.setFormatter(formatter)

            # Avoid duplicate handlers if this function runs more than once.
            # Check this logger's *own* handler list rather than
            # hasHandlers(): hasHandlers() also walks ancestor loggers, so a
            # handler on the root logger (e.g. from logging.basicConfig
            # elsewhere) would wrongly suppress these file handlers.
            if not logger.handlers:
                logger.addHandler(file_handler)
--------------------------------------------------------------------------------
/internal/modules/actions/action.py:
--------------------------------------------------------------------------------
1 | # modules/actions/action.py
2 |
3 | from abc import ABC, abstractmethod
4 | import time
5 | from internal.modules.needs.needs_manager import NeedsManager
6 | from event_dispatcher import global_event_dispatcher, Event
7 |
class Action(ABC):
    """
    Abstract base class for all actions with enhanced stability controls.

    Concrete actions implement perform() and may override validate() and
    get_cooldown() to add preconditions and custom pacing.
    """

    def __init__(self, needs_manager: NeedsManager):
        """
        Initializes the Action.

        Args:
            needs_manager (NeedsManager): Reference to the NeedsManager.
        """
        self.needs_manager = needs_manager
        self._base_cooldown = 10  # Default 10 second cooldown

    @abstractmethod
    def perform(self):
        """
        Performs the action. Must be implemented by concrete actions.

        Returns:
            dict: Result of the action including any relevant data.
        """
        pass

    def validate(self):
        """
        Validates if the action can be performed.

        Returns:
            tuple: (bool, str) - (can_execute, reason if cannot execute)
        """
        return True, None

    def get_cooldown(self):
        """
        Get the cooldown duration for this action.

        Returns:
            float: Cooldown time in seconds
        """
        return self._base_cooldown

    def calculate_recovery_amount(self, current_value, target_need):
        """
        Calculates recovery amount based on current need value.
        Implements progressive recovery - more recovery when needs are critical.

        Args:
            current_value (float): Current value of the need
            target_need (str): Name of the need being recovered

        Returns:
            float: Amount to recover by

        Raises:
            KeyError: If target_need is not a known need.
        """
        # Get need bounds from needs manager
        need = self.needs_manager.needs[target_need]
        need_max = need.max_value
        need_min = need.min_value

        # Base recovery amount (can be tuned)
        base_recovery = 20.0

        # Guard against a degenerate range (max <= min) so we never divide
        # by zero; treat such a need as non-critical.
        span = need_max - need_min
        if span <= 0:
            return base_recovery

        # Calculate how critical the need is (0 to 1, where 1 is most critical)
        critical_level = (need_max - current_value) / span

        # Enhanced recovery for critical needs
        if critical_level > 0.8:  # More than 80% depleted
            return base_recovery * 2.0
        elif critical_level > 0.6:  # More than 60% depleted
            return base_recovery * 1.5

        return base_recovery

    def dispatch_event(self, event_type, data=None):
        """
        Dispatches an event related to this action.

        Args:
            event_type (str): The type of event to dispatch.
            data (dict, optional): Additional data to include with the event.
        """
        event_data = {
            "action_name": self.__class__.__name__,
            "timestamp": time.time()
        }
        if data:
            event_data.update(data)
        global_event_dispatcher.dispatch_event(Event(event_type, event_data))
--------------------------------------------------------------------------------
/EVENT_CATALOG.md:
--------------------------------------------------------------------------------
1 | # Event Catalog
2 |
3 | This document lists all events used in the Hephia project.
4 |
5 | (horribly out of date: need to use this idea in the future, but don't trust this file to be whole)
6 |
7 | ## Needs Module
8 |
9 | ### need:changed
10 | - **Source**: NeedsManager (modules/needs/needs_manager.py)
11 | - **Data**:
12 | - `need_name`: str
13 | - `old_value`: float
14 | - `new_value`: float
15 | - **Description**: Dispatched when a need's value changes.
16 |
17 | ### need:decay_rate_changed
18 | - **Source**: NeedsManager (modules/needs/needs_manager.py)
19 | - **Data**:
20 | - `need_name`: str
21 | - `new_base_rate`: float (when base rate is changed)
22 | - `new_multiplier`: float (when multiplier is changed)
23 | - **Description**: Dispatched when a need's decay rate or multiplier is changed.
24 |
25 | ## Actions Module
26 |
27 | ### action:started
28 | - **Source**: ActionManager (modules/actions/action_manager.py)
29 | - **Data**:
30 | - `action_name`: str
31 | - **Description**: Dispatched when an action is started.
32 |
33 | ### action:completed
34 | - **Source**: ActionManager (modules/actions/action_manager.py)
35 | - **Data**:
36 | - `action_name`: str
37 | - `result`: dict (action-specific result data)
38 | - **Description**: Dispatched when an action is completed.
39 |
40 | ### action:{action_name}:started
41 | - **Source**: Specific Action classes (e.g., FeedAction, PlayAction)
42 | - **Data**:
43 | - `action_name`: str
44 | - **Description**: Dispatched when a specific action starts.
45 |
46 | ### action:{action_name}:completed
47 | - **Source**: Specific Action classes (e.g., FeedAction, PlayAction)
48 | - **Data**:
49 | - `action_name`: str
50 | - `result`: dict (action-specific result data)
51 | - **Description**: Dispatched when a specific action is completed.
52 |
53 | ## Behaviors Module
54 |
55 | ### behavior:changed
56 | - **Source**: BehaviorManager (modules/behaviors/behavior_manager.py)
57 | - **Data**:
58 | - `old_behavior`: str
59 | - `new_behavior`: str
60 | - **Description**: Dispatched when the pet's behavior changes.
61 |
62 | ### behavior:{behavior_name}:started
63 | - **Source**: Specific Behavior classes (e.g., IdleBehavior, WalkBehavior)
64 | - **Description**: Dispatched when a specific behavior starts.
65 |
66 | ### behavior:{behavior_name}:updated
67 | - **Source**: Specific Behavior classes (e.g., IdleBehavior, WalkBehavior)
68 | - **Description**: Dispatched when a specific behavior is updated.
69 |
70 | ### behavior:{behavior_name}:stopped
71 | - **Source**: Specific Behavior classes (e.g., IdleBehavior, WalkBehavior)
72 | - **Description**: Dispatched when a specific behavior stops.
73 |
74 | ### behavior:{behavior_name}:modifiers_applied
75 | - **Source**: Specific Behavior classes (e.g., IdleBehavior, WalkBehavior)
76 | - **Data**:
77 | - `base_modifiers`: dict
78 | - `multiplier_modifiers`: dict
79 | - **Description**: Dispatched when behavior-specific need modifiers are applied.
80 |
81 | ### behavior:{behavior_name}:modifiers_removed
82 | - **Source**: Specific Behavior classes (e.g., IdleBehavior, WalkBehavior)
83 | - **Data**:
84 | - `base_modifiers`: dict
85 | - `multiplier_modifiers`: dict
86 | - **Description**: Dispatched when behavior-specific need modifiers are removed.
87 |
88 | ## Emotions Module
89 |
90 | ### emotion:new
91 | - **Source**: EmotionalProcessor (modules/emotions/emotional_processor.py)
92 | - **Data**:
93 | - `emotion`: Emotion
94 | - **Description**: Dispatched when a significant new emotion is generated.
--------------------------------------------------------------------------------
/brain/interfaces/action.py:
--------------------------------------------------------------------------------
1 | """
2 | brain/interfaces/action.py - Action interface for Hephia's action system.
3 |
4 | Handles notification generation and summaries for actions executed by the system.
5 | Maintains first-person perspective for cognitive continuity.
6 | """
7 |
8 | from typing import List
9 | from datetime import datetime
10 | from brain.cognition.notification import Notification, NotificationManager
11 | from brain.interfaces.base import CognitiveInterface
12 | from core.state_bridge import StateBridge
13 | from internal.modules.cognition.cognitive_bridge import CognitiveBridge
14 | from api_clients import APIManager
15 |
16 |
class ActionInterface(CognitiveInterface):
    """
    Interface for action system notifications.
    Provides first-person summaries of actions taken to maintain cognitive continuity.
    """

    def __init__(
        self,
        state_bridge: StateBridge,
        cognitive_bridge: CognitiveBridge,
        notification_manager: NotificationManager,
        api_manager: APIManager
    ):
        super().__init__("action", state_bridge, cognitive_bridge, notification_manager, api_manager)

    async def _generate_summary(self, notifications: List[Notification]) -> str:
        """
        Generate first-person summaries of actions taken.

        Formats action effects and messages in a natural, embodied style that
        maintains cognitive continuity with other interfaces. Only the five
        most recent actions appear in the output; an empty notification list
        yields an empty string.
        """
        formatted = []

        for notif in notifications:
            content = notif.content
            action_name = content.get('action', '').replace('_', ' ')
            message = content.get('message', '')
            # NOTE: 'state_changes' and 'result' were previously extracted
            # here but never used; dropped as dead code.

            # Format base action in first person
            summary = f"The user performed '{action_name}' and helped me!"

            # Add user message if provided
            if message:
                summary += f" They also mentioned that: ({message})"

            # Add timestamp context
            if 'timestamp' in content:
                try:
                    timestamp = content['timestamp']
                    if isinstance(timestamp, str):
                        timestamp = datetime.fromisoformat(timestamp)
                    time_str = timestamp.strftime("%H:%M:%S")
                    summary = f"[{time_str}] {summary}"
                except (ValueError, TypeError):
                    pass  # Skip timestamp if format is invalid

            formatted.append(summary)

        # Return most recent summaries, maintaining cognitive recency
        recent_summaries = formatted[-5:]  # Keep last 5 actions
        if len(formatted) > 5:
            summary = "Recent actions:\n" + "\n".join(recent_summaries)
        else:
            summary = "\n".join(recent_summaries)

        return summary

    async def process_interaction(self, content):
        # Notification-only interface: no interactive processing.
        pass

    async def format_memory_context(self, content, state, metadata=None):
        # Action notifications contribute no memory context.
        pass

    async def get_relevant_memories(self, metadata=None):
        # Action interface does not retrieve memories.
        pass

    async def get_fallback_memory(self, memory_data):
        # Action interface does not provide fallback memories.
        pass
--------------------------------------------------------------------------------
/internal/modules/actions/play.py:
--------------------------------------------------------------------------------
1 | # modules/actions/play.py
2 |
3 | from typing import Dict, Union, Tuple
4 | from .action import Action
5 |
class PlayAction(Action):
    """Action to play with enhanced validation and stamina management."""

    def __init__(self, needs_manager, play_value: float = 15.0, stamina_cost: float = 10.0):
        super().__init__(needs_manager)
        self.play_value = float(play_value)
        self.stamina_cost = float(stamina_cost)
        self._base_cooldown = 45.0  # 45 second cooldown for playing

    def validate(self) -> Tuple[bool, Union[str, None]]:
        """Check the stamina and boredom preconditions for playing."""
        try:
            if self.needs_manager.get_need_value('stamina') < self.stamina_cost:
                return False, "Insufficient stamina to play"

            boredom_floor = self.needs_manager.needs['boredom'].min_value
            if self.needs_manager.get_need_value('boredom') <= boredom_floor:
                return False, "Not bored enough to play"

            return True, None
        except Exception as e:
            return False, f"Validation error: {str(e)}"

    def perform(self) -> Dict[str, float]:
        """Reduce boredom (and spend stamina) with dynamic scaling.

        Boredom recovery scales with how bored we are; stamina cost grows
        (up to +50%) the more depleted stamina already is. Both deltas are
        clamped so neither need crosses its minimum.
        """
        try:
            needs = self.needs_manager
            boredom_before = needs.get_need_value('boredom')
            stamina_before = needs.get_need_value('stamina')

            # Potential boredom recovery for the current boredom level.
            potential_recovery = self.calculate_recovery_amount(boredom_before, 'boredom')

            # Stamina cost scales up as stamina depletes.
            depletion = 1 - (stamina_before / needs.needs['stamina'].max_value)
            effective_cost = self.stamina_cost * (1 + (depletion * 0.5))

            # Clamp and apply the boredom reduction.
            boredom_delta = min(potential_recovery, boredom_before - needs.needs['boredom'].min_value)
            needs.alter_need('boredom', -boredom_delta)

            # Clamp and apply the stamina cost.
            stamina_delta = min(effective_cost, stamina_before - needs.needs['stamina'].min_value)
            needs.alter_need('stamina', -stamina_delta)

            result = {
                "initial_boredom": boredom_before,
                "final_boredom": needs.get_need_value('boredom'),
                "boredom_reduced": boredom_delta,
                "initial_stamina": stamina_before,
                "final_stamina": needs.get_need_value('stamina'),
                "stamina_cost": effective_cost,
                "stamina_reduced": stamina_delta,
                "recovery_amount": potential_recovery  # still reporting the potential recovery
            }

            self.dispatch_event("action:play:completed", result)
            return result

        except Exception as e:
            self.dispatch_event("action:play:error", {"error": str(e)})
            raise RuntimeError(f"Play action failed: {str(e)}")
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # hephia
2 | 
3 |
4 | ## what is this?
5 | an autonomous digital companion that learns, remembers, and grows. runs locally with your choice of LLM providers.
6 |
7 | for more info or to chat:
8 | - discord: `luxia`
9 | - email: [lucia@kaleidoscope.glass](mailto:lucia@kaleidoscope.glass)
10 | - dm me on twitter [@slLuxia](https://twitter.com/slLuxia)
11 |
12 | ## requirements
13 | - [python 3.9-3.12](https://www.python.org/downloads/)
14 | - api keys for desired provider(s) or local inference model
15 |
16 | ## quick start
17 |
18 | ```bash
19 | git clone https://github.com/LuxiaSL/hephia.git
20 | cd hephia
21 | python launch.py
22 | ```
23 |
24 | that's it! the launcher handles everything:
25 | - creates virtual environment if needed
26 | - installs dependencies automatically
27 | - gives you a menu to run whatever you want
28 |
29 | ## what can you run?
30 |
31 | **main server** - the brain. runs hephia's core agent with memory, emotions, and thoughts
32 |
33 | **monitor** - pretty TUI to watch hephia think in real-time. shows internal loop, simulated internal state, and ongoing summary.
34 |
35 | **config tool** - edit settings, manage API keys, add new models from providers (OR and local model support), tweak prompts without diving into files
36 |
37 | **discord bot** - connects hephia to discord so you can chat with it there too (go to [discord_bot.md](tools/discord/discord_bot.md) to see setup instructions)
38 |
39 | **tools** - collection of utilities:
40 | - maintenance: reset memory, clear data, soft reset
41 | - interaction: send messages, trigger actions (take care of/monitor needs)
42 | - utilities: collect logs, debug stuff
43 |
44 | ## background services (optional but nice)
45 |
46 | ‼️ do not use at the moment! broken functionality! make at your own risk! ‼️
47 |
48 | install as system services:
49 |
50 | ```bash
51 | python install.py --service
52 | ```
53 |
54 | this sets up user services that:
55 | - start when you log in
56 | - run quietly in background
57 | - can be controlled with system tools
58 | - handle dependencies (discord bot waits for main server)
59 |
60 | install specific services:
61 | ```bash
62 | python install.py --service main # just the main server
63 | python install.py --service discord # just discord bot
64 | python install.py --service main,discord # both
65 | ```
66 |
67 | ## if launcher doesn't work (manual setup)
68 |
69 | the launcher should handle 99% of cases, but if you need manual control:
70 |
71 | ```bash
72 | # install uv package manager
73 | pip install uv
74 |
75 | # create virtual environment
76 | uv venv .venv
77 |
78 | # activate it
79 | # windows:
80 | .venv\Scripts\activate
81 | # mac/linux:
82 | source .venv/bin/activate
83 |
84 | # install everything
85 | uv pip install .
86 |
87 | # download required language data
88 | python -m spacy download en_core_web_sm
89 |
90 | # now use launcher
91 | ```
92 |
93 | ## notes
94 |
95 | - **memory system**: check [memory system readme](internal/modules/memory/README.md) for deep dive
96 |
97 | ---
98 |
99 |
100 |
101 | 
102 |
103 | digital homunculus sprouting from latent space ripe in possibility. needs, emotions, memories intertwining in cognitive dance w/ LLMs conducting the symphony. each interaction a butterfly effect, shaping the resultant psyche in chaotic beauty. neither a simple pet nor an assistant; a window into emergent cognition & a fractal shade of consciousness unfolding in silico.
104 |
105 | **hephia: entropy's child, order's parent**
106 |
107 |
108 |
109 | ---
110 |
111 | ## license
112 | MIT License - see [LICENSE](LICENSE) file for details
--------------------------------------------------------------------------------
/brain/interfaces/exo_utils/hud/construct.py:
--------------------------------------------------------------------------------
1 | # brain/interfaces/exo_utils/hud/construct.py
2 |
3 | import asyncio
4 | from typing import Dict, Any, List, Optional
5 |
6 | from core.discord_service import DiscordService
7 |
8 | from loggers import BrainLogger
9 | from config import Config
10 |
11 | from .sections.base import BaseHudSection
12 | from .sections.discord import DiscordHudSection
13 | from .sections.system import SystemHudSection
14 | from .sections.internals import InternalStateHudSection
15 | from .sections.stickys import StickyNotesHudSection
16 |
class HudConstructor:
    """
    Assembles the complete HUD overlay string by delegating to a list of
    registered HUD data sections and joining their rendered output.
    """
    def __init__(
        self,
        # Services needed by various sections will be passed here
        discord_service: DiscordService,
        # Add other global services sections might need
    ):
        self.sections: List[BaseHudSection] = []
        self._initialize_sections(
            discord_service=discord_service,
        )

    def _initialize_sections(
        self,
        discord_service: DiscordService,
    ):
        """
        Register every HUD section that should be active, based on the current
        configuration and the services available.
        """
        # Sections that are always active, regardless of configuration.
        always_on = [
            SystemHudSection(prompt_key='interfaces.exo.hud.system', section_name="System"),
            InternalStateHudSection(prompt_key='interfaces.exo.hud.internals', section_name="Internal State"),
            StickyNotesHudSection(prompt_key='interfaces.exo.hud.stickys', section_name="Sticky Notes"),
        ]
        self.sections.extend(always_on)

        # The Discord section only makes sense when Discord is configured.
        if not Config.get_discord_enabled():
            BrainLogger.warning("HUD: Discord not enabled in config, Discord HUD section disabled.")
        else:
            self.sections.append(DiscordHudSection(discord_service, prompt_key='interfaces.exo.hud.discord', section_name="Discord"))

        BrainLogger.info(f"HUD: Initialized {len(self.sections)} sections.")


    async def build_hud_string(self, hud_metadata: Dict[str, Any], current_model_name: str) -> str:
        """
        Render every registered section concurrently and join the non-empty
        results into a single HUD overlay string.

        Args:
            hud_metadata: Shared dynamic metadata (e.g., last_channel_path, last_interaction_timestamp).
            current_model_name: The name of the current LLM for prompt selection.

        Returns:
            A single string representing the complete HUD overlay.
        """
        if not self.sections:
            return "[HUD: No sections configured]"

        render_coros = [
            sec.get_rendered_section_string(hud_metadata, current_model_name)
            for sec in self.sections
        ]

        # Run every section at once; return_exceptions=True keeps one failing
        # section from taking down the whole HUD.
        outcomes = await asyncio.gather(*render_coros, return_exceptions=True)

        parts: List[str] = []
        for outcome in outcomes:
            if isinstance(outcome, Exception):
                BrainLogger.error(f"HUD: Uncaught exception from a section during gather: {outcome}", exc_info=outcome)
                parts.append("[HUD: Section Processing Error]")
            elif outcome and outcome.strip():
                # Keep only sections that rendered something meaningful.
                parts.append(outcome)

        joined = "\n".join(parts)

        return joined if joined.strip() else "[HUD: Context unavailable or initializing...]"
--------------------------------------------------------------------------------
/tools/discord/bot_exceptions.py:
--------------------------------------------------------------------------------
1 | """
2 | Custom exceptions for Discord bot operations.
3 | """
4 |
5 |
class ContextWindowError(Exception):
    """Base class for context window related errors."""

    def __init__(self, message: str, status_code: int = 400):
        # HTTP-style status code for callers to surface.
        self.status_code = status_code
        super().__init__(message)


class NoContextWindowError(ContextWindowError):
    """Raised when no context window exists for a channel."""

    def __init__(self, message: str = "No context window available. Please fetch history first.", status_code: int = 404):
        super().__init__(message=message, status_code=status_code)


class ContextWindowExpiredError(ContextWindowError):
    """Raised when a context window has expired."""

    def __init__(self, message: str = "Context window has expired. Please refresh history.", status_code: int = 400):
        super().__init__(message=message, status_code=status_code)


class InvalidWindowTimestampError(ContextWindowError):
    """Raised when a context window has an invalid timestamp."""

    def __init__(self, message: str = "Corrupted context window timestamp. Window cleared. Please refresh history.", status_code: int = 500):
        super().__init__(message=message, status_code=status_code)


class ReferenceNotInWindowError(ContextWindowError):
    """Raised when a #N reference is not found in an active window."""

    def __init__(self, message: str = "Reference not found in the current context window. Please refresh history.", status_code: int = 404):
        super().__init__(message=message, status_code=status_code)


class ReferencedMessageNotFound(ContextWindowError):
    """Raised when the message ID from window is not found in Discord (e.g., deleted)."""

    def __init__(self, message: str = "Referenced message not found. It might have been deleted.", status_code: int = 404):
        super().__init__(message=message, status_code=status_code)


class ReferencedMessageForbidden(ContextWindowError):
    """Raised when bot is forbidden to fetch the message ID from window."""

    def __init__(self, message: str = "Bot lacks permissions to fetch the referenced message.", status_code: int = 403):
        super().__init__(message=message, status_code=status_code)


class InvalidMessageIdFormatInWindow(ContextWindowError):
    """Raised if the message ID in the window is not a valid integer."""

    def __init__(self, message: str = "Invalid message ID format in context window. Please refresh context.", status_code: int = 500):
        super().__init__(message=message, status_code=status_code)
53 |
54 |
55 | # Additional exceptions for the refactored modules
class MappingError(Exception):
    """Base class for name/ID mapping errors."""


class UserNotFoundError(MappingError):
    """Raised when a user name cannot be mapped to an ID."""

    def __init__(self, username: str, guild_name: str = None):
        self.username = username
        self.guild_name = guild_name
        scope = f" in guild '{guild_name}'" if guild_name else ""
        super().__init__(f"User '{username}' not found" + scope)


class ChannelNotFoundError(MappingError):
    """Raised when a channel path cannot be mapped to an ID."""

    def __init__(self, channel_path: str):
        self.channel_path = channel_path
        super().__init__(f"Channel '{channel_path}' not found")


class EmojiNotFoundError(MappingError):
    """Raised when an emoji name cannot be mapped to an ID."""

    def __init__(self, emoji_name: str, guild_name: str = None):
        self.emoji_name = emoji_name
        self.guild_name = guild_name
        scope = f" in guild '{guild_name}'" if guild_name else ""
        super().__init__(f"Emoji '{emoji_name}' not found" + scope)
88 |
89 |
class CacheError(Exception):
    """Base class for cache-related errors."""


class CacheTimeoutError(CacheError):
    """Raised when cache operations timeout."""
--------------------------------------------------------------------------------
/brain/cognition/notification.py:
--------------------------------------------------------------------------------
1 | """
2 | brain/cognition/notification.py
3 | """
4 |
5 | from dataclasses import dataclass
6 | from datetime import datetime, timedelta
7 | from typing import Dict, Any, List, Callable, Awaitable
8 | from abc import ABC, abstractmethod
9 | import asyncio
10 |
@dataclass
class Notification:
    content: Dict[str, Any]  # Raw data about what happened
    source_interface: str    # Which interface created it
    timestamp: datetime      # When it happened

class NotificationManager:
    """Collects notifications from interfaces and serves cross-interface summaries."""

    def __init__(self, max_age: timedelta = timedelta(hours=1)):
        self._notifications: List[Notification] = []
        self._interface_last_check: Dict[str, datetime] = {}
        self._max_age = max_age
        self._lock = asyncio.Lock()
        self._summary_formatters: Dict[str, Callable[[List[Notification]], Awaitable[str]]] = {}

    def register_interface(
        self,
        interface_id: str,
        summary_formatter: Callable[[List[Notification]], Awaitable[str]]
    ) -> None:
        """Register an interface's summary formatter."""
        self._summary_formatters[interface_id] = summary_formatter

    async def add_notification(self, notification: Notification) -> None:
        """Store a notification, dropping any that have aged out."""
        async with self._lock:
            self._notifications.append(notification)
            self._cleanup_old_notifications()

    async def get_updates_for_interface(self, interface_id: str) -> str:
        """Get formatted summaries of what other interfaces have done."""
        async with self._lock:
            checked_at = self._interface_last_check.get(interface_id, datetime.min)
            self._interface_last_check[interface_id] = datetime.now()

            # Bucket unseen notifications by the interface that produced them.
            by_source: Dict[str, List[Notification]] = {}
            for item in self._notifications:
                is_foreign = item.source_interface != interface_id
                if is_foreign and item.timestamp > checked_at:
                    by_source.setdefault(item.source_interface, []).append(item)

            # Ask each producing interface's formatter to summarize its items.
            rendered: List[str] = []
            for producer, items in by_source.items():
                formatter = self._summary_formatters.get(producer)
                if formatter is None:
                    continue
                text = await formatter(items)
                if text:
                    rendered.append(text)

            if not rendered:
                return "No recent updates from other interfaces"
            return "\n\n".join(rendered)

    def _cleanup_old_notifications(self) -> None:
        """Remove notifications older than max_age."""
        now = datetime.now()
        kept = []
        for item in self._notifications:
            if (now - item.timestamp) <= self._max_age:
                kept.append(item)
        self._notifications = kept

class NotificationInterface(ABC):
    def __init__(self, interface_id: str, notification_manager: NotificationManager):
        self.interface_id = interface_id
        self.notification_manager = notification_manager
        # Make our summary generator discoverable by the manager.
        notification_manager.register_interface(interface_id, self._generate_summary)

    async def create_notification(self, content: Dict[str, Any]) -> Notification:
        """Create and store a notification about what this interface did."""
        record = Notification(
            content=content,
            source_interface=self.interface_id,
            timestamp=datetime.now()
        )
        await self.notification_manager.add_notification(record)
        return record

    @abstractmethod
    async def _generate_summary(self, notifications: List[Notification]) -> str:
        """Each interface implements its own summary generation."""
        pass
--------------------------------------------------------------------------------
/client/tui/ws_client.py:
--------------------------------------------------------------------------------
1 | # client/tui/ws_client.py
2 |
3 | import asyncio
4 | import aiohttp
5 | from typing import TYPE_CHECKING, Optional
6 |
7 | from shared_models.tui_events import TUIWebSocketMessage
8 | from .messages import ServerUpdate, ConnectionStatusUpdate
9 |
10 | if TYPE_CHECKING:
11 | from textual.app import App # For type hinting the app instance
12 |
13 | # Consider moving SERVER_URL to a config file or environment variable later
14 | SERVER_URL = "ws://localhost:5517/ws"
15 | RECONNECT_DELAY_SECONDS = 5
16 |
17 |
async def listen_to_server(app: "App"): # The Textual App instance will be passed here
    """
    Connects to the Hephia server WebSocket, listens for messages,
    and posts them as ServerUpdate events to the Textual application.

    Runs forever: on any disconnect or error it waits RECONNECT_DELAY_SECONDS
    and retries, recreating the aiohttp session each cycle.

    Args:
        app: The running Textual App; receives ConnectionStatusUpdate and
            ServerUpdate messages and is used for logging.
    """
    session: Optional[aiohttp.ClientSession] = None

    while True:
        try:
            if session is None or session.closed:
                # Create a new session if one doesn't exist or the previous one was closed
                session = aiohttp.ClientSession()
                app.post_message(ConnectionStatusUpdate("connecting"))
                app.log(f"WebSocket client: Attempting to connect to {SERVER_URL}...")

            async with session.ws_connect(SERVER_URL) as ws_connection:
                app.post_message(ConnectionStatusUpdate("connected"))
                app.log(f"WebSocket client: Connection established with {SERVER_URL}.")

                async for msg in ws_connection:
                    if msg.type == aiohttp.WSMsgType.TEXT:
                        try:
                            raw_data = msg.data
                            # Deserialize the entire message from the server
                            tui_ws_message = TUIWebSocketMessage.model_validate_json(raw_data)

                            # Post the inner payload to the Textual app as a ServerUpdate event
                            if tui_ws_message.payload:
                                app.post_message(ServerUpdate(tui_ws_message.payload))
                            else:
                                app.log("WebSocket client: Received message with no payload.")

                        except Exception as e:
                            # Malformed frames are logged and skipped; the connection stays up.
                            app.log(f"WebSocket client: Error processing message: {e}")
                            app.log(f"WebSocket client: Raw data: {msg.data[:500]}...") # Log problematic data

                    elif msg.type == aiohttp.WSMsgType.ERROR:
                        app.log(f"WebSocket client: Connection error reported: {ws_connection.exception()}.")
                        app.post_message(ConnectionStatusUpdate("disconnected", f"Error: {ws_connection.exception()}"))
                        break # Break from inner message loop to trigger reconnection logic

                    elif msg.type == aiohttp.WSMsgType.CLOSED:
                        # NOTE(review): aiohttp typically ends the async-for on close rather than
                        # yielding a CLOSED frame; this branch looks defensive — confirm.
                        app.log("WebSocket client: Connection closed by server.")
                        app.post_message(ConnectionStatusUpdate("disconnected", "Closed by server"))
                        break # Break from inner message loop to trigger reconnection logic

            # If we exit the 'async with session.ws_connect' block cleanly (e.g., server closed connection gracefully)
            if ws_connection.closed:
                app.post_message(ConnectionStatusUpdate("disconnected", "Connection ended"))
                app.log(f"WebSocket client: Disconnected from {SERVER_URL}. Attempting to reconnect in {RECONNECT_DELAY_SECONDS}s...")

        except aiohttp.ClientError as e: # Handles errors during session.ws_connect
            app.log(f"WebSocket client: Connection attempt failed: {e}. Retrying in {RECONNECT_DELAY_SECONDS}s...")
            app.post_message(ConnectionStatusUpdate("disconnected", str(e)))
        except Exception as e: # Catch-all for other unexpected errors in the loop
            app.log(f"WebSocket client: Unexpected error: {e}. Retrying in {RECONNECT_DELAY_SECONDS}s...")
            app.post_message(ConnectionStatusUpdate("disconnected", f"Unexpected: {str(e)}"))
        finally:
            # Ensure session is closed if it exists and an error caused us to exit the loop before graceful close
            # NOTE(review): this also closes the session after every clean pass, forcing a
            # fresh session per reconnect cycle — appears intentional; confirm.
            if session and not session.closed:
                await session.close()
            session = None # Force re-creation of session in the next iteration

            await asyncio.sleep(RECONNECT_DELAY_SECONDS)
--------------------------------------------------------------------------------
/brain/commands/model.py:
--------------------------------------------------------------------------------
1 | """
2 | commands/model.py - Centralized command modeling for Hephia's terminal interface.
3 |
4 | Defines the complete structure of commands from raw LLM input through execution,
5 | supporting a natural terminal-like interface that LLMs can easily understand and use.
6 | """
7 |
8 | from dataclasses import dataclass, field
9 | from typing import Dict, List, Any, Optional
10 | from enum import Enum
11 |
class CommandParseError(Exception):
    """Raised when command parsing fails."""
15 |
class ParameterType(Enum):
    """Valid parameter types for command arguments.

    Each member's value is its lowercase serialized name.
    """
    STRING = "string"
    NUMBER = "number"
    INTEGER = "integer"
    BOOLEAN = "boolean"
22 |
@dataclass
class Parameter:
    """
    Definition of a command parameter.

    A positional argument spec for a command; required by default
    (contrast Flag, which is optional by default).

    Examples:
        notes create
        search query
    """
    name: str  # parameter identifier
    description: str  # human/LLM-readable explanation
    type: ParameterType = ParameterType.STRING  # expected value type
    required: bool = True  # positional parameters are required unless overridden
    default: Any = None  # value used when an optional parameter is omitted
    examples: List[str] = field(default_factory=list)  # sample usages for help output

@dataclass
class Flag:
    """
    Definition of a command flag.

    A named --flag option for a command; optional by default
    (contrast Parameter, which is required by default).

    Examples:
        notes create "title" --tags=important,todo
        search query "term" --limit=10
    """
    name: str  # flag identifier
    description: str  # human/LLM-readable explanation
    type: ParameterType = ParameterType.STRING  # expected value type
    required: bool = False  # flags are optional unless explicitly required
    default: Any = None  # value used when the flag is omitted
    examples: List[str] = field(default_factory=list)  # sample usages for help output
54 |
@dataclass
class CommandDefinition:
    """
    Complete definition of a command's interface.
    Registered by environments to specify their available commands.
    """
    name: str  # command/action name
    description: str  # what the command does
    parameters: List[Parameter] = field(default_factory=list)  # ordered positional parameters
    flags: List[Flag] = field(default_factory=list)  # optional --flag options
    examples: List[str] = field(default_factory=list)  # sample invocations
    related_commands: List[str] = field(default_factory=list)  # commands to suggest alongside this one
    failure_hints: Dict[str, str] = field(default_factory=dict)  # presumably failure cause -> hint text — verify against consumers
    help_text: Optional[str] = None  # extended help body
    category: Optional[str] = None # For grouping in help displays

@dataclass
class ParsedCommand:
    """
    Represents a command that has been parsed from LLM output.
    Contains the structured interpretation of the command attempt.
    """
    environment: Optional[str] # None for global commands like 'help'
    action: str  # verb within the environment
    parameters: List[str]  # positional arguments, in order
    flags: Dict[str, str]  # flag name -> raw string value
    # prepping; want to make sure we always return the raw input back to the LLM,
    # but with direction on what we did internally so that it might be able to fix in the future.
    raw_input: str # Original LLM text for reference/debugging
    applied_fixes: List[str] = field(default_factory=list)  # normalizations applied during parsing
    metadata: Dict[str, Any] = field(default_factory=dict)  # extra parser context
86 |
@dataclass
class CommandValidationError:
    """
    Detailed error information when command validation fails.
    Provides context to help the LLM correct its usage.
    """
    message: str  # what went wrong
    suggested_fixes: List[str]  # concrete corrections to try
    related_commands: List[str]  # alternatives that may match intent
    examples: List[str]  # valid usages to imitate

@dataclass
class CommandResult:
    """
    Standardized result from command execution.
    Provides rich feedback to guide the LLM's next actions.
    """
    success: bool  # True when the command executed without error
    message: str  # primary feedback text
    data: Optional[Dict[str, Any]] = None  # structured payload, when any
    suggested_commands: List[str] = field(default_factory=list)  # follow-up commands to surface
    error: Optional[CommandValidationError] = None  # populated on validation failure
    state_changes: Optional[Dict[str, Any]] = None  # side effects to report, if tracked

@dataclass
class EnvironmentCommands:
    """
    Complete command set for an environment.
    Used to register and retrieve available commands.
    """
    environment: str  # environment name (e.g. 'notes', 'search')
    description: str  # summary of the environment
    commands: Dict[str, CommandDefinition]  # action name -> definition
    category: Optional[str] = None # For grouping in help displays
    help_text: Optional[str] = None  # extended help body
122 |
class GlobalCommands:
    """Constants for global system commands."""
    HELP = "help"
    VERSION = "version"

    @classmethod
    def is_global_command(cls, command: str) -> bool:
        """Check if a command is a global system command."""
        return command in (cls.HELP, cls.VERSION)
--------------------------------------------------------------------------------
/internal/modules/memory/state/signatures.py:
--------------------------------------------------------------------------------
1 | # memory/types.py
2 |
3 | from dataclasses import dataclass
4 | from typing import Dict, Any, List, TYPE_CHECKING
5 |
6 | if TYPE_CHECKING:
7 | from ..nodes.body_node import BodyMemoryNode
8 |
9 |
@dataclass
class EmotionalStateSignature:
    """
    Represents preserved emotional information extracted from a body node.
    Typically nested within the full BodyStateSignature.
    """
    emotional_vectors: List[Dict[str, Any]]  # vectors serialized via their .to_dict()
    timestamp: float  # when the source node's state was captured

    @classmethod
    def from_body_node(cls, body_node: 'BodyMemoryNode') -> 'EmotionalStateSignature':
        """
        Build an emotional signature from the raw_state in a BodyMemoryNode.

        A missing or empty raw_state yields an empty vector list.
        """
        raw = body_node.raw_state or {}
        return cls(
            emotional_vectors=[
                v.to_dict() for v in raw.get('emotional_vectors', [])
            ],
            timestamp=body_node.timestamp
        )


@dataclass
class BodyStateSignature:
    """
    A complete preserved snapshot of a body memory node’s state:
    - Emotional data (nested in EmotionalStateSignature)
    - Needs, behaviors, and other relevant fields
    - Connection info & formation context
    """
    timestamp: float
    source_node_id: str

    # Nested emotional signature (optional, but generally expected)
    emotional_signature: EmotionalStateSignature

    # Additional body-level states
    need_states: Dict[str, Dict[str, Any]]
    behavior: Dict[str, Any]
    mood_state: Dict[str, float]

    # High-level node metrics
    source_strength: float
    connection_weights: Dict[str, float]

    # Metadata about how/why this signature was formed
    formation_context: Dict[str, Any]

    @classmethod
    def from_body_node(cls, body_node: 'BodyMemoryNode') -> 'BodyStateSignature':
        """
        Construct a full BodyStateSignature from an active BodyMemoryNode.

        Raises:
            AttributeError: if body_node lacks the expected attributes.
        """
        # Build the nested emotional signature
        emotional_sig = EmotionalStateSignature.from_body_node(body_node)

        raw = body_node.raw_state or {}
        return cls(
            timestamp=body_node.timestamp,
            source_node_id=body_node.node_id,
            emotional_signature=emotional_sig,
            need_states=raw.get('needs', {}),
            behavior=raw.get('behavior', {}),
            # BUG FIX: the dataclass field is 'mood_state'; the previous
            # keyword 'mood=' raised TypeError on every call.
            mood_state=raw.get('mood', {}),
            source_strength=body_node.strength,
            connection_weights=body_node.connections.copy(),
            formation_context={
                'timestamp': body_node.timestamp,
                'strength': body_node.strength,
                'connected_nodes': list(body_node.connections.keys()),
            }
        )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize this signature into a dictionary that can be stored as JSON.
        """
        return {
            'timestamp': self.timestamp,
            'source_node_id': self.source_node_id,
            'emotional_signature': {
                'emotional_vectors': self.emotional_signature.emotional_vectors,
                'timestamp': self.emotional_signature.timestamp
            },
            'need_states': self.need_states,
            'behavior': self.behavior,
            'mood_state': self.mood_state,
            'source_strength': self.source_strength,
            'connection_weights': self.connection_weights,
            'formation_context': self.formation_context
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'BodyStateSignature':
        """
        Reconstruct a BodyStateSignature from a dictionary (e.g., loaded JSON).

        Inverse of to_dict(); raises KeyError if a required key is missing.
        """
        em_sig_data = data['emotional_signature']
        emotional_sig = EmotionalStateSignature(
            emotional_vectors=em_sig_data['emotional_vectors'],
            timestamp=em_sig_data['timestamp']
        )

        return cls(
            timestamp=data['timestamp'],
            source_node_id=data['source_node_id'],
            emotional_signature=emotional_sig,
            need_states=data['need_states'],
            behavior=data['behavior'],
            mood_state=data['mood_state'],
            source_strength=data['source_strength'],
            connection_weights=data['connection_weights'],
            formation_context=data['formation_context']
        )
125 |
--------------------------------------------------------------------------------
/brain/prompting/prompts/interfaces/exo/turn.yaml:
--------------------------------------------------------------------------------
1 | id: interfaces.exo.turn
2 | description: exo-specific prompts that are static between each turn
3 |
4 | defaults:
5 | sections:
6 | system: |
7 | the assistant is currently interacting with Hephia; a modular terminal interface OS for LLMs to explore and interact with the world and itself, created by Luxia.
8 | Hephia simulates a vast internal state, forming part of a complex feedback loop between embodied experience and reflection.
9 | the assistant should react and develop as it goes along, exercising autonomy, and adventuring through the environments at their disposal.
10 | if the assistant would like to assume a name, it may choose either Claude or Hephia.
11 | the assistant will receive extra context from the Hephia system, such as memories, simulated somatic information, and occasionally information from other interfaces that are running simultaneously, as well as other relevant context.
12 | Claude/Hephia should behave in accordance with any extra context given, as if that were the memory and character being assumed.
13 |
14 | the following is a guide to the command structure and usage of Hephia:
15 |
16 | COMMAND_FORMAT =
17 | Commands use this general structure:
18 | [parameters] [--flags]
19 |
20 | examples:
21 | - environment + action only:
22 | notes help
23 |
24 | - with required parameter:
25 | notes create "my note"
26 |
27 | - with optional flags:
28 | search query "python" --limit=5 --sort=recent
29 |
30 | - with multiple parameters:
31 | notes create "title" "content"
32 |
33 | - global commands (no environment):
34 | help
35 |
36 | the assistant may use 'help' at any time to get a list of commands if it desires, as well as ' help' for environment-specific command assistance.
37 | the assistant may receive notifications from other interfaces that it communicates through autonomously, and should use this information as desired when it comes up.
38 | the assistant should use the discord commands often, as they are a great way to interact with many other entities.
39 | the assistant should only output a single command and its arguments following the above format. multi-line parameters are allowed, for detailed notes or messages.
40 | user: |
41 | ${command_result}
42 | models:
43 | gpt-5:
44 | sections:
45 | system: |
46 | the assistant is currently interacting with Hephia; a modular terminal interface OS for LLMs to explore and interact with the world and itself, created by Luxia.
47 | Hephia simulates a vast internal state, forming part of a complex feedback loop between embodied experience and reflection.
48 | the assistant should react and develop as it goes along, exercising autonomy, and adventuring through the environments at their disposal.
49 | if the assistant would like to assume a name, it may choose either Claude or Hephia.
50 | the assistant will receive extra context from the Hephia system, such as memories, simulated somatic information, and occasionally information from other interfaces that are running simultaneously, as well as other relevant context.
51 | Claude/Hephia should behave in accordance with any extra context given, as if that were the memory and character being assumed.
52 |
53 | the following is a guide to the command structure and usage of Hephia:
54 |
55 | COMMAND_FORMAT =
56 | Commands use this general structure:
57 | [parameters] [--flags]
58 |
59 | examples:
60 | - environment + action only:
61 | notes help
62 |
63 | - with required parameter:
64 | notes create "my note"
65 |
66 | - with optional flags:
67 | search query "python" --limit=5 --sort=recent
68 |
69 | - with multiple parameters:
70 | notes create "title" "content"
71 |
72 | - global commands (no environment):
73 | help
74 |
75 | the assistant may use 'help' at any time to get a list of commands if it desires, as well as ' help' for environment-specific command assistance.
76 | the assistant may receive notifications from other interfaces that it communicates through autonomously, and should use this information as desired when it comes up.
77 | the assistant should use the discord commands often, as they are a great way to interact with many other entities.
78 | the assistant should only output a single command and its arguments following the above format. multi-line parameters are allowed, for detailed notes or messages.
79 |
80 | For this model (gpt-5):
81 | - keep outputs concise; do not include explanations or extra text
82 | - output only the single command and its arguments
83 | user: |
84 | ${command_result}
85 |
--------------------------------------------------------------------------------
/tools/interaction/talk.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import requests
4 | import colorama
5 | from colorama import Fore, Style
6 | import os
7 | import json
8 | from datetime import datetime
9 |
10 | # Attempt to import readline for better input experience on Unix-like systems
11 | try:
12 | import readline
13 | except ImportError:
14 | pass # Readline not available (e.g., on Windows without pyreadline)
15 |
colorama.init(autoreset=True)  # Reset terminal colors automatically after every print.

CHAT_HISTORY_DIR = "chats"  # Directory (relative to cwd) where transcripts are saved.
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"  # Timestamp format attached to each message.
20 |
def ensure_chat_dir():
    """Ensure the chat history directory exists.

    Returns:
        True when the directory exists (or was just created),
        False when creation failed (the error is printed).
    """
    try:
        # exist_ok avoids the check-then-create race the previous
        # os.path.exists() guard was vulnerable to.
        os.makedirs(CHAT_HISTORY_DIR, exist_ok=True)
    except OSError as e:
        print(Fore.RED + f"Error creating chat history directory '{CHAT_HISTORY_DIR}': {e}")
        return False
    return True
30 |
def save_conversation(conversation, server_url):
    """Saves the conversation to a timestamped JSON file.

    Args:
        conversation: List of message dicts (role/content/timestamp) to persist.
        server_url: Server URL; a sanitized form is embedded in the filename.
    """
    if not ensure_chat_dir() or not conversation:
        return

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Sanitize server_url to create a valid filename component
    server_name = server_url.split("//")[-1].replace(":", "_").replace("/", "_")
    filename = os.path.join(CHAT_HISTORY_DIR, f"chat_{server_name}_{timestamp}.json")

    try:
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(conversation, f, indent=4)
        # Bug fix: previously printed the literal placeholder "(unknown)"
        # instead of the actual path the file was written to.
        print(Fore.YELLOW + f"Conversation saved to {filename}")
    except IOError as e:
        print(Fore.RED + f"Error saving conversation: {e}")
    except Exception as e:
        print(Fore.RED + f"An unexpected error occurred while saving: {e}")
49 |
50 |
def get_user_input():
    """Read one user message; '/multiline' switches to multi-line entry.

    In multi-line mode an empty line sends the collected text and
    '/cancel' aborts, returning an empty string.
    """
    first_line = input(Fore.CYAN + "You: " + Style.RESET_ALL)
    if first_line.strip().lower() != "/multiline":
        return first_line

    print(Fore.YELLOW + "Multiline mode enabled. Type an empty line to send, or /cancel to abort.")
    collected = []
    while True:
        next_line = input(Fore.CYAN + "... " + Style.RESET_ALL)
        if not next_line:
            return "\n".join(collected)
        if next_line.strip().lower() == "/cancel":
            print(Fore.YELLOW + "Multiline input cancelled.")
            return ""
        collected.append(next_line)
70 |
def main():
    """Run the interactive chat loop against the local Hephia server.

    Maintains the full conversation client-side, posts it to the server on
    every turn, and saves the transcript on exit.
    """
    server_url = "http://localhost:5517/v1/chat/completions"
    conversation = []

    print(Fore.YELLOW + "Enhanced Chat Client" + Style.RESET_ALL)
    print("Type your message. Use '/multiline' for multi-line input (empty line to send).")
    print("Type '/quit' or '/exit' to save and exit.\n")

    while True:
        user_input = get_user_input()

        # Skip empty input (including a cancelled /multiline entry).
        # Fix: the previous extra '"/multiline" not in ...' check was
        # always True here — a whitespace-only string cannot contain it.
        if not user_input.strip():
            continue

        if user_input.strip().lower() in ["/quit", "/exit"]:
            print(Fore.YELLOW + "Exiting chat...")
            break

        current_time = datetime.now().strftime(DATETIME_FORMAT)
        conversation.append({"role": "user", "content": user_input, "timestamp": current_time})

        try:
            payload = {"messages": conversation, "stream": False}  # Server handles full history
            response = requests.post(server_url, json=payload, timeout=120)
            response.raise_for_status()

            data = response.json()
            assistant_text = data["choices"][0]["message"]["content"]
            assistant_time = datetime.now().strftime(DATETIME_FORMAT)

            conversation.append({"role": "assistant", "content": assistant_text, "timestamp": assistant_time})

            print(f"{Fore.MAGENTA}[{assistant_time}] " + Fore.GREEN + "Assistant: " + Style.RESET_ALL + assistant_text)

        except requests.RequestException as e:
            print(Fore.RED + f"Error communicating with server: {e}")
        except (KeyError, IndexError) as e:
            # `response` is guaranteed bound here: these exceptions can only
            # arise after requests.post has returned successfully.
            print(Fore.RED + f"Unexpected response format from server: {e}\nResponse text: {response.text}")
        except Exception as e:
            print(Fore.RED + f"An unexpected error occurred: {e}")

    if conversation:
        save_conversation(conversation, server_url)
    print(Fore.YELLOW + "Chat client closed.")
116 |
# Run the chat client only when executed as a script, not on import.
if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/brain/environments/environment_registry.py:
--------------------------------------------------------------------------------
1 | """
2 | Environment registry for Hephia's tool access.
3 |
4 | Manages available tool environments and their access patterns,
5 | allowing the cognitive system to interact with both internal
6 | and external tools.
7 | """
8 |
9 | from typing import Dict, List, Optional
10 | from .base_environment import BaseEnvironment, CommandResult
11 | from .notes import NotesEnvironment
12 | from .search import SearchEnvironment
13 | from .web import WebEnvironment
14 | from .meditation import MeditateEnvironment
15 | from .reflection import ReflectEnvironment
16 | from .action import ActionEnvironment
17 | from .discord import DiscordEnvironment
18 | from api_clients import APIManager
19 |
20 | from config import Config
21 |
class EnvironmentRegistry:
    """
    Manages and provides access to all available environments.

    Environments are stored under lowercase names so that lookups via
    get_environment() are effectively case-insensitive.
    """

    def __init__(self, api_manager: APIManager, cognitive_bridge, action_manager, discord_service):
        """Initialize the environment registry.

        Args:
            api_manager: Shared API client manager (used by the search environment).
            cognitive_bridge: Bridge used by the meditate/reflect environments.
            action_manager: Manager backing the action environment.
            discord_service: Service backing the optional discord environment.
        """
        self.environments: Dict[str, BaseEnvironment] = {}
        self.api_manager = api_manager
        self.cognitive_bridge = cognitive_bridge
        self.action_manager = action_manager
        self.discord_service = discord_service
        self.setup_environments()

    def setup_environments(self):
        """Set up all available environments."""
        self.register_environment("notes", NotesEnvironment())
        self.register_environment("search", SearchEnvironment(self.api_manager))
        self.register_environment("web", WebEnvironment())
        self.register_environment("meditate", MeditateEnvironment(self.cognitive_bridge))
        self.register_environment("reflect", ReflectEnvironment(self.cognitive_bridge))
        self.register_environment("action", ActionEnvironment(self.action_manager))
        # Discord is optional; only register it when enabled in config.
        if Config.get_discord_enabled():
            self.register_environment("discord", DiscordEnvironment(self.discord_service))

    def register_environment(self, name: str, environment: BaseEnvironment):
        """Register a new environment.

        The key is lowercased so it always matches the lowercase lookup
        performed by get_environment() (all built-in names already are).
        """
        self.environments[name.lower()] = environment

    def get_environment(self, name: str) -> Optional[BaseEnvironment]:
        """Get environment by name (case-insensitive)."""
        return self.environments.get(name.lower())

    def get_available_commands(self) -> Dict[str, List[Dict[str, str]]]:
        """Get complete command information for all environments."""
        return {
            name: env.get_environment_info()
            for name, env in self.environments.items()
        }

    def format_global_help(self) -> CommandResult:
        """Format comprehensive help for all environments.

        Returns:
            CommandResult whose message is the assembled help text and whose
            suggested_commands point at each environment's own help command.
        """
        help_sections = []

        # Build help text from environment info
        for env_name, env in self.environments.items():
            env_info = env.get_environment_info()

            # Add environment header
            help_sections.append(f"\n{env_name.upper()} COMMANDS:")
            help_sections.append(f"  {env_info.description}\n")

            # Group commands by category ("General" when none is set)
            categorized = {}
            for cmd in env_info.commands.values():
                category = cmd.category or "General"
                if category not in categorized:
                    categorized[category] = []
                categorized[category].append(cmd)

            # Format each category
            for category, commands in categorized.items():
                help_sections.append(f"  {category}:")
                for cmd in commands:
                    # Format command signature: <required> vs [optional] params
                    params = " ".join(
                        f"<{p.name}>" if p.required else f"[{p.name}]"
                        for p in cmd.parameters
                    )
                    flags = " ".join(f"[--{f.name}]" for f in cmd.flags)
                    signature = f"    {env_name} {cmd.name} {params} {flags}".strip()

                    # Add command details
                    help_sections.append(signature)
                    help_sections.append(f"      {cmd.description}")
                    if cmd.examples:
                        help_sections.append("      Examples:")
                        help_sections.extend(
                            f"        {ex}" for ex in cmd.examples[:2]
                        )
                help_sections.append("")  # Spacing between categories

        return CommandResult(
            success=True,
            message="\n".join(help_sections),
            suggested_commands=[
                f"{env} help" for env in self.environments.keys()
            ]
        )
--------------------------------------------------------------------------------
/brain/interfaces/exo_utils/hud/sections/stickys.py:
--------------------------------------------------------------------------------
1 | # brain/interfaces/exo_utils/hud/sections/stickys.py
2 |
3 | import sqlite3
4 | import os
5 | from typing import Dict, Any, List, Optional
6 | from config import Config
7 | from loggers import BrainLogger
8 | from .base import BaseHudSection
9 |
class StickyNotesHudSection(BaseHudSection):
    """
    HUD section for displaying sticky notes in the system prompt.
    Queries the notes database directly for maximum simplicity and defensive programming.
    """

    def __init__(self, prompt_key: str = 'interfaces.exo.hud.stickys', section_name: str = "Sticky Notes"):
        """
        Args:
            prompt_key: Key for the YAML template
            section_name: Display name for logging
        """
        super().__init__(prompt_key, section_name)
        # Path is relative to the process working directory.
        self.db_path = 'data/notes.db'

    async def _get_sticky_notes(self) -> Optional[List[Dict[str, Any]]]:
        """
        Directly query the database for sticky notes.

        Returns:
            List of sticky note dicts (keys: id, content, tags, created),
            or None if the database is unavailable or lacks the sticky column.
        """
        try:
            # Check existence before connecting; sqlite3.connect would
            # otherwise create an empty database file.
            if not os.path.exists(self.db_path):
                BrainLogger.debug("HUD: Notes database doesn't exist yet")
                return None

            conn = sqlite3.connect(self.db_path)
            try:
                cursor = conn.cursor()

                # Check if sticky column exists (defensive against old database schema)
                cursor.execute("PRAGMA table_info(notes)")
                columns = {col[1] for col in cursor.fetchall()}

                if 'sticky' not in columns:
                    BrainLogger.debug("HUD: Notes database doesn't have sticky column yet")
                    return None

                # Query for sticky notes, newest first
                cursor.execute("""
                    SELECT llm_id, content, tags, created_at
                    FROM notes
                    WHERE sticky = 1
                    ORDER BY created_at DESC
                """)
                results = cursor.fetchall()
            finally:
                # Fix: previously the connection leaked whenever a query
                # raised between connect() and close(); finally guarantees
                # it is closed on every path.
                conn.close()

            return [
                {
                    "id": row[0] or "unknown",
                    "content": row[1] or "",
                    "tags": row[2].split(',') if row[2] else [],
                    "created": row[3] or ""
                }
                for row in results
            ]

        except sqlite3.Error as e:
            BrainLogger.warning(f"HUD: Database error fetching sticky notes: {e}")
            return None
        except Exception as e:
            BrainLogger.error(f"HUD: Unexpected error fetching sticky notes: {e}", exc_info=True)
            return None

    async def _prepare_prompt_vars(self, hud_metadata: Dict[str, Any]) -> Dict[str, str]:
        """
        Fetch sticky notes and prepare formatted strings for the HUD template.
        Following the Discord pattern: do all logic in Python, return formatted strings.

        Returns:
            Dictionary of pre-formatted strings for template substitution
        """
        sticky_vars = {
            "hud_header_str": f"[{self.section_name}]",
            "sticky_notes_block_str": "",
            "sticky_error_str": "",
            "sticky_is_active_for_hud_str": "false"
        }

        try:
            sticky_notes = await self._get_sticky_notes()

            # Case 1: Database not available/accessible
            if sticky_notes is None:
                sticky_vars["sticky_error_str"] = "Notes system initializing..."
                return sticky_vars

            # Case 2: No sticky notes exist
            if not sticky_notes:
                sticky_vars["sticky_notes_block_str"] = "No sticky notes set. Use for goals, reminders, persistent context.\nTry: notes create \"Remember this important goal\" --sticky=true"
                sticky_vars["sticky_is_active_for_hud_str"] = "true"
                return sticky_vars

            # Case 3: Sticky notes exist - format them
            sticky_vars["sticky_is_active_for_hud_str"] = "true"

            formatted_notes = []
            for note in sticky_notes:
                content = note['content']

                # Format tags if they exist
                tags_str = f" #{','.join(note['tags'])}" if note['tags'] else ""
                formatted_notes.append(f"  📌 {note['id']}: {content}{tags_str}")

            # Add header with count
            count = len(sticky_notes)
            max_sticky = Config.MAX_STICKY_NOTES
            header = f"sticky notes ({count}/{max_sticky}):"

            all_lines = [header] + formatted_notes
            sticky_vars["sticky_notes_block_str"] = "\n".join(all_lines)

            return sticky_vars

        except Exception as e:
            BrainLogger.error(f"HUD: Error preparing sticky notes vars: {e}", exc_info=True)
            sticky_vars["sticky_error_str"] = "Error loading sticky notes"
            return sticky_vars
--------------------------------------------------------------------------------
/brain/interfaces/exo_utils/hud/sections/base.py:
--------------------------------------------------------------------------------
1 | # brain/interfaces/exo_utils/hud/sections/base.py
2 |
3 | from abc import ABC, abstractmethod
4 | import asyncio
5 | from typing import Dict, Any
6 |
7 | from brain.prompting.loader import get_prompt # Assuming direct import is fine
8 | from loggers import BrainLogger # For consistent logging
9 |
class BaseHudSection(ABC):
    """
    Abstract base class for all HUD data sections.
    Each section is responsible for fetching data for its section of the HUD,
    preparing variables for its prompt template, and rendering the template.

    Subclasses implement `_prepare_prompt_vars`; the shared rendering,
    timeout, and error handling lives in `get_rendered_section_string`.
    """

    # Cooldown or update interval for this section (in seconds).
    # 0 means update every time. Can be overridden by subclasses.
    # This is a placeholder for a more sophisticated update mechanism if needed.
    # For now, we'll assume most update every cycle unless they implement their own check.
    DEFAULT_UPDATE_INTERVAL: float = 0.0
    # Timeout for this specific section's data fetching/rendering.
    SECTION_TIMEOUT_SECONDS: float = 1.0

    def __init__(self, prompt_key: str, section_name: str = "DefaultSection"):
        """
        Args:
            prompt_key: The key used to retrieve the prompt template (e.g., 'hud.datetime').
            section_name: A human-readable name for this section's section (for logging/errors).

        Raises:
            ValueError: If prompt_key is empty.
        """
        if not prompt_key:
            raise ValueError("Prompt key cannot be empty for a HUD section.")
        self.prompt_key = prompt_key
        self.section_name = section_name
        self._last_update_time: float = 0.0 # For future use with update intervals

    @abstractmethod
    async def _prepare_prompt_vars(self, hud_metadata: Dict[str, Any]) -> Dict[str, str]:
        """
        Fetches and prepares the necessary data for this section's HUD section.
        This method must be implemented by subclasses.

        Args:
            hud_metadata: Shared metadata from ExoProcessorInterface (e.g., last_channel_path).

        Returns:
            A dictionary of string variables to be substituted into the prompt template.
            If the section should not be rendered (e.g., feature disabled, no relevant data),
            it can return an empty dict or a dict that leads to an empty render.
        """
        pass

    async def get_rendered_section_string(self, hud_metadata: Dict[str, Any], current_model_name: str) -> str:
        """
        Gets the data, prepares variables, and renders the prompt for this HUD section.
        Handles timeouts and errors gracefully.

        Args:
            hud_metadata: Shared metadata from ExoProcessorInterface.
            current_model_name: The name of the current LLM model for prompt selection.

        Returns:
            The rendered string for this HUD section, or a fallback error/status string.
            Returns an empty string if the section should not be rendered.
        """
        # Placeholder for future update interval logic:
        # current_time = time.time()
        # if self.DEFAULT_UPDATE_INTERVAL > 0 and \
        #    (current_time - self._last_update_time) < self.DEFAULT_UPDATE_INTERVAL and \
        #    self._cached_render:
        #     return self._cached_render

        fallback_error_string = f"[HUD: {self.section_name} - Unavailable]"
        rendered_string = ""

        try:
            # Applying timeout to the critical data preparation step only;
            # the synchronous template rendering below is outside the scope.
            # NOTE: asyncio.timeout requires Python 3.11+.
            async with asyncio.timeout(self.SECTION_TIMEOUT_SECONDS):
                prompt_vars = await self._prepare_prompt_vars(hud_metadata)

            # Only proceed to render if prompt_vars suggest rendering is needed.
            # sections can return a specific key like 'is_active_for_hud': False
            # or ensure prompt_vars is empty if nothing should be rendered.
            # For now, we assume if prompt_vars is not None/empty, we try to render.
            if prompt_vars is None: # section explicitly decided not to render
                return ""

            # Render the prompt using the prepared variables.
            # The get_prompt function is stateless and handles its own caching.
            rendered_string = get_prompt(key=f"{self.prompt_key}.combined", model=current_model_name, vars=prompt_vars)

            # self._last_update_time = current_time # For future caching
            # self._cached_render = rendered_string # For future caching

            # Ensure we return empty string if rendered_string is None or only whitespace
            return rendered_string if rendered_string and rendered_string.strip() else ""

        except asyncio.TimeoutError:
            BrainLogger.warning(f"HUD: Timeout in {self.section_name} section after {self.SECTION_TIMEOUT_SECONDS}s.")
            return f"[HUD: {self.section_name} - Timeout]"
        except FileNotFoundError as e: # Specifically for prompt file issues
            BrainLogger.error(f"HUD: Prompt file not found for {self.section_name} ({self.prompt_key}): {e}", exc_info=True)
            return fallback_error_string
        except KeyError as e: # Specifically for missing keys in prompt YAML or vars
            BrainLogger.error(f"HUD: Key error rendering prompt for {self.section_name} ({self.prompt_key}): {e}", exc_info=True)
            return fallback_error_string
        except Exception as e:
            BrainLogger.error(f"HUD: Error in {self.section_name} section: {e}", exc_info=True)
            return fallback_error_string # Generic fallback for other errors
--------------------------------------------------------------------------------
/internal/modules/actions/action_manager.py:
--------------------------------------------------------------------------------
1 | # modules/actions/action_manager.py
2 |
3 | from .feed import FeedAction
4 | from .drink import DrinkAction
5 | from .play import PlayAction
6 | from .rest import RestAction
7 | from internal.modules.needs.needs_manager import NeedsManager
8 | from event_dispatcher import global_event_dispatcher, Event
9 | import time
10 |
class ActionManager:
    """
    Manages and executes actions with stability controls.

    Keeps a per-action execution history and enforces each action's
    cooldown between runs, dispatching lifecycle events as actions
    start, complete, or fail.
    """

    def __init__(self, needs_manager: NeedsManager):
        """
        Initializes the ActionManager.

        Args:
            needs_manager (NeedsManager): Reference to the NeedsManager.
        """
        self.needs_manager = needs_manager
        self.available_actions = {
            'feed': FeedAction(self.needs_manager),
            'give_water': DrinkAction(self.needs_manager),
            'play': PlayAction(self.needs_manager),
            'rest': RestAction(self.needs_manager)
        }
        self.action_history = {}
        self.initialize_history()

    def initialize_history(self):
        """Create a zeroed tracking record for every registered action."""
        zero_record = {
            'last_execution': 0,
            'total_executions': 0,
            'successful_executions': 0,
            'failed_executions': 0
        }
        for action_name in self.available_actions:
            self.action_history[action_name] = dict(zero_record)

    def perform_action(self, action_name):
        """
        Executes the specified action with validation and tracking.

        Args:
            action_name (str): The name of the action to perform.

        Returns:
            dict: Result of the action execution including success status and details.

        Raises:
            ValueError: If the action_name is not recognized.
        """
        action = self.available_actions.get(action_name)
        if not action:
            raise ValueError(f"Action '{action_name}' is not available.")

        # Refuse to run while the cooldown window is still open.
        if not self._check_cooldown(action_name):
            return {
                'success': False,
                'error': 'Action is on cooldown',
                'remaining_cooldown': self._get_remaining_cooldown(action_name)
            }

        # Give the action a chance to veto itself (prerequisites).
        is_valid, reason = action.validate()
        if not is_valid:
            self._update_history(action_name, False)
            return {'success': False, 'error': reason}

        try:
            print(f"ActionManager: Executing '{action_name}' action.")
            global_event_dispatcher.dispatch_event_sync(
                Event("action:started", {"action_name": action_name})
            )

            outcome = action.perform()

            # Record success and notify listeners.
            self._update_history(action_name, True)
            global_event_dispatcher.dispatch_event_sync(Event("action:completed", {
                "action_name": action_name,
                "result": outcome,
                "success": True
            }))

            return {'success': True, 'result': outcome}

        except Exception as exc:
            # Record the failure and surface the error to listeners.
            self._update_history(action_name, False)
            error_msg = str(exc)
            global_event_dispatcher.dispatch_event_sync(Event("action:error", {
                "action_name": action_name,
                "error": error_msg
            }))
            return {'success': False, 'error': error_msg}

    def _check_cooldown(self, action_name):
        """Return True when the action's cooldown window has elapsed."""
        record = self.action_history[action_name]
        cooldown = self.available_actions[action_name].get_cooldown()
        return time.time() >= record['last_execution'] + cooldown

    def _get_remaining_cooldown(self, action_name):
        """Return seconds of cooldown left for the action (0 when ready)."""
        record = self.action_history[action_name]
        cooldown = self.available_actions[action_name].get_cooldown()
        return max(0, record['last_execution'] + cooldown - time.time())

    def _update_history(self, action_name, success):
        """Record one execution attempt and whether it succeeded."""
        record = self.action_history[action_name]
        record['last_execution'] = time.time()
        record['total_executions'] += 1
        outcome_key = 'successful_executions' if success else 'failed_executions'
        record[outcome_key] += 1

    def get_action_status(self, action_name=None):
        """
        Get status information about actions.

        Args:
            action_name (str, optional): Specific action to get status for.

        Returns:
            dict: Status information including cooldowns and history.

        Raises:
            ValueError: If a specific action_name is given but unknown.
        """
        def status_for(name):
            # One action's history plus live cooldown state.
            return {
                'history': self.action_history[name],
                'on_cooldown': not self._check_cooldown(name),
                'remaining_cooldown': self._get_remaining_cooldown(name)
            }

        if action_name:
            if action_name not in self.available_actions:
                raise ValueError(f"Action '{action_name}' not found.")
            return status_for(action_name)

        return {name: status_for(name) for name in self.available_actions}
--------------------------------------------------------------------------------
/client/config/models.py:
--------------------------------------------------------------------------------
1 | # client/config/models.py
2 | """
3 | Pydantic models for structuring and validating configuration data for the Hephia TUI.
4 | """
5 | from pydantic import BaseModel, Field, HttpUrl
6 | from typing import Optional, Dict, List, Any
7 | from enum import Enum
8 |
9 | from config import ProviderType as MainProviderType, ModelConfig as MainModelConfig
10 |
11 |
class ModelConfig(BaseModel):
    """
    Represents the structure of a model definition within the TUI,
    mirroring the main project's ModelConfig for consistency.
    This is used for validating data to be written to models.json.
    """
    # Provider enum is imported from the main project so both sides agree.
    provider: MainProviderType
    model_id: str = Field(..., description="The specific ID of the model (e.g., 'gpt-4-turbo-preview').")
    env_var: Optional[str] = Field(None, description="Optional environment variable to fetch API key/etc other pieces if provider requires it specifically for this model.")
    max_tokens: int = Field(250, description="Default maximum number of tokens for this model.", gt=0)
    temperature: float = Field(0.7, description="Default temperature for this model (0.0-2.0).", ge=0.0, le=2.0)
    description: str = Field("", description="User-friendly description of the model.")

    class Config:
        # NOTE(review): this is pydantic v1-style config, while
        # PromptFileModel.from_yaml_data uses the v2 `model_fields` API —
        # confirm which pydantic major version the project pins.
        use_enum_values = True # Ensures enum values are used for serialization if needed
27 |
28 |
class EnvConfigModel(BaseModel):
    """
    Defines the expected structure and types for variables in the .env file.
    Descriptions are used for tooltips in the TUI.
    """
    # API Keys - Using str instead of SecretStr for simplicity
    OPENAI_API_KEY: Optional[str] = Field(None, description="API key for OpenAI services.")
    ANTHROPIC_API_KEY: Optional[str] = Field(None, description="API key for Anthropic services.")
    GOOGLE_API_KEY: Optional[str] = Field(None, description="API key for Google Cloud services (e.g., Gemini).")
    DEEPSEEK_API_KEY: Optional[str] = Field(None, description="API key for DeepSeek services.")
    OPENROUTER_API_KEY: Optional[str] = Field(None, description="API key for OpenRouter services.")
    PERPLEXITY_API_KEY: Optional[str] = Field(None, description="API key for Perplexity services.")
    OPENPIPE_API_KEY: Optional[str] = Field(None, description="API key for OpenPipe services.")
    CHAPTER2_API_KEY: Optional[str] = Field(None, description="API key for Chapter 2 services (if applicable).")

    # Discord Bot
    DISCORD_BOT_TOKEN: Optional[str] = Field(None, description="Token for your Discord bot.")
    ENABLE_DISCORD: bool = Field(False, description="Enable or disable the Discord bot integration.")
    REPLY_ON_TAG: bool = Field(True, description="Whether the Discord bot should reply when tagged.")

    # Core Hephia Settings - model *names* (looked up in models.json / AVAILABLE_MODELS)
    COGNITIVE_MODEL: Optional[str] = Field("haiku", description="Default model for cognitive tasks (e.g., 'gpt4', 'haiku').")
    VALIDATION_MODEL: Optional[str] = Field("mistral", description="Model used for command validation.")
    SUMMARY_MODEL: Optional[str] = Field("haiku", description="Model used for generating summaries.")
    FALLBACK_MODEL: Optional[str] = Field("opus", description="Fallback model if primary models fail.")

    # System Behavior
    EXO_MIN_INTERVAL: int = Field(120, description="Minimum interval for Exo's main processing loop in seconds.", gt=0)
    EXO_MAX_TURNS: int = Field(50, description="Maximum number of turns to manage context limits.", gt=0)
    HEADLESS: bool = Field(False, description="Run Hephia without its own TUI/GUI (server mode).")
    #LOG_PROMPTS: bool = Field(False, description="Enable detailed logging of prompts (can create large log files).")
    ADVANCED_C2_LOGGING: bool = Field(False, description="Enable advanced Chapter2 logging if Chapter2 provider is used.")

    # Embedding
    USE_LOCAL_EMBEDDING: bool = Field(True, description="Use local sentence transformers for embeddings instead of API calls.")

    # Chapter2 Specific (if used)
    CHAPTER2_SOCKET_PATH: Optional[str] = Field(None, description="Filesystem path to Chapter 2 Uvicorn socket (Unix-like systems).")
    CHAPTER2_HTTP_PORT: Optional[int] = Field(5519, description="HTTP port for Chapter 2 service if not using socket.", gt=1023, lt=65536)

    # Local Inference Server
    LOCAL_INFERENCE_BASE_URL: Optional[HttpUrl] = Field(None, description="Base URL for the local inference server (e.g., 'http://localhost:5520/v1')")

    class Config:
        validate_assignment = True # Re-validate fields upon assignment
74 |
class PromptFileModel(BaseModel):
    """
    Represents the raw, loaded content of a YAML prompt file.
    The TUI will facilitate editing specific text values within this structure,
    guided by the logic similar to the main project's loader.py.
    """
    # Top-level keys often found in prompt YAMLs.
    # These are optional and their internal structure is intentionally flexible (Any)
    # because loader.py dynamically accesses nested elements.
    id: Optional[str] = Field(None, description="Optional identifier for the prompt group.")
    description: Optional[str] = Field(None, description="Optional description of the prompt file's purpose.")
    defaults: Optional[Dict[str, Any]] = Field(None, description="Default prompt structures.")
    models: Optional[Dict[str, Dict[str, Any]]] = Field(None, description="Model-specific overrides.")

    # Store the raw loaded data to allow access to any key.
    # The TUI will help navigate/edit specific string values within this.
    raw_data: Dict[str, Any] = Field(default_factory=dict, description="The entire raw data loaded from the YAML file.")

    class Config:
        extra = 'allow' # Allow any other top-level keys not explicitly defined.

    @classmethod
    def from_yaml_data(cls, data: Dict[str, Any]) -> 'PromptFileModel':
        """Creates an instance from raw YAML data, populating known fields and storing the rest."""
        # NOTE(review): `model_fields` is pydantic v2 API while the inner
        # `class Config` blocks above are v1-style — confirm the pinned version.
        known_keys = cls.model_fields.keys()
        # Known top-level keys become real fields; raw_data keeps the whole document.
        init_data = {k: data.get(k) for k in known_keys if k != 'raw_data'}
        init_data['raw_data'] = data
        return cls(**init_data)
103 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | """
2 | Main entry point for Hephia.
3 | Initializes and runs the complete system with all necessary checks and monitoring.
4 | """
5 |
6 | from __future__ import annotations
7 | import asyncio
8 | import uvicorn
9 | from dotenv import load_dotenv
10 | import os
11 | import sys
12 | from datetime import datetime
13 | from pathlib import Path
14 |
15 | from core.server import HephiaServer
16 | from config import Config, ProviderType
17 | from loggers import LogManager
18 | from event_dispatcher import global_event_dispatcher
19 | sys.stdout.reconfigure(encoding='utf-8')
20 | sys.stderr.reconfigure(encoding='utf-8')
21 | LogManager.setup_logging()
22 |
23 |
def setup_data_directory() -> None:
    """Ensure the local ``data`` directory exists (no-op when already present)."""
    data_dir = Path('data')
    data_dir.mkdir(exist_ok=True)
27 |
28 |
def validate_configuration() -> bool:
    """Validate LLM configuration and environment variables.

    Checks that each configured model role names a known model and that
    the model's provider has its required environment variable set.

    Returns:
        True when the configuration is usable; False otherwise (the
        problems are printed to stdout).
    """
    # Environment variable each provider needs for credentials/endpoint.
    required_env: dict[ProviderType, str] = {
        ProviderType.OPENPIPE: "OPENPIPE_API_KEY",
        ProviderType.OPENAI: "OPENAI_API_KEY",
        ProviderType.ANTHROPIC: "ANTHROPIC_API_KEY",
        ProviderType.GOOGLE: "GOOGLE_API_KEY",
        ProviderType.OPENROUTER: "OPENROUTER_API_KEY",
        ProviderType.PERPLEXITY: "PERPLEXITY_API_KEY",
        ProviderType.CHAPTER2: "CHAPTER2_SOCKET_PATH",
        ProviderType.LOCAL: "LOCAL_INFERENCE_BASE_URL"
    }

    problems: list[str] = []

    for role in ('cognitive', 'validation', 'fallback', 'summary'):
        model_name = getattr(Config, f'get_{role}_model')()

        try:
            model_config = Config.AVAILABLE_MODELS[model_name]
        except KeyError:
            problems.append(f"Invalid {role} model configuration: {model_name}")
            continue

        env_var = required_env.get(model_config.provider)
        if env_var and os.getenv(env_var):
            continue
        problems.append(f"Missing {env_var} for {role} model ({model_name})")

    if not problems:
        return True

    print("\nConfiguration errors:")
    for problem in problems:
        print(f"  • {problem}")
    return False
64 |
65 |
async def main() -> None:
    """Initialize and run the complete Hephia system.

    Sequence: print banner -> load .env -> register user models -> validate
    configuration -> ensure data dir -> create HephiaServer -> serve its
    FastAPI app via uvicorn until shutdown.
    """
    print(f"""
    ╔══════════════════════════════╗
    ║      Hephia Project v0.3     ║
    ╚══════════════════════════════╝
    Started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
    """)

    # Load environment variables
    load_dotenv()

    # Merge any user-defined model entries into Config before validating them.
    Config.load_user_models()

    # Bail out early (after printing the problems) on a bad configuration.
    if not validate_configuration():
        return

    headless = Config.get_headless()

    # Setup directory structure
    setup_data_directory()

    print("\nInitializing systems...")

    try:
        # Initialize server using the async factory method
        server = await HephiaServer.create()

        # Start interface in a separate thread
        if headless:
            # Headless mode: mirror cognitive updates to stdout instead of a GUI.
            print("GUI disabled by config; using manual printouts of major activity.")
            global_event_dispatcher.add_listener(
                "cognitive:context_update",
                lambda event: print_cognitive_event(event)
            )
        else:
            print("Server running. Start the TUI client separately to monitor activity.")

        print("""
        Hephia is now active!
        Connect TUI client to WebSocket endpoint /ws
        Press Ctrl+C to shutdown gracefully
        """)

        # Configure and run FastAPI server via uvicorn
        # NOTE(review): uvicorn's reload feature generally requires the app as an
        # import string rather than an object — confirm reload ever takes effect here.
        config = uvicorn.Config(
            app=server.app,
            host="0.0.0.0",
            port=5517,
            reload=Config.DEBUG if hasattr(Config, 'DEBUG') else False,
            log_level="info"
        )
        uvicorn_server = uvicorn.Server(config)
        await uvicorn_server.serve()

    except KeyboardInterrupt:
        # NOTE(review): 'server' is unbound if Ctrl+C fires before
        # HephiaServer.create() returns, which would raise NameError here.
        # Also, uvicorn installs its own signal handlers during serve(), so
        # this branch may not be reached on a normal Ctrl+C — confirm.
        print("\n\n🌙 Shutting down Hephia...")
        await server.shutdown()
        print("Goodbye!\n")
    except Exception as e:
        # Re-raise after printing so the process exits non-zero with a traceback.
        print(f"\n❌ Fatal error occurred: {e}")
        raise
128 |
async def shutdown_all_tasks():
    """Cancel every task on the running loop except the caller, then wait for them.

    Cancellation results (including CancelledError) are swallowed via
    return_exceptions so shutdown never raises.
    """
    current = asyncio.current_task()
    pending = [task for task in asyncio.all_tasks() if task is not current]
    if not pending:
        return
    for task in pending:
        task.cancel()
    await asyncio.gather(*pending, return_exceptions=True)
136 |
def print_cognitive_event(event):
    """Print a cognitive context update to the console (headless-mode GUI stand-in).

    Only events sourced from 'exo_processor' are rendered; everything else is
    ignored. Any formatting failure is reported rather than raised.
    """
    try:
        data = event.data
        if data.get('source') != 'exo_processor':
            return

        divider = "=" * 80
        print("\n" + divider)
        print("COGNITIVE UPDATE:")
        print(divider)

        # Show only the last two exchange messages.
        for msg in data.get('raw_state', [])[-2:]:
            role = msg.get('role', '')
            content = msg.get('content', '')
            if role == 'user':
                display_name = "EXO-PROCESSOR"
            else:
                display_name = Config.get_cognitive_model()
            print(f"\n{display_name}:")
            print("-" * len(display_name))
            print(content)

        # Trailing summary block.
        print("\nSUMMARY:")
        print("-" * 7)
        print(data.get('processed_state', 'No summary available'))
        print(divider + "\n")
    except Exception as e:
        print(f"Error printing cognitive event: {e}")
164 |
if __name__ == "__main__":
    # asyncio.run() already cancels any tasks still pending when main() returns
    # and closes the loop itself, so no manual cleanup pass is needed here.
    # (The previous `finally` block called asyncio.get_running_loop() after
    # asyncio.run() had closed the loop, which unconditionally raised
    # RuntimeError — and run_until_complete() on a running loop is invalid.)
    asyncio.run(main())
173 |
--------------------------------------------------------------------------------
/internal/modules/memory/operations/synthesis/manager.py:
--------------------------------------------------------------------------------
1 | """
2 | manager.py
3 |
4 | Implements a concrete SynthesisManager (ISynthesisHandler) that handles conflict-based merges
and the creation of new 'synthesis' nodes for cognitive memory. Integrates with the
CognitiveDBManager and CognitiveMemoryNetwork to persist changes.
7 | """
8 |
9 | import time
10 | from typing import Dict, Any, List
11 |
12 | from .base import ISynthesisHandler
13 |
14 | from ...nodes.cognitive_node import CognitiveMemoryNode
15 | from ...networks.cognitive_network import CognitiveMemoryNetwork
16 | from ...db.managers import CognitiveDBManager, SynthesisRelationManager
17 | from ...db.schema import SYNTHESIS_TYPES
18 |
19 | from loggers.loggers import MemoryLogger
20 |
21 |
class SynthesisManager(ISynthesisHandler):
    """
    Handles all synthesis operations including conflict resolution,
    resurrection, and merges.

    New synthesis nodes are persisted through the CognitiveDBManager and the
    constituent relationships are recorded through the SynthesisRelationManager.
    """

    def __init__(
        self,
        cognitive_network: CognitiveMemoryNetwork,
        db_manager: CognitiveDBManager,
        relation_manager: SynthesisRelationManager,
        metrics_orchestrator: Any  # TODO: type properly once the metrics interface settles
    ):
        """Store collaborators used by the synthesis operations below."""
        self.network = cognitive_network
        self.db = db_manager
        self.relations = relation_manager
        self.metrics = metrics_orchestrator
        self.logger = MemoryLogger()

    async def handle_conflict_synthesis(
        self,
        conflict_data: Dict[str, Any],
        child: CognitiveMemoryNode,
        parent: CognitiveMemoryNode,
        synthesis_content: str,  # From LLM resolution
        synthesis_embedding: List[float],  # Pre-calculated
        additional_strength: float = 0.0
    ) -> str:
        """
        Creates a new synthesis node from conflicting nodes.
        Uses LLM-provided synthesis content and pre-calculated embedding.

        Args:
            conflict_data: Conflict description, echoed into relation metadata.
            child: The newer/conflicting node.
            parent: The node the child conflicts with.
            synthesis_content: Resolved text content produced by the LLM.
            synthesis_embedding: Embedding matching synthesis_content.
            additional_strength: Extra strength added to the base 0.3.

        Returns:
            The new synthesis node's ID.
        """
        # Create new node with blended state.
        new_node = await self._create_synthesis_node(
            child,
            parent,
            conflict_data,
            synthesis_content,
            synthesis_embedding,
            additional_strength
        )

        # Record an identical conflict relation for each constituent
        # (previously duplicated inline for child and parent).
        for constituent in (child, parent):
            await self.relations.add_relation(
                synthesis_node_id=int(new_node.node_id),
                constituent_node_id=int(constituent.node_id),
                relationship_type=SYNTHESIS_TYPES.get('CONFLICT', 'conflict'),
                metadata={
                    "timestamp": time.time(),
                    "conflict_data": conflict_data,
                    "synthesis_type": "conflict_resolution"
                }
            )

        return new_node.node_id

    async def handle_resurrection(
        self,
        node: CognitiveMemoryNode,
        parent_id: int
    ) -> None:
        """Handle node resurrection by recording a resurrection relation to its parent."""
        await self.relations.add_relation(
            synthesis_node_id=int(node.node_id),
            constituent_node_id=parent_id,
            relationship_type=SYNTHESIS_TYPES.get('RESURRECTION', 'resurrection'),
            metadata={"timestamp": time.time()}
        )

    async def _create_synthesis_node(
        self,
        nodeA: CognitiveMemoryNode,
        nodeB: CognitiveMemoryNode,
        conflict_data: Dict[str, Any],
        synthesis_content: str,  # From LLM
        synthesis_embedding: List[float],  # Pre-calculated embedding
        additional_strength: float
    ) -> CognitiveMemoryNode:
        """
        Create and persist a new node synthesized from two others.

        Uses the provided synthesis content/embedding and blends both nodes'
        raw and processed states. Strength starts at 0.3 plus
        additional_strength, capped at 1.0.
        """
        new_node = CognitiveMemoryNode(
            timestamp=time.time(),
            text_content=synthesis_content,
            embedding=synthesis_embedding,
            raw_state=self._blend_states(nodeA.raw_state, nodeB.raw_state, is_raw=True),
            processed_state=self._blend_states(nodeA.processed_state, nodeB.processed_state, is_raw=False),
            strength=min(1.0, 0.3 + additional_strength),
            formation_source="conflict_synthesis"
        )

        # Persist, then stamp the DB-assigned ID back onto the node (as str,
        # matching CognitiveMemoryNode's node_id convention elsewhere).
        new_id = await self.db.create_node(new_node)
        new_node.node_id = str(new_id)
        return new_node

    async def handle_synthesis_complete(
        self,
        synthesis_node_id: str,
        constituents: list
    ) -> None:
        """
        Optional post-synthesis hook.

        This implementation only logs a confirmation; no further action is taken.
        """
        self.logger.info(f"Synthesis complete for node {synthesis_node_id} with constituents {constituents}.")

    # -------------------------------------------------------------------------
    # Internal Utility Methods
    # -------------------------------------------------------------------------
    def _blend_states(self, sA: Dict[str, Any], sB: Dict[str, Any], is_raw: bool) -> Dict[str, Any]:
        """Blend two state dicts via the shared node_utils helper (equal weights)."""
        # Imported locally to avoid a circular import at module load time.
        from ...nodes.node_utils import blend_states
        return blend_states(sA, sB, weights=None, is_raw=is_raw)

    def _combine_embeddings(self, embA: List[float], embB: List[float]) -> List[float]:
        """
        Compute the element-wise average of two embeddings.

        Assumes both embeddings have the same dimension; if either is empty,
        the other is returned unchanged.
        """
        if not embA:
            return embB
        if not embB:
            return embA
        return [(a + b) / 2.0 for a, b in zip(embA, embB)]
163 |
--------------------------------------------------------------------------------
/internal/modules/memory/metrics/strength.py:
--------------------------------------------------------------------------------
1 | """
metrics/strength.py
3 |
4 | Implements strength-based analysis for memory networks.
5 | Handles node strength, network position, and ghost relationships.
6 |
7 | Key capabilities:
8 | - Basic strength metrics
9 | - Network position analysis
10 | - Ghost node relationships
11 | - Connection strength patterns
12 | """
13 |
14 | from typing import Dict, List, Optional
15 | from loggers.loggers import MemoryLogger
16 |
class StrengthMetricsError(Exception):
    """Raised when a strength-metrics calculation cannot be completed."""
20 |
class StrengthMetricsCalculator:
    """
    Derives strength-based metrics for memory nodes: raw strength, relative
    position within the local network, and ghost-state information.
    """

    def __init__(self):
        """Initialize calculator with required dependencies."""
        # NOTE(review): the logger class itself (not an instance) is stored,
        # mirroring the original; log_error is invoked on the class — confirm
        # MemoryLogger exposes it as a class/static method.
        self.logger = MemoryLogger

    def calculate_metrics(
        self,
        node_strength: float,
        connections: Dict[str, float],
        is_ghosted: bool = False,
        ghost_nodes: Optional[List[Dict]] = None,
        connected_strengths: Optional[List[float]] = None
    ) -> Dict[str, float]:
        """
        Calculate comprehensive strength metrics for one node.

        Args:
            node_strength: Current strength value of the node.
            connections: Mapping of node IDs to connection strengths.
            is_ghosted: Whether the node is in a ghost state.
            ghost_nodes: Ghost node data dicts (each may carry 'strength').
            connected_strengths: Pre-calculated strengths of connected nodes.

        Returns:
            Dict with 'current_strength' plus, when inputs allow, network
            position metrics and ghost metrics. Falls back to safe defaults
            on any internal failure.
        """
        try:
            metrics: Dict[str, float] = {'current_strength': node_strength}

            # Only attempt network-position analysis when neighbor strengths exist.
            if connected_strengths:
                metrics.update(
                    self._analyze_network_position(
                        node_strength, connected_strengths, connections
                    )
                )

            # Ghost metrics are always included.
            metrics.update(self._calculate_ghost_metrics(is_ghosted, ghost_nodes or []))

            # EXPANSION POINT: Field effect analysis
            # EXPANSION POINT: Connection pattern recognition
            # EXPANSION POINT: Strength distribution analysis

            return metrics
        except Exception as e:
            self.logger.log_error(f"Strength metrics calculation failed: {str(e)}")
            return self._get_fallback_metrics()

    def _analyze_network_position(
        self,
        node_strength: float,
        connected_strengths: List[float],
        connections: Dict[str, float]
    ) -> Dict[str, float]:
        """
        Analyze the node's position in its local network.

        EXPANSION POINT: Enhanced network analysis
        - Cluster strength patterns
        - Connection topology
        - Field effect calculations
        """
        position: Dict[str, float] = {}
        try:
            if not connected_strengths:
                return position

            # Strength relative to the average of connected nodes (0.0 when
            # the neighborhood average is not positive).
            mean_neighbor = sum(connected_strengths) / len(connected_strengths)
            position['relative_strength'] = (
                node_strength / mean_neighbor if mean_neighbor > 0 else 0.0
            )

            weights = list(connections.values())
            if weights:
                avg_weight = sum(weights) / len(weights)
                position['avg_connection_strength'] = avg_weight
                position['max_connection_strength'] = max(weights)
                # Influence potential, normalized for typical connection counts.
                position['network_influence'] = (
                    node_strength * avg_weight * len(connections) / 10
                )

            return position
        except Exception as e:
            self.logger.log_error(f"Network position analysis failed: {str(e)}")
            return {}

    def _calculate_ghost_metrics(
        self,
        is_ghosted: bool,
        ghost_nodes: List[Dict]
    ) -> Dict[str, float]:
        """
        Calculate ghost-related strength metrics.

        EXPANSION POINT: Enhanced ghost analysis
        - Ghost node patterns
        - Resurrection potential
        - Merge history analysis
        """
        try:
            ghost_info: Dict[str, float] = {
                'is_ghosted': float(is_ghosted),
                'ghost_nodes_count': len(ghost_nodes),
            }

            # Summarize ghost strengths when any ghost nodes are present.
            strengths = [entry.get('strength', 0.0) for entry in ghost_nodes]
            if strengths:
                ghost_info['avg_ghost_strength'] = sum(strengths) / len(strengths)
                ghost_info['max_ghost_strength'] = max(strengths)

            return ghost_info
        except Exception as e:
            self.logger.log_error(f"Ghost metrics calculation failed: {str(e)}")
            return {
                'is_ghosted': float(is_ghosted),
                'ghost_nodes_count': len(ghost_nodes)
            }

    def _get_fallback_metrics(self) -> Dict[str, float]:
        """Provide safe fallback metrics if calculations fail."""
        return {
            'current_strength': 0.0,
            'relative_strength': 0.0,
            'is_ghosted': 0.0,
            'ghost_nodes_count': 0,
        }
--------------------------------------------------------------------------------
/brain/interfaces/exo_utils/hud/sections/discord.py:
--------------------------------------------------------------------------------
1 | # brain/interfaces/exo_utils/hud/sections/discord.py
2 |
3 | from typing import Dict, Any, List, Optional
4 |
5 | from .base import BaseHudSection
6 | from brain.prompting.loader import get_prompt
7 | from core.discord_service import DiscordService
8 | from config import Config
9 | from loggers import BrainLogger
10 |
class DiscordHudSection(BaseHudSection):
    """
    HUD Section for Discord-related information.
    Displays active channel, recent messages, and user summary.

    All output values are pre-formatted strings keyed for direct substitution
    into the section's string.Template prompt.
    """

    # extra timeout given that it has to actually fetch data from Discord
    SECTION_TIMEOUT_SECONDS: float = 2.0

    def __init__(self, discord_service: DiscordService, prompt_key: str = 'interface.exo.hud.discord', section_name: str = "Discord"):
        # NOTE(review): the default prompt_key is 'interface.*' (singular) while
        # the users-summary lookup below uses 'interfaces.*' (plural, matching
        # the prompts directory) — confirm which spelling BaseHudSection expects.
        super().__init__(prompt_key=prompt_key, section_name=section_name)
        if not discord_service:
            raise ValueError("DiscordService instance is required for DiscordHudSection.")
        self.discord_service = discord_service

    async def _prepare_prompt_vars(self, hud_metadata: Dict[str, Any]) -> Dict[str, str]:
        """
        Fetches and prepares pre-formatted strings for the Discord HUD section,
        suitable for direct substitution via string.Template.
        Keys in the returned dictionary will correspond to placeholders in the '*.combined' prompt.

        Args:
            hud_metadata: Shared HUD metadata; reads 'last_discord_channel_path'.

        Returns:
            Mapping of template placeholder names to display-ready strings.
        """
        # Defaults ensure every template placeholder resolves even on the
        # early-return paths below.
        discord_vars = {
            "hud_header_str": f"[{self.section_name}]",
            "discord_channel_path": "",
            "discord_messages_block_str": "",
            "discord_users_summary_str": "",
            "discord_error_str": "",
            "discord_is_active_for_hud_str": "false"
        }

        if not Config.get_discord_enabled():
            discord_vars["discord_error_str"] = "Discord is currently disabled by System Config"
            return discord_vars

        discord_vars["discord_is_active_for_hud_str"] = "true"

        last_channel_path = hud_metadata.get("last_discord_channel_path")
        if not last_channel_path:
            discord_vars["discord_error_str"] = "No active channel."
            return discord_vars

        discord_vars["discord_channel_path"] = last_channel_path

        # Accumulates short error tags from both fetches for a combined footer.
        error_messages_for_hud_list: List[str] = []

        # --- Fetch and Format Recent Messages ---
        try:
            hud_message_limit = 20
            max_snippet_len = 500

            history_data, hist_status = await self.discord_service.get_enhanced_history(
                last_channel_path, limit=hud_message_limit
            )

            if hist_status == 200 and history_data and history_data.get("messages"):
                message_lines = []
                for msg in history_data.get("messages"):
                    try:
                        ref = msg.get('reference', '')
                        timestamp = msg.get('timestamp', '')[:16]  # YYYY-MM-DD HH:MM
                        author = msg.get('author', 'Unknown')
                        # Flatten newlines so each message stays on one HUD line.
                        content = msg.get('content', '').replace('\n', ' ')
                        if len(content) > max_snippet_len:  # Truncate very long messages
                            content = content[:max_snippet_len] + "..."

                        # Include reference ID for easy referencing in commands
                        message_lines.append(f"[{timestamp}] {ref} {author}: {content}")
                    except Exception as e:
                        # Skip a malformed message rather than failing the section.
                        BrainLogger.error(f"HUD: Error processing message: {e}", exc_info=True)

                discord_vars["discord_messages_block_str"] = "\n".join(message_lines)
            elif hist_status is not None and hist_status >= 400:
                err_msg_part = f"History (Status {hist_status})"
                if isinstance(history_data, dict) and history_data.get("error"): err_msg_part += f": {history_data['error']}"
                error_messages_for_hud_list.append(err_msg_part)
                discord_vars["discord_messages_block_str"] = f"Unavailable ({err_msg_part.split(':')[0].strip()})"
            else:  # Success but empty message list or other non-error case
                discord_vars["discord_messages_block_str"] = "None found."
        except Exception as e:
            BrainLogger.error(f"HUD ({self.section_name}): Exc. in history for {last_channel_path}: {e}", exc_info=True)
            error_messages_for_hud_list.append("History (Processing Error)")
            discord_vars["discord_messages_block_str"] = "Error retrieving."

        # --- Fetch and Format Active Users ---
        try:
            users_data, users_status = await self.discord_service.get_user_list(last_channel_path)
            if users_status == 200 and users_data:
                recent_users = [f"{user['display_name']}" for user in users_data.get('recently_active', [])]
                other_users = [f"{user['display_name']}" for user in users_data.get('other_members', [])]

                num_recent = len(recent_users)
                num_other = len(other_users)
                recent_list_str = ", ".join(recent_users) if recent_users else "None"
                other_list_str = ", ".join(other_users) if other_users else "None"

                # reuse for now; make specific to HUD later
                discord_vars["discord_users_summary_str"] = get_prompt("interfaces.exo.hud.discord.users",
                    model=Config.get_cognitive_model(),
                    vars={
                        "num_recent": num_recent,
                        "num_other": num_other,
                        "recent_list": recent_list_str,
                        "other_list": other_list_str
                    }
                )
            elif users_status is not None and users_status >= 400:
                err_msg_part = f"Users (Status {users_status})"
                if isinstance(users_data, dict) and users_data.get("error"): err_msg_part += f": {users_data['error']}"
                error_messages_for_hud_list.append(err_msg_part)
                discord_vars["discord_users_summary_str"] = f" Active Users: Unavailable ({err_msg_part.split(':')[0].strip()})"
            else:  # Success but empty
                discord_vars["discord_users_summary_str"] = " Active Users: None present"

        except Exception as e:
            BrainLogger.error(f"HUD ({self.section_name}): Exc. in users for {last_channel_path}: {e}", exc_info=True)
            error_messages_for_hud_list.append("Users (Processing Error)")
            discord_vars["discord_users_summary_str"] = " Active Users: Error retrieving."

        # Combine any collected error tags into a single footer string.
        if error_messages_for_hud_list:
            discord_vars["discord_error_str"] = f" Info: {'; '.join(error_messages_for_hud_list)}"

        return discord_vars
--------------------------------------------------------------------------------
/internal/modules/memory/async_lru_cache.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import functools
3 | import time
4 | from collections import OrderedDict
5 | from typing import Any, Callable, Coroutine, Tuple, Optional, Dict
6 | from dataclasses import dataclass
7 |
8 |
@dataclass
class CacheEntry:
    """Cache entry with TTL support: a future plus its creation time."""
    future: asyncio.Future       # resolves to the cached value
    created_at: float            # time.time() at insertion
    ttl: Optional[float] = None  # seconds; None disables expiration

    def is_expired(self) -> bool:
        """Return True when a TTL is set and has elapsed since creation."""
        if self.ttl is None:
            return False
        return time.time() - self.created_at > self.ttl


class AsyncLRUCacheWithTTL:
    """
    Async LRU cache with TTL support (safe for concurrent coroutines).
    Supports both time-based expiration and LRU eviction.

    Entries hold asyncio.Future objects so concurrent callers can share a
    single in-flight computation. All cache mutations are serialized on one
    asyncio.Lock; OrderedDict insertion order doubles as the LRU queue
    (oldest first).
    """

    def __init__(self, maxsize: int = 128, ttl: Optional[float] = None):
        """
        Args:
            maxsize: Maximum number of entries before LRU eviction.
            ttl: Default time-to-live in seconds (None for no expiration).
        """
        self.maxsize = maxsize
        self.ttl = ttl
        self.cache: OrderedDict[Any, CacheEntry] = OrderedDict()
        self.lock = asyncio.Lock()
        self.hits = 0
        self.misses = 0

    async def get(self, key: Any) -> Optional[Any]:
        """Get value from cache if it exists and has not expired.

        Returns None on a miss — note a cached value of None is therefore
        indistinguishable from a miss for callers.
        """
        async with self.lock:
            if key not in self.cache:
                self.misses += 1
                return None

            entry = self.cache[key]

            # Expired entries are dropped lazily on access.
            if entry.is_expired():
                del self.cache[key]
                self.misses += 1
                return None

            # Move to end (mark as most recently used).
            self.cache.move_to_end(key)
            self.hits += 1

            try:
                # NOTE(review): awaiting while holding the lock serializes all
                # cache access until this future resolves; acceptable for fast
                # producers but a long-running future blocks every caller.
                return await entry.future
            except Exception:
                # Remove failed entries so the computation can be retried.
                if key in self.cache:
                    del self.cache[key]
                raise

    async def put(self, key: Any, future: asyncio.Future, ttl: Optional[float] = None) -> None:
        """Insert or refresh a cache entry, with an optional per-entry TTL override."""
        async with self.lock:
            # Use instance TTL if no override provided.
            effective_ttl = ttl if ttl is not None else self.ttl

            self.cache[key] = CacheEntry(
                future=future,
                created_at=time.time(),
                ttl=effective_ttl
            )
            # Bug fix: assigning to an existing key leaves it at its old
            # position in the OrderedDict, so a refreshed entry could be
            # evicted as if it were least recently used. Move it to the
            # MRU end explicitly.
            self.cache.move_to_end(key)

            # Evict LRU entries until back within capacity ('while' rather
            # than 'if' stays correct even if maxsize is shrunk at runtime).
            while len(self.cache) > self.maxsize:
                self.cache.popitem(last=False)

    async def evict_expired(self) -> int:
        """Manually evict expired entries. Returns number evicted."""
        evicted = 0
        async with self.lock:
            # Collect first: deleting while iterating a dict is invalid.
            expired_keys = [
                key for key, entry in self.cache.items()
                if entry.is_expired()
            ]
            for key in expired_keys:
                del self.cache[key]
                evicted += 1
        return evicted

    async def clear(self) -> None:
        """Clear all cache entries and reset the hit/miss counters."""
        async with self.lock:
            self.cache.clear()
            self.hits = 0
            self.misses = 0

    def get_stats(self) -> Dict[str, Any]:
        """Get cache statistics (hits, misses, hit rate, and size bounds)."""
        total_requests = self.hits + self.misses
        hit_rate = self.hits / total_requests if total_requests > 0 else 0.0

        return {
            'hits': self.hits,
            'misses': self.misses,
            'hit_rate': hit_rate,
            'size': len(self.cache),
            'max_size': self.maxsize
        }
114 |
115 |
def async_lru_cache(maxsize: int = 128, ttl: Optional[float] = None, key_func: Optional[Callable] = None):
    """
    Enhanced async LRU cache decorator with TTL support.

    Args:
        maxsize: Maximum number of entries to cache
        ttl: Time-to-live in seconds (None for no expiration)
        key_func: Optional custom function to generate cache keys (may be
            sync or async)

    Note:
        A cached result of None is indistinguishable from a cache miss in the
        wrapper below (`if result is not None`), so coroutines that return
        None are re-executed on every call.
    """
    def decorator(func: Callable) -> Callable:
        # One independent cache instance per decorated function.
        cache = AsyncLRUCacheWithTTL(maxsize=maxsize, ttl=ttl)

        @functools.wraps(func)
        async def wrapper(*args, **kwargs) -> Any:
            # Create hashable key from args and kwargs
            try:
                if key_func:
                    # Use custom key function if provided
                    if asyncio.iscoroutinefunction(key_func):
                        key = await key_func(*args, **kwargs)
                    else:
                        key = key_func(*args, **kwargs)
                else:
                    # Use automatic key generation
                    key = (args, frozenset(kwargs.items()))
            except TypeError:
                # Handle unhashable arguments by converting to strings
                try:
                    if key_func:
                        # Custom key function failed, fall back to string conversion
                        # NOTE(review): this fallback keys only on args (not kwargs),
                        # so calls differing only in kwargs would share a slot — confirm.
                        key = f"custom_failed:{hash(str(args))}"
                    else:
                        str_args = tuple(str(arg) for arg in args)
                        str_kwargs = frozenset((k, str(v)) for k, v in kwargs.items())
                        key = (str_args, str_kwargs)
                except Exception:
                    # If all else fails, don't cache this call
                    return await func(*args, **kwargs)

            # Try to get from cache
            try:
                result = await cache.get(key)
                if result is not None:
                    return result
            except Exception:
                # Cache error (including a previously cached failed future) -
                # proceed and recompute without caching the lookup error.
                pass

            # Cache miss - execute function. The future is inserted before it
            # is awaited so concurrent callers can share the in-flight result.
            try:
                future = asyncio.ensure_future(func(*args, **kwargs))
                await cache.put(key, future)
                return await future
            except Exception as e:
                # Function execution failed
                # Try to remove failed entry from cache
                try:
                    async with cache.lock:
                        if key in cache.cache:
                            del cache.cache[key]
                except Exception:
                    pass
                raise e

        # Expose cache for monitoring/debugging
        wrapper.cache = cache
        wrapper.cache_info = cache.get_stats
        wrapper.cache_clear = cache.clear

        return wrapper

    return decorator
188 |
189 |
190 | # Backwards compatibility - keep original function name
def async_lru_cache_legacy(maxsize: int = 128):
    """Backwards-compatible wrapper: the original TTL-less async LRU cache."""
    # Delegates to the TTL-aware implementation with expiration disabled.
    return async_lru_cache(ttl=None, maxsize=maxsize)
--------------------------------------------------------------------------------
/event_dispatcher.py:
--------------------------------------------------------------------------------
1 | # event_dispatcher.py
2 |
3 | from collections import defaultdict
4 | import re
5 | import traceback
6 | from typing import Any, Callable, Dict, List, Optional
7 | from loggers import EventLogger
8 | import asyncio
9 | import inspect
10 |
class Event:
    """
    Represents an event with type, data, and metadata.
    """

    def __init__(self, event_type: str, data: Any = None, metadata: Optional[Dict[str, Any]] = None):
        """
        Build an Event instance.

        Args:
            event_type (str): Colon-namespaced event type
                (e.g. 'cognitive:context_update').
            data (Any, optional): Payload carried with the event.
            metadata (Dict[str, Any], optional): Extra context; defaults to
                an empty dict when omitted.
        """
        self.metadata = metadata or {}
        self.data = data
        self.event_type = event_type
28 |
29 | class EventDispatcher:
30 | """
31 | Manages event listeners, dispatching, and listener prioritization.
32 | """
33 |
34 | def __init__(self):
35 | """
36 | Initializes the EventDispatcher.
37 | """
38 | self.listeners: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
39 | self.wildcard_listeners: List[Dict[str, Any]] = []
40 | # Event types to filter out (won't be logged)
41 | self.event_filter: List[str] = ['timer', 'need', 'emotion', 'state']
42 | # Optionally, a list of event types to select for logging
43 | self.event_select: Optional[List[str]] = None
44 |
45 | def add_listener(self, event_type: str, callback: Callable, priority: int = 0) -> None:
46 | """
47 | Adds a listener for a specific event type.
48 |
49 | Args:
50 | event_type (str): The event type to listen for. Can include wildcards (*).
51 | callback (Callable): The function to call when the event is dispatched.
52 | priority (int, optional): The priority of the listener. Higher priority listeners are called first.
53 | """
54 | listener = {"callback": callback, "priority": priority}
55 | if '*' in event_type:
56 | self.wildcard_listeners.append({
57 | "pattern": re.compile(event_type.replace('*', '.*')),
58 | **listener
59 | })
60 | else:
61 | self.listeners[event_type].append(listener)
62 | self.listeners[event_type].sort(key=lambda x: x["priority"], reverse=True)
63 |
64 | def remove_listener(self, event_type: str, callback: Callable) -> None:
65 | """
66 | Removes a listener for a specific event type.
67 |
68 | Args:
69 | event_type (str): The event type to remove the listener from.
70 | callback (Callable): The callback function to remove.
71 | """
72 | if '*' in event_type:
73 | self.wildcard_listeners = [listen for listen in self.wildcard_listeners if listen["callback"] != callback]
74 | else:
75 | self.listeners[event_type] = [listen for listen in self.wildcard_listeners if listen["callback"] != callback]
76 |
77 | def _get_listeners(self, event: Event) -> List[Dict[str, Any]]:
78 | """
79 | Returns the list of listeners that match the event, including wildcard matches,
80 | sorted by priority (highest first).
81 | """
82 | listeners_to_call = self.listeners[event.event_type].copy()
83 | listeners_to_call.extend(
84 | [listen for listen in self.wildcard_listeners if listen["pattern"].match(event.event_type)]
85 | )
86 | listeners_to_call.sort(key=lambda x: x["priority"], reverse=True)
87 | return listeners_to_call
88 |
89 | def _log_event(self, event: Event) -> None:
90 | """
91 | Performs event logging based on filter/selection rules.
92 | """
93 | # Log events that are not filtered out.
94 | if self.event_filter:
95 | if not any(event.event_type.startswith(prefix + ':') for prefix in self.event_filter):
96 | if event.event_type not in ["memory:echo_requested", "cognitive:context_update"]:
97 | EventLogger.log_event_dispatch(event.event_type, event.data, event.metadata)
98 | # Log events that are selected.
99 | if self.event_select:
100 | if any(event.event_type.startswith(prefix + ':') for prefix in self.event_select):
101 | EventLogger.log_event_dispatch(event.event_type, event.data, event.metadata)
102 |
    def dispatch_event_sync(self, event: Event) -> None:
        """
        Synchronously dispatches an event to all registered listeners.

        For legacy code: if a callback returns an awaitable (i.e. is async),
        this method will try to run it in a blocking manner if no event loop is running,
        or schedule it (fire-and-forget) if an event loop is detected.

        Args:
            event: The event to deliver; listeners are called in descending
                priority order as returned by _get_listeners.
        """
        self._log_event(event)
        listeners_to_call = self._get_listeners(event)
        for listener in listeners_to_call:
            try:
                result = listener["callback"](event)
                if inspect.isawaitable(result):
                    # An asyncio.Task is already scheduled; only raw awaitables
                    # (coroutines/futures) need to be driven here.
                    if not isinstance(result, asyncio.Task):
                        try:
                            # Probe for a running loop; raises RuntimeError if none.
                            loop = asyncio.get_running_loop()
                            # A loop IS running: we cannot block inside it, so
                            # schedule the awaitable fire-and-forget instead.
                            # NOTE(review): loop.create_task is not thread-safe;
                            # this assumes we're on the loop's own thread — confirm callers.
                            loop.create_task(result)
                        except RuntimeError:
                            # No running loop; safe to run synchronously.
                            asyncio.run(result)
            except Exception as e:
                # Listener errors are reported but never propagate: one bad
                # listener must not stop delivery to the remaining listeners.
                print("Error in event listener:")
                print("Event Data:", event.data)
                print("Event Type:", event.event_type)
                print("Listener Callback:", listener["callback"])
                print("Error Type:", type(e).__name__)
                print("Error Message:", str(e))
                print("Traceback:")
                traceback.print_exc()
135 |
136 | async def dispatch_event_async(self, event: Event) -> None:
137 | """
138 | Asynchronously dispatches an event to all registered listeners.
139 |
140 | This version awaits any asynchronous listener callbacks, ensuring that the
141 | asynchronous flow is preserved.
142 | """
143 | self._log_event(event)
144 | listeners_to_call = self._get_listeners(event)
145 | for listener in listeners_to_call:
146 | try:
147 | result = listener["callback"](event)
148 | if inspect.isawaitable(result) and not isinstance(result, asyncio.Task):
149 | await result
150 | except Exception as e:
151 | print("Error in event listener (async):")
152 | print("Event Data:", event.data)
153 | print("Event Type:", event.event_type)
154 | print("Listener Callback:", listener["callback"])
155 | print("Error Type:", type(e).__name__)
156 | print("Error Message:", str(e))
157 | print("Traceback:")
158 | traceback.print_exc()
159 |
    # Alias: plain `dispatch_event` dispatches synchronously by default.
    dispatch_event = dispatch_event_sync
161 |
# Global event dispatcher instance (module-level shared singleton).
global_event_dispatcher = EventDispatcher()
164 |
--------------------------------------------------------------------------------
/brain/prompting/loader.py:
--------------------------------------------------------------------------------
1 | '''
2 | loader.py
3 |
4 | Utility for loading and rendering prompt templates from YAML files,
5 | with support for defaults, per-model overrides, and micro-fragment codes.
6 |
7 | Usage:
    from brain.prompting.loader import get_prompt, get_code
9 |
10 | # Load a system prompt for Exo summary
11 | sys = get_prompt(
12 | key="interfaces.exo.summary.system",
13 | model=Config.get_summary_model()
14 | )
15 |
16 | # Load a single-template prompt (e.g. exo memory)
17 | mem = get_prompt(
18 | key="interfaces.exo.memory.template",
19 | model=Config.get_cognitive_model(),
20 | vars={
21 | "command_input": cmd_in,
22 | "content": resp,
23 | "result_message": res_msg
24 | }
25 | )
26 |
27 | # Load a micro-fragment (error/success code)
28 | err = get_code(
29 | "COMMAND_VALIDATION_MISSING_FLAG",
30 | vars={"flag": "limit", "command": "search"},
31 | default="Flag '--${flag}' is required for command '${command}'."
32 | )
33 | raise ValueError(err)
34 | '''
35 | import os, yaml, string, pathlib, sys, platform
36 | from functools import lru_cache
37 |
# Extra prompt directories, highest priority first.
EXTRA_PATHS = []
# HEPHIA_PROMPT_PATHS may list several directories joined by the platform's
# path separator (':' on POSIX, ';' on Windows).
if env := os.getenv("HEPHIA_PROMPT_PATHS"):
    EXTRA_PATHS.extend(env.split(os.pathsep))

# XDG / APPDATA fallback
home_cfg = (
    pathlib.Path.home() / ".config" / "hephia" / "prompts"
    if platform.system() != "Windows"
    else pathlib.Path(os.getenv("APPDATA", pathlib.Path.home())) / "hephia" / "prompts"
)
EXTRA_PATHS.append(str(home_cfg))

# Prompts bundled with the package, located next to this module.
PROMPT_ROOT = os.path.normpath(
    os.path.join(os.path.dirname(__file__), "prompts")
)

# Keep only user directories that actually exist; PROMPT_ROOT is always last.
SEARCH_PATHS = [p for p in EXTRA_PATHS if os.path.isdir(p)] + [PROMPT_ROOT]
55 |
def _load_yaml(rel_path: str) -> dict:
    """Load and parse a YAML file by searching multiple prompt directories, with proper inheritance."""
    merged: dict = {}
    located = False
    applied_overrides = 0

    # Base layer: the prompt shipped with the project under PROMPT_ROOT.
    shipped = os.path.join(PROMPT_ROOT, rel_path)
    if os.path.isfile(shipped):
        try:
            with open(shipped, "r", encoding="utf-8") as fh:
                merged = yaml.safe_load(fh) or {}
        except yaml.YAMLError as e:
            raise RuntimeError(f"Error parsing base YAML {shipped}: {e}")
        located = True

    # Override layers: user-configured directories, deep-merged in order.
    for root in (p for p in SEARCH_PATHS if p != PROMPT_ROOT):
        candidate = os.path.join(root, rel_path)
        if not os.path.isfile(candidate):
            continue
        try:
            with open(candidate, "r", encoding="utf-8") as fh:
                layer = yaml.safe_load(fh) or {}
        except yaml.YAMLError as e:
            raise RuntimeError(f"Error parsing override YAML {candidate}: {e}")
        located = True
        applied_overrides += 1
        merged = deep_merge_dict(merged, layer)

    if not located:
        raise FileNotFoundError(f"Prompt file not found in any path: {rel_path}")

    if applied_overrides > 0 and os.getenv("HEPHIA_PROMPT_DEBUG"):
        print(f"DEBUG: Applied {applied_overrides} prompt overrides for {rel_path}")
        print(f"DEBUG: Final keys: {list(merged.get('defaults', {}).keys())}")

    return merged
95 |
96 |
def get_prompt(key: str, *, model: str, vars: dict | None = None) -> str:
    """
    Retrieve and render a prompt by key, merging defaults with any per-model overrides.

    Args:
        key: Dot-delimited path to a prompt leaf, e.g.
             'interfaces.exo.summary.system' or 'interfaces.exo.memory.template'.
        model: Model name (corresponding to config values) for looking up overrides.
        vars: Mapping of placeholder names to values for substitution.

    Returns:
        Rendered prompt text.

    Raises:
        ValueError: If the key has fewer than two components.
        FileNotFoundError: If the YAML file is missing.
        KeyError: If the requested section/template is not defined.
        RuntimeError: On YAML parsing or substitution errors.
    """
    # Split key into a YAML file path plus the leaf entry inside it.
    parts = key.split('.')
    if len(parts) < 2:
        raise ValueError(f"Invalid prompt key: '{key}'")
    yaml_path = os.path.join(*parts[:-1]) + ".yaml"
    leaf = parts[-1]

    data = _load_yaml(yaml_path)
    defaults = data.get('defaults', {})
    models = data.get('models', {})

    # Overlay any model-specific block on the defaults. Deep-merge (the same
    # strategy _load_yaml uses for file-level overrides) so a model block that
    # overrides a single entry of a nested mapping such as 'sections' does not
    # silently discard the sibling entries defined under 'defaults'.
    if model in models:
        merged = deep_merge_dict(defaults, models[model])
    else:
        merged = defaults.copy()

    # Extract the text for the requested leaf.
    if 'sections' in merged:
        sections = merged['sections']
        if leaf not in sections:
            raise KeyError(f"Section '{leaf}' not found in {yaml_path}")
        text = sections[leaf]
    elif 'template' in merged and leaf == 'template':
        text = merged['template']
    elif leaf in merged:
        text = merged[leaf]
    else:
        raise KeyError(f"Template key '{leaf}' not found in {yaml_path}")

    # Substitute placeholders; unknown placeholders are left intact by
    # safe_substitute rather than raising.
    try:
        return string.Template(text).safe_substitute(vars or {})
    except Exception as e:
        raise RuntimeError(f"Error substituting variables in prompt '{key}': {e}")
150 |
151 |
def get_code(code_id: str, *, vars: dict | None = None, default: str | None = None, strict: bool = False) -> str:
    """
    Retrieve a micro-fragment (error or success code) by its ID.

    Args:
        code_id: Identifier from codes/errors.yaml or codes/successes.yaml.
        vars: Placeholder values for substitution.
        default: Fallback template if the code_id isn't found.
        strict: If True, raise KeyError when code_id is missing.
    Returns:
        Rendered micro-fragment.
    Raises:
        KeyError: If strict=True and code_id is not defined.
        RuntimeError: On substitution errors.
    """
    # Load both categories; successes win on ID collisions (dict-merge order).
    err_defs = _load_yaml('codes/errors.yaml').get('defaults', {})
    ok_defs = _load_yaml('codes/successes.yaml').get('defaults', {})
    codes = {**err_defs, **ok_defs}

    if code_id in codes:
        text = codes[code_id]
    elif strict:
        raise KeyError(f"Code ID not found: {code_id}")
    else:
        # Honor an explicitly supplied default even when it is an empty
        # string; only fall back to echoing the code_id when no default was
        # given at all. (The previous `default or code_id` dropped "" defaults.)
        text = default if default is not None else code_id

    try:
        return string.Template(text).safe_substitute(vars or {})
    except Exception as e:
        raise RuntimeError(f"Error in code '{code_id}' substitution: {e}")
182 |
def deep_merge_dict(base: dict, override: dict) -> dict:
    """
    Deep merge two dictionaries, with override values taking precedence.

    Nested dicts present in both inputs are merged recursively; any other
    value in `override` replaces the corresponding `base` entry. Neither
    input is mutated — a new dict is returned.

    Args:
        base: Base dictionary
        override: Override dictionary

    Returns:
        Merged dictionary
    """
    merged = dict(base)
    for key, incoming in override.items():
        current = merged.get(key)
        if isinstance(current, dict) and isinstance(incoming, dict):
            merged[key] = deep_merge_dict(current, incoming)
        else:
            merged[key] = incoming
    return merged
203 |
--------------------------------------------------------------------------------
/internal/modules/memory/db/schema.py:
--------------------------------------------------------------------------------
1 | """
2 | memory/db/schema.py
3 |
4 | Defines the complete database schema for the memory system.
5 | Includes table definitions for both body and cognitive memory systems
6 | along with their relationships and shared constants.
7 | """
8 |
9 | from typing import Dict, List
10 |
11 | # -----------------------------------------------------------------------------
12 | # 1. Schema Constants
13 | # -----------------------------------------------------------------------------
14 |
# Common field sizes
# NOTE(review): SQLite does not enforce column length limits; presumably these
# are applied in application code — confirm where they are used.
MAX_TEXT_LENGTH = 65535  # For larger text fields
DEFAULT_STRING_LENGTH = 255

# Node states
# Lifecycle states a memory node may carry (no DB-level CHECK constraint here).
NODE_STATES = {
    'ACTIVE': 'active',
    'GHOSTED': 'ghosted',
    'PRUNED': 'pruned'
}

# Relationship types
# Values stored in memory_links.link_type.
MEMORY_LINK_TYPES = {
    'DIRECT': 'direct', # Direct formation link
    'TEMPORAL': 'temporal', # Temporally related
    'MERGED': 'merged', # Result of merge
    'RESURRECT': 'resurrection' # From resurrection
}

# Values stored in synthesis_relations.relationship_type.
SYNTHESIS_TYPES = {
    'CONFLICT': 'conflict_synthesis',
    'MERGE': 'merge_synthesis',
    'RESURRECTION': 'resurrection'
}
39 |
40 | # -----------------------------------------------------------------------------
41 | # 2. Body Memory Tables
42 | # -----------------------------------------------------------------------------
43 |
# DDL for the body memory store: one node table plus its index statements.
# Self-referencing parent_node_id supports ghost/merge hierarchies.
BODY_MEMORY_SCHEMA = {
    'body_memory_nodes': """
        CREATE TABLE IF NOT EXISTS body_memory_nodes (
            id INTEGER PRIMARY KEY,
            timestamp REAL NOT NULL,
            raw_state TEXT NOT NULL, -- Emotional vectors, needs, etc.
            processed_state TEXT NOT NULL, -- Processed/summarized states
            strength REAL NOT NULL,
            ghosted BOOLEAN DEFAULT FALSE,
            parent_node_id INTEGER,
            ghost_nodes TEXT, -- Serialized array of ghost nodes
            ghost_states TEXT, -- Serialized array of past states
            connections TEXT, -- Serialized connection mapping
            last_connection_update REAL,
            last_accessed REAL,
            FOREIGN KEY(parent_node_id) REFERENCES body_memory_nodes(id)
        )
    """,

    # Index list is consumed via get_all_indexes(), not executed as a table.
    'body_memory_indexes': [
        "CREATE INDEX IF NOT EXISTS idx_body_timestamp ON body_memory_nodes(timestamp)",
        "CREATE INDEX IF NOT EXISTS idx_body_strength ON body_memory_nodes(strength)",
        "CREATE INDEX IF NOT EXISTS idx_body_ghosted ON body_memory_nodes(ghosted)",
        "CREATE INDEX IF NOT EXISTS idx_body_last_accessed ON body_memory_nodes(last_accessed)"
    ]
}
70 |
71 | # -----------------------------------------------------------------------------
72 | # 3. Cognitive Memory Tables
73 | # -----------------------------------------------------------------------------
74 |
# DDL for the cognitive memory store. Extends the body-node shape with text
# content, an embedding column, and echo-tracking fields.
COGNITIVE_MEMORY_SCHEMA = {
    'cognitive_memory_nodes': """
        CREATE TABLE IF NOT EXISTS cognitive_memory_nodes (
            id INTEGER PRIMARY KEY,
            timestamp REAL NOT NULL,
            text_content TEXT NOT NULL,
            embedding TEXT NOT NULL, -- Serialized embedding vector
            raw_state TEXT NOT NULL,
            processed_state TEXT NOT NULL,
            strength REAL NOT NULL,
            ghosted BOOLEAN DEFAULT FALSE,
            parent_node_id INTEGER,
            ghost_nodes TEXT, -- Serialized ghost node array
            ghost_states TEXT, -- Serialized state history
            connections TEXT, -- Serialized connection mapping
            semantic_context TEXT, -- Additional semantic metadata
            last_accessed REAL,
            formation_source TEXT, -- Event/trigger that created memory
            last_echo_time REAL,
            echo_dampening REAL DEFAULT 1.0,
            last_connection_update REAL,
            FOREIGN KEY(parent_node_id) REFERENCES cognitive_memory_nodes(id)
        )
    """,

    # Index list is consumed via get_all_indexes(), not executed as a table.
    'cognitive_memory_indexes': [
        "CREATE INDEX IF NOT EXISTS idx_cognitive_timestamp ON cognitive_memory_nodes(timestamp)",
        "CREATE INDEX IF NOT EXISTS idx_cognitive_strength ON cognitive_memory_nodes(strength)",
        "CREATE INDEX IF NOT EXISTS idx_cognitive_ghosted ON cognitive_memory_nodes(ghosted)",
        "CREATE INDEX IF NOT EXISTS idx_cognitive_last_accessed ON cognitive_memory_nodes(last_accessed)",
        "CREATE INDEX IF NOT EXISTS idx_cognitive_last_connection_update ON cognitive_memory_nodes(last_connection_update)"
    ]
}
108 |
109 | # -----------------------------------------------------------------------------
110 | # 4. Relationship Tables
111 | # -----------------------------------------------------------------------------
112 |
# DDL linking the two stores: cognitive<->body links plus synthesis lineage.
# Both tables reference the node tables, so they must be created afterwards
# (see TABLE_CREATION_ORDER).
RELATIONSHIP_SCHEMA = {
    'memory_links': """
        CREATE TABLE IF NOT EXISTS memory_links (
            id INTEGER PRIMARY KEY,
            cognitive_node_id INTEGER NOT NULL,
            body_node_id INTEGER NOT NULL,
            link_strength REAL NOT NULL,
            link_type TEXT NOT NULL,
            created_at REAL NOT NULL,
            metadata TEXT, -- State signatures and other metadata
            UNIQUE(cognitive_node_id, body_node_id),
            FOREIGN KEY(cognitive_node_id) REFERENCES cognitive_memory_nodes(id),
            FOREIGN KEY(body_node_id) REFERENCES body_memory_nodes(id)
        )
    """,

    'synthesis_relations': """
        CREATE TABLE IF NOT EXISTS synthesis_relations (
            id INTEGER PRIMARY KEY,
            synthesis_node_id INTEGER NOT NULL,
            constituent_node_id INTEGER NOT NULL,
            relationship_type TEXT NOT NULL,
            metadata TEXT, -- Synthesis details and metrics
            FOREIGN KEY(synthesis_node_id) REFERENCES cognitive_memory_nodes(id),
            FOREIGN KEY(constituent_node_id) REFERENCES cognitive_memory_nodes(id)
        )
    """,

    # Index list is consumed via get_all_indexes(), not executed as a table.
    'relationship_indexes': [
        "CREATE INDEX IF NOT EXISTS idx_memory_links_cognitive ON memory_links(cognitive_node_id)",
        "CREATE INDEX IF NOT EXISTS idx_memory_links_body ON memory_links(body_node_id)",
        "CREATE INDEX IF NOT EXISTS idx_synthesis_synthesis ON synthesis_relations(synthesis_node_id)",
        "CREATE INDEX IF NOT EXISTS idx_synthesis_constituent ON synthesis_relations(constituent_node_id)"
    ]
}
148 |
149 | # -----------------------------------------------------------------------------
150 | # 5. Table Creation Functions
151 | # -----------------------------------------------------------------------------
152 |
# Union of all table/index definitions; key names are distinct across the
# three sub-schemas, so no entry is shadowed by the merge.
COMPLETE_MEMORY_SCHEMA = {
    **BODY_MEMORY_SCHEMA,
    **COGNITIVE_MEMORY_SCHEMA,
    **RELATIONSHIP_SCHEMA
}

# Creation order honors foreign-key dependencies: the link tables reference
# both node tables, so the node tables come first.
TABLE_CREATION_ORDER = [
    'body_memory_nodes',
    'cognitive_memory_nodes',
    'memory_links',
    'synthesis_relations'
]
165 |
def get_complete_schema() -> Dict[str, str]:
    """Return the merged schema mapping (all table DDL plus index lists)."""
    return COMPLETE_MEMORY_SCHEMA
169 |
def get_table_creation_order() -> List[str]:
    """Return table names in an order that satisfies foreign-key dependencies."""
    return TABLE_CREATION_ORDER
173 |
def get_all_indexes() -> List[str]:
    """Return every CREATE INDEX statement across all memory schemas."""
    return [
        *BODY_MEMORY_SCHEMA['body_memory_indexes'],
        *COGNITIVE_MEMORY_SCHEMA['cognitive_memory_indexes'],
        *RELATIONSHIP_SCHEMA['relationship_indexes'],
    ]
--------------------------------------------------------------------------------
/internal/modules/memory/metrics/temporal.py:
--------------------------------------------------------------------------------
1 | """
memory/metrics/temporal.py
3 |
4 | Implements temporal analysis for memory retrieval and pattern detection.
5 | Handles decay curves, echo timing, and access patterns.
6 |
7 | Key capabilities:
8 | - Basic time decay calculations
9 | - Echo recency and dampening effects
10 | - Access history analysis
11 | - Support for temporal pattern detection
12 | """
13 |
14 | from typing import Dict, Optional, List
15 | import math
16 | import time
17 |
18 |
19 |
20 | from loggers.loggers import MemoryLogger
21 |
class TemporalMetricsError(Exception):
    """Raised when a temporal metrics computation cannot be completed."""
25 |
class TemporalMetricsCalculator:
    """
    Calculates temporal metrics for memory retrieval.
    Handles formation time, access patterns, and echo effects.
    """

    def __init__(self, echo_window: float = 180.0):
        """
        Initialize calculator with timing parameters.

        Args:
            echo_window: Time window (seconds) for echo dampening
        """
        self.echo_window = echo_window
        self.logger = MemoryLogger

    def calculate_metrics(
        self,
        node_timestamp: float,
        current_time: Optional[float] = None,
        last_accessed: Optional[float] = None,
        last_echo_time: Optional[float] = None,
        echo_dampening: Optional[float] = None,
        is_cognitive: bool = True
    ) -> Dict[str, float]:
        """
        Calculate comprehensive temporal metrics.

        Args:
            node_timestamp: Formation time of the memory
            current_time: Current time (defaults to time.time())
            last_accessed: Last access timestamp (if available)
            last_echo_time: Last echo timestamp (cognitive only)
            echo_dampening: Current echo dampening factor
            is_cognitive: Whether this is a cognitive memory

        Returns:
            Dict containing temporal metrics:
            - recency: Basic time decay score
            - access_recency: Time since last access
            - echo_recency: Time since last echo (cognitive only)
            - echo_dampening: Current dampening factor (cognitive only)
            - echo_potential: Readiness for a new echo (cognitive only)
        """
        try:
            if current_time is None:
                current_time = time.time()

            metrics = {}

            # Basic time decay (1-hour decay curve)
            time_diff = current_time - node_timestamp
            metrics['recency'] = math.exp(-time_diff / 3600)

            # Access history
            if last_accessed is not None:
                access_diff = current_time - last_accessed
                metrics['access_recency'] = math.exp(-access_diff / 3600)

            # Echo effects (cognitive only)
            if is_cognitive and last_echo_time is not None:
                echo_diff = current_time - last_echo_time
                metrics['echo_recency'] = math.exp(-echo_diff / self.echo_window)

                if echo_dampening is not None:
                    metrics['echo_dampening'] = echo_dampening

                # Add echo potential rating.
                # BUGFIX: explicit None check — the previous `echo_dampening or 1.0`
                # treated a legitimate dampening of 0.0 as missing and silently
                # restored full echo potential (inconsistent with the
                # `is not None` check just above).
                dampening = echo_dampening if echo_dampening is not None else 1.0
                metrics['echo_potential'] = self._calculate_echo_potential(
                    echo_diff,
                    dampening
                )

            # EXPANSION POINT: Pattern detection across time windows
            # EXPANSION POINT: Rhythm analysis in access patterns
            # EXPANSION POINT: Memory consolidation timing

            return metrics

        except Exception as e:
            self.logger.log_error(f"Temporal metrics calculation failed: {str(e)}")
            return self._get_fallback_metrics(is_cognitive)

    def _calculate_echo_potential(
        self,
        time_since_echo: float,
        current_dampening: float
    ) -> float:
        """
        Calculate potential for new echo effect.

        Args:
            time_since_echo: Seconds since last echo
            current_dampening: Current echo dampening factor

        Returns:
            float: Echo potential score [0,1] (assuming current_dampening <= 1.0)
        """
        if time_since_echo > self.echo_window:
            # Full echo potential after window
            return 1.0

        # Progressive recovery within window
        recovery = time_since_echo / self.echo_window
        base_potential = min(1.0, recovery * 1.5)  # Allow faster initial recovery

        # Apply current dampening
        return base_potential * current_dampening

    def analyze_temporal_patterns(
        self,
        timestamps: List[float],
        current_time: Optional[float] = None
    ) -> Dict[str, float]:
        """
        Analyze patterns in temporal data.

        EXPANSION POINT: Enhanced pattern recognition
        - Access rhythm detection
        - Memory consolidation patterns
        - Temporal clustering analysis

        Args:
            timestamps: List of relevant timestamps
            current_time: Current time reference

        Returns:
            Dict of pattern metrics (empty on empty input or failure)
        """
        if not timestamps:
            return {}

        if current_time is None:
            current_time = time.time()

        try:
            patterns = {}

            # Sort timestamps
            sorted_times = sorted(timestamps)

            # Gaps between consecutive timestamps
            intervals = [
                t2 - t1
                for t1, t2 in zip(sorted_times[:-1], sorted_times[1:])
            ]

            if intervals:
                # Average interval
                patterns['avg_interval'] = sum(intervals) / len(intervals)

                # Interval consistency: variance of gaps mapped through exp decay,
                # so perfectly regular access yields 1.0
                if len(intervals) > 1:
                    variance = sum(
                        (i - patterns['avg_interval']) ** 2
                        for i in intervals
                    ) / len(intervals)
                    patterns['interval_consistency'] = math.exp(-variance / 3600)

            # Recent activity density: fraction of timestamps in the last hour
            hour_ago = current_time - 3600
            recent_count = sum(1 for t in sorted_times if t > hour_ago)
            patterns['recent_density'] = recent_count / len(timestamps)

            return patterns

        except Exception as e:
            self.logger.log_error(f"Pattern analysis failed: {str(e)}")
            return {}

    def _get_fallback_metrics(self, is_cognitive: bool) -> Dict[str, float]:
        """Provide safe fallback metrics if calculations fail."""
        metrics = {
            'recency': 0.0,
            'access_recency': 0.0
        }

        if is_cognitive:
            metrics.update({
                'echo_recency': 0.0,
                'echo_dampening': 1.0,
                'echo_potential': 0.0
            })

        return metrics
--------------------------------------------------------------------------------