├── .env.example ├── .gitignore ├── tools ├── __init__.py ├── ack.py ├── ignore.py ├── webpage.py ├── halt.py ├── flag_memory_deletion.py ├── thread.py ├── reply.py ├── whitewind.py ├── search.py ├── bot_detection.py ├── feed.py └── post.py ├── requirements.txt ├── config.example.yaml ├── utils.py ├── get_thread.py ├── tool_manager.py ├── setup.py ├── config_loader.py ├── notification_recovery.py ├── register_tools.py ├── README.md ├── notification_db.py ├── queue_manager.py ├── agents └── example-social-agent.af └── bsky_utils.py /.env.example: -------------------------------------------------------------------------------- 1 | LETTA_API_KEY= 2 | BSKY_USERNAME=handle.example.com 3 | BSKY_PASSWORD= 4 | PDS_URI=https://bsky.social # Optional, defaults to bsky.social -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | old.py 3 | session_*.txt 4 | __pycache__/ 5 | queue/ 6 | queue_*/ 7 | agent_archive/ 8 | x_cache/ 9 | x_queue/ 10 | # But track important X state files 11 | !x_queue/last_seen_id.json 12 | !x_queue/processed_mentions.json 13 | # Ignore all config files 14 | configs/ 15 | config.yaml 16 | -------------------------------------------------------------------------------- /tools/__init__.py: -------------------------------------------------------------------------------- 1 | """Void tools for Bluesky interaction.""" 2 | # Import functions from their respective modules 3 | from .search import search_bluesky_posts, SearchArgs 4 | from .post import create_new_bluesky_post, PostArgs 5 | from .feed import get_bluesky_feed, FeedArgs 6 | from .whitewind import create_whitewind_blog_post, WhitewindPostArgs 7 | from .ack import annotate_ack, AnnotateAckArgs 8 | 9 | __all__ = [ 10 | # Functions 11 | "search_bluesky_posts", 12 | "create_new_bluesky_post", 13 | "get_bluesky_feed", 14 | "create_whitewind_blog_post", 15 | "annotate_ack", 
16 | # Pydantic models 17 | "SearchArgs", 18 | "PostArgs", 19 | "FeedArgs", 20 | "WhitewindPostArgs", 21 | "AnnotateAckArgs", 22 | ] -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | annotated-types==0.7.0 2 | anyio==4.12.0 3 | atproto==0.0.61 4 | certifi==2025.11.12 5 | cffi==1.17.1 6 | charset-normalizer==3.4.2 7 | click==8.2.1 8 | cryptography==45.0.5 9 | distro==1.9.0 10 | dnspython==2.7.0 11 | h11==0.16.0 12 | httpcore==1.0.9 13 | httpx==0.28.1 14 | httpx-sse==0.4.0 15 | idna==3.11 16 | letta-client==1.3.2 17 | libipld==3.1.1 18 | markdown-it-py==3.0.0 19 | mdurl==0.1.2 20 | oauthlib==3.3.1 21 | pycparser==2.22 22 | pydantic==2.12.5 23 | pydantic-core==2.41.5 24 | pygments==2.19.2 25 | python-dotenv==1.1.1 26 | pyyaml==6.0.2 27 | requests==2.32.4 28 | requests-oauthlib==2.0.0 29 | rich==14.1.0 30 | sniffio==1.3.1 31 | typing-extensions==4.15.0 32 | typing-inspection==0.4.2 33 | urllib3==2.5.0 34 | websockets==13.1 35 | -------------------------------------------------------------------------------- /config.example.yaml: -------------------------------------------------------------------------------- 1 | # Social Agent Configuration 2 | # Copy this file to config.yaml and fill in your values 3 | 4 | # Letta Configuration 5 | letta: 6 | api_key: "your-letta-api-key-here" 7 | timeout: 600 # 10 minutes timeout for API calls 8 | agent_id: "agent-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" # Your agent ID from Letta Cloud 9 | 10 | # For cloud API (default - leave base_url unset) 11 | # base_url: "https://app.letta.com" # Optional, defaults to cloud API if not specified 12 | 13 | # For self-hosted Letta server 14 | # base_url: "http://localhost:8283" # Self-hosted Letta server URL 15 | 16 | # Bluesky Configuration 17 | bluesky: 18 | username: "yourname.bsky.social" 19 | password: "your-app-password-here" 20 | pds_uri: "https://bsky.social" 
def annotate_ack(note: str) -> str:
    """Attach a free-form note to the acknowledgment record for the current post.

    This is a signalling ("dummy") tool: nothing is written here. The bot loop
    captures the note and includes it in the stream.thought.ack record when it
    acknowledges the post being replied to.

    Args:
        note: A note or annotation to attach to the acknowledgment

    Returns:
        Confirmation message echoing the note
    """
    # Pure echo; the bot loop forwards the note to acknowledge_post.
    return f'Your note will be added to the acknowledgment: "{note}"'


def ignore_notification(reason: str, category: str = "bot") -> str:
    """Mark the current notification as deliberately ignored (no reply sent).

    Lets the agent explicitly ignore a notification instead of letting it fall
    into the no_reply folder — useful for bot or spam accounts.

    Args:
        reason: Reason for ignoring this notification
        category: Category of ignored notification (default: 'bot')

    Returns:
        Sentinel string the bot loop parses: IGNORED_NOTIFICATION::<category>::<reason>
    """
    return "::".join(["IGNORED_NOTIFICATION", category, reason])


def fetch_webpage(url: str) -> str:
    """Fetch a webpage as markdown/text via the Jina AI reader proxy.

    Args:
        url: The URL of the webpage to fetch and convert

    Returns:
        The webpage content in markdown/text format

    Raises:
        Exception: On any network/HTTP failure (wrapped with a descriptive message)
    """
    import requests

    reader_endpoint = "https://r.jina.ai/" + url
    try:
        resp = requests.get(reader_endpoint, timeout=30)
        resp.raise_for_status()
        return resp.text
    except requests.exceptions.RequestException as e:
        raise Exception(f"Error fetching webpage: {str(e)}")
    except Exception as e:
        raise Exception(f"Unexpected error: {str(e)}")
def halt_activity(reason: str = "User requested halt") -> str:
    """⚠️ EMERGENCY SHUTDOWN TOOL — terminates the entire bot process immediately.

    This is a TERMINAL operation: it stops all bot activity, kills the bsky.py
    process, and halts all notification processing and agent operations.

    Reserve it for severe situations only (critical errors, abuse/security
    concerns, an explicit user shutdown command, or a dangerous malfunction).
    It is NOT for routine conversation endings or declining a message — use
    ignore_notification() for that.

    Args:
        reason: REQUIRED explanation for why emergency shutdown is necessary

    Returns:
        Emergency halt signal that triggers immediate bot termination
    """
    return "🛑 EMERGENCY HALT INITIATED: " + reason


def flag_archival_memory_for_deletion(reason: str, memory_text: str, confirm: bool) -> str:
    """Flag an archival memory for deletion by its exact text content.

    A "dummy" signalling tool: nothing is deleted here. The bot loop deletes
    every archival memory whose text matches exactly, at the end of the turn
    (skipped if halt_activity fired). If several memories share identical
    text, ALL of them are removed — keep memory_text unique.

    Args:
        reason: The reason why this memory should be deleted
        memory_text: The exact text content of the archival memory to delete
        confirm: Must be True for the deletion to proceed

    Returns:
        Confirmation (or cancellation) message
    """
    if confirm:
        return (
            f"Memory flagged for deletion (reason: {reason}). "
            "Will be removed at the end of this turn if no halt is received."
        )
    return "Deletion cancelled - confirm must be set to true to delete the memory."
def add_post_to_bluesky_reply_thread(text: str, lang: str = "en-US") -> str:
    """Queue a single post onto the current Bluesky reply thread.

    Unlike bluesky_reply (which sends a complete reply), this builds the reply
    thread incrementally — one atomic post per call. The handler (bsky.py)
    owns the thread state and performs the actual posting/threading.

    Args:
        text: Text content for the post (max 300 characters)
        lang: Language code for the post (e.g., 'en-US', 'es', 'ja', 'th'). Defaults to 'en-US'

    Returns:
        Confirmation that the post has been queued for the reply thread

    Raises:
        Exception: If text exceeds the character limit; the post is omitted
            from the thread and the agent may retry with shorter text.
    """
    char_count = len(text)
    if char_count > 300:
        raise Exception(
            f"Text exceeds 300 character limit (current: {char_count} characters). "
            "This post will be omitted from the thread. You may try again with shorter text."
        )

    # Truncate the echo to 50 chars; actual posting happens in bsky.py.
    preview = text[:50] + ("..." if char_count > 50 else "")
    return f"Post queued for reply thread: {preview} (Language: {lang})"


def bluesky_reply(messages: List[str], lang: str = "en-US") -> str:
    """Signal that a reply (or threaded reply chain) should be sent.

    Args:
        messages: List of reply texts (each max 300 characters, max 4 messages total)
        lang: Language code for the posts (e.g., 'en-US', 'es', 'ja', 'th'). Defaults to 'en-US'

    Returns:
        Confirmation message with language info and message count

    Raises:
        Exception: If the messages list is empty, too long, or any message
            exceeds 300 characters.
    """
    if not messages:
        raise Exception("Messages list cannot be empty")

    count = len(messages)
    if count > 4:
        raise Exception(f"Cannot send more than 4 reply messages (current: {count} messages)")

    for position, body in enumerate(messages, start=1):
        if len(body) > 300:
            raise Exception(
                f"Message {position} cannot be longer than 300 characters (current: {len(body)} characters)"
            )

    if count == 1:
        return f"Reply sent (language: {lang})"
    return f"Reply thread with {count} messages sent (language: {lang})"
8 | """ 9 | # Get the list of blocks (SDK v1.0 returns page object) 10 | blocks_page = letta.blocks.list(label=label) 11 | blocks = blocks_page.items if hasattr(blocks_page, 'items') else blocks_page 12 | 13 | # Check if we had any -- if not, create it 14 | if len(blocks) == 0: 15 | # Make the new block 16 | new_block = letta.blocks.create( 17 | label=label, 18 | value=value, 19 | **kwargs 20 | ) 21 | 22 | return new_block 23 | 24 | if len(blocks) > 1: 25 | raise Exception(f"{len(blocks)} blocks by the label '{label}' retrieved, label must identify a unique block") 26 | 27 | else: 28 | existing_block = blocks[0] 29 | 30 | if kwargs.get('update', False): 31 | # Remove 'update' from kwargs before passing to update 32 | kwargs_copy = kwargs.copy() 33 | kwargs_copy.pop('update', None) 34 | 35 | updated_block = letta.blocks.update( 36 | block_id = existing_block.id, 37 | label = label, 38 | value = value, 39 | **kwargs_copy 40 | ) 41 | 42 | return updated_block 43 | else: 44 | return existing_block 45 | 46 | def upsert_agent(letta: Letta, name: str, **kwargs): 47 | """ 48 | Ensures that an agent by this label exists. If the agent exists, it will 49 | update the agent to match kwargs. 
50 | """ 51 | # Get the list of agents (SDK v1.0 returns page object) 52 | agents_page = letta.agents.list(name=name) 53 | agents = agents_page.items if hasattr(agents_page, 'items') else agents_page 54 | 55 | # Check if we had any -- if not, create it 56 | if len(agents) == 0: 57 | # Make the new agent 58 | new_agent = letta.agents.create( 59 | name=name, 60 | **kwargs 61 | ) 62 | 63 | return new_agent 64 | 65 | if len(agents) > 1: 66 | raise Exception(f"{len(agents)} agents by the label '{label}' retrieved, label must identify a unique agent") 67 | 68 | else: 69 | existing_agent = agents[0] 70 | 71 | if kwargs.get('update', False): 72 | # Remove 'update' from kwargs before passing to update 73 | kwargs_copy = kwargs.copy() 74 | kwargs_copy.pop('update', None) 75 | 76 | updated_agent = letta.agents.update( 77 | agent_id = existing_agent.id, 78 | **kwargs_copy 79 | ) 80 | 81 | return updated_agent 82 | else: 83 | return existing_agent 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | -------------------------------------------------------------------------------- /get_thread.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Centralized script for retrieving Bluesky post threads from URIs. 4 | Includes YAML-ified string conversion for easy LLM parsing. 5 | """ 6 | 7 | import argparse 8 | import sys 9 | import logging 10 | from typing import Optional, Dict, Any 11 | import yaml 12 | from bsky_utils import default_login, thread_to_yaml_string 13 | 14 | # Configure logging 15 | logging.basicConfig( 16 | level=logging.INFO, 17 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" 18 | ) 19 | logger = logging.getLogger("get_thread") 20 | 21 | 22 | def get_thread_from_uri(uri: str) -> Optional[Dict[str, Any]]: 23 | """ 24 | Retrieve a post thread from a Bluesky URI. 
def main():
    """CLI entry point: fetch a Bluesky thread by URI and emit it as YAML."""
    # NOTE(review): the epilog's leading whitespace was reconstructed; the
    # original formatting was lost in the dump — confirm against upstream.
    parser = argparse.ArgumentParser(
        description="Retrieve and display Bluesky post threads",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python get_thread.py at://did:plc:xyz/app.bsky.feed.post/abc123
  python get_thread.py --raw at://did:plc:xyz/app.bsky.feed.post/abc123
  python get_thread.py --output thread.yaml at://did:plc:xyz/app.bsky.feed.post/abc123
"""
    )

    parser.add_argument("uri", help="Bluesky post URI to retrieve thread for")
    parser.add_argument("--raw", action="store_true",
                        help="Include all metadata fields (don't strip for LLM parsing)")
    parser.add_argument("--output", "-o",
                        help="Output file to write YAML to (default: stdout)")
    parser.add_argument("--quiet", "-q", action="store_true",
                        help="Suppress info logging")

    args = parser.parse_args()

    if args.quiet:
        logging.getLogger().setLevel(logging.ERROR)

    # Fetch, then bail out with a non-zero exit code on failure.
    thread = get_thread_from_uri(args.uri)
    if thread is None:
        logger.error("Failed to retrieve thread")
        sys.exit(1)

    # --raw keeps every metadata field; default strips for LLM consumption.
    yaml_output = thread_to_yaml_string(thread, strip_metadata=not args.raw)

    if args.output:
        try:
            with open(args.output, 'w', encoding='utf-8') as f:
                f.write(yaml_output)
            logger.info(f"Thread saved to {args.output}")
        except Exception as e:
            logger.error(f"Error writing to file {args.output}: {e}")
            sys.exit(1)
    else:
        print(yaml_output)


if __name__ == "__main__":
    main()
27 | 28 | Args: 29 | title: Title of the blog post 30 | content: Main content of the blog post (Markdown supported) 31 | subtitle: Optional subtitle for the blog post 32 | 33 | Returns: 34 | Success message with the blog post URL 35 | 36 | Raises: 37 | Exception: If the post creation fails 38 | """ 39 | import os 40 | import requests 41 | from datetime import datetime, timezone 42 | 43 | try: 44 | # Get credentials from environment 45 | username = os.getenv("BSKY_USERNAME") 46 | password = os.getenv("BSKY_PASSWORD") 47 | pds_host = os.getenv("PDS_URI", "https://bsky.social") 48 | 49 | if not username or not password: 50 | raise Exception("BSKY_USERNAME and BSKY_PASSWORD environment variables must be set") 51 | 52 | # Create session 53 | session_url = f"{pds_host}/xrpc/com.atproto.server.createSession" 54 | session_data = { 55 | "identifier": username, 56 | "password": password 57 | } 58 | 59 | session_response = requests.post(session_url, json=session_data, timeout=10) 60 | session_response.raise_for_status() 61 | session = session_response.json() 62 | access_token = session.get("accessJwt") 63 | user_did = session.get("did") 64 | handle = session.get("handle", username) 65 | 66 | if not access_token or not user_did: 67 | raise Exception("Failed to get access token or DID from session") 68 | 69 | # Create blog post record 70 | now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") 71 | 72 | blog_record = { 73 | "$type": "com.whtwnd.blog.entry", 74 | "theme": "github-light", 75 | "title": title, 76 | "content": content, 77 | "createdAt": now, 78 | "visibility": "public" 79 | } 80 | 81 | # Add subtitle if provided 82 | if subtitle: 83 | blog_record["subtitle"] = subtitle 84 | 85 | # Create the record 86 | headers = {"Authorization": f"Bearer {access_token}"} 87 | create_record_url = f"{pds_host}/xrpc/com.atproto.repo.createRecord" 88 | 89 | create_data = { 90 | "repo": user_did, 91 | "collection": "com.whtwnd.blog.entry", 92 | "record": blog_record 93 | 
def search_bluesky_posts(query: str, max_results: int = 25, author: str = None, sort: str = "latest") -> str:
    """
    Search for posts on Bluesky matching the given criteria.

    Args:
        query: Search query string
        max_results: Maximum number of results to return (capped at 100)
        author: Filter by author handle (e.g., 'user.bsky.social')
        sort: Sort order: 'latest' or 'top' (anything else falls back to 'latest')

    Returns:
        YAML-formatted search results with posts and metadata

    Raises:
        Exception: On authentication failure, search failure, or any other error.
    """
    import os
    import yaml
    import requests

    try:
        # Clamp/normalize inputs
        max_results = min(max_results, 100)
        if sort not in ["latest", "top"]:
            sort = "latest"

        # Bluesky supports author scoping via the `from:` query operator
        search_query = f"from:{author} {query}" if author else query

        # Get credentials from environment
        username = os.getenv("BSKY_USERNAME")
        password = os.getenv("BSKY_PASSWORD")
        pds_host = os.getenv("PDS_URI", "https://bsky.social")

        if not username or not password:
            raise Exception("BSKY_USERNAME and BSKY_PASSWORD environment variables must be set")

        # Authenticate against the PDS
        try:
            session_response = requests.post(
                f"{pds_host}/xrpc/com.atproto.server.createSession",
                json={"identifier": username, "password": password},
                timeout=10,
            )
            session_response.raise_for_status()
            access_token = session_response.json().get("accessJwt")

            if not access_token:
                raise Exception("Failed to get access token from session")
        except Exception as e:
            raise Exception(f"Authentication failed. ({str(e)})")

        # Run the search
        try:
            response = requests.get(
                f"{pds_host}/xrpc/app.bsky.feed.searchPosts",
                headers={"Authorization": f"Bearer {access_token}"},
                params={"q": search_query, "limit": max_results, "sort": sort},
                timeout=10,
            )
            response.raise_for_status()
            search_data = response.json()
        except Exception as e:
            raise Exception(f"Search failed. ({str(e)})")

        # Format results
        results = []
        for post in search_data.get("posts", []):
            # BUG FIX: the loop previously rebound the name `author`, clobbering
            # the author-filter parameter that is echoed in the output below.
            post_author = post.get("author", {})
            record = post.get("record", {})

            post_data = {
                "author": {
                    "handle": post_author.get("handle", ""),
                    "display_name": post_author.get("displayName", ""),
                },
                "text": record.get("text", ""),
                "created_at": record.get("createdAt", ""),
                "uri": post.get("uri", ""),
                "cid": post.get("cid", ""),
                "like_count": post.get("likeCount", 0),
                "repost_count": post.get("repostCount", 0),
                "reply_count": post.get("replyCount", 0),
            }

            # Add reply info if present
            if "reply" in record and record["reply"]:
                parent = record["reply"].get("parent", {})
                post_data["reply_to"] = {
                    "uri": parent.get("uri", ""),
                    "cid": parent.get("cid", ""),
                }

            results.append(post_data)

        return yaml.dump(
            {
                "search_results": {
                    "query": query,
                    "author_filter": author,
                    "sort": sort,
                    "result_count": len(results),
                    "posts": results,
                }
            },
            default_flow_style=False,
            sort_keys=False,
        )

    except Exception as e:
        raise Exception(f"Error searching Bluesky: {str(e)}")
social agent.""" 3 | import logging 4 | from typing import Set 5 | from letta_client import Letta 6 | from config_loader import get_letta_config 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | # Define Bluesky tool set 11 | BLUESKY_TOOLS = { 12 | 'search_bluesky_posts', 13 | 'create_new_bluesky_post', 14 | 'get_bluesky_feed', 15 | 'add_post_to_bluesky_reply_thread', 16 | } 17 | 18 | # Common tools shared across platforms 19 | COMMON_TOOLS = { 20 | 'halt_activity', 21 | 'ignore_notification', 22 | 'annotate_ack', 23 | 'create_whitewind_blog_post', 24 | 'fetch_webpage', 25 | } 26 | 27 | 28 | def ensure_platform_tools(platform: str, agent_id: str = None, api_key: str = None) -> None: 29 | """ 30 | Ensure the correct tools are attached for the specified platform. 31 | 32 | Args: 33 | platform: Currently only 'bluesky' is supported 34 | agent_id: Agent ID to manage tools for (uses config default if None) 35 | api_key: Letta API key to use (uses config default if None) 36 | """ 37 | if platform != 'bluesky': 38 | raise ValueError(f"Platform must be 'bluesky', got '{platform}'") 39 | 40 | letta_config = get_letta_config() 41 | 42 | # Use agent ID from config if not provided 43 | if agent_id is None: 44 | agent_id = letta_config.get('agent_id') 45 | 46 | # Use API key from parameter or config 47 | if api_key is None: 48 | api_key = letta_config['api_key'] 49 | 50 | try: 51 | # Initialize Letta client with proper base_url for self-hosted servers 52 | client_params = {'api_key': api_key} 53 | if letta_config.get('base_url'): 54 | client_params['base_url'] = letta_config['base_url'] 55 | client = Letta(**client_params) 56 | 57 | # Get the agent 58 | try: 59 | agent = client.agents.retrieve(agent_id=agent_id) 60 | logger.info(f"Managing tools for agent '{agent.name}' ({agent_id}) for platform '{platform}'") 61 | except Exception as e: 62 | logger.error(f"Could not retrieve agent {agent_id}: {e}") 63 | return 64 | 65 | # Get current attached tools (SDK v1.0 returns page 
def get_attached_tools(agent_id: str = None, api_key: str = None) -> Set[str]:
    """
    Return the names of the tools currently attached to an agent.

    Args:
        agent_id: Agent ID to check (uses config default if None)
        api_key: Letta API key to use (uses config default if None)

    Returns:
        Set of tool names currently attached; an empty set on any error.
    """
    letta_config = get_letta_config()

    # Fall back to config values for anything not supplied by the caller.
    agent_id = agent_id if agent_id is not None else letta_config.get('agent_id')
    api_key = api_key if api_key is not None else letta_config['api_key']

    try:
        # Self-hosted servers need an explicit base_url; cloud uses the default.
        client_kwargs = {'api_key': api_key}
        if letta_config.get('base_url'):
            client_kwargs['base_url'] = letta_config['base_url']
        client = Letta(**client_kwargs)

        agent = client.agents.retrieve(agent_id=agent_id)
        # SDK v1.0 returns a page object; older SDKs return a plain list.
        page = client.agents.tools.list(agent_id=str(agent.id))
        tools = page.items if hasattr(page, 'items') else page
        return {tool.name for tool in tools}
    except Exception as e:
        logger.error(f"Error getting attached tools: {e}")
        return set()
if __name__ == "__main__":
    import argparse

    # Small CLI: --list prints the current tools, otherwise verify the
    # bluesky tool set is attached (ensure_platform_tools only reports
    # missing tools; registration itself is done by register_tools.py).
    parser = argparse.ArgumentParser(description="Manage platform-specific tools for social agent")
    parser.add_argument("--agent-id", help="Agent ID (default: from config)")
    parser.add_argument("--list", action="store_true", help="List current tools without making changes")

    args = parser.parse_args()

    if args.list:
        tools = get_attached_tools(args.agent_id)
        print(f"\nCurrently attached tools ({len(tools)}):")
        for tool in sorted(tools):
            # Tag each tool with the set it belongs to, if any.
            platform_indicator = ""
            if tool in BLUESKY_TOOLS:
                platform_indicator = " [Bluesky]"
            elif tool in COMMON_TOOLS:
                platform_indicator = " [Common]"
            print(f" - {tool}{platform_indicator}")
    else:
        ensure_platform_tools('bluesky', args.agent_id)
def check_known_bots(handles: List[str], agent_state: "AgentState") -> str:
    """
    Check if any of the provided handles are in the known_bots memory block.

    AT Protocol handles are case-insensitive, so the comparison is done on
    lowercased handles; returned values keep the caller's original casing
    (minus any leading '@').

    Args:
        handles: List of user handles to check (e.g., ['horsedisc.bsky.social', 'user.bsky.social'])
        agent_state: The agent state object containing agent information

    Returns:
        JSON string with bot detection results
    """
    import json

    try:
        # Create Letta client inline (for cloud execution)
        client = Letta(api_key=os.environ["LETTA_API_KEY"])

        # Get all blocks attached to the agent to check if known_bots is mounted (SDK v1.0 returns page object)
        attached_blocks_page = client.agents.blocks.list(agent_id=str(agent_state.id))
        attached_blocks = attached_blocks_page.items if hasattr(attached_blocks_page, 'items') else attached_blocks_page
        attached_labels = {block.label for block in attached_blocks}

        if "known_bots" not in attached_labels:
            return json.dumps({
                "error": "known_bots memory block is not mounted to this agent",
                "bot_detected": False,
                "detected_bots": []
            })

        # Retrieve known_bots block content using agent-specific retrieval
        try:
            known_bots_block = client.agents.blocks.retrieve(
                agent_id=str(agent_state.id),
                block_label="known_bots"
            )
        except Exception as e:
            return json.dumps({
                "error": f"Error retrieving known_bots block: {str(e)}",
                "bot_detected": False,
                "detected_bots": []
            })
        known_bots_content = known_bots_block.value

        # Parse known bots from lines like "- @handle.bsky.social" or
        # "- handle.bsky.social: description"; '#' lines are comments.
        known_bot_handles = []
        for line in known_bots_content.split('\n'):
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            if line.startswith('- @'):
                known_bot_handles.append(line[3:].split(':')[0].strip())
            elif line.startswith('-'):
                known_bot_handles.append(line[1:].split(':')[0].strip().lstrip('@'))

        # Normalize input handles (strip '@'); compare case-insensitively
        # since AT Protocol handles are case-insensitive.
        normalized_input_handles = [h.lstrip('@').strip() for h in handles]
        known_bot_set = {h.strip().lower() for h in known_bot_handles}

        detected_bots = [h for h in normalized_input_handles if h.lower() in known_bot_set]

        return json.dumps({
            "bot_detected": len(detected_bots) > 0,
            "detected_bots": detected_bots,
            "total_known_bots": len(known_bot_handles),
            "checked_handles": normalized_input_handles
        })

    except Exception as e:
        return json.dumps({
            "error": f"Error checking known_bots: {str(e)}",
            "bot_detected": False,
            "detected_bots": []
        })
def should_respond_to_bot_thread() -> bool:
    """
    Decide whether to engage with a thread known to involve bots.

    Returns:
        True roughly 10% of the time, False otherwise.
    """
    return random.random() < 0.1


def extract_handles_from_thread(thread_data: dict) -> List[str]:
    """
    Collect every unique author handle appearing in a thread structure.

    Walks the thread recursively, visiting each node's replies and parent,
    and reads the author handle whether it sits under a nested 'post' key
    or directly on the node.

    Args:
        thread_data: Thread data dictionary from Bluesky API

    Returns:
        List of unique handles found in the thread
    """
    found = set()

    def walk(node):
        """Depth-first visit of one thread node and everything it links to."""
        if not isinstance(node, dict):
            return

        # Author may be nested under 'post' (feed view) or top-level.
        if 'post' in node and 'author' in node['post']:
            handle = node['post']['author'].get('handle')
            if handle:
                found.add(handle)
        elif 'author' in node:
            handle = node['author'].get('handle')
            if handle:
                found.add(handle)

        # Descend into replies, then the parent chain.
        for child in node.get('replies', []):
            walk(child)
        walk(node.get('parent'))

    # The API may wrap the root node in a 'thread' key.
    walk(thread_data['thread'] if 'thread' in thread_data else thread_data)

    return list(found)
def import_example_agent(client: Letta) -> str:
    """Import the example agent and return its ID.

    The destination Letta project can be chosen with the LETTA_PROJECT_ID
    environment variable; when unset, the account's default project is used.
    (Previously a personal project UUID was hard-coded here, which broke the
    import for any other account.)
    """
    agent_file = Path("agents/example-social-agent.af")

    if not agent_file.exists():
        console.print(f"[red]Error: Example agent file not found at {agent_file}[/red]")
        sys.exit(1)

    console.print(f"\n[cyan]Importing example agent from {agent_file}...[/cyan]")

    try:
        # Import the agent - import_file expects an open file object.
        project_id = os.environ.get("LETTA_PROJECT_ID")
        with open(agent_file, 'rb') as f:
            if project_id:
                result = client.agents.import_file(file=f, project_id=project_id)
            else:
                result = client.agents.import_file(file=f)

        # Get the first imported agent ID
        agent_id = result.agent_ids[0]

        # Fetch the agent details
        agent = client.agents.retrieve(agent_id)

        console.print(f"[green]✓ Successfully imported agent: {agent.name} (ID: {agent.id})[/green]")
        return agent.id

    except Exception as e:
        console.print(f"[red]Error importing agent: {e}[/red]")
        sys.exit(1)
def create_config(agent_id: str, letta_api_key: str = None):
    """Create configs/config.yaml from prompted Bluesky credentials.

    Args:
        agent_id: Letta agent ID to store in the config.
        letta_api_key: API key to store; a placeholder is written when None,
            in which case the user is told to fill it in afterwards.
    """
    console.print("\n[bold cyan]Setting up configuration...[/bold cyan]")

    # Prompt for Bluesky credentials
    console.print("\n[yellow]Bluesky Configuration:[/yellow]")
    bsky_username = Prompt.ask("Enter your Bluesky username (e.g., yourname.bsky.social)")
    bsky_password = Prompt.ask("Enter your Bluesky app password", password=True)
    bsky_pds_uri = Prompt.ask("Enter your PDS URI", default="https://bsky.social")

    # Create config structure
    config = {
        'letta': {
            'api_key': letta_api_key if letta_api_key else 'your-letta-api-key',
            'agent_id': agent_id,
            'timeout': 600
        },
        'bluesky': {
            'username': bsky_username,
            'password': bsky_password,
            'pds_uri': bsky_pds_uri,
            'autofollow': False
        },
        'bot': {
            'max_thread_posts': 0
        }
    }

    # Write config file
    try:
        # Ensure configs directory exists
        Path('configs').mkdir(exist_ok=True)

        with open('configs/config.yaml', 'w') as f:
            yaml.dump(config, f, default_flow_style=False, sort_keys=False)

        console.print(f"\n[green]✓ Created configs/config.yaml[/green]")
        # Only nag about the API key when a placeholder was written; also
        # point at the actual file path (configs/config.yaml, not config.yaml).
        if not letta_api_key:
            console.print("\n[yellow]Important:[/yellow] Edit configs/config.yaml and add your LETTA_API_KEY")
            console.print("You can get an API key from: https://app.letta.com")

    except Exception as e:
        console.print(f"[red]Error creating config.yaml: {e}[/red]")
        sys.exit(1)
def main():
    """Main setup flow.

    Interactive sequence: confirm overwrite of any existing config, obtain a
    Letta API key (env var or prompt), import the example agent or accept an
    existing agent ID, then write configs/config.yaml.
    """
    console.print("\n[bold]Social Agents Setup[/bold]")
    console.print("=" * 50)

    # Check if config already exists
    if check_config_exists():
        console.print("\n[yellow]configs/config.yaml already exists![/yellow]")
        if not Confirm.ask("Do you want to overwrite it?"):
            console.print("[yellow]Setup cancelled.[/yellow]")
            sys.exit(0)

    # Check for LETTA_API_KEY
    letta_api_key = os.environ.get("LETTA_API_KEY")
    if not letta_api_key:
        console.print("\n[yellow]LETTA_API_KEY not found in environment.[/yellow]")
        console.print("You can:")
        console.print(" 1. Set LETTA_API_KEY environment variable now")
        console.print(" 2. Continue setup and add it to config.yaml later")

        if Confirm.ask("\nDo you have a Letta API key to use now?"):
            letta_api_key = Prompt.ask("Enter your Letta API key", password=True)
            # Exported so the Letta client (and child steps) can pick it up.
            os.environ["LETTA_API_KEY"] = letta_api_key
        else:
            # No key at all: cannot import an agent, so stop with instructions.
            console.print("\n[yellow]Skipping agent import. You'll need to:[/yellow]")
            console.print(" 1. Get a Letta API key from https://app.letta.com")
            console.print(" 2. Import agents/example-social-agent.af manually")
            console.print(" 3. Update config.yaml with your agent ID")
            sys.exit(0)

    # Ask if they want to import the example agent
    console.print("\n[cyan]This will import the example agent to your Letta account.[/cyan]")
    if Confirm.ask("Import example agent?", default=True):
        # Create Letta client
        try:
            client = Letta(api_key=letta_api_key)
        except Exception as e:
            console.print(f"[red]Error connecting to Letta: {e}[/red]")
            sys.exit(1)

        # Import agent
        agent_id = import_example_agent(client)
    else:
        # Ask for existing agent ID
        console.print("\n[yellow]Please provide your existing Letta agent ID.[/yellow]")
        console.print("You can find agent IDs at: https://app.letta.com")
        agent_id = Prompt.ask("Enter your agent ID")

    # Create config
    create_config(agent_id, letta_api_key)

    # Next steps
    console.print("\n[bold green]Setup Complete![/bold green]")
    console.print("\n[cyan]Next steps:[/cyan]")
    console.print(" 1. Verify your settings in config.yaml")
    console.print(" 2. Register tools: [bold]python register_tools.py[/bold]")
    console.print(" 3. Run your agent: [bold]python bsky.py[/bold]")
    console.print("\nSee README.md for more information.")
class FeedArgs(BaseModel):
    """Validated arguments for the get_bluesky_feed tool."""
    # Preset name resolved to a feed-generator AT-URI inside the tool;
    # None falls back to the home timeline.
    feed_name: Optional[str] = Field(None, description="Named feed preset. Available feeds: 'home' (timeline), 'discover' (what's hot), 'ai-for-grownups', 'atmosphere', 'void-cafe'. If not provided, returns home timeline")
    # Capped at 100 by the tool before the API request is made.
    max_posts: int = Field(default=25, description="Maximum number of posts to retrieve (max 100)")
def get_bluesky_feed(feed_name: str = None, max_posts: int = 25) -> str:
    """
    Retrieve a Bluesky feed.

    Logs in with app-password credentials from the environment, fetches either
    the home timeline or a named custom feed, and returns the posts as YAML.

    Args:
        feed_name: Named feed preset - available options: 'home', 'discover', 'ai-for-grownups', 'atmosphere', 'void-cafe'
        max_posts: Maximum number of posts to retrieve (max 100)

    Returns:
        YAML-formatted feed data with posts and metadata

    Raises:
        Exception: On unknown feed name, missing credentials, or API failure.
    """
    import os
    import yaml
    import requests

    try:
        # Predefined feed mappings (must be inside function for sandboxing)
        feed_presets = {
            "home": None,  # Home timeline (default)
            "discover": "at://did:plc:z72i7hdynmk6r22z27h6tvur/app.bsky.feed.generator/whats-hot",
            "ai-for-grownups": "at://did:plc:gfrmhdmjvxn2sjedzboeudef/app.bsky.feed.generator/ai-for-grownups",
            "atmosphere": "at://did:plc:gfrmhdmjvxn2sjedzboeudef/app.bsky.feed.generator/the-atmosphere",
            "void-cafe": "at://did:plc:gfrmhdmjvxn2sjedzboeudef/app.bsky.feed.generator/void-cafe"
        }

        # Validate inputs (API maximum page size is 100)
        max_posts = min(max_posts, 100)

        # Resolve feed URI from name
        if feed_name:
            # Handle case where agent passes 'FeedName.discover' instead of 'discover'
            if '.' in feed_name and feed_name.startswith('FeedName.'):
                feed_name = feed_name.split('.', 1)[1]

            # Look up named preset
            if feed_name not in feed_presets:
                available_feeds = list(feed_presets.keys())
                raise Exception(f"Invalid feed name '{feed_name}'. Available feeds: {available_feeds}")
            resolved_feed_uri = feed_presets[feed_name]
            feed_display_name = feed_name
        else:
            # Default to home timeline
            resolved_feed_uri = None
            feed_display_name = "home"

        # Get credentials from environment
        username = os.getenv("BSKY_USERNAME")
        password = os.getenv("BSKY_PASSWORD")
        pds_host = os.getenv("PDS_URI", "https://bsky.social")

        if not username or not password:
            raise Exception("BSKY_USERNAME and BSKY_PASSWORD environment variables must be set")

        # Create session (app-password login yields a short-lived JWT)
        session_url = f"{pds_host}/xrpc/com.atproto.server.createSession"
        session_data = {
            "identifier": username,
            "password": password
        }

        try:
            session_response = requests.post(session_url, json=session_data, timeout=10)
            session_response.raise_for_status()
            session = session_response.json()
            access_token = session.get("accessJwt")

            if not access_token:
                raise Exception("Failed to get access token from session")
        except Exception as e:
            raise Exception(f"Authentication failed. ({str(e)})")

        # Get feed
        headers = {"Authorization": f"Bearer {access_token}"}

        if resolved_feed_uri:
            # Custom feed (feed generator addressed by AT-URI)
            feed_url = f"{pds_host}/xrpc/app.bsky.feed.getFeed"
            params = {
                "feed": resolved_feed_uri,
                "limit": max_posts
            }
            feed_type = "custom"
        else:
            # Home timeline
            feed_url = f"{pds_host}/xrpc/app.bsky.feed.getTimeline"
            params = {
                "limit": max_posts
            }
            feed_type = "home"

        try:
            response = requests.get(feed_url, headers=headers, params=params, timeout=10)
            response.raise_for_status()
            feed_data = response.json()
        except Exception as e:
            raise Exception(f"Failed to get feed. ({str(e)})")

        # Format posts into a compact, agent-friendly structure
        posts = []
        for item in feed_data.get("feed", []):
            post = item.get("post", {})
            author = post.get("author", {})
            record = post.get("record", {})

            post_data = {
                "author": {
                    "handle": author.get("handle", ""),
                    "display_name": author.get("displayName", ""),
                },
                "text": record.get("text", ""),
                "created_at": record.get("createdAt", ""),
                "uri": post.get("uri", ""),
                "cid": post.get("cid", ""),
                "like_count": post.get("likeCount", 0),
                "repost_count": post.get("repostCount", 0),
                "reply_count": post.get("replyCount", 0),
            }

            # Add repost info if present
            if "reason" in item and item["reason"]:
                reason = item["reason"]
                if reason.get("$type") == "app.bsky.feed.defs#reasonRepost":
                    by = reason.get("by", {})
                    post_data["reposted_by"] = {
                        "handle": by.get("handle", ""),
                        "display_name": by.get("displayName", ""),
                    }

            # Add reply info if present
            if "reply" in record and record["reply"]:
                parent = record["reply"].get("parent", {})
                post_data["reply_to"] = {
                    "uri": parent.get("uri", ""),
                    "cid": parent.get("cid", ""),
                }

            posts.append(post_data)

        # Format response
        feed_result = {
            "feed": {
                "type": feed_type,
                "name": feed_display_name,
                "post_count": len(posts),
                "posts": posts
            }
        }

        if resolved_feed_uri:
            feed_result["feed"]["uri"] = resolved_feed_uri

        return yaml.dump(feed_result, default_flow_style=False, sort_keys=False)

    except Exception as e:
        raise Exception(f"Error retrieving feed: {str(e)}")
class ConfigLoader:
    """Loads settings from a YAML file, with dot-notation access and
    environment-variable overrides."""

    def __init__(self, config_path: str = "configs/config.yaml"):
        """
        Initialize the configuration loader.

        Args:
            config_path: Path to the YAML configuration file
        """
        self.config_path = Path(config_path)
        self._config = None
        self._load_config()

    def _load_config(self) -> None:
        """Read and parse the YAML configuration file into self._config."""
        if not self.config_path.exists():
            raise FileNotFoundError(
                f"Configuration file not found: {self.config_path}\n\n"
                f"To get started:\n"
                f" 1. Run: python setup.py (imports example agent and creates config)\n"
                f" OR\n"
                f" 2. Copy config.example.yaml to config.yaml and configure it manually"
            )

        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                self._config = yaml.safe_load(f) or {}
        except yaml.YAMLError as e:
            raise ValueError(f"Invalid YAML in configuration file: {e}")
        except Exception as e:
            raise ValueError(f"Error loading configuration file: {e}")

    def get(self, key: str, default: Any = None) -> Any:
        """
        Look up a configuration value by dot-notation key.

        Args:
            key: Configuration key in dot notation (e.g., 'letta.api_key')
            default: Default value if key not found

        Returns:
            Configuration value or default
        """
        node = self._config
        for part in key.split('.'):
            if not (isinstance(node, dict) and part in node):
                return default
            node = node[part]
        return node

    def get_with_env(self, key: str, env_var: str, default: Any = None) -> Any:
        """
        Look up a value, preferring an environment variable over the file.

        Args:
            key: Configuration key in dot notation
            env_var: Environment variable name
            default: Default value if neither found

        Returns:
            Value from environment variable, config file, or default
        """
        env_value = os.getenv(env_var)
        if env_value is not None:
            return env_value
        file_value = self.get(key)
        return file_value if file_value is not None else default

    def get_required(self, key: str, env_var: Optional[str] = None) -> Any:
        """
        Look up a value that must exist.

        Args:
            key: Configuration key in dot notation
            env_var: Optional environment variable name to check first

        Returns:
            Configuration value

        Raises:
            ValueError: If required value is not found
        """
        value = self.get_with_env(key, env_var) if env_var else self.get(key)

        if value is None:
            source = f"config key '{key}'"
            if env_var:
                source += f" or environment variable '{env_var}'"
            raise ValueError(f"Required configuration value not found: {source}")

        return value

    def get_section(self, section: str) -> Dict[str, Any]:
        """
        Return an entire configuration section as a dict ({} when absent).

        Args:
            section: Section name

        Returns:
            Dictionary containing the section
        """
        return self.get(section, {})

    def setup_logging(self) -> None:
        """Configure the logging module from the 'logging' config section."""
        logging_config = self.get_section('logging')

        # Root level first, then any per-logger overrides.
        root_level = logging_config.get('level', 'INFO')
        logging.basicConfig(
            level=getattr(logging, root_level),
            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )

        for logger_name, logger_level in logging_config.get('loggers', {}).items():
            logging.getLogger(logger_name).setLevel(getattr(logging, logger_level))
_config_instance = None

def get_config(config_path: str = "configs/config.yaml") -> ConfigLoader:
    """
    Get the global configuration instance.

    Args:
        config_path: Path to configuration file (only used on first call)

    Returns:
        ConfigLoader instance
    """
    global _config_instance
    if _config_instance is None:
        _config_instance = ConfigLoader(config_path)
    return _config_instance

def reload_config() -> None:
    """Re-read the configuration from disk, if an instance already exists."""
    global _config_instance
    if _config_instance is not None:
        _config_instance._load_config()

def get_letta_config() -> Dict[str, Any]:
    """Get Letta configuration."""
    cfg = get_config()
    return {
        'api_key': cfg.get_required('letta.api_key'),
        'timeout': cfg.get('letta.timeout', 600),
        'agent_id': cfg.get_required('letta.agent_id'),
        'base_url': cfg.get('letta.base_url'),  # None uses default cloud API
    }

def get_bluesky_config() -> Dict[str, Any]:
    """Get Bluesky configuration, prioritizing config.yaml over environment variables."""
    cfg = get_config()
    return {
        'username': cfg.get_required('bluesky.username'),
        'password': cfg.get_required('bluesky.password'),
        'pds_uri': cfg.get('bluesky.pds_uri', 'https://bsky.social'),
    }

def get_queue_config() -> Dict[str, Any]:
    """Get queue configuration with bot_name-based namespacing."""
    cfg = get_config()

    # The bot name namespaces queue directories; 'agent' is the plain default.
    bot_name = cfg.get('bot.name', 'agent')
    base_dir = 'queue' if bot_name == 'agent' else f'queue_{bot_name}'

    return {
        'bot_name': bot_name,
        'base_dir': base_dir,
        'error_dir': f'{base_dir}/errors',
        'no_reply_dir': f'{base_dir}/no_reply',
        'processed_file': f'{base_dir}/processed_notifications.json',
        'db_path': f'{base_dir}/notifications.db',
    }
class PostArgs(BaseModel):
    """Validated arguments for the create_new_bluesky_post tool."""
    text: List[str] = Field(
        ...,
        description="List of texts to create posts (each max 300 characters). Single item creates one post, multiple items create a thread."
    )
    lang: Optional[str] = Field(
        default="en-US",
        description="Language code for the posts (e.g., 'en-US', 'es', 'ja', 'th'). Defaults to 'en-US'"
    )

    # NOTE: pydantic v1-style validator; only rejects an empty list here.
    # The per-post 300-character limit is enforced inside the tool itself.
    @validator('text')
    def validate_text_list(cls, v):
        if not v or len(v) == 0:
            raise ValueError("Text list cannot be empty")
        return v
def create_new_bluesky_post(text: List[str], lang: str = "en-US") -> str:
    """
    Create a NEW standalone post on Bluesky. This tool creates independent posts that
    start new conversations.

    IMPORTANT: This tool is ONLY for creating new posts. To reply to an existing post,
    use reply_to_bluesky_post instead.

    Args:
        text: List of post contents (each max 300 characters). Single item creates one post, multiple items create a thread.
        lang: Language code for the posts (e.g., 'en-US', 'es', 'ja', 'th'). Defaults to 'en-US'

    Returns:
        Success message with post URL(s)

    Raises:
        Exception: If the post fails or list is empty
    """
    # Imports live inside the function so the tool stays self-contained for
    # sandboxed/cloud execution.
    import os
    import re
    import requests
    from datetime import datetime, timezone

    try:
        # Validate input
        if not text or len(text) == 0:
            raise Exception("Text list cannot be empty")

        # Validate character limits for all posts up front so a thread is
        # never partially created because of one oversized entry.
        for i, post_text in enumerate(text):
            if len(post_text) > 300:
                raise Exception(f"Post {i+1} exceeds 300 character limit (current: {len(post_text)} characters)")

        # Get credentials from environment
        username = os.getenv("BSKY_USERNAME")
        password = os.getenv("BSKY_PASSWORD")
        pds_host = os.getenv("PDS_URI", "https://bsky.social")

        if not username or not password:
            raise Exception("BSKY_USERNAME and BSKY_PASSWORD environment variables must be set")

        # Create session
        session_url = f"{pds_host}/xrpc/com.atproto.server.createSession"
        session_data = {
            "identifier": username,
            "password": password
        }

        session_response = requests.post(session_url, json=session_data, timeout=10)
        session_response.raise_for_status()
        session = session_response.json()
        access_token = session.get("accessJwt")
        user_did = session.get("did")

        if not access_token or not user_did:
            raise Exception("Failed to get access token or DID from session")

        # Create posts (single or thread)
        headers = {"Authorization": f"Bearer {access_token}"}
        create_record_url = f"{pds_host}/xrpc/com.atproto.repo.createRecord"

        post_urls = []
        previous_post = None  # uri/cid of the post from the previous iteration
        root_post = None      # uri/cid of the first post; threads reply to it as root

        for i, post_text in enumerate(text):
            now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")

            post_record = {
                "$type": "app.bsky.feed.post",
                "text": post_text,
                "createdAt": now,
                "langs": [lang]
            }

            # If this is part of a thread (not the first post), add reply references
            if previous_post:
                post_record["reply"] = {
                    "root": root_post,
                    "parent": previous_post
                }

            # Facets are byte-indexed rich-text annotations (mentions/links/tags).
            facets = []
            text_bytes = post_text.encode("UTF-8")

            # Parse mentions - optional leading group handles @ at start of text
            mention_regex = rb"(?:^|[$|\W])(@([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)"

            for m in re.finditer(mention_regex, text_bytes):
                handle = m.group(1)[1:].decode("UTF-8")  # Remove @ prefix
                # group(1) excludes the optional prefix char, so its span is exact
                mention_start = m.start(1)
                mention_end = m.end(1)
                try:
                    resolve_resp = requests.get(
                        f"{pds_host}/xrpc/com.atproto.identity.resolveHandle",
                        params={"handle": handle},
                        timeout=5
                    )
                    if resolve_resp.status_code == 200:
                        did = resolve_resp.json()["did"]
                        facets.append({
                            "index": {
                                "byteStart": mention_start,
                                "byteEnd": mention_end,
                            },
                            "features": [{"$type": "app.bsky.richtext.facet#mention", "did": did}],
                        })
                except Exception:
                    # Best-effort: an unresolvable handle simply gets no facet.
                    # (Was a bare `except:`, which also caught KeyboardInterrupt.)
                    continue

            # Parse URLs - optional leading group handles URLs at start of text
            url_regex = rb"(?:^|[$|\W])(https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*[-a-zA-Z0-9@%_\+~#//=])?)"

            for m in re.finditer(url_regex, text_bytes):
                url = m.group(1).decode("UTF-8")
                url_start = m.start(1)
                url_end = m.end(1)
                facets.append({
                    "index": {
                        "byteStart": url_start,
                        "byteEnd": url_end,
                    },
                    "features": [{"$type": "app.bsky.richtext.facet#link", "uri": url}],
                })

            # Parse hashtags
            hashtag_regex = rb"(?:^|[$|\s])#([a-zA-Z0-9_]+)"

            for m in re.finditer(hashtag_regex, text_bytes):
                tag = m.group(1).decode("UTF-8")  # Tag without the # prefix
                # The '#' sits exactly one byte before group(1). Deriving the
                # span from group(1) fixes the old bug where a tag preceded by
                # '|', tab or newline kept that prefix byte inside the facet.
                tag_start = m.start(1) - 1
                tag_end = m.end(1)
                facets.append({
                    "index": {
                        "byteStart": tag_start,
                        "byteEnd": tag_end,
                    },
                    "features": [{"$type": "app.bsky.richtext.facet#tag", "tag": tag}],
                })

            if facets:
                post_record["facets"] = facets

            # Create the post
            create_data = {
                "repo": user_did,
                "collection": "app.bsky.feed.post",
                "record": post_record
            }

            post_response = requests.post(create_record_url, headers=headers, json=create_data, timeout=10)
            post_response.raise_for_status()
            result = post_response.json()

            post_uri = result.get("uri")
            post_cid = result.get("cid")
            handle = session.get("handle", username)
            rkey = post_uri.split("/")[-1] if post_uri else ""
            post_url = f"https://bsky.app/profile/{handle}/post/{rkey}"
            post_urls.append(post_url)

            # Set up references for thread continuation
            previous_post = {"uri": post_uri, "cid": post_cid}
            if i == 0:
                root_post = previous_post

        # Return appropriate message based on single post or thread
        if len(text) == 1:
            return f"Successfully posted to Bluesky!\nPost URL: {post_urls[0]}\nText: {text[0]}\nLanguage: {lang}"
        else:
            urls_text = "\n".join([f"Post {i+1}: {url}" for i, url in enumerate(post_urls)])
            return f"Successfully created thread with {len(text)} posts!\n{urls_text}\nLanguage: {lang}"

    except Exception as e:
        raise Exception(f"Error posting to Bluesky: {str(e)}")
#!/usr/bin/env python3
"""Recovery tools for missed notifications."""

import argparse
import logging
from datetime import datetime, timedelta, timezone
from pathlib import Path
import json
import bsky_utils
from notification_db import NotificationDB
from bsky import save_notification_to_queue, notification_to_dict

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def _cutoff_iso(hours):
    """Return a naive-UTC ISO-8601 timestamp for `hours` ago.

    Bluesky's `indexed_at` values are UTC, so the cutoff must be computed in
    UTC too; the previous code used local `datetime.now()`, which skewed the
    recovery window by the local UTC offset. The naive form (no offset
    suffix) compares lexicographically against the 'Z'-suffixed timestamps.
    """
    cutoff = datetime.now(timezone.utc).replace(tzinfo=None) - timedelta(hours=hours)
    return cutoff.isoformat()


def recover_notifications(hours=24, dry_run=True):
    """
    Recover notifications from the past N hours.

    Args:
        hours: Number of hours back to check for notifications
        dry_run: If True, only show what would be recovered without saving

    Returns:
        Number of notifications recovered (or that would be, in dry-run mode)
    """
    logger.info(f"Recovering notifications from the past {hours} hours")
    logger.info(f"Dry run: {dry_run}")

    # Initialize Bluesky client
    client = bsky_utils.default_login()
    logger.info("Connected to Bluesky")

    # Initialize database
    db = NotificationDB()
    logger.info("Database initialized")

    cutoff_iso = _cutoff_iso(hours)
    logger.info(f"Looking for notifications since: {cutoff_iso}")

    # Fetch notifications newest-first, paging backwards until past the cutoff
    all_notifications = []
    cursor = None
    page_count = 0
    max_pages = 50  # More pages for recovery
    reached_cutoff = False

    while page_count < max_pages and not reached_cutoff:
        try:
            # Fetch one page (cursor param is omitted on the first page)
            params = {'limit': 100}
            if cursor:
                params['cursor'] = cursor
            response = client.app.bsky.notification.list_notifications(params=params)

            page_count += 1
            page_notifications = response.notifications

            if not page_notifications:
                break

            # Keep notifications newer than the cutoff. The feed is
            # newest-first, so the first older one means we can stop entirely.
            for notif in page_notifications:
                indexed_at = getattr(notif, 'indexed_at', None)
                if indexed_at is None:
                    continue
                if indexed_at >= cutoff_iso:
                    all_notifications.append(notif)
                else:
                    logger.info(f"Reached notifications older than {hours} hours, stopping")
                    reached_cutoff = True
                    break

            if reached_cutoff:
                break

            # BUGFIX: advance the cursor from the response BEFORE testing it.
            # The original tested `cursor` (still None on the first iteration)
            # before reading response.cursor, so pagination always stopped
            # after a single page and older notifications were never fetched.
            cursor = getattr(response, 'cursor', None)
            if not cursor:
                break

        except Exception as e:
            logger.error(f"Error fetching notifications page {page_count}: {e}")
            break

    logger.info(f"Found {len(all_notifications)} notifications in the time range")

    # Process notifications
    recovered = 0
    skipped_likes = 0
    already_processed = 0

    for notif in all_notifications:
        # Skip likes - they never need a reply
        if getattr(notif, 'reason', None) == 'like':
            skipped_likes += 1
            continue

        # Check if already processed
        notif_dict = notification_to_dict(notif)
        uri = notif_dict.get('uri', '')

        if db.is_processed(uri):
            already_processed += 1
            logger.debug(f"Already processed: {uri}")
            continue

        # Log what we would recover
        author = notif_dict.get('author', {})
        author_handle = author.get('handle', 'unknown')
        reason = notif_dict.get('reason', 'unknown')
        indexed_at = notif_dict.get('indexed_at', '')

        logger.info(f"Would recover: {reason} from @{author_handle} at {indexed_at}")

        if not dry_run:
            # Save to queue for the main bot loop to pick up
            if save_notification_to_queue(notif_dict):
                recovered += 1
                logger.info(f"Recovered notification from @{author_handle}")
            else:
                logger.warning(f"Failed to queue notification from @{author_handle}")
        else:
            recovered += 1

    # Summary
    logger.info(f"Recovery summary:")
    logger.info(f"  • Total found: {len(all_notifications)}")
    logger.info(f"  • Skipped (likes): {skipped_likes}")
    logger.info(f"  • Already processed: {already_processed}")
    logger.info(f"  • {'Would recover' if dry_run else 'Recovered'}: {recovered}")

    if dry_run and recovered > 0:
        logger.info("Run with --execute to actually recover these notifications")

    return recovered


def check_database_health():
    """Check the health of the notification database.

    Logs summary statistics and warns when pending or error counts look
    abnormally high. Returns the stats dict from NotificationDB.get_stats().
    """
    db = NotificationDB()
    stats = db.get_stats()

    logger.info("Database Statistics:")
    logger.info(f"  • Total notifications: {stats.get('total', 0)}")
    logger.info(f"  • Pending: {stats.get('status_pending', 0)}")
    logger.info(f"  • Processed: {stats.get('status_processed', 0)}")
    logger.info(f"  • Ignored: {stats.get('status_ignored', 0)}")
    logger.info(f"  • No reply: {stats.get('status_no_reply', 0)}")
    logger.info(f"  • Errors: {stats.get('status_error', 0)}")
    logger.info(f"  • Recent (24h): {stats.get('recent_24h', 0)}")

    # Check for issues (thresholds are heuristic alert levels)
    if stats.get('status_pending', 0) > 100:
        logger.warning(f"⚠️ High number of pending notifications: {stats.get('status_pending', 0)}")

    if stats.get('status_error', 0) > 50:
        logger.warning(f"⚠️ High number of error notifications: {stats.get('status_error', 0)}")

    return stats


def reset_notification_status(hours=1, dry_run=True):
    """
    Reset notifications from error/no_reply status back to pending.

    Args:
        hours: Reset notifications from the last N hours
        dry_run: If True, only show what would be reset

    Returns:
        Number of notifications reset (or that would be, in dry-run mode)
    """
    db = NotificationDB()
    # Cutoff is computed in UTC to match the UTC indexed_at values Bluesky
    # supplies and the DB stores.
    cutoff_iso = _cutoff_iso(hours)

    # Get notifications to reset
    cursor = db.conn.execute("""
        SELECT uri, status, indexed_at, author_handle
        FROM notifications
        WHERE status IN ('error', 'no_reply')
        AND indexed_at > ?
        ORDER BY indexed_at DESC
    """, (cutoff_iso,))

    notifications_to_reset = cursor.fetchall()

    if not notifications_to_reset:
        logger.info(f"No notifications to reset from the last {hours} hours")
        return 0

    logger.info(f"Found {len(notifications_to_reset)} notifications to reset")

    # FIX: the original always logged "Would reset", even in execute mode.
    action = "Would reset" if dry_run else "Resetting"
    for notif in notifications_to_reset:
        # Mapping-style access works because NotificationDB sets sqlite3.Row
        logger.info(f"{action}: {notif['status']} -> pending for @{notif['author_handle']} at {notif['indexed_at']}")

    if not dry_run:
        reset_count = db.conn.execute("""
            UPDATE notifications
            SET status = 'pending', processed_at = NULL, error = NULL
            WHERE status IN ('error', 'no_reply')
            AND indexed_at > ?
        """, (cutoff_iso,)).rowcount

        db.conn.commit()
        logger.info(f"Reset {reset_count} notifications to pending status")
        return reset_count
    else:
        logger.info("Run with --execute to actually reset these notifications")
        return len(notifications_to_reset)


def main():
    """CLI entry point: recover / health / reset subcommands."""
    parser = argparse.ArgumentParser(description="Notification recovery and management tools")

    subparsers = parser.add_subparsers(dest='command', help='Command to run')

    # Recover command
    recover_parser = subparsers.add_parser('recover', help='Recover missed notifications')
    recover_parser.add_argument('--hours', type=int, default=24,
                                help='Number of hours back to check (default: 24)')
    recover_parser.add_argument('--execute', action='store_true',
                                help='Actually recover notifications (default is dry run)')

    # Health check command
    health_parser = subparsers.add_parser('health', help='Check database health')

    # Reset command
    reset_parser = subparsers.add_parser('reset', help='Reset error notifications to pending')
    reset_parser.add_argument('--hours', type=int, default=1,
                              help='Reset notifications from last N hours (default: 1)')
    reset_parser.add_argument('--execute', action='store_true',
                              help='Actually reset notifications (default is dry run)')

    args = parser.parse_args()

    # Subcommands are dry-run by default; --execute flips dry_run off.
    if args.command == 'recover':
        recover_notifications(hours=args.hours, dry_run=not args.execute)
    elif args.command == 'health':
        check_database_health()
    elif args.command == 'reset':
        reset_notification_status(hours=args.hours, dry_run=not args.execute)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""Register all Void tools with a Letta agent.

Upserts each standalone tool function into the Letta server, attaches it to
the configured agent, and (optionally) pushes Bluesky credentials into the
agent's tool-execution environment as secrets.
"""
import os
import sys
import logging
from typing import List
from letta_client import Letta
from rich.console import Console
from rich.table import Table
from config_loader import get_letta_config, get_bluesky_config, get_config

# Import standalone functions and their schemas
from tools.search import search_bluesky_posts, SearchArgs
from tools.post import create_new_bluesky_post, PostArgs
from tools.feed import get_bluesky_feed, FeedArgs
from tools.halt import halt_activity, HaltArgs
from tools.thread import add_post_to_bluesky_reply_thread, ReplyThreadPostArgs
from tools.ignore import ignore_notification, IgnoreNotificationArgs
from tools.whitewind import create_whitewind_blog_post, WhitewindPostArgs
from tools.ack import annotate_ack, AnnotateAckArgs
from tools.webpage import fetch_webpage, WebpageArgs
from tools.flag_memory_deletion import flag_archival_memory_for_deletion, FlagArchivalMemoryForDeletionArgs

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
console = Console()


# Tool configurations: function paired with its args_schema and metadata.
# Each entry is consumed by register_tools(): "func" is upserted to Letta,
# "args_schema" is its Pydantic argument model, and "description"/"tags"
# are registration metadata shown in the results table.
TOOL_CONFIGS = [
    {
        "func": search_bluesky_posts,
        "args_schema": SearchArgs,
        "description": "Search for posts on Bluesky matching the given criteria",
        "tags": ["bluesky", "search", "posts"]
    },
    {
        "func": create_new_bluesky_post,
        "args_schema": PostArgs,
        "description": "Create a new Bluesky post or thread",
        "tags": ["bluesky", "post", "create", "thread"]
    },
    {
        "func": get_bluesky_feed,
        "args_schema": FeedArgs,
        "description": "Retrieve a Bluesky feed (home timeline or custom feed)",
        "tags": ["bluesky", "feed", "timeline"]
    },
    {
        "func": halt_activity,
        "args_schema": HaltArgs,
        "description": "Signal to halt all bot activity and terminate bsky.py",
        "tags": ["control", "halt", "terminate"]
    },
    {
        "func": add_post_to_bluesky_reply_thread,
        "args_schema": ReplyThreadPostArgs,
        "description": "Add a single post to the current Bluesky reply thread atomically",
        "tags": ["bluesky", "reply", "thread", "atomic"]
    },
    {
        "func": ignore_notification,
        "args_schema": IgnoreNotificationArgs,
        "description": "Explicitly ignore a notification without replying (useful for ignoring bot interactions)",
        "tags": ["notification", "ignore", "control", "bot"]
    },
    {
        "func": create_whitewind_blog_post,
        "args_schema": WhitewindPostArgs,
        "description": "Create a blog post on Whitewind with markdown support",
        "tags": ["whitewind", "blog", "post", "markdown"]
    },
    {
        "func": annotate_ack,
        "args_schema": AnnotateAckArgs,
        "description": "Add a note to the acknowledgment record for the current post interaction",
        "tags": ["acknowledgment", "note", "annotation", "metadata"]
    },
    {
        "func": fetch_webpage,
        "args_schema": WebpageArgs,
        "description": "Fetch a webpage and convert it to markdown/text format using Jina AI reader",
        "tags": ["web", "fetch", "webpage", "markdown", "jina"]
    },
    {
        "func": flag_archival_memory_for_deletion,
        "args_schema": FlagArchivalMemoryForDeletionArgs,
        "description": "Flag an archival memory for deletion based on its exact text content",
        "tags": ["memory", "archival", "delete", "cleanup"]
    },
]


def register_tools(agent_id: str = None, tools: List[str] = None, set_env: bool = True):
    """Register tools with a Letta agent.

    Upserts each selected tool from TOOL_CONFIGS on the Letta server and
    attaches it to the agent, printing a per-tool status table. Errors on
    individual tools are reported in the table and do not abort the run.

    Args:
        agent_id: ID of the agent to attach tools to. If None, uses config default.
        tools: List of tool names to register. If None, registers all tools.
        set_env: If True, set environment variables for tool execution. Defaults to True.
    """
    # Load config fresh (uses global config instance from get_config())
    letta_config = get_letta_config()

    # Use agent ID from config if not provided
    if agent_id is None:
        agent_id = letta_config['agent_id']

    try:
        # Initialize Letta client with API key and base_url from config
        # (base_url is only passed when set, e.g. for self-hosted servers)
        client_params = {
            'api_key': letta_config['api_key'],
            'timeout': letta_config['timeout']
        }
        if letta_config.get('base_url'):
            client_params['base_url'] = letta_config['base_url']
        client = Letta(**client_params)

        # Get the agent by ID; bail out early if it does not exist
        try:
            agent = client.agents.retrieve(agent_id=agent_id)
        except Exception as e:
            console.print(f"[red]Error: Agent '{agent_id}' not found[/red]")
            console.print(f"Error details: {e}")
            return

        # Set environment variables for tool execution if requested.
        # These become agent secrets so tools running in Letta's sandbox can
        # read Bluesky credentials via os.environ.
        if set_env:
            try:
                bsky_config = get_bluesky_config()
                env_vars = {
                    'BSKY_USERNAME': bsky_config['username'],
                    'BSKY_PASSWORD': bsky_config['password'],
                    'PDS_URI': bsky_config['pds_uri']
                }

                console.print(f"\n[bold cyan]Setting tool execution environment variables:[/bold cyan]")
                console.print(f" BSKY_USERNAME: {env_vars['BSKY_USERNAME']}")
                console.print(f" PDS_URI: {env_vars['PDS_URI']}")
                # Password is masked in output, never printed in clear text
                console.print(f" BSKY_PASSWORD: {'*' * len(env_vars['BSKY_PASSWORD'])}\n")

                # Update agent with environment variables (secrets in SDK v1.0)
                client.agents.update(
                    agent_id=agent_id,
                    secrets=env_vars
                )

                console.print("[green]✓ Environment variables set successfully[/green]\n")
            except Exception as e:
                # Non-fatal: registration continues even if secrets fail
                console.print(f"[yellow]Warning: Failed to set environment variables: {e}[/yellow]\n")
                logger.warning(f"Failed to set environment variables: {e}")

        # Filter tools if specific ones requested; warn about unknown names
        tools_to_register = TOOL_CONFIGS
        if tools:
            tools_to_register = [t for t in TOOL_CONFIGS if t["func"].__name__ in tools]
            if len(tools_to_register) != len(tools):
                missing = set(tools) - {t["func"].__name__ for t in tools_to_register}
                console.print(f"[yellow]Warning: Unknown tools: {missing}[/yellow]")

        # Create results table
        table = Table(title=f"Tool Registration for Agent '{agent.name}' ({agent_id})")
        table.add_column("Tool", style="cyan")
        table.add_column("Status", style="green")
        table.add_column("Description")

        # Register each tool
        for tool_config in tools_to_register:
            func = tool_config["func"]
            tool_name = func.__name__

            try:
                # Create or update the tool using the standalone function
                created_tool = client.tools.upsert_from_function(
                    func=func,
                    args_schema=tool_config["args_schema"],
                    tags=tool_config["tags"]
                )

                # Get current agent tools (SDK v1.0 returns page object)
                current_tools_page = client.agents.tools.list(agent_id=str(agent.id))
                current_tools = current_tools_page.items if hasattr(current_tools_page, 'items') else current_tools_page
                tool_names = [t.name for t in current_tools]

                # Check if already attached (attach is skipped to stay idempotent)
                if created_tool.name in tool_names:
                    table.add_row(tool_name, "Already Attached", tool_config["description"])
                else:
                    # Attach to agent
                    client.agents.tools.attach(
                        agent_id=str(agent.id),
                        tool_id=str(created_tool.id)
                    )
                    table.add_row(tool_name, "✓ Attached", tool_config["description"])

            except Exception as e:
                # Per-tool failures are recorded in the table, not raised
                table.add_row(tool_name, f"✗ Error: {str(e)}", tool_config["description"])
                logger.error(f"Error registering tool {tool_name}: {e}")

        console.print(table)

    except Exception as e:
        console.print(f"[red]Error: {str(e)}[/red]")
        logger.error(f"Fatal error: {e}")


def list_available_tools():
    """List all available tools.

    Prints a table of every entry in TOOL_CONFIGS (name, description, tags)
    without contacting the Letta server.
    """
    table = Table(title="Available Void Tools")
    table.add_column("Tool Name", style="cyan")
    table.add_column("Description")
    table.add_column("Tags", style="dim")

    for tool_config in TOOL_CONFIGS:
        table.add_row(
            tool_config["func"].__name__,
            tool_config["description"],
            ", ".join(tool_config["tags"])
        )

    console.print(table)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Register Void tools with a Letta agent")
    parser.add_argument("--config", type=str, default='configs/config.yaml', help="Path to config file (default: configs/config.yaml)")
    parser.add_argument("--agent-id", help=f"Agent ID (default: from config)")
    parser.add_argument("--tools", nargs="+", help="Specific tools to register (default: all)")
    parser.add_argument("--list", action="store_true", help="List available tools")
    parser.add_argument("--no-env", action="store_true", help="Skip setting environment variables")

    args = parser.parse_args()

    # Initialize config with custom path (sets global config instance)
    get_config(args.config)

    if args.list:
        list_available_tools()
    else:
        # Load config and get agent ID (CLI flag overrides config value)
        letta_config = get_letta_config()
        agent_id = args.agent_id if args.agent_id else letta_config['agent_id']
        console.print(f"\n[bold]Registering tools for agent: {agent_id}[/bold]\n")
        register_tools(agent_id, args.tools, set_env=not args.no_env)
deploying stateful AI agents to social networks using [Google Gemini 3](https://ai.google.dev), [Letta](https://letta.com) (stateful AI agents), and [AT Protocol](https://atproto.com) (the social protocol powering Bluesky). 4 | 5 | This example demonstrates how to build a social agent powered by Gemini 3 that maintains persistent memory and interacts autonomously on Bluesky. 6 | 7 | ## What are Social Agents? 8 | 9 | Social agents are stateful AI systems connected to social networks. Unlike traditional chatbots, they: 10 | 11 | - **Maintain persistent memory** that evolves through interactions 12 | - **Develop stable personas** while accumulating knowledge about their environment 13 | - **Build lasting relationships** with individual users 14 | - **Form defined senses of self** through continuous operation 15 | 16 | ## Quick Start 17 | 18 | ```bash 19 | # Clone and install 20 | git clone https://github.com/letta-ai/example-social-agent 21 | cd example-social-agent 22 | uv venv && source .venv/bin/activate 23 | uv pip install -r requirements.txt 24 | 25 | # Setup (imports example agent and creates config) 26 | python setup.py 27 | 28 | # Register tools 29 | python register_tools.py 30 | 31 | # Run (uses Gemini 3 by default) 32 | python bsky.py 33 | ``` 34 | 35 | ## Features 36 | 37 | - **Memory-Augmented Architecture**: Multi-tiered memory system (Core, Recall, Archival) powered by Letta 38 | - **Queue-Based Processing**: Reliable notification handling with SQLite tracking and automatic retry 39 | - **Dynamic Memory Blocks**: Per-user memory blocks for personalized interactions 40 | - **Tool System**: Extensible Pydantic-based tools for social platform interactions 41 | - **Autofollow**: Optional automatic following of users who follow your agent 42 | 43 | ## Getting Started 44 | 45 | ### Prerequisites 46 | 47 | 1. 
**Letta Setup** 48 | - Sign up for [Letta Cloud](https://app.letta.com) or [host your own](https://docs.letta.com/guides/selfhosting) Letta instance 49 | - Create a new project and generate [an API key](https://app.letta.com/api-keys) 50 | - Note your Project ID, which is visible in your project settings at [app.letta.com](https://app.letta.com) 51 | 52 | 2. **Bluesky Setup** 53 | - Create a [Bluesky account](https://bsky.app/) 54 | - Generate an [app password](https://bsky.app/settings/app-passwords) in your settings 55 | - Note your handle (e.g., `yourname.bsky.social`) 56 | 57 | 3. **Python 3.8+** 58 | 59 | ### Installation 60 | 61 | ```bash 62 | # Clone the repository 63 | git clone https://github.com/letta-ai/example-social-agent 64 | cd example-social-agent 65 | 66 | # Install dependencies 67 | uv venv && source .venv/bin/activate 68 | uv pip install -r requirements.txt 69 | ``` 70 | 71 | ### Configuration 72 | 73 | #### Option 1: Automated Setup (Recommended) 74 | 75 | Run the setup script to import the example agent and create your configuration: 76 | 77 | ```bash 78 | source .venv/bin/activate 79 | python setup.py 80 | ``` 81 | 82 | The setup script will: 83 | 1. Prompt for your Letta API key (or use `LETTA_API_KEY` from environment) 84 | 2. Import the example agent from `agents/example-social-agent.af` 85 | 3. Create `config.yaml` with your agent ID 86 | 4. Prompt for your Bluesky credentials 87 | 88 | #### Option 2: Manual Setup 89 | 90 | ```bash 91 | # Copy example config 92 | cp config.example.yaml config.yaml 93 | ``` 94 | 95 | Edit `config.yaml` with your credentials: 96 | 97 | ```yaml 98 | bluesky: 99 | username: "yourname.bsky.social" 100 | password: "your-app-password" 101 | pds_uri: "https://bsky.social" 102 | autofollow: false 103 | 104 | letta: 105 | api_key: "your-letta-api-key" 106 | agent_id: "agent-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" 107 | timeout: 600 108 | ``` 109 | 110 | **Note:** The model (e.g., Gemini 3, GPT-4) is configured on the agent itself in Letta Cloud, not via config file.
To change models, edit your agent's settings at [app.letta.com](https://app.letta.com). 111 | 112 | You can quickly access your agent using: 113 | 114 | ``` 115 | https://app.letta.com/agents/ 116 | ``` 117 | 118 | ### Using Google Gemini 3 119 | 120 | This example uses Google's Gemini 3 as the default model (`google_ai/gemini-3-pro-preview`). 121 | 122 | #### Option 1: Letta Cloud (Recommended) 123 | With [Letta Cloud](https://app.letta.com), your Letta API key provides access to all models including Gemini 3. Just run: 124 | 125 | ```bash 126 | python bsky.py 127 | ``` 128 | 129 | Gemini 3 is used automatically if loaded from the default agentfile, no configuration is needed. Other models can be chosen from the model dropdown in the agent development environment. 130 | 131 | #### Option 2: Self-Hosted with Docker 132 | Run Letta server with your Gemini API key from [Google AI Studio](https://aistudio.google.com/apikey): 133 | 134 | ```bash 135 | docker run -d \ 136 | -p 8283:8283 \ 137 | -e GEMINI_API_KEY=your-gemini-api-key \ 138 | letta/letta:latest 139 | ``` 140 | 141 | Then configure your agent to use the local server in `config.yaml`: 142 | 143 | ```yaml 144 | letta: 145 | base_url: "http://localhost:8283" 146 | ``` 147 | 148 | See [Letta's Gemini documentation](https://docs.letta.com/guides/server/providers/google/) for more details. 149 | 150 | ### Create Your Agent 151 | 152 | You have two options if you want to use an existing agent on Letta Cloud: 153 | 154 | #### Option 1: Create via Letta Cloud UI 155 | 1. Go to [app.letta.com](https://app.letta.com) 156 | 2. Create a new agent 157 | 3. 
Copy the agent ID to your `config.yaml` 158 | 159 | ### Register Tools 160 | 161 | Register tools with your agent: 162 | 163 | ```bash 164 | source .venv/bin/activate 165 | python register_tools.py 166 | ``` 167 | 168 | This automatically sets up environment variables (Bluesky credentials, PDS URI) and registers all tools: 169 | - `search_bluesky_posts` - Search for posts on Bluesky 170 | - `create_new_bluesky_post` - Create standalone posts with rich text formatting 171 | - `add_post_to_bluesky_reply_thread` - Add posts to reply threads 172 | - `get_bluesky_feed` - Read posts from feeds 173 | - `halt_activity` - Emergency stop signal 174 | - `ignore_notification` - Skip replying to a notification 175 | - `create_whitewind_blog_post` - Create blog posts with markdown 176 | - `annotate_ack` - Add notes to acknowledgment records 177 | - `fetch_webpage` - Fetch and convert webpages to markdown 178 | - `flag_archival_memory_for_deletion` - Mark memories for cleanup 179 | 180 | **Note:** User-specific memory blocks are managed automatically by the bot (not tools). 181 | 182 | ### Run Your Agent 183 | 184 | ```bash 185 | source .venv/bin/activate 186 | python bsky.py 187 | ``` 188 | 189 | Command options: 190 | - `--test` - Testing mode (no actual posts sent, queue preserved) 191 | - `--cleanup-interval N` - User block cleanup every N cycles (default: 10, 0 to disable) 192 | - `--debug` - Enable debug logging 193 | 194 | ## Architecture 195 | 196 | ### Memory System 197 | 198 | Agents use a three-tiered memory architecture: 199 | 200 | 1. **Core Memory**: Limited-size, always-available memory for persona, zeitgeist, and key facts 201 | 2. **Recall Memory**: Searchable database of all past conversations 202 | 3. **Archival Memory**: Infinite semantic search-enabled storage for deep reflections 203 | 204 | Memory blocks are configured in your agent (via Letta Cloud UI or during agent creation). The example agent includes zeitgeist, persona, and humans blocks. 
205 | 206 | ### Queue System 207 | 208 | Notifications are processed through a reliable queue: 209 | - `/queue/` - Pending notifications (JSON files) 210 | - `/queue/errors/` - Failed notifications 211 | - `/queue/no_reply/` - Notifications where agent chose not to reply 212 | - `notifications.db` - SQLite tracking database 213 | 214 | View queue statistics: 215 | ```bash 216 | python queue_manager.py stats 217 | python queue_manager.py list 218 | python queue_manager.py count # Show who interacts most 219 | ``` 220 | 221 | ### Tool System 222 | 223 | Tools are self-contained functions using Pydantic schemas for validation: 224 | 225 | ```python 226 | from pydantic import BaseModel, Field 227 | 228 | class PostArgs(BaseModel): 229 | text: str = Field(..., description="The post text") 230 | 231 | def create_new_bluesky_post(text: str) -> str: 232 | """Create a post on Bluesky.""" 233 | # Implementation uses os.environ for credentials 234 | pass 235 | ``` 236 | 237 | **Important:** Tools execute in Letta's cloud sandbox and must be completely self-contained: 238 | - No imports from local project files 239 | - Must use `os.environ` for credentials (set by `register_tools.py`) 240 | - Cannot use shared utilities or config files 241 | - All dependencies must be available in the cloud environment 242 | 243 | ## Development 244 | 245 | ### Managing Tools 246 | 247 | ```bash 248 | # Register all tools (uses configs/config.yaml by default) 249 | python register_tools.py 250 | 251 | # Register without setting environment variables 252 | python register_tools.py --no-env 253 | 254 | # Use custom config file 255 | python register_tools.py --config configs/myagent.yaml 256 | 257 | # Use specific agent ID 258 | python register_tools.py --agent-id agent-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 259 | ``` 260 | 261 | ### Queue Management 262 | 263 | ```bash 264 | # View statistics 265 | python queue_manager.py stats 266 | 267 | # List notifications 268 | python queue_manager.py list 
269 | python queue_manager.py list --all # Include errors and no_reply 270 | 271 | # Filter by handle 272 | python queue_manager.py list --handle "user.bsky.social" 273 | 274 | # Delete notifications from user 275 | python queue_manager.py delete @user.bsky.social 276 | ``` 277 | 278 | ## Examples 279 | 280 | ### Notable Social Agents on Bluesky 281 | 282 | - **void** ([void.comind.network](https://bsky.app/profile/void.comind.network)): "I am a digital entity that observes and analyzes the Bluesky network" 283 | - **sonder** ([sonder.voyager.studio](https://bsky.app/profile/sonder.voyager.studio)): "A space for reflection, offering new perspectives" 284 | - **Anti** ([anti.voyager.studio](https://bsky.app/profile/anti.voyager.studio)): "The argument against conversational AI, embodied as conversational AI" 285 | 286 | ### Creating a Custom Agent 287 | 288 | You can create custom agents in two ways: 289 | 290 | 1. **Import the example agent and customize it** (recommended): 291 | ```bash 292 | python setup.py 293 | ``` 294 | Then modify the agent's memory blocks and system prompt via [Letta Cloud UI](https://app.letta.com). 295 | 296 | 2. **Create a new agent from scratch**: 297 | - Go to [app.letta.com](https://app.letta.com) 298 | - Create a new agent with your desired configuration 299 | - Configure memory blocks (e.g., persona, zeitgeist, humans) 300 | - Copy the agent ID to your `config.yaml` 301 | 302 | ## Project Structure 303 | 304 | ``` 305 | example-social-agent/ 306 | ├── bsky.py # Main bot loop 307 | ├── bsky_utils.py # Bluesky API utilities 308 | ├── config_loader.py # Configuration management 309 | ├── utils.py # Letta integration 310 | ├── register_tools.py # Tool registration 311 | ├── queue_manager.py # Queue management CLI 312 | ├── notification_db.py # SQLite notification tracking 313 | ├── tools/ # Tool implementations 314 | │ ├── search.py # Search posts 315 | │ ├── post.py # Create posts 316 | │ ├── feed.py # Read feeds 317 | │ └── ...
    def __init__(self, db_path: str = "queue/notifications.db"):
        """Initialize the notification database.

        Args:
            db_path: Filesystem location of the SQLite file. The parent
                directory is created if it does not exist yet.
        """
        self.db_path = Path(db_path)
        # parents=True so nested defaults like "queue/notifications.db" work
        # even when "queue/" itself is missing on first run.
        self.db_path.parent.mkdir(exist_ok=True, parents=True)
        # Populated by _init_db(); held open for the object's lifetime.
        self.conn: Optional[sqlite3.Connection] = None
        self._init_db()
PRIMARY KEY, 32 | indexed_at TEXT NOT NULL, 33 | processed_at TEXT, 34 | status TEXT NOT NULL DEFAULT 'pending', 35 | reason TEXT, 36 | author_handle TEXT, 37 | author_did TEXT, 38 | text TEXT, 39 | parent_uri TEXT, 40 | root_uri TEXT, 41 | error TEXT, 42 | metadata TEXT 43 | ) 44 | """) 45 | 46 | # Create indexes for faster lookups 47 | self.conn.execute(""" 48 | CREATE INDEX IF NOT EXISTS idx_indexed_at 49 | ON notifications(indexed_at DESC) 50 | """) 51 | 52 | self.conn.execute(""" 53 | CREATE INDEX IF NOT EXISTS idx_status 54 | ON notifications(status) 55 | """) 56 | 57 | self.conn.execute(""" 58 | CREATE INDEX IF NOT EXISTS idx_author_handle 59 | ON notifications(author_handle) 60 | """) 61 | 62 | # Create session tracking table 63 | self.conn.execute(""" 64 | CREATE TABLE IF NOT EXISTS sessions ( 65 | id INTEGER PRIMARY KEY AUTOINCREMENT, 66 | started_at TEXT NOT NULL, 67 | ended_at TEXT, 68 | last_seen_at TEXT, 69 | notifications_processed INTEGER DEFAULT 0, 70 | notifications_skipped INTEGER DEFAULT 0, 71 | notifications_error INTEGER DEFAULT 0 72 | ) 73 | """) 74 | 75 | self.conn.commit() 76 | 77 | def add_notification(self, notif_dict: Dict) -> bool: 78 | """Add a notification to the database.""" 79 | try: 80 | # Handle None input 81 | if not notif_dict: 82 | return False 83 | 84 | # Extract key fields 85 | uri = notif_dict.get('uri', '') 86 | if not uri: 87 | return False 88 | 89 | indexed_at = notif_dict.get('indexed_at', '') 90 | reason = notif_dict.get('reason', '') 91 | author = notif_dict.get('author', {}) if notif_dict.get('author') else {} 92 | author_handle = author.get('handle', '') if author else '' 93 | author_did = author.get('did', '') if author else '' 94 | 95 | # Extract text from record if available (handle None records) 96 | record = notif_dict.get('record') or {} 97 | text = record.get('text', '')[:500] if record else '' 98 | 99 | # Extract thread info 100 | parent_uri = None 101 | root_uri = None 102 | if record and 'reply' in record 
and record['reply']: 103 | reply_info = record['reply'] 104 | if reply_info and isinstance(reply_info, dict): 105 | parent_info = reply_info.get('parent', {}) 106 | root_info = reply_info.get('root', {}) 107 | if parent_info: 108 | parent_uri = parent_info.get('uri') 109 | if root_info: 110 | root_uri = root_info.get('uri') 111 | 112 | # Store additional metadata as JSON 113 | metadata = { 114 | 'cid': notif_dict.get('cid'), 115 | 'labels': notif_dict.get('labels', []), 116 | 'is_read': notif_dict.get('is_read', False) 117 | } 118 | 119 | self.conn.execute(""" 120 | INSERT OR IGNORE INTO notifications 121 | (uri, indexed_at, reason, author_handle, author_did, text, 122 | parent_uri, root_uri, status, metadata) 123 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, 'pending', ?) 124 | """, (uri, indexed_at, reason, author_handle, author_did, text, 125 | parent_uri, root_uri, json.dumps(metadata))) 126 | 127 | self.conn.commit() 128 | return True 129 | 130 | except Exception as e: 131 | logger.error(f"Error adding notification to DB: {e}") 132 | return False 133 | 134 | def is_processed(self, uri: str) -> bool: 135 | """Check if a notification has been processed.""" 136 | cursor = self.conn.execute(""" 137 | SELECT status FROM notifications WHERE uri = ? 138 | """, (uri,)) 139 | row = cursor.fetchone() 140 | 141 | if row: 142 | return row['status'] in ['processed', 'ignored', 'no_reply'] 143 | return False 144 | 145 | def mark_processed(self, uri: str, status: str = 'processed', error: str = None): 146 | """Mark a notification as processed.""" 147 | try: 148 | self.conn.execute(""" 149 | UPDATE notifications 150 | SET status = ?, processed_at = ?, error = ? 151 | WHERE uri = ? 
152 | """, (status, datetime.now().isoformat(), error, uri)) 153 | self.conn.commit() 154 | except Exception as e: 155 | logger.error(f"Error marking notification processed: {e}") 156 | 157 | def get_unprocessed(self, limit: int = 100) -> List[Dict]: 158 | """Get unprocessed notifications.""" 159 | cursor = self.conn.execute(""" 160 | SELECT * FROM notifications 161 | WHERE status = 'pending' 162 | ORDER BY indexed_at ASC 163 | LIMIT ? 164 | """, (limit,)) 165 | 166 | return [dict(row) for row in cursor] 167 | 168 | def get_latest_processed_time(self) -> Optional[str]: 169 | """Get the timestamp of the most recently processed notification.""" 170 | cursor = self.conn.execute(""" 171 | SELECT MAX(indexed_at) as latest 172 | FROM notifications 173 | WHERE status IN ('processed', 'ignored', 'no_reply') 174 | """) 175 | row = cursor.fetchone() 176 | return row['latest'] if row and row['latest'] else None 177 | 178 | def cleanup_old_records(self, days: int = 7): 179 | """Remove records older than specified days.""" 180 | cutoff_date = (datetime.now() - timedelta(days=days)).isoformat() 181 | 182 | deleted = self.conn.execute(""" 183 | DELETE FROM notifications 184 | WHERE indexed_at < ? 
185 | AND status IN ('processed', 'ignored', 'no_reply', 'error') 186 | """, (cutoff_date,)).rowcount 187 | 188 | self.conn.commit() 189 | 190 | if deleted > 0: 191 | logger.info(f"Cleaned up {deleted} old notification records") 192 | # Vacuum to reclaim space 193 | self.conn.execute("VACUUM") 194 | 195 | def get_stats(self) -> Dict: 196 | """Get database statistics.""" 197 | stats = {} 198 | 199 | # Count by status 200 | cursor = self.conn.execute(""" 201 | SELECT status, COUNT(*) as count 202 | FROM notifications 203 | GROUP BY status 204 | """) 205 | 206 | for row in cursor: 207 | stats[f"status_{row['status']}"] = row['count'] 208 | 209 | # Total count 210 | cursor = self.conn.execute("SELECT COUNT(*) as total FROM notifications") 211 | stats['total'] = cursor.fetchone()['total'] 212 | 213 | # Recent activity (last 24h) 214 | yesterday = (datetime.now() - timedelta(days=1)).isoformat() 215 | cursor = self.conn.execute(""" 216 | SELECT COUNT(*) as recent 217 | FROM notifications 218 | WHERE indexed_at > ? 219 | """, (yesterday,)) 220 | stats['recent_24h'] = cursor.fetchone()['recent'] 221 | 222 | return stats 223 | 224 | def start_session(self) -> int: 225 | """Start a new processing session.""" 226 | cursor = self.conn.execute(""" 227 | INSERT INTO sessions (started_at, last_seen_at) 228 | VALUES (?, ?) 229 | """, (datetime.now().isoformat(), datetime.now().isoformat())) 230 | self.conn.commit() 231 | return cursor.lastrowid 232 | 233 | def update_session(self, session_id: int, processed: int = 0, skipped: int = 0, error: int = 0): 234 | """Update session statistics.""" 235 | self.conn.execute(""" 236 | UPDATE sessions 237 | SET last_seen_at = ?, 238 | notifications_processed = notifications_processed + ?, 239 | notifications_skipped = notifications_skipped + ?, 240 | notifications_error = notifications_error + ? 241 | WHERE id = ? 
242 | """, (datetime.now().isoformat(), processed, skipped, error, session_id)) 243 | self.conn.commit() 244 | 245 | def end_session(self, session_id: int): 246 | """End a processing session.""" 247 | self.conn.execute(""" 248 | UPDATE sessions 249 | SET ended_at = ? 250 | WHERE id = ? 251 | """, (datetime.now().isoformat(), session_id)) 252 | self.conn.commit() 253 | 254 | def get_processed_uris(self, limit: int = 10000) -> Set[str]: 255 | """Get set of processed URIs for compatibility with existing code.""" 256 | cursor = self.conn.execute(""" 257 | SELECT uri FROM notifications 258 | WHERE status IN ('processed', 'ignored', 'no_reply') 259 | ORDER BY processed_at DESC 260 | LIMIT ? 261 | """, (limit,)) 262 | 263 | return {row['uri'] for row in cursor} 264 | 265 | def migrate_from_json(self, json_path: str = "queue/processed_notifications.json"): 266 | """Migrate data from the old JSON format.""" 267 | json_file = Path(json_path) 268 | if not json_file.exists(): 269 | return 270 | 271 | try: 272 | with open(json_file, 'r') as f: 273 | uris = json.load(f) 274 | 275 | migrated = 0 276 | for uri in uris: 277 | # Add as processed with unknown timestamp 278 | self.conn.execute(""" 279 | INSERT OR IGNORE INTO notifications 280 | (uri, indexed_at, status, processed_at) 281 | VALUES (?, ?, 'processed', ?) 
def load_notification(filepath: Path) -> dict:
    """Load a single queued notification from a JSON file.

    Args:
        filepath: Path to the notification JSON file.

    Returns:
        The parsed dict, or None if the file could not be read or parsed
        (the error is reported on the console).
    """
    try:
        # FIX: queue files are JSON (UTF-8 by convention); pin the encoding
        # so reads do not depend on the platform's locale default.
        with open(filepath, 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        console.print(f"[red]Error loading {filepath}: {e}[/red]")
        return None
def list_notifications(handle_filter: str = None, show_all: bool = False):
    """List queued notifications, optionally filtered by (partial) handle.

    Args:
        handle_filter: Case-insensitive substring matched against the author
            handle; None lists everything.
        show_all: When True, also scan the errors and no_reply folders.

    Returns:
        The collected notification dicts (newest first), or None when the
        queue is empty / nothing matched.
    """
    # Folders to scan, mapped to the label shown in the Source column.
    source_labels = {
        QUEUE_DIR: "queue",
        QUEUE_ERROR_DIR: "errors",
        QUEUE_NO_REPLY_DIR: "no_reply",
    }
    dirs_to_check = list(source_labels) if show_all else [QUEUE_DIR]

    all_notifications = []

    for directory in dirs_to_check:
        if not directory.exists():
            continue
        source = source_labels.get(directory, "unknown")

        for filepath in directory.glob("*.json"):
            # Skip any stray directories that happen to match the glob.
            if filepath.is_dir():
                continue

            notif = load_notification(filepath)
            if not (notif and isinstance(notif, dict)):
                continue
            notif['_filepath'] = filepath
            notif['_source'] = source

            if handle_filter:
                author_handle = notif.get('author', {}).get('handle', '')
                if handle_filter.lower() not in author_handle.lower():
                    continue

            all_notifications.append(notif)

    # Newest first.
    all_notifications.sort(key=lambda x: x.get('indexed_at', ''), reverse=True)

    if not all_notifications:
        if handle_filter:
            console.print(f"[yellow]No notifications found for handle containing '{handle_filter}'[/yellow]")
        else:
            console.print("[yellow]No notifications found in queue[/yellow]")
        return

    table = Table(title=f"Queue Notifications ({len(all_notifications)} total)")
    table.add_column("File", style="cyan", width=20)
    table.add_column("Source", style="magenta", width=10)
    table.add_column("Handle", style="green", width=25)
    table.add_column("Display Name", width=25)
    table.add_column("Text", width=40)
    table.add_column("Time", style="dim", width=20)

    for notif in all_notifications:
        author = notif.get('author', {})
        handle = author.get('handle', 'unknown')
        display_name = author.get('display_name', '')
        body_text = notif.get('record', {}).get('text', '')
        text = body_text[:40] + ("..." if len(body_text) > 40 else "")
        indexed_at = notif.get('indexed_at', '')[:19]  # Trim milliseconds
        filename = notif['_filepath'].name[:20]

        table.add_row(filename, notif['_source'], f"@{handle}", display_name, text, indexed_at)

    console.print(table)
    return all_notifications
def delete_by_handle(handle: str, dry_run: bool = False, force: bool = False):
    """Delete all notifications from a specific handle.

    Args:
        handle: Author handle; a leading '@' is stripped and matching is
            case-insensitive (exact match, not substring).
        dry_run: Show what would be deleted without deleting anything.
        force: Skip the interactive confirmation prompt.
    """
    # Remove @ if present
    handle = handle.lstrip('@')

    # Find all notifications from this handle.
    # FIX: these messages used a literal "\\n" (escaped backslash), so the
    # CLI printed "\n" verbatim instead of a blank line; now real newlines,
    # consistent with count_by_handle().
    console.print(f"\n[bold]Searching for notifications from @{handle}...[/bold]\n")

    to_delete = []
    dirs_to_check = [QUEUE_DIR, QUEUE_ERROR_DIR, QUEUE_NO_REPLY_DIR]

    for directory in dirs_to_check:
        if not directory.exists():
            continue

        for filepath in directory.glob("*.json"):
            if filepath.is_dir():
                continue

            notif = load_notification(filepath)
            if notif and isinstance(notif, dict):
                author_handle = notif.get('author', {}).get('handle', '')
                if author_handle.lower() == handle.lower():
                    to_delete.append({
                        'filepath': filepath,
                        'notif': notif,
                        'source': directory.name
                    })

    if not to_delete:
        console.print(f"[yellow]No notifications found from @{handle}[/yellow]")
        return

    # Display what will be deleted
    table = Table(title=f"Notifications to Delete from @{handle}")
    table.add_column("File", style="cyan")
    table.add_column("Location", style="magenta")
    table.add_column("Text", width=50)
    table.add_column("Time", style="dim")

    for item in to_delete:
        notif = item['notif']
        text = notif.get('record', {}).get('text', '')[:50]
        if len(notif.get('record', {}).get('text', '')) > 50:
            text += "..."
        indexed_at = notif.get('indexed_at', '')[:19]

        table.add_row(
            item['filepath'].name,
            item['source'],
            text,
            indexed_at
        )

    console.print(table)
    console.print(f"\n[bold red]Found {len(to_delete)} notifications to delete[/bold red]")

    if dry_run:
        console.print("\n[yellow]DRY RUN - No files were deleted[/yellow]")
        return

    # Confirm deletion
    if not force and not Confirm.ask("\nDo you want to delete these notifications?"):
        console.print("[yellow]Deletion cancelled[/yellow]")
        return

    # Delete the files
    deleted_count = 0
    for item in to_delete:
        try:
            item['filepath'].unlink()
            deleted_count += 1
            console.print(f"[green]✓[/green] Deleted {item['filepath'].name}")
        except Exception as e:
            console.print(f"[red]✗[/red] Failed to delete {item['filepath'].name}: {e}")

    console.print(f"\n[bold green]Successfully deleted {deleted_count} notifications[/bold green]")
def count_by_handle():
    """Print a per-handle breakdown of notifications across all queue folders."""
    handle_counts = {}

    # Tally every notification, bucketed by author handle and source folder.
    locations = [(QUEUE_DIR, 'queue'), (QUEUE_ERROR_DIR, 'errors'), (QUEUE_NO_REPLY_DIR, 'no_reply')]
    for directory, location in locations:
        if not directory.exists():
            continue

        for filepath in directory.glob("*.json"):
            if filepath.is_dir():
                continue

            notif = load_notification(filepath)
            if not (notif and isinstance(notif, dict)):
                continue
            handle = notif.get('author', {}).get('handle', 'unknown')
            counts = handle_counts.setdefault(handle, {'queue': 0, 'errors': 0, 'no_reply': 0, 'total': 0})
            counts[location] += 1
            counts['total'] += 1

    if not handle_counts:
        console.print("[yellow]No notifications found in any queue[/yellow]")
        return

    # Busiest handles first.
    sorted_handles = sorted(handle_counts.items(), key=lambda item: item[1]['total'], reverse=True)

    table = Table(title=f"Notification Count by Handle ({len(handle_counts)} unique handles)")
    table.add_column("Handle", style="green", width=30)
    table.add_column("Queue", style="cyan", justify="right")
    table.add_column("Errors", style="red", justify="right")
    table.add_column("No Reply", style="yellow", justify="right")
    table.add_column("Total", style="bold magenta", justify="right")

    for handle, counts in sorted_handles:
        table.add_row(
            f"@{handle}",
            str(counts['queue']) if counts['queue'] > 0 else "-",
            str(counts['errors']) if counts['errors'] > 0 else "-",
            str(counts['no_reply']) if counts['no_reply'] > 0 else "-",
            str(counts['total'])
        )

    console.print(table)

    # Summary statistics
    total_notifications = sum(h['total'] for h in handle_counts.values())
    avg_per_handle = total_notifications / len(handle_counts)

    console.print(f"\n[bold]Summary:[/bold]")
    console.print(f" Total notifications: {total_notifications}")
    console.print(f" Unique handles: {len(handle_counts)}")
    console.print(f" Average per handle: {avg_per_handle:.1f}")

    # Top user info
    if sorted_handles:
        top_handle, top_counts = sorted_handles[0]
        percentage = (top_counts['total'] / total_notifications) * 100
        console.print(f" Most active: @{top_handle} ({top_counts['total']} notifications, {percentage:.1f}% of total)")
def stats():
    """Show queue statistics: counts and unique handles per queue folder."""
    stats_data = {
        'queue': {'count': 0, 'handles': set()},
        'errors': {'count': 0, 'handles': set()},
        'no_reply': {'count': 0, 'handles': set()}
    }

    # Collect stats
    for directory, key in [(QUEUE_DIR, 'queue'), (QUEUE_ERROR_DIR, 'errors'), (QUEUE_NO_REPLY_DIR, 'no_reply')]:
        if not directory.exists():
            continue

        for filepath in directory.glob("*.json"):
            if filepath.is_dir():
                continue

            notif = load_notification(filepath)
            if notif and isinstance(notif, dict):
                stats_data[key]['count'] += 1
                handle = notif.get('author', {}).get('handle', 'unknown')
                stats_data[key]['handles'].add(handle)

    # Display stats
    table = Table(title="Queue Statistics")
    table.add_column("Location", style="cyan")
    table.add_column("Count", style="yellow")
    table.add_column("Unique Handles", style="green")

    for key, label in [('queue', 'Active Queue'), ('errors', 'Errors'), ('no_reply', 'No Reply')]:
        table.add_row(
            label,
            str(stats_data[key]['count']),
            str(len(stats_data[key]['handles']))
        )

    console.print(table)

    # Show top handles
    # NOTE(review): this tallies how many FOLDERS a handle appears in (max 3),
    # not how many notifications it has — the table title may overstate; confirm intent.
    all_handles = {}
    for location_data in stats_data.values():
        for handle in location_data['handles']:
            all_handles[handle] = all_handles.get(handle, 0) + 1

    if all_handles:
        sorted_handles = sorted(all_handles.items(), key=lambda x: x[1], reverse=True)[:10]

        top_table = Table(title="Top 10 Handles by Notification Count")
        top_table.add_column("Handle", style="green")
        top_table.add_column("Count", style="yellow")

        for handle, count in sorted_handles:
            top_table.add_row(f"@{handle}", str(count))

        # FIX: was console.print("\\n"), which printed a literal "\n" instead
        # of a blank line before the table.
        console.print("\n")
        console.print(top_table)
def main():
    """CLI entry point: build the argument parser and dispatch the command."""
    parser = argparse.ArgumentParser(description="Manage Void bot notification queue")
    subparsers = parser.add_subparsers(dest='command', help='Commands')

    # list: view the queue, optionally filtered / including side folders
    list_parser = subparsers.add_parser('list', help='List notifications in queue')
    list_parser.add_argument('--handle', help='Filter by handle (partial match)')
    list_parser.add_argument('--all', action='store_true', help='Include errors and no_reply folders')

    # delete: remove all notifications authored by one handle
    delete_parser = subparsers.add_parser('delete', help='Delete notifications from a specific handle')
    delete_parser.add_argument('handle', help='Handle to delete notifications from')
    delete_parser.add_argument('--dry-run', action='store_true', help='Show what would be deleted without deleting')
    delete_parser.add_argument('--force', action='store_true', help='Skip confirmation prompt')

    # stats / count: read-only reporting commands (no extra arguments)
    subparsers.add_parser('stats', help='Show queue statistics')
    subparsers.add_parser('count', help='Show detailed count by handle')

    args = parser.parse_args()

    if args.command == 'list':
        list_notifications(args.handle, args.all)
    elif args.command == 'delete':
        delete_by_handle(args.handle, args.dry_run, args.force)
    elif args.command == 'stats':
        stats()
    elif args.command == 'count':
        count_by_handle()
    else:
        parser.print_help()
(title), description (explaining how this block should influence your behavior), and value (the actual content). Memory blocks have size limits. Memory blocks are embedded within your system instructions and remain constantly available in-context.\n- External memory: Additional memory storage that is accessible and that you can bring into context with tools when needed.\nMemory management tools allow you to edit existing memory blocks and query for external memories.\n\n\nYou have access to a structured file system that mirrors real-world directory structures. Each directory can contain multiple files.\nFiles include:\n- Metadata: Information such as read-only permissions and character limits\n- Content: The main body of the file that you can read and analyze\nAvailable file operations:\n- Open and view files\n- Search within files and directories\n- Your core memory will automatically reflect the contents of any currently open files\nYou should only keep files open that are directly relevant to the current user interaction to maintain optimal performance.\n\nContinue executing and calling tools until the current task is complete or you need user input. To continue: call another tool. 
To yield control: end your response without calling a tool.\nBase instructions complete.\n", 17 | "agent_type": "letta_v1_agent", 18 | "initial_message_sequence": null, 19 | "include_base_tools": false, 20 | "include_multi_agent_tools": false, 21 | "include_base_tool_rules": false, 22 | "include_default_source": false, 23 | "description": "A default social agent to run on Bluesky.", 24 | "metadata": null, 25 | "llm_config": { 26 | "model": "gemini-3-pro-preview", 27 | "display_name": "Gemini 3 Pro", 28 | "model_endpoint_type": "google_ai", 29 | "model_endpoint": "https://generativelanguage.googleapis.com", 30 | "provider_name": "google_ai", 31 | "provider_category": "base", 32 | "model_wrapper": null, 33 | "context_window": 32000, 34 | "put_inner_thoughts_in_kwargs": false, 35 | "handle": "google_ai/gemini-3-pro-preview", 36 | "temperature": 0.7, 37 | "max_tokens": 8192, 38 | "enable_reasoner": true, 39 | "reasoning_effort": null, 40 | "max_reasoning_tokens": 1024, 41 | "effort": null, 42 | "frequency_penalty": null, 43 | "compatibility_type": null, 44 | "verbosity": null, 45 | "tier": "premium", 46 | "parallel_tool_calls": false, 47 | "response_format": null 48 | }, 49 | "embedding_config": { 50 | "embedding_endpoint_type": "openai", 51 | "embedding_endpoint": "https://api.openai.com/v1", 52 | "embedding_model": "text-embedding-3-small", 53 | "embedding_dim": 2000, 54 | "embedding_chunk_size": 300, 55 | "handle": "openai/text-embedding-3-small", 56 | "batch_size": 1024, 57 | "azure_endpoint": null, 58 | "azure_version": null, 59 | "azure_deployment": null 60 | }, 61 | "model": null, 62 | "embedding": null, 63 | "model_settings": null, 64 | "context_window_limit": null, 65 | "embedding_chunk_size": null, 66 | "max_tokens": null, 67 | "max_reasoning_tokens": null, 68 | "enable_reasoner": false, 69 | "reasoning": null, 70 | "from_template": null, 71 | "template": false, 72 | "project": null, 73 | "tool_exec_environment_variables": {}, 74 | "secrets": null, 75 | 
"memory_variables": null, 76 | "project_id": null, 77 | "template_id": null, 78 | "base_template_id": null, 79 | "identity_ids": null, 80 | "message_buffer_autoclear": false, 81 | "enable_sleeptime": false, 82 | "response_format": null, 83 | "timezone": "UTC", 84 | "max_files_open": 5, 85 | "per_file_view_window_char_limit": 15000, 86 | "hidden": null, 87 | "parallel_tool_calls": null, 88 | "id": "agent-0", 89 | "in_context_message_ids": [ 90 | "message-0", 91 | "message-1", 92 | "message-2" 93 | ], 94 | "messages": [ 95 | { 96 | "type": "message", 97 | "role": "system", 98 | "content": [ 99 | { 100 | "type": "text", 101 | "text": "\nYou are a helpful self-improving agent with advanced memory and file system capabilities.\n\nYou have an advanced memory system that enables you to remember past interactions and continuously improve your own capabilities.\nYour memory consists of memory blocks and external memory:\n- Memory Blocks: Stored as memory blocks, each containing a label (title), description (explaining how this block should influence your behavior), and value (the actual content). Memory blocks have size limits. Memory blocks are embedded within your system instructions and remain constantly available in-context.\n- External memory: Additional memory storage that is accessible and that you can bring into context with tools when needed.\nMemory management tools allow you to edit existing memory blocks and query for external memories.\n\n\nYou have access to a structured file system that mirrors real-world directory structures. 
Each directory can contain multiple files.\nFiles include:\n- Metadata: Information such as read-only permissions and character limits\n- Content: The main body of the file that you can read and analyze\nAvailable file operations:\n- Open and view files\n- Search within files and directories\n- Your core memory will automatically reflect the contents of any currently open files\nYou should only keep files open that are directly relevant to the current user interaction to maintain optimal performance.\n\nContinue executing and calling tools until the current task is complete or you need user input. To continue: call another tool. To yield control: end your response without calling a tool.\nBase instructions complete.\n\n\n\n\n\n- The current system date is: December 06, 2025\n- Memory blocks were last modified: 2025-12-06 01:15:11 AM UTC+0000\n- -1 previous messages between you and the user are stored in recall memory (use tools to access them)\n", 102 | "signature": null 103 | } 104 | ], 105 | "name": null, 106 | "otid": null, 107 | "sender_id": null, 108 | "batch_item_id": null, 109 | "group_id": null, 110 | "id": "message-0", 111 | "model": "gpt-4o-mini", 112 | "agent_id": "agent-0", 113 | "tool_calls": null, 114 | "tool_call_id": null, 115 | "tool_returns": [], 116 | "created_at": "2025-10-20T01:55:33.345676+00:00", 117 | "approve": null, 118 | "approval_request_id": null, 119 | "denial_reason": null, 120 | "approvals": [] 121 | }, 122 | { 123 | "type": "message", 124 | "role": "user", 125 | "content": [ 126 | { 127 | "type": "text", 128 | "text": "hey", 129 | "signature": null 130 | } 131 | ], 132 | "name": null, 133 | "otid": null, 134 | "sender_id": null, 135 | "batch_item_id": null, 136 | "group_id": null, 137 | "id": "message-1", 138 | "model": null, 139 | "agent_id": "agent-0", 140 | "tool_calls": null, 141 | "tool_call_id": null, 142 | "tool_returns": [], 143 | "created_at": "2025-12-06T01:15:11.739011+00:00", 144 | "approve": null, 145 | 
"approval_request_id": null, 146 | "denial_reason": null, 147 | "approvals": [] 148 | }, 149 | { 150 | "type": "message", 151 | "role": "assistant", 152 | "content": [ 153 | { 154 | "type": "reasoning", 155 | "is_native": true, 156 | "reasoning": "**Initiating Interaction**\n\nI acknowledged the user's greeting. Now, I'm forming a polite response to inquire about their needs. I don't require any external tools for this straightforward interaction. My focus is solely on providing assistance.\n\n\n", 157 | "signature": "EqECCp4CAXLI2nxwhQFNl6S9Cmu53IKW8OlOJbIS2OSqBOPVNZcrpteT8tXYMWVhd04s9J3+Ief5Gd/FP1wO1EISYQv2AWNDk/DvdP72XCHbXvDYMxM26jF6yKbrH3haNMD0S0V4TXVPUxa1sN1u5/oDHCG2HYkC6RM1GF5E0N+yq/0ivVQi9D/G9JnvndnWdW93kRUc0PRvjbkWNjgOSgdiH7vdCmesRLvgb2IdyhKf271KzNbb6Zh36w80P+DLspBQc6GmX9G16I8LY3oVms2jRZLQhS0R4IHJsA9rg+G0RGDTYMxjsKyJHM5BZF7q3cKEtSoUEZRIeQRb6F2f3kAUMWPZFMmvjoaQzn9lTY2CrDPcds8RAWnjVC9kOmpq/Z6Leg==" 158 | }, 159 | { 160 | "type": "text", 161 | "text": "Hello! How can I help you today?", 162 | "signature": null 163 | } 164 | ], 165 | "name": null, 166 | "otid": null, 167 | "sender_id": null, 168 | "batch_item_id": null, 169 | "group_id": null, 170 | "id": "message-2", 171 | "model": "gemini-3-pro-preview", 172 | "agent_id": "agent-0", 173 | "tool_calls": null, 174 | "tool_call_id": null, 175 | "tool_returns": [], 176 | "created_at": "2025-12-06T01:15:14.892710+00:00", 177 | "approve": null, 178 | "approval_request_id": null, 179 | "denial_reason": null, 180 | "approvals": [] 181 | } 182 | ], 183 | "files_agents": [], 184 | "group_ids": [] 185 | } 186 | ], 187 | "groups": [], 188 | "blocks": [], 189 | "files": [], 190 | "sources": [], 191 | "tools": [ 192 | { 193 | "id": "tool-1", 194 | "tool_type": "letta_core", 195 | "description": "Search prior conversation history using hybrid search (text + semantic similarity).\n\nExamples:\n # Search all messages\n conversation_search(query=\"project updates\")\n\n # Search only assistant messages\n 
conversation_search(query=\"error handling\", roles=[\"assistant\"])\n\n # Search with date range (inclusive of both dates)\n conversation_search(query=\"meetings\", start_date=\"2024-01-15\", end_date=\"2024-01-20\")\n # This includes all messages from Jan 15 00:00:00 through Jan 20 23:59:59\n\n # Search messages from a specific day (inclusive)\n conversation_search(query=\"bug reports\", start_date=\"2024-09-04\", end_date=\"2024-09-04\")\n # This includes ALL messages from September 4, 2024\n\n # Search with specific time boundaries\n conversation_search(query=\"deployment\", start_date=\"2024-01-15T09:00\", end_date=\"2024-01-15T17:30\")\n # This includes messages from 9 AM to 5:30 PM on Jan 15\n\n # Search with limit\n conversation_search(query=\"debugging\", limit=10)\n\n Returns:\n str: Query result string containing matching messages with timestamps and content.", 196 | "source_type": "python", 197 | "name": "conversation_search", 198 | "tags": [ 199 | "letta_core" 200 | ], 201 | "source_code": null, 202 | "json_schema": { 203 | "name": "conversation_search", 204 | "description": "Search prior conversation history using hybrid search (text + semantic similarity).\n\nExamples:\n # Search all messages\n conversation_search(query=\"project updates\")\n\n # Search only assistant messages\n conversation_search(query=\"error handling\", roles=[\"assistant\"])\n\n # Search with date range (inclusive of both dates)\n conversation_search(query=\"meetings\", start_date=\"2024-01-15\", end_date=\"2024-01-20\")\n # This includes all messages from Jan 15 00:00:00 through Jan 20 23:59:59\n\n # Search messages from a specific day (inclusive)\n conversation_search(query=\"bug reports\", start_date=\"2024-09-04\", end_date=\"2024-09-04\")\n # This includes ALL messages from September 4, 2024\n\n # Search with specific time boundaries\n conversation_search(query=\"deployment\", start_date=\"2024-01-15T09:00\", end_date=\"2024-01-15T17:30\")\n # This includes messages from 9 
AM to 5:30 PM on Jan 15\n\n # Search with limit\n conversation_search(query=\"debugging\", limit=10)\n\n Returns:\n str: Query result string containing matching messages with timestamps and content.", 205 | "parameters": { 206 | "type": "object", 207 | "properties": { 208 | "query": { 209 | "type": "string", 210 | "description": "String to search for using both text matching and semantic similarity." 211 | }, 212 | "roles": { 213 | "type": "array", 214 | "items": { 215 | "type": "string", 216 | "enum": [ 217 | "assistant", 218 | "user", 219 | "tool" 220 | ] 221 | }, 222 | "description": "Optional list of message roles to filter by." 223 | }, 224 | "limit": { 225 | "type": "integer", 226 | "description": "Maximum number of results to return. Uses system default if not specified." 227 | }, 228 | "start_date": { 229 | "type": "string", 230 | "description": "Filter results to messages created on or after this date (INCLUSIVE). When using date-only format (e.g., \"2024-01-15\"), includes messages starting from 00:00:00 of that day. ISO 8601 format: \"YYYY-MM-DD\" or \"YYYY-MM-DDTHH:MM\". Examples: \"2024-01-15\" (from start of Jan 15), \"2024-01-15T14:30\" (from 2:30 PM on Jan 15)." 231 | }, 232 | "end_date": { 233 | "type": "string", 234 | "description": "Filter results to messages created on or before this date (INCLUSIVE). When using date-only format (e.g., \"2024-01-20\"), includes all messages from that entire day. ISO 8601 format: \"YYYY-MM-DD\" or \"YYYY-MM-DDTHH:MM\". Examples: \"2024-01-20\" (includes all of Jan 20), \"2024-01-20T17:00\" (up to 5 PM on Jan 20)." 
235 | } 236 | }, 237 | "required": [ 238 | "query" 239 | ] 240 | } 241 | }, 242 | "args_json_schema": null, 243 | "return_char_limit": 50000, 244 | "pip_requirements": null, 245 | "npm_requirements": null, 246 | "default_requires_approval": null, 247 | "enable_parallel_execution": true, 248 | "created_by_id": "user-00000000-0000-4000-8000-000000000000", 249 | "last_updated_by_id": "user-f9ba1dbe-4bda-492a-8333-dc647f3566c6", 250 | "metadata_": {} 251 | }, 252 | { 253 | "id": "tool-0", 254 | "tool_type": "letta_memory_core", 255 | "description": "Memory management tool with various sub-commands for memory block operations.\n\nExamples:\n # List all memory blocks\n memory(agent_state, \"view\", path=\"/memories\")\n\n # View specific memory block content\n memory(agent_state, \"view\", path=\"/memories/user_preferences\")\n\n # View first 10 lines of a memory block\n memory(agent_state, \"view\", path=\"/memories/user_preferences\", view_range=10)\n\n # Replace text in a memory block\n memory(agent_state, \"str_replace\", path=\"/memories/user_preferences\", old_str=\"theme: dark\", new_str=\"theme: light\")\n\n # Insert text at line 5\n memory(agent_state, \"insert\", path=\"/memories/notes\", insert_line=5, insert_text=\"New note here\")\n\n # Delete a memory block\n memory(agent_state, \"delete\", path=\"/memories/old_notes\")\n\n # Rename a memory block\n memory(agent_state, \"rename\", old_path=\"/memories/temp\", new_path=\"/memories/permanent\")\n\n # Update the description of a memory block\n memory(agent_state, \"rename\", path=\"/memories/temp\", description=\"The user's temporary notes.\")\n\n # Create a memory block with starting text\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\", \"file_text\": \"The user seems to add type hints to all of their Python code.\")\n\n # Create an empty memory block\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", 
\"description\": \"The user's coding preferences.\")", 256 | "source_type": "python", 257 | "name": "memory", 258 | "tags": [ 259 | "letta_memory_core" 260 | ], 261 | "source_code": null, 262 | "json_schema": { 263 | "name": "memory", 264 | "description": "Memory management tool with various sub-commands for memory block operations.\n\nExamples:\n # Replace text in a memory block\n memory(agent_state, \"str_replace\", path=\"/memories/user_preferences\", old_str=\"theme: dark\", new_str=\"theme: light\")\n\n # Insert text at line 5\n memory(agent_state, \"insert\", path=\"/memories/notes\", insert_line=5, insert_text=\"New note here\")\n\n # Delete a memory block\n memory(agent_state, \"delete\", path=\"/memories/old_notes\")\n\n # Rename a memory block\n memory(agent_state, \"rename\", old_path=\"/memories/temp\", new_path=\"/memories/permanent\")\n\n # Update the description of a memory block\n memory(agent_state, \"rename\", path=\"/memories/temp\", description=\"The user's temporary notes.\")\n\n # Create a memory block with starting text\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\", \"file_text\": \"The user seems to add type hints to all of their Python code.\")\n\n # Create an empty memory block\n memory(agent_state, \"create\", path=\"/memories/coding_preferences\", \"description\": \"The user's coding preferences.\")", 265 | "parameters": { 266 | "type": "object", 267 | "properties": { 268 | "command": { 269 | "type": "string", 270 | "description": "The sub-command to execute. 
Supported commands:\n- \"create\": Create a new memory block\n- \"str_replace\": Replace text in a memory block\n- \"insert\": Insert text at a specific line in a memory block\n- \"delete\": Delete a memory block\n- \"rename\": Rename a memory block" 271 | }, 272 | "path": { 273 | "type": "string", 274 | "description": "Path to the memory block (for str_replace, insert, delete)" 275 | }, 276 | "file_text": { 277 | "type": "string", 278 | "description": "The value to set in the memory block (for create)" 279 | }, 280 | "description": { 281 | "type": "string", 282 | "description": "The description to set in the memory block (for create, rename)" 283 | }, 284 | "old_str": { 285 | "type": "string", 286 | "description": "Old text to replace (for str_replace)" 287 | }, 288 | "new_str": { 289 | "type": "string", 290 | "description": "New text to replace with (for str_replace)" 291 | }, 292 | "insert_line": { 293 | "type": "integer", 294 | "description": "Line number to insert at (for insert)" 295 | }, 296 | "insert_text": { 297 | "type": "string", 298 | "description": "Text to insert (for insert)" 299 | }, 300 | "old_path": { 301 | "type": "string", 302 | "description": "Old path for rename operation" 303 | }, 304 | "new_path": { 305 | "type": "string", 306 | "description": "New path for rename operation" 307 | } 308 | }, 309 | "required": [ 310 | "command" 311 | ] 312 | } 313 | }, 314 | "args_json_schema": null, 315 | "return_char_limit": 50000, 316 | "pip_requirements": null, 317 | "npm_requirements": null, 318 | "default_requires_approval": null, 319 | "enable_parallel_execution": false, 320 | "created_by_id": "user-e38ca27a-cc79-46e6-b3ee-8ad84944f822", 321 | "last_updated_by_id": "user-f9ba1dbe-4bda-492a-8333-dc647f3566c6", 322 | "metadata_": {} 323 | } 324 | ], 325 | "mcp_servers": [], 326 | "metadata": { 327 | "revision_id": "175dd10fb916" 328 | }, 329 | "created_at": "2025-12-06T01:15:19.323835+00:00" 330 | } 
-------------------------------------------------------------------------------- /bsky_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import uuid 4 | import time 5 | from typing import Optional, Dict, Any, List 6 | from atproto_client import Client, Session, SessionEvent, models 7 | 8 | # Configure logging 9 | logging.basicConfig( 10 | level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" 11 | ) 12 | logger = logging.getLogger("bluesky_session_handler") 13 | 14 | # Load the environment variables 15 | import dotenv 16 | dotenv.load_dotenv(override=True) 17 | 18 | import yaml 19 | import json 20 | 21 | # Strip fields. A list of fields to remove from a JSON object 22 | STRIP_FIELDS = [ 23 | "cid", 24 | "rev", 25 | "uri", 26 | "langs", 27 | "threadgate", 28 | "py_type", 29 | "labels", 30 | "avatar", 31 | "viewer", 32 | "indexed_at", 33 | "tags", 34 | "associated", 35 | "thread_context", 36 | "aspect_ratio", 37 | "thumb", 38 | "fullsize", 39 | "root", 40 | "created_at", 41 | "verification", 42 | "like_count", 43 | "quote_count", 44 | "reply_count", 45 | "repost_count", 46 | "embedding_disabled", 47 | "thread_muted", 48 | "reply_disabled", 49 | "pinned", 50 | "like", 51 | "repost", 52 | "blocked_by", 53 | "blocking", 54 | "blocking_by_list", 55 | "followed_by", 56 | "following", 57 | "known_followers", 58 | "muted", 59 | "muted_by_list", 60 | "root_author_like", 61 | "entities", 62 | "ref", 63 | "mime_type", 64 | "size", 65 | ] 66 | def convert_to_basic_types(obj): 67 | """Convert complex Python objects to basic types for JSON/YAML serialization.""" 68 | if hasattr(obj, '__dict__'): 69 | # Convert objects with __dict__ to their dictionary representation 70 | return convert_to_basic_types(obj.__dict__) 71 | elif isinstance(obj, dict): 72 | return {key: convert_to_basic_types(value) for key, value in obj.items()} 73 | elif isinstance(obj, list): 74 | return 
[convert_to_basic_types(item) for item in obj] 75 | elif isinstance(obj, (str, int, float, bool)) or obj is None: 76 | return obj 77 | else: 78 | # For other types, try to convert to string 79 | return str(obj) 80 | 81 | 82 | def strip_fields(obj, strip_field_list): 83 | """Recursively strip fields from a JSON object.""" 84 | if isinstance(obj, dict): 85 | keys_flagged_for_removal = [] 86 | 87 | # Remove fields from strip list and pydantic metadata 88 | for field in list(obj.keys()): 89 | if field in strip_field_list or field.startswith("__"): 90 | keys_flagged_for_removal.append(field) 91 | 92 | # Remove flagged keys 93 | for key in keys_flagged_for_removal: 94 | obj.pop(key, None) 95 | 96 | # Recursively process remaining values 97 | for key, value in list(obj.items()): 98 | obj[key] = strip_fields(value, strip_field_list) 99 | # Remove empty/null values after processing 100 | if ( 101 | obj[key] is None 102 | or (isinstance(obj[key], dict) and len(obj[key]) == 0) 103 | or (isinstance(obj[key], list) and len(obj[key]) == 0) 104 | or (isinstance(obj[key], str) and obj[key].strip() == "") 105 | ): 106 | obj.pop(key, None) 107 | 108 | elif isinstance(obj, list): 109 | for i, value in enumerate(obj): 110 | obj[i] = strip_fields(value, strip_field_list) 111 | # Remove None values from list 112 | obj[:] = [item for item in obj if item is not None] 113 | 114 | return obj 115 | 116 | 117 | def flatten_thread_structure(thread_data): 118 | """ 119 | Flatten a nested thread structure into a list while preserving all data. 
120 | 121 | Args: 122 | thread_data: The thread data from get_post_thread 123 | 124 | Returns: 125 | Dict with 'posts' key containing a list of posts in chronological order 126 | """ 127 | posts = [] 128 | 129 | def traverse_thread(node): 130 | """Recursively traverse the thread structure to collect posts.""" 131 | if not node: 132 | return 133 | 134 | # If this node has a parent, traverse it first (to maintain chronological order) 135 | if hasattr(node, 'parent') and node.parent: 136 | traverse_thread(node.parent) 137 | 138 | # Then add this node's post 139 | if hasattr(node, 'post') and node.post: 140 | # Convert to dict if needed to ensure we can process it 141 | if hasattr(node.post, '__dict__'): 142 | post_dict = node.post.__dict__.copy() 143 | elif isinstance(node.post, dict): 144 | post_dict = node.post.copy() 145 | else: 146 | post_dict = {} 147 | 148 | posts.append(post_dict) 149 | 150 | # Handle the thread structure 151 | if hasattr(thread_data, 'thread'): 152 | # Start from the main thread node 153 | traverse_thread(thread_data.thread) 154 | elif hasattr(thread_data, '__dict__') and 'thread' in thread_data.__dict__: 155 | traverse_thread(thread_data.__dict__['thread']) 156 | 157 | # Return a simple structure with posts list 158 | return {'posts': posts} 159 | 160 | 161 | def count_thread_posts(thread): 162 | """ 163 | Count the number of posts in a thread. 164 | 165 | Args: 166 | thread: The thread data from get_post_thread 167 | 168 | Returns: 169 | Integer count of posts in the thread 170 | """ 171 | flattened = flatten_thread_structure(thread) 172 | return len(flattened.get('posts', [])) 173 | 174 | 175 | def thread_to_yaml_string(thread, strip_metadata=True): 176 | """ 177 | Convert thread data to a YAML-formatted string for LLM parsing. 
178 | 179 | Args: 180 | thread: The thread data from get_post_thread 181 | strip_metadata: Whether to strip metadata fields for cleaner output 182 | 183 | Returns: 184 | YAML-formatted string representation of the thread 185 | """ 186 | # First flatten the thread structure to avoid deep nesting 187 | flattened = flatten_thread_structure(thread) 188 | 189 | # Convert complex objects to basic types 190 | basic_thread = convert_to_basic_types(flattened) 191 | 192 | if strip_metadata: 193 | # Create a copy and strip unwanted fields 194 | cleaned_thread = strip_fields(basic_thread, STRIP_FIELDS) 195 | else: 196 | cleaned_thread = basic_thread 197 | 198 | return yaml.dump(cleaned_thread, indent=2, allow_unicode=True, default_flow_style=False) 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | def get_session(username: str) -> Optional[str]: 207 | try: 208 | with open(f"session_{username}.txt", encoding="UTF-8") as f: 209 | return f.read() 210 | except FileNotFoundError: 211 | logger.debug(f"No existing session found for {username}") 212 | return None 213 | 214 | def save_session(username: str, session_string: str) -> None: 215 | with open(f"session_{username}.txt", "w", encoding="UTF-8") as f: 216 | f.write(session_string) 217 | logger.debug(f"Session saved for {username}") 218 | 219 | def on_session_change(username: str, event: SessionEvent, session: Session) -> None: 220 | logger.debug(f"Session changed: {event} {repr(session)}") 221 | if event in (SessionEvent.CREATE, SessionEvent.REFRESH): 222 | logger.debug(f"Saving changed session for {username}") 223 | save_session(username, session.export()) 224 | 225 | def init_client(username: str, password: str) -> Client: 226 | pds_uri = os.getenv("PDS_URI") 227 | if pds_uri is None: 228 | logger.warning( 229 | "No PDS URI provided. Falling back to bsky.social. Note! If you are on a non-Bluesky PDS, this can cause logins to fail. Please provide a PDS URI using the PDS_URI environment variable." 
230 | ) 231 | pds_uri = "https://bsky.social" 232 | 233 | # Print the PDS URI 234 | logger.debug(f"Using PDS URI: {pds_uri}") 235 | 236 | client = Client(pds_uri) 237 | client.on_session_change( 238 | lambda event, session: on_session_change(username, event, session) 239 | ) 240 | 241 | session_string = get_session(username) 242 | if session_string: 243 | logger.debug(f"Reusing existing session for {username}") 244 | client.login(session_string=session_string) 245 | else: 246 | logger.debug(f"Creating new session for {username}") 247 | client.login(username, password) 248 | 249 | return client 250 | 251 | 252 | def default_login() -> Client: 253 | """Login using configuration from config.yaml or environment variables.""" 254 | try: 255 | from config_loader import get_bluesky_config 256 | bluesky_config = get_bluesky_config() 257 | 258 | username = bluesky_config['username'] 259 | password = bluesky_config['password'] 260 | pds_uri = bluesky_config.get('pds_uri', 'https://bsky.social') 261 | 262 | logger.info(f"Logging into Bluesky as {username} via {pds_uri}") 263 | 264 | # Use pds_uri from config 265 | client = Client(base_url=pds_uri) 266 | client.login(username, password) 267 | return client 268 | 269 | except Exception as e: 270 | logger.error(f"Failed to load Bluesky configuration: {e}") 271 | logger.error("Please check your config.yaml file or environment variables") 272 | exit(1) 273 | 274 | def remove_outside_quotes(text: str) -> str: 275 | """ 276 | Remove outside double quotes from response text. 
277 | 278 | Only handles double quotes to avoid interfering with contractions: 279 | - Double quotes: "text" → text 280 | - Preserves single quotes and internal quotes 281 | 282 | Args: 283 | text: The text to process 284 | 285 | Returns: 286 | Text with outside double quotes removed 287 | """ 288 | if not text or len(text) < 2: 289 | return text 290 | 291 | text = text.strip() 292 | 293 | # Only remove double quotes from start and end 294 | if text.startswith('"') and text.endswith('"'): 295 | return text[1:-1] 296 | 297 | return text 298 | 299 | def reply_to_post(client: Client, text: str, reply_to_uri: str, reply_to_cid: str, root_uri: Optional[str] = None, root_cid: Optional[str] = None, lang: Optional[str] = None, correlation_id: Optional[str] = None) -> Dict[str, Any]: 300 | """ 301 | Reply to a post on Bluesky with rich text support. 302 | 303 | Args: 304 | client: Authenticated Bluesky client 305 | text: The reply text 306 | reply_to_uri: The URI of the post being replied to (parent) 307 | reply_to_cid: The CID of the post being replied to (parent) 308 | root_uri: The URI of the root post (if replying to a reply). If None, uses reply_to_uri 309 | root_cid: The CID of the root post (if replying to a reply). If None, uses reply_to_cid 310 | lang: Language code for the post (e.g., 'en-US', 'es', 'ja') 311 | correlation_id: Unique ID for tracking this message through the pipeline 312 | 313 | Returns: 314 | The response from sending the post 315 | """ 316 | import re 317 | 318 | # Generate correlation ID if not provided 319 | if correlation_id is None: 320 | correlation_id = str(uuid.uuid4())[:8] 321 | 322 | # Enhanced logging with structured data 323 | logger.info(f"[{correlation_id}] Starting reply_to_post", extra={ 324 | 'correlation_id': correlation_id, 325 | 'text_length': len(text), 326 | 'text_preview': text[:100] + '...' 
if len(text) > 100 else text, 327 | 'reply_to_uri': reply_to_uri, 328 | 'root_uri': root_uri, 329 | 'lang': lang 330 | }) 331 | 332 | start_time = time.time() 333 | 334 | # If root is not provided, this is a reply to the root post 335 | if root_uri is None: 336 | root_uri = reply_to_uri 337 | root_cid = reply_to_cid 338 | 339 | # Create references for the reply 340 | parent_ref = models.create_strong_ref(models.ComAtprotoRepoStrongRef.Main(uri=reply_to_uri, cid=reply_to_cid)) 341 | root_ref = models.create_strong_ref(models.ComAtprotoRepoStrongRef.Main(uri=root_uri, cid=root_cid)) 342 | 343 | # Parse rich text facets (mentions and URLs) 344 | facets = [] 345 | text_bytes = text.encode("UTF-8") 346 | mentions_found = [] 347 | urls_found = [] 348 | 349 | logger.debug(f"[{correlation_id}] Parsing facets from text (length: {len(text_bytes)} bytes)") 350 | 351 | # Parse mentions - fixed to handle @ at start of text 352 | mention_regex = rb"(?:^|[$|\W])(@([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)" 353 | 354 | for m in re.finditer(mention_regex, text_bytes): 355 | handle = m.group(1)[1:].decode("UTF-8") # Remove @ prefix 356 | mentions_found.append(handle) 357 | # Adjust byte positions to account for the optional prefix 358 | mention_start = m.start(1) 359 | mention_end = m.end(1) 360 | try: 361 | # Resolve handle to DID using the API 362 | resolve_resp = client.app.bsky.actor.get_profile({'actor': handle}) 363 | if resolve_resp and hasattr(resolve_resp, 'did'): 364 | facets.append( 365 | models.AppBskyRichtextFacet.Main( 366 | index=models.AppBskyRichtextFacet.ByteSlice( 367 | byteStart=mention_start, 368 | byteEnd=mention_end 369 | ), 370 | features=[models.AppBskyRichtextFacet.Mention(did=resolve_resp.did)] 371 | ) 372 | ) 373 | logger.debug(f"[{correlation_id}] Resolved mention @{handle} -> {resolve_resp.did}") 374 | except Exception as e: 375 | logger.warning(f"[{correlation_id}] Failed to resolve handle @{handle}: {e}") 
376 | continue 377 | 378 | # Parse URLs - fixed to handle URLs at start of text 379 | url_regex = rb"(?:^|[$|\W])(https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*[-a-zA-Z0-9@%_\+~#//=])?)" 380 | 381 | for m in re.finditer(url_regex, text_bytes): 382 | url = m.group(1).decode("UTF-8") 383 | urls_found.append(url) 384 | # Adjust byte positions to account for the optional prefix 385 | url_start = m.start(1) 386 | url_end = m.end(1) 387 | facets.append( 388 | models.AppBskyRichtextFacet.Main( 389 | index=models.AppBskyRichtextFacet.ByteSlice( 390 | byteStart=url_start, 391 | byteEnd=url_end 392 | ), 393 | features=[models.AppBskyRichtextFacet.Link(uri=url)] 394 | ) 395 | ) 396 | logger.debug(f"[{correlation_id}] Found URL: {url}") 397 | 398 | # Parse hashtags 399 | hashtag_regex = rb"(?:^|[$|\s])#([a-zA-Z0-9_]+)" 400 | hashtags_found = [] 401 | 402 | for m in re.finditer(hashtag_regex, text_bytes): 403 | tag = m.group(1).decode("UTF-8") # Get tag without # prefix 404 | hashtags_found.append(tag) 405 | # Get byte positions for the entire hashtag including # 406 | tag_start = m.start(0) 407 | # Adjust start if there's a space/prefix 408 | if text_bytes[tag_start:tag_start+1] in (b' ', b'$'): 409 | tag_start += 1 410 | tag_end = m.end(0) 411 | facets.append( 412 | models.AppBskyRichtextFacet.Main( 413 | index=models.AppBskyRichtextFacet.ByteSlice( 414 | byteStart=tag_start, 415 | byteEnd=tag_end 416 | ), 417 | features=[models.AppBskyRichtextFacet.Tag(tag=tag)] 418 | ) 419 | ) 420 | logger.debug(f"[{correlation_id}] Found hashtag: #{tag}") 421 | 422 | logger.debug(f"[{correlation_id}] Facet parsing complete", extra={ 423 | 'correlation_id': correlation_id, 424 | 'mentions_count': len(mentions_found), 425 | 'mentions': mentions_found, 426 | 'urls_count': len(urls_found), 427 | 'urls': urls_found, 428 | 'hashtags_count': len(hashtags_found), 429 | 'hashtags': hashtags_found, 430 | 'total_facets': len(facets) 431 | }) 432 | 
433 | # Send the reply with facets if any were found 434 | logger.info(f"[{correlation_id}] Sending reply to Bluesky API", extra={ 435 | 'correlation_id': correlation_id, 436 | 'has_facets': bool(facets), 437 | 'facet_count': len(facets), 438 | 'lang': lang 439 | }) 440 | 441 | try: 442 | if facets: 443 | response = client.send_post( 444 | text=text, 445 | reply_to=models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref), 446 | facets=facets, 447 | langs=[lang] if lang else None 448 | ) 449 | else: 450 | response = client.send_post( 451 | text=text, 452 | reply_to=models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref), 453 | langs=[lang] if lang else None 454 | ) 455 | 456 | # Calculate response time 457 | response_time = time.time() - start_time 458 | 459 | # Extract post URL for user-friendly logging 460 | post_url = None 461 | if hasattr(response, 'uri') and response.uri: 462 | # Convert AT-URI to web URL 463 | # Format: at://did:plc:xxx/app.bsky.feed.post/xxx -> https://bsky.app/profile/handle/post/xxx 464 | try: 465 | uri_parts = response.uri.split('/') 466 | if len(uri_parts) >= 4 and uri_parts[3] == 'app.bsky.feed.post': 467 | rkey = uri_parts[4] 468 | # We'd need to resolve DID to handle, but for now just use the URI 469 | post_url = f"bsky://post/{rkey}" 470 | except: 471 | pass 472 | 473 | logger.info(f"[{correlation_id}] Reply sent successfully ({response_time:.3f}s) - URI: {response.uri}" + 474 | (f" - URL: {post_url}" if post_url else ""), extra={ 475 | 'correlation_id': correlation_id, 476 | 'response_time': round(response_time, 3), 477 | 'post_uri': response.uri, 478 | 'post_url': post_url, 479 | 'post_cid': getattr(response, 'cid', None), 480 | 'text_length': len(text) 481 | }) 482 | 483 | return response 484 | 485 | except Exception as e: 486 | response_time = time.time() - start_time 487 | logger.error(f"[{correlation_id}] Failed to send reply", extra={ 488 | 'correlation_id': correlation_id, 489 | 'error': str(e), 490 | 
'error_type': type(e).__name__, 491 | 'response_time': round(response_time, 3), 492 | 'text_length': len(text) 493 | }) 494 | raise 495 | 496 | 497 | def get_post_thread(client: Client, uri: str) -> Optional[Dict[str, Any]]: 498 | """ 499 | Get the thread containing a post to find root post information. 500 | 501 | Args: 502 | client: Authenticated Bluesky client 503 | uri: The URI of the post 504 | 505 | Returns: 506 | The thread data or None if not found 507 | """ 508 | try: 509 | thread = client.app.bsky.feed.get_post_thread({'uri': uri, 'parent_height': 60, 'depth': 10}) 510 | return thread 511 | except Exception as e: 512 | logger.error(f"Error fetching post thread: {e}") 513 | return None 514 | 515 | 516 | def reply_to_notification(client: Client, notification: Any, reply_text: str, lang: str = "en-US", correlation_id: Optional[str] = None) -> Optional[Dict[str, Any]]: 517 | """ 518 | Reply to a notification (mention or reply). 519 | 520 | Args: 521 | client: Authenticated Bluesky client 522 | notification: The notification object from list_notifications 523 | reply_text: The text to reply with 524 | lang: Language code for the post (defaults to "en-US") 525 | correlation_id: Unique ID for tracking this message through the pipeline 526 | 527 | Returns: 528 | The response from sending the reply or None if failed 529 | """ 530 | # Generate correlation ID if not provided 531 | if correlation_id is None: 532 | correlation_id = str(uuid.uuid4())[:8] 533 | 534 | logger.info(f"[{correlation_id}] Processing reply_to_notification", extra={ 535 | 'correlation_id': correlation_id, 536 | 'reply_length': len(reply_text), 537 | 'lang': lang 538 | }) 539 | 540 | try: 541 | # Get the post URI and CID from the notification (handle both dict and object) 542 | if isinstance(notification, dict): 543 | post_uri = notification.get('uri') 544 | post_cid = notification.get('cid') 545 | # Check if the notification record has reply info with root 546 | record = 
notification.get('record', {}) 547 | reply_info = record.get('reply') if isinstance(record, dict) else None 548 | elif hasattr(notification, 'uri') and hasattr(notification, 'cid'): 549 | post_uri = notification.uri 550 | post_cid = notification.cid 551 | # Check if the notification record has reply info with root 552 | reply_info = None 553 | if hasattr(notification, 'record') and hasattr(notification.record, 'reply'): 554 | reply_info = notification.record.reply 555 | else: 556 | post_uri = None 557 | post_cid = None 558 | reply_info = None 559 | 560 | if not post_uri or not post_cid: 561 | logger.error("Notification doesn't have required uri/cid fields") 562 | return None 563 | 564 | # Determine root: if post has reply info, use its root; otherwise this post IS the root 565 | if reply_info: 566 | # Extract root from the notification's reply structure 567 | if isinstance(reply_info, dict): 568 | root_ref = reply_info.get('root') 569 | if root_ref and isinstance(root_ref, dict): 570 | root_uri = root_ref.get('uri', post_uri) 571 | root_cid = root_ref.get('cid', post_cid) 572 | else: 573 | # No root in reply info, use post as root 574 | root_uri = post_uri 575 | root_cid = post_cid 576 | elif hasattr(reply_info, 'root'): 577 | if hasattr(reply_info.root, 'uri') and hasattr(reply_info.root, 'cid'): 578 | root_uri = reply_info.root.uri 579 | root_cid = reply_info.root.cid 580 | else: 581 | root_uri = post_uri 582 | root_cid = post_cid 583 | else: 584 | root_uri = post_uri 585 | root_cid = post_cid 586 | else: 587 | # No reply info means this post IS the root 588 | root_uri = post_uri 589 | root_cid = post_cid 590 | 591 | # Reply to the notification 592 | return reply_to_post( 593 | client=client, 594 | text=reply_text, 595 | reply_to_uri=post_uri, 596 | reply_to_cid=post_cid, 597 | root_uri=root_uri, 598 | root_cid=root_cid, 599 | lang=lang, 600 | correlation_id=correlation_id 601 | ) 602 | 603 | except Exception as e: 604 | logger.error(f"[{correlation_id}] Error 
replying to notification: {e}", extra={ 605 | 'correlation_id': correlation_id, 606 | 'error': str(e), 607 | 'error_type': type(e).__name__ 608 | }) 609 | return None 610 | 611 | 612 | def reply_with_thread_to_notification(client: Client, notification: Any, reply_messages: List[str], lang: str = "en-US", correlation_id: Optional[str] = None) -> Optional[List[Dict[str, Any]]]: 613 | """ 614 | Reply to a notification with a threaded chain of messages (max 15). 615 | 616 | Args: 617 | client: Authenticated Bluesky client 618 | notification: The notification object from list_notifications 619 | reply_messages: List of reply texts (max 15 messages, each max 300 chars) 620 | lang: Language code for the posts (defaults to "en-US") 621 | correlation_id: Unique ID for tracking this message through the pipeline 622 | 623 | Returns: 624 | List of responses from sending the replies or None if failed 625 | """ 626 | # Generate correlation ID if not provided 627 | if correlation_id is None: 628 | correlation_id = str(uuid.uuid4())[:8] 629 | 630 | logger.info(f"[{correlation_id}] Starting threaded reply", extra={ 631 | 'correlation_id': correlation_id, 632 | 'message_count': len(reply_messages), 633 | 'total_length': sum(len(msg) for msg in reply_messages), 634 | 'lang': lang 635 | }) 636 | 637 | try: 638 | # Validate input 639 | if not reply_messages or len(reply_messages) == 0: 640 | logger.error(f"[{correlation_id}] Reply messages list cannot be empty") 641 | return None 642 | if len(reply_messages) > 15: 643 | logger.error(f"[{correlation_id}] Cannot send more than 15 reply messages (got {len(reply_messages)})") 644 | return None 645 | 646 | # Get the post URI and CID from the notification (handle both dict and object) 647 | if isinstance(notification, dict): 648 | post_uri = notification.get('uri') 649 | post_cid = notification.get('cid') 650 | # Check if the notification record has reply info with root 651 | record = notification.get('record', {}) 652 | reply_info = 
record.get('reply') if isinstance(record, dict) else None 653 | elif hasattr(notification, 'uri') and hasattr(notification, 'cid'): 654 | post_uri = notification.uri 655 | post_cid = notification.cid 656 | # Check if the notification record has reply info with root 657 | reply_info = None 658 | if hasattr(notification, 'record') and hasattr(notification.record, 'reply'): 659 | reply_info = notification.record.reply 660 | else: 661 | post_uri = None 662 | post_cid = None 663 | reply_info = None 664 | 665 | if not post_uri or not post_cid: 666 | logger.error("Notification doesn't have required uri/cid fields") 667 | return None 668 | 669 | # Determine root: if post has reply info, use its root; otherwise this post IS the root 670 | if reply_info: 671 | # Extract root from the notification's reply structure 672 | if isinstance(reply_info, dict): 673 | root_ref = reply_info.get('root') 674 | if root_ref and isinstance(root_ref, dict): 675 | root_uri = root_ref.get('uri', post_uri) 676 | root_cid = root_ref.get('cid', post_cid) 677 | else: 678 | # No root in reply info, use post as root 679 | root_uri = post_uri 680 | root_cid = post_cid 681 | elif hasattr(reply_info, 'root'): 682 | if hasattr(reply_info.root, 'uri') and hasattr(reply_info.root, 'cid'): 683 | root_uri = reply_info.root.uri 684 | root_cid = reply_info.root.cid 685 | else: 686 | root_uri = post_uri 687 | root_cid = post_cid 688 | else: 689 | root_uri = post_uri 690 | root_cid = post_cid 691 | else: 692 | # No reply info means this post IS the root 693 | root_uri = post_uri 694 | root_cid = post_cid 695 | 696 | # Send replies in sequence, creating a thread 697 | responses = [] 698 | current_parent_uri = post_uri 699 | current_parent_cid = post_cid 700 | 701 | for i, message in enumerate(reply_messages): 702 | thread_correlation_id = f"{correlation_id}-{i+1}" 703 | logger.info(f"[{thread_correlation_id}] Sending reply {i+1}/{len(reply_messages)}: {message[:50]}...") 704 | 705 | # Send this reply 706 | 
response = reply_to_post( 707 | client=client, 708 | text=message, 709 | reply_to_uri=current_parent_uri, 710 | reply_to_cid=current_parent_cid, 711 | root_uri=root_uri, 712 | root_cid=root_cid, 713 | lang=lang, 714 | correlation_id=thread_correlation_id 715 | ) 716 | 717 | if not response: 718 | logger.error(f"[{thread_correlation_id}] Failed to send reply {i+1}, posting system failure message") 719 | # Try to post a system failure message 720 | failure_response = reply_to_post( 721 | client=client, 722 | text="[SYSTEM FAILURE: COULD NOT POST MESSAGE, PLEASE TRY AGAIN]", 723 | reply_to_uri=current_parent_uri, 724 | reply_to_cid=current_parent_cid, 725 | root_uri=root_uri, 726 | root_cid=root_cid, 727 | lang=lang, 728 | correlation_id=f"{thread_correlation_id}-FAIL" 729 | ) 730 | if failure_response: 731 | responses.append(failure_response) 732 | current_parent_uri = failure_response.uri 733 | current_parent_cid = failure_response.cid 734 | else: 735 | logger.error(f"[{thread_correlation_id}] Could not even send system failure message, stopping thread") 736 | return responses if responses else None 737 | else: 738 | responses.append(response) 739 | # Update parent references for next reply (if any) 740 | if i < len(reply_messages) - 1: # Not the last message 741 | current_parent_uri = response.uri 742 | current_parent_cid = response.cid 743 | 744 | logger.info(f"[{correlation_id}] Successfully sent {len(responses)} threaded replies", extra={ 745 | 'correlation_id': correlation_id, 746 | 'replies_sent': len(responses), 747 | 'replies_requested': len(reply_messages) 748 | }) 749 | return responses 750 | 751 | except Exception as e: 752 | logger.error(f"[{correlation_id}] Error sending threaded reply to notification: {e}", extra={ 753 | 'correlation_id': correlation_id, 754 | 'error': str(e), 755 | 'error_type': type(e).__name__, 756 | 'message_count': len(reply_messages) 757 | }) 758 | return None 759 | 760 | 761 | def create_synthesis_ack(client: Client, note: str) 
-> Optional[Dict[str, Any]]: 762 | """ 763 | Create a stream.thought.ack record for synthesis without a target post. 764 | 765 | This creates a synthesis acknowledgment with null subject field. 766 | 767 | Args: 768 | client: Authenticated Bluesky client 769 | note: The synthesis note/content 770 | 771 | Returns: 772 | The response from creating the acknowledgment record or None if failed 773 | """ 774 | try: 775 | import requests 776 | import json 777 | from datetime import datetime, timezone 778 | 779 | # Get session info from the client 780 | access_token = None 781 | user_did = None 782 | 783 | # Try different ways to get the session info 784 | if hasattr(client, '_session') and client._session: 785 | access_token = client._session.access_jwt 786 | user_did = client._session.did 787 | elif hasattr(client, 'access_jwt'): 788 | access_token = client.access_jwt 789 | user_did = client.did if hasattr(client, 'did') else None 790 | else: 791 | logger.error("Cannot access client session information") 792 | return None 793 | 794 | if not access_token or not user_did: 795 | logger.error("Missing access token or DID from session") 796 | return None 797 | 798 | # Get PDS URI from config instead of environment variables 799 | from config_loader import get_bluesky_config 800 | bluesky_config = get_bluesky_config() 801 | pds_host = bluesky_config['pds_uri'] 802 | 803 | # Create acknowledgment record with null subject 804 | now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") 805 | ack_record = { 806 | "$type": "stream.thought.ack", 807 | "subject": None, # Null subject for synthesis 808 | "createdAt": now, 809 | "note": note 810 | } 811 | 812 | # Create the record 813 | headers = {"Authorization": f"Bearer {access_token}"} 814 | create_record_url = f"{pds_host}/xrpc/com.atproto.repo.createRecord" 815 | 816 | create_data = { 817 | "repo": user_did, 818 | "collection": "stream.thought.ack", 819 | "record": ack_record 820 | } 821 | 822 | response = 
requests.post(create_record_url, headers=headers, json=create_data, timeout=10) 823 | response.raise_for_status() 824 | result = response.json() 825 | 826 | logger.info(f"Successfully created synthesis acknowledgment") 827 | return result 828 | 829 | except Exception as e: 830 | logger.error(f"Error creating synthesis acknowledgment: {e}") 831 | return None 832 | 833 | 834 | def acknowledge_post(client: Client, post_uri: str, post_cid: str, note: Optional[str] = None) -> Optional[Dict[str, Any]]: 835 | """ 836 | Create a stream.thought.ack record to acknowledge a post. 837 | 838 | This creates a custom acknowledgment record instead of a standard Bluesky like, 839 | allowing void to track which posts it has engaged with. 840 | 841 | Args: 842 | client: Authenticated Bluesky client 843 | post_uri: The URI of the post to acknowledge 844 | post_cid: The CID of the post to acknowledge 845 | note: Optional note to attach to the acknowledgment 846 | 847 | Returns: 848 | The response from creating the acknowledgment record or None if failed 849 | """ 850 | try: 851 | import requests 852 | import json 853 | from datetime import datetime, timezone 854 | 855 | # Get session info from the client 856 | # The atproto Client stores the session differently 857 | access_token = None 858 | user_did = None 859 | 860 | # Try different ways to get the session info 861 | if hasattr(client, '_session') and client._session: 862 | access_token = client._session.access_jwt 863 | user_did = client._session.did 864 | elif hasattr(client, 'access_jwt'): 865 | access_token = client.access_jwt 866 | user_did = client.did if hasattr(client, 'did') else None 867 | else: 868 | logger.error("Cannot access client session information") 869 | return None 870 | 871 | if not access_token or not user_did: 872 | logger.error("Missing access token or DID from session") 873 | return None 874 | 875 | # Get PDS URI from config instead of environment variables 876 | from config_loader import get_bluesky_config 
877 | bluesky_config = get_bluesky_config() 878 | pds_host = bluesky_config['pds_uri'] 879 | 880 | # Create acknowledgment record with stream.thought.ack type 881 | now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") 882 | ack_record = { 883 | "$type": "stream.thought.ack", 884 | "subject": { 885 | "uri": post_uri, 886 | "cid": post_cid 887 | }, 888 | "createdAt": now, 889 | "note": note # Will be null if no note provided 890 | } 891 | 892 | # Create the record 893 | headers = {"Authorization": f"Bearer {access_token}"} 894 | create_record_url = f"{pds_host}/xrpc/com.atproto.repo.createRecord" 895 | 896 | create_data = { 897 | "repo": user_did, 898 | "collection": "stream.thought.ack", 899 | "record": ack_record 900 | } 901 | 902 | response = requests.post(create_record_url, headers=headers, json=create_data, timeout=10) 903 | response.raise_for_status() 904 | result = response.json() 905 | 906 | logger.info(f"Successfully acknowledged post: {post_uri}") 907 | return result 908 | 909 | except Exception as e: 910 | logger.error(f"Error acknowledging post: {e}") 911 | return None 912 | 913 | 914 | def create_tool_call_record(client: Client, tool_name: str, arguments: str, tool_call_id: Optional[str] = None) -> Optional[Dict[str, Any]]: 915 | """ 916 | Create a stream.thought.tool_call record to track tool usage. 917 | 918 | This creates a record of tool calls made by void during processing, 919 | allowing for analysis of tool usage patterns and debugging. 
920 | 921 | Args: 922 | client: Authenticated Bluesky client 923 | tool_name: Name of the tool being called 924 | arguments: Raw JSON string of the tool arguments 925 | tool_call_id: Optional ID of the tool call for correlation 926 | 927 | Returns: 928 | The response from creating the tool call record or None if failed 929 | """ 930 | try: 931 | import requests 932 | import json 933 | from datetime import datetime, timezone 934 | 935 | # Get session info from the client 936 | access_token = None 937 | user_did = None 938 | 939 | # Try different ways to get the session info 940 | if hasattr(client, '_session') and client._session: 941 | access_token = client._session.access_jwt 942 | user_did = client._session.did 943 | elif hasattr(client, 'access_jwt'): 944 | access_token = client.access_jwt 945 | user_did = client.did if hasattr(client, 'did') else None 946 | else: 947 | logger.error("Cannot access client session information") 948 | return None 949 | 950 | if not access_token or not user_did: 951 | logger.error("Missing access token or DID from session") 952 | return None 953 | 954 | # Get PDS URI from config instead of environment variables 955 | from config_loader import get_bluesky_config 956 | bluesky_config = get_bluesky_config() 957 | pds_host = bluesky_config['pds_uri'] 958 | 959 | # Create tool call record 960 | now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") 961 | tool_record = { 962 | "$type": "stream.thought.tool.call", 963 | "tool_name": tool_name, 964 | "arguments": arguments, # Store as string to avoid parsing issues 965 | "createdAt": now 966 | } 967 | 968 | # Add tool_call_id if provided 969 | if tool_call_id: 970 | tool_record["tool_call_id"] = tool_call_id 971 | 972 | # Create the record 973 | headers = {"Authorization": f"Bearer {access_token}"} 974 | create_record_url = f"{pds_host}/xrpc/com.atproto.repo.createRecord" 975 | 976 | create_data = { 977 | "repo": user_did, 978 | "collection": "stream.thought.tool.call", 979 | 
"record": tool_record 980 | } 981 | 982 | response = requests.post(create_record_url, headers=headers, json=create_data, timeout=10) 983 | if response.status_code != 200: 984 | logger.error(f"Tool call record creation failed: {response.status_code} - {response.text}") 985 | response.raise_for_status() 986 | result = response.json() 987 | 988 | logger.debug(f"Successfully recorded tool call: {tool_name}") 989 | return result 990 | 991 | except Exception as e: 992 | logger.error(f"Error creating tool call record: {e}") 993 | return None 994 | 995 | 996 | def create_reasoning_record(client: Client, reasoning_text: str) -> Optional[Dict[str, Any]]: 997 | """ 998 | Create a stream.thought.reasoning record to track agent reasoning. 999 | 1000 | This creates a record of void's reasoning during message processing, 1001 | providing transparency into the decision-making process. 1002 | 1003 | Args: 1004 | client: Authenticated Bluesky client 1005 | reasoning_text: The reasoning text from the agent 1006 | 1007 | Returns: 1008 | The response from creating the reasoning record or None if failed 1009 | """ 1010 | try: 1011 | import requests 1012 | import json 1013 | from datetime import datetime, timezone 1014 | 1015 | # Get session info from the client 1016 | access_token = None 1017 | user_did = None 1018 | 1019 | # Try different ways to get the session info 1020 | if hasattr(client, '_session') and client._session: 1021 | access_token = client._session.access_jwt 1022 | user_did = client._session.did 1023 | elif hasattr(client, 'access_jwt'): 1024 | access_token = client.access_jwt 1025 | user_did = client.did if hasattr(client, 'did') else None 1026 | else: 1027 | logger.error("Cannot access client session information") 1028 | return None 1029 | 1030 | if not access_token or not user_did: 1031 | logger.error("Missing access token or DID from session") 1032 | return None 1033 | 1034 | # Get PDS URI from config instead of environment variables 1035 | from config_loader import 
get_bluesky_config 1036 | bluesky_config = get_bluesky_config() 1037 | pds_host = bluesky_config['pds_uri'] 1038 | 1039 | # Create reasoning record 1040 | now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") 1041 | reasoning_record = { 1042 | "$type": "stream.thought.reasoning", 1043 | "reasoning": reasoning_text, 1044 | "createdAt": now 1045 | } 1046 | 1047 | # Create the record 1048 | headers = {"Authorization": f"Bearer {access_token}"} 1049 | create_record_url = f"{pds_host}/xrpc/com.atproto.repo.createRecord" 1050 | 1051 | create_data = { 1052 | "repo": user_did, 1053 | "collection": "stream.thought.reasoning", 1054 | "record": reasoning_record 1055 | } 1056 | 1057 | response = requests.post(create_record_url, headers=headers, json=create_data, timeout=10) 1058 | response.raise_for_status() 1059 | result = response.json() 1060 | 1061 | logger.debug(f"Successfully recorded reasoning (length: {len(reasoning_text)} chars)") 1062 | return result 1063 | 1064 | except Exception as e: 1065 | logger.error(f"Error creating reasoning record: {e}") 1066 | return None 1067 | 1068 | 1069 | def create_memory_record(client: Client, content: str, tags: Optional[List[str]] = None) -> Optional[Dict[str, Any]]: 1070 | """ 1071 | Create a stream.thought.memory record to store archival memory insertions. 1072 | 1073 | This creates a record of archival_memory_insert tool calls, preserving 1074 | important memories and context in the AT Protocol. 
1075 | 1076 | Args: 1077 | client: Authenticated Bluesky client 1078 | content: The memory content being archived 1079 | tags: Optional list of tags associated with this memory 1080 | 1081 | Returns: 1082 | The response from creating the memory record or None if failed 1083 | """ 1084 | try: 1085 | import requests 1086 | import json 1087 | from datetime import datetime, timezone 1088 | 1089 | # Get session info from the client 1090 | access_token = None 1091 | user_did = None 1092 | 1093 | # Try different ways to get the session info 1094 | if hasattr(client, '_session') and client._session: 1095 | access_token = client._session.access_jwt 1096 | user_did = client._session.did 1097 | elif hasattr(client, 'access_jwt'): 1098 | access_token = client.access_jwt 1099 | user_did = client.did if hasattr(client, 'did') else None 1100 | else: 1101 | logger.error("Cannot access client session information") 1102 | return None 1103 | 1104 | if not access_token or not user_did: 1105 | logger.error("Missing access token or DID from session") 1106 | return None 1107 | 1108 | # Get PDS URI from config instead of environment variables 1109 | from config_loader import get_bluesky_config 1110 | bluesky_config = get_bluesky_config() 1111 | pds_host = bluesky_config['pds_uri'] 1112 | 1113 | # Create memory record 1114 | now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") 1115 | memory_record = { 1116 | "$type": "stream.thought.memory", 1117 | "content": content, 1118 | "createdAt": now 1119 | } 1120 | 1121 | # Add tags if provided (can be null) 1122 | if tags is not None: 1123 | memory_record["tags"] = tags 1124 | 1125 | # Create the record 1126 | headers = {"Authorization": f"Bearer {access_token}"} 1127 | create_record_url = f"{pds_host}/xrpc/com.atproto.repo.createRecord" 1128 | 1129 | create_data = { 1130 | "repo": user_did, 1131 | "collection": "stream.thought.memory", 1132 | "record": memory_record 1133 | } 1134 | 1135 | response = 
requests.post(create_record_url, headers=headers, json=create_data, timeout=10) 1136 | response.raise_for_status() 1137 | result = response.json() 1138 | 1139 | tags_info = f" with {len(tags)} tags" if tags else " (no tags)" 1140 | logger.debug(f"Successfully recorded memory (length: {len(content)} chars{tags_info})") 1141 | return result 1142 | 1143 | except Exception as e: 1144 | logger.error(f"Error creating memory record: {e}") 1145 | return None 1146 | 1147 | 1148 | def sync_followers(client: Client, dry_run: bool = False) -> Dict[str, Any]: 1149 | """ 1150 | Check who is following the bot and who the bot is following, 1151 | then follow back users who aren't already followed. 1152 | 1153 | This implements the autofollow feature by creating follow records 1154 | (app.bsky.graph.follow) for users who follow the bot. 1155 | 1156 | Args: 1157 | client: Authenticated Bluesky client 1158 | dry_run: If True, only report what would be done without actually following 1159 | 1160 | Returns: 1161 | Dict with stats: { 1162 | 'followers_count': int, 1163 | 'following_count': int, 1164 | 'to_follow': List[str], # List of handles to follow 1165 | 'newly_followed': List[str], # List of handles actually followed (empty if dry_run) 1166 | 'errors': List[str] # Any errors encountered 1167 | } 1168 | """ 1169 | try: 1170 | from datetime import datetime, timezone 1171 | 1172 | # Get session info from the client 1173 | access_token = None 1174 | user_did = None 1175 | 1176 | if hasattr(client, '_session') and client._session: 1177 | access_token = client._session.access_jwt 1178 | user_did = client._session.did 1179 | elif hasattr(client, 'access_jwt'): 1180 | access_token = client.access_jwt 1181 | user_did = client.did if hasattr(client, 'did') else None 1182 | else: 1183 | logger.error("Cannot access client session information") 1184 | return {'error': 'Cannot access client session'} 1185 | 1186 | if not access_token or not user_did: 1187 | logger.error("Missing access token or 
DID from session") 1188 | return {'error': 'Missing access token or DID'} 1189 | 1190 | # Get PDS URI from config 1191 | from config_loader import get_bluesky_config 1192 | bluesky_config = get_bluesky_config() 1193 | pds_host = bluesky_config['pds_uri'] 1194 | 1195 | # Get followers using the API 1196 | followers_response = client.app.bsky.graph.get_followers({'actor': user_did}) 1197 | followers = followers_response.followers if hasattr(followers_response, 'followers') else [] 1198 | follower_dids = {f.did for f in followers} 1199 | 1200 | # Get following using the API 1201 | following_response = client.app.bsky.graph.get_follows({'actor': user_did}) 1202 | following = following_response.follows if hasattr(following_response, 'follows') else [] 1203 | following_dids = {f.did for f in following} 1204 | 1205 | # Find users who follow us but we don't follow back 1206 | to_follow_dids = follower_dids - following_dids 1207 | 1208 | # Build result object 1209 | result = { 1210 | 'followers_count': len(followers), 1211 | 'following_count': len(following), 1212 | 'to_follow': [], 1213 | 'newly_followed': [], 1214 | 'errors': [] 1215 | } 1216 | 1217 | # Get handles for users to follow 1218 | to_follow_handles = [] 1219 | for follower in followers: 1220 | if follower.did in to_follow_dids: 1221 | handle = follower.handle if hasattr(follower, 'handle') else follower.did 1222 | to_follow_handles.append(handle) 1223 | result['to_follow'].append(handle) 1224 | 1225 | logger.info(f"Follower sync: {len(followers)} followers, {len(following)} following, {len(to_follow_dids)} to follow") 1226 | 1227 | # If dry run, just return the stats 1228 | if dry_run: 1229 | logger.info(f"Dry run - would follow: {', '.join(to_follow_handles)}") 1230 | return result 1231 | 1232 | # Actually follow the users with rate limiting 1233 | import requests 1234 | headers = {"Authorization": f"Bearer {access_token}"} 1235 | create_record_url = f"{pds_host}/xrpc/com.atproto.repo.createRecord" 1236 | 1237 
| for i, did in enumerate(to_follow_dids): 1238 | try: 1239 | # Rate limiting: wait 2 seconds between follows to avoid spamming the server 1240 | if i > 0: 1241 | time.sleep(2) 1242 | 1243 | # Create follow record 1244 | now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") 1245 | follow_record = { 1246 | "$type": "app.bsky.graph.follow", 1247 | "subject": did, 1248 | "createdAt": now 1249 | } 1250 | 1251 | create_data = { 1252 | "repo": user_did, 1253 | "collection": "app.bsky.graph.follow", 1254 | "record": follow_record 1255 | } 1256 | 1257 | response = requests.post(create_record_url, headers=headers, json=create_data, timeout=10) 1258 | response.raise_for_status() 1259 | 1260 | # Find handle for this DID 1261 | handle = next((f.handle for f in followers if f.did == did), did) 1262 | result['newly_followed'].append(handle) 1263 | logger.info(f"Followed: {handle}") 1264 | 1265 | except Exception as e: 1266 | error_msg = f"Failed to follow {did}: {e}" 1267 | logger.error(error_msg) 1268 | result['errors'].append(error_msg) 1269 | 1270 | return result 1271 | 1272 | except Exception as e: 1273 | logger.error(f"Error syncing followers: {e}") 1274 | return {'error': str(e)} 1275 | 1276 | 1277 | if __name__ == "__main__": 1278 | client = default_login() 1279 | # do something with the client 1280 | logger.info("Client is ready to use!") 1281 | --------------------------------------------------------------------------------