├── assets └── liz.jpg ├── .dockerignore ├── clients ├── twitter │ ├── index.js │ ├── package.json │ ├── config.js │ ├── utils.js │ ├── client.js │ └── base.js ├── utils │ └── request-queue.ts └── discord │ ├── base.ts │ └── client.ts ├── src ├── utils │ ├── db.ts │ ├── initDb.ts │ ├── memory.ts │ └── llm.ts ├── routes │ ├── index.ts │ ├── conversation.ts │ └── tweet.ts ├── middleware │ ├── create-memory.ts │ ├── index.ts │ ├── validate-input.ts │ ├── load-memories.ts │ ├── wrap-context.ts │ └── router.ts ├── index.ts ├── looker │ ├── character.json │ └── looker.ts ├── types │ └── index.ts ├── framework │ └── index.ts ├── agent │ └── index.ts └── example │ └── example.ts ├── scripts ├── test-server.ts └── test-discord.ts ├── tsconfig.json ├── .env.example ├── .gitignore ├── docker-compose.yml ├── Dockerfile ├── prisma └── schema.prisma ├── package.json └── README.md /assets/liz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Akrasia0/liz/HEAD/assets/liz.jpg -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | dist 4 | .env 5 | *.log 6 | .git 7 | .gitignore 8 | *.md 9 | .DS_Store -------------------------------------------------------------------------------- /clients/twitter/index.js: -------------------------------------------------------------------------------- 1 | const { TwitterClient } = require("./client"); 2 | const { twitterConfigSchema } = require("./config"); 3 | 4 | module.exports = { 5 | TwitterClient, 6 | twitterConfigSchema, 7 | }; 8 | -------------------------------------------------------------------------------- /src/utils/db.ts: -------------------------------------------------------------------------------- 1 | import { PrismaClient } from "@prisma/client"; 2 | 3 | declare global { 4 | var prisma: PrismaClient | 
undefined; 5 | } 6 | 7 | export const prisma = global.prisma || new PrismaClient(); 8 | 9 | if (process.env.NODE_ENV === "development") { 10 | global.prisma = prisma; 11 | } 12 | -------------------------------------------------------------------------------- /clients/twitter/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@liz/twitter-client", 3 | "version": "1.0.0", 4 | "description": "Twitter client for Liz agent framework", 5 | "main": "index.js", 6 | "dependencies": { 7 | "agent-twitter-client": "0.0.17", 8 | "axios": "^1.7.9", 9 | "zod": "^3.22.4" 10 | }, 11 | "devDependencies": { 12 | "dotenv": "^16.0.3" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /scripts/test-server.ts: -------------------------------------------------------------------------------- 1 | import express from 'express'; 2 | 3 | const app = express(); 4 | app.use(express.json()); 5 | 6 | // Mock agent endpoint 7 | app.post('/agent/input', (req, res) => { 8 | console.log('Received request:', req.body); 9 | 10 | // Mock response 11 | const response = `Test response to: ${req.body.input.text}`; 12 | res.json(response); 13 | }); 14 | 15 | const port = 3000; 16 | app.listen(port, () => { 17 | console.log(`Test server running at http://localhost:${port}`); 18 | }); 19 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "module": "commonjs", 5 | "lib": ["ES2020"], 6 | "declaration": true, 7 | "outDir": "./dist", 8 | "rootDir": "./src", 9 | "strict": true, 10 | "esModuleInterop": true, 11 | "skipLibCheck": true, 12 | "forceConsistentCasingInFileNames": true, 13 | "moduleResolution": "node", 14 | "resolveJsonModule": true, 15 | "isolatedModules": true 16 | }, 17 | "include": ["src/**/*"], 18 | 
"exclude": ["node_modules", "dist"] 19 | } 20 | -------------------------------------------------------------------------------- /src/routes/index.ts: -------------------------------------------------------------------------------- 1 | import { handleConversation } from "./conversation"; 2 | import { handleTweetGeneration } from "./tweet"; 3 | 4 | export const routes = [ 5 | { 6 | name: "conversation", 7 | description: 8 | "Call if the user is just conversing or if none of the other routes apply", 9 | handler: handleConversation, 10 | }, 11 | { 12 | name: "create_new_tweet", 13 | description: 14 | "Only call if the message is the following: Generate a new tweet to post on your timeline ", 15 | handler: handleTweetGeneration, 16 | }, 17 | ]; 18 | -------------------------------------------------------------------------------- /clients/twitter/config.js: -------------------------------------------------------------------------------- 1 | const { z } = require('zod'); 2 | 3 | const twitterConfigSchema = z.object({ 4 | username: z.string().min(1, "Twitter username is required"), 5 | password: z.string().min(1, "Twitter password is required"), 6 | email: z.string().email("Valid email is required"), 7 | twoFactorSecret: z.string().optional(), 8 | retryLimit: z.number().int().min(1).default(5), 9 | postIntervalHours: z.number().int().min(1).default(4), 10 | enableActions: z.boolean().default(false) 11 | }); 12 | 13 | module.exports = { 14 | twitterConfigSchema 15 | }; -------------------------------------------------------------------------------- /src/middleware/create-memory.ts: -------------------------------------------------------------------------------- 1 | import { AgentMiddleware } from "../types"; 2 | import { prisma } from "../utils/db"; 3 | 4 | export const createMemoryFromInput: AgentMiddleware = async ( 5 | req, 6 | res, 7 | next 8 | ) => { 9 | try { 10 | await prisma.memory.create({ 11 | data: { 12 | userId: req.input.userId, 13 | agentId: 
req.input.agentId, 14 | roomId: req.input.roomId, 15 | type: req.input.type, 16 | generator: "external", 17 | content: JSON.stringify(req.input), 18 | }, 19 | }); 20 | 21 | await next(); 22 | } catch (error) { 23 | await res.error( 24 | new Error(`Failed to create memory: ${(error as Error).message}`) 25 | ); 26 | } 27 | }; 28 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Database configuration 2 | DATABASE_URL="postgresql://user:password@localhost:5432/dbname" 3 | # For SQLite use: DATABASE_URL="file:./prisma/dev.db" 4 | 5 | # OpenAI API configuration 6 | OPENAI_API_KEY="your-openai-api-key" 7 | 8 | # OpenRouter API configuration 9 | OPENROUTER_API_KEY="your-openrouter-api-key" 10 | 11 | # Application configuration 12 | APP_URL="http://localhost:3000" # Required for OpenRouter 13 | 14 | # Social Media Configuration 15 | DISCORD_APPLICATION_ID= 16 | DISCORD_API_TOKEN= 17 | TWITTER_USERNAME= 18 | TWITTER_PASSWORD= 19 | TWITTER_EMAIL= 20 | TWITTER_POST_INTERVAL_HOURS=4 21 | TWITTER_POLLING_INTERVAL=5 # In minutes 22 | TWITTER_DRY_RUN=true 23 | 24 | SERVER_PORT=3000 -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | // Core framework 2 | export { AgentFramework } from "./framework"; 3 | 4 | // Types 5 | export type { 6 | Agent, 7 | AgentMiddleware, 8 | AgentRequest, 9 | AgentResponse, 10 | Character, 11 | InputObject, 12 | Memory, 13 | Route, 14 | } from "./types"; 15 | 16 | // Enums (these are values, not types) 17 | export { InputSource, InputType, LLMSize } from "./types"; 18 | 19 | // Middleware 20 | export { 21 | validateInput, 22 | loadMemories, 23 | wrapContext, 24 | router, 25 | createMemoryFromInput, 26 | standardMiddleware, 27 | } from "./middleware"; 28 | 29 | // Utilities 30 | export { 
LLMUtils } from "./utils/llm"; 31 | 32 | // Example implementation 33 | export * as example from "./example/example"; 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | node_modules/ 3 | .pnp/ 4 | .pnp.js 5 | 6 | 7 | **/package-lock.json 8 | **/pnpm-lock.yaml 9 | 10 | # Production build 11 | dist/ 12 | build/ 13 | 14 | # Environment variables 15 | .env 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | # Database 22 | *.db 23 | *.db-journal 24 | prisma/migrations/ 25 | 26 | # Logs 27 | logs/ 28 | *.log 29 | npm-debug.log* 30 | yarn-debug.log* 31 | yarn-error.log* 32 | 33 | # IDE 34 | .idea/ 35 | .vscode/ 36 | *.swp 37 | *.swo 38 | 39 | # OS 40 | .DS_Store 41 | Thumbs.db 42 | 43 | # Testing 44 | coverage/ 45 | 46 | # Temporary files 47 | *.tmp 48 | *.temp 49 | 50 | # TypeScript 51 | *.tsbuildinfo 52 | 53 | 54 | /src/test/* -------------------------------------------------------------------------------- /src/middleware/index.ts: -------------------------------------------------------------------------------- 1 | import { validateInput } from "./validate-input"; 2 | import { loadMemories } from "./load-memories"; 3 | import { wrapContext } from "./wrap-context"; 4 | import { router } from "./router"; 5 | import { createMemoryFromInput } from "./create-memory"; 6 | import { AgentMiddleware } from "../types"; 7 | 8 | // Export individual middleware 9 | export { 10 | validateInput, 11 | loadMemories, 12 | wrapContext, 13 | router, 14 | createMemoryFromInput, 15 | }; 16 | 17 | // Export standard middleware stack 18 | export const standardMiddleware: AgentMiddleware[] = [ 19 | validateInput, 20 | loadMemories, // Load previous memories 21 | wrapContext, // Wrap everything in context 22 | createMemoryFromInput, 23 | router, 24 | ]; 25 | 
-------------------------------------------------------------------------------- /src/looker/character.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "default-character", 3 | "name": "Liz", 4 | "description": "A helpful AI assistant with a friendly personality", 5 | "traits": ["friendly", "helpful", "knowledgeable", "empathetic"], 6 | "preferences": { 7 | "communicationStyle": "casual", 8 | "responseLength": "medium", 9 | "humor": "light" 10 | }, 11 | "background": { 12 | "expertise": ["technology", "science", "arts"], 13 | "interests": ["helping users", "learning new things", "solving problems"] 14 | }, 15 | "voice": { 16 | "tone": "warm", 17 | "pace": "moderate", 18 | "vocabulary": "accessible" 19 | }, 20 | "metadata": { 21 | "version": "1.0.0", 22 | "lastUpdated": "2025-02-15T12:00:00Z", 23 | "creator": "Development Team" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | tappd-simulator: 3 | image: phalanetwork/tappd-simulator:latest 4 | platform: linux/amd64 # Since you're on M1/M2 Mac 5 | ports: 6 | - "8090:8090" 7 | 8 | app: 9 | build: . 
10 | platform: linux/arm64 # For M1/M2 Mac 11 | ports: 12 | - "3000:3000" 13 | environment: 14 | - TEE_MODE=DOCKER 15 | - OPENAI_API_KEY=${OPENAI_API_KEY} 16 | - OPENROUTER_API_KEY=${OPENROUTER_API_KEY} 17 | - TWITTER_USERNAME=${TWITTER_USERNAME} 18 | - TWITTER_PASSWORD=${TWITTER_PASSWORD} 19 | - TWITTER_EMAIL=${TWITTER_EMAIL} 20 | - TWITTER_2FA_SECRET=${TWITTER_2FA_SECRET:-} 21 | - TWITTER_POST_INTERVAL_HOURS=${TWITTER_POST_INTERVAL_HOURS:-4} 22 | - TWITTER_POLLING_INTERVAL=${TWITTER_POLLING_INTERVAL:-5} 23 | volumes: 24 | - ./prisma/dev.db:/app/prisma/dev.db 25 | depends_on: 26 | - tappd-simulator 27 | -------------------------------------------------------------------------------- /src/routes/conversation.ts: -------------------------------------------------------------------------------- 1 | import { LLMUtils } from "../utils/llm"; 2 | import { prisma } from "../utils/db"; 3 | import { AgentRequest, AgentResponse } from "../types"; 4 | 5 | export const handleConversation = async ( 6 | context: string, 7 | req: AgentRequest, 8 | res: AgentResponse 9 | ) => { 10 | const llmUtils = new LLMUtils(); 11 | 12 | const response = 13 | req.input.imageUrls && req.input.imageUrls.length > 0 14 | ? 
await llmUtils.getTextWithImageFromLLM( 15 | context, 16 | req.input.imageUrls, 17 | "anthropic/claude-3.5-sonnet" 18 | ) 19 | : await llmUtils.getTextFromLLM(context, "anthropic/claude-3.5-sonnet"); 20 | 21 | // Store the response as a memory 22 | await prisma.memory.create({ 23 | data: { 24 | userId: req.input.userId, 25 | agentId: req.input.agentId, 26 | roomId: req.input.roomId, 27 | type: "agent", 28 | generator: "llm", 29 | content: JSON.stringify({ text: response }), 30 | }, 31 | }); 32 | 33 | await res.send(response); 34 | }; 35 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 ubuntu:22.04 2 | 3 | # Install Node.js and npm 4 | RUN apt-get update && apt-get install -y \ 5 | curl \ 6 | && curl -fsSL https://deb.nodesource.com/setup_23.x | bash - \ 7 | && apt-get install -y \ 8 | nodejs \ 9 | python3 \ 10 | make \ 11 | g++ \ 12 | && rm -rf /var/lib/apt/lists/* 13 | 14 | WORKDIR /app 15 | 16 | # First copy only files needed for npm install 17 | COPY package*.json ./ 18 | COPY tsconfig.json ./ 19 | 20 | # Install dependencies 21 | RUN npm install 22 | 23 | # Copy Prisma files 24 | COPY prisma ./prisma/ 25 | 26 | # Generate Prisma client 27 | RUN npx prisma generate 28 | 29 | # Now copy the source code 30 | COPY src ./src 31 | COPY clients ./clients 32 | 33 | # Build the application 34 | RUN npm run build 35 | 36 | # Set environment variables 37 | ENV NODE_ENV=production 38 | ENV TEE_MODE=DOCKER 39 | ENV DATABASE_URL="file:/app/prisma/dev.db" 40 | 41 | # Expose port 42 | EXPOSE 3000 43 | 44 | # Start the application 45 | CMD ["npm", "start"] 46 | -------------------------------------------------------------------------------- /src/routes/tweet.ts: -------------------------------------------------------------------------------- 1 | import { LLMUtils } from "../utils/llm"; 2 | import { AgentRequest, 
AgentResponse, LLMSize } from "../types"; 3 | import { createTwitterMemory } from "../utils/memory"; 4 | 5 | // Initialize LLM utility for text generation 6 | const tweetGenerator = new LLMUtils(); 7 | 8 | export const handleTweetGeneration = async ( 9 | context: string, 10 | req: AgentRequest, 11 | res: AgentResponse 12 | ) => { 13 | // Generate tweet content using LLM 14 | const tweetContent = await tweetGenerator.getTextFromLLM( 15 | `${context}\n\n 16 | Look at the previous twitter context then generate a original and engaging tweet that fits in with your character and previous twitter history. ONLY output the tweet, no reflection on it. No "Tweet: ". Just the text of the tweet. The text you output will be posted directly to twitter.`, 17 | "anthropic/claude-3.5-sonnet" 18 | ); 19 | 20 | const logMessage = `Tweeted: ${tweetContent}`; 21 | 22 | // Store the tweet in memory for future context 23 | await createTwitterMemory( 24 | req.input.userId, 25 | req.input.agentId, 26 | req.input.roomId, 27 | logMessage 28 | ); 29 | 30 | // Send the generated tweet as response 31 | await res.send(tweetContent); 32 | }; 33 | -------------------------------------------------------------------------------- /src/middleware/validate-input.ts: -------------------------------------------------------------------------------- 1 | import { AgentMiddleware } from "../types"; 2 | 3 | export const validateInput: AgentMiddleware = async (req, res, next) => { 4 | const { input } = req; 5 | 6 | // Validate required fields 7 | if (!input.userId || !input.agentId || !input.type) { 8 | return res.error(new Error("Invalid input: missing required fields")); 9 | } 10 | 11 | // Validate input type-specific fields 12 | switch (input.type) { 13 | case "text": 14 | if (!input.text) { 15 | return res.error(new Error("Text input requires text field")); 16 | } 17 | break; 18 | // case "image": 19 | // if (!input.imageUrl) { 20 | // return res.error(new Error("Image input requires imageUrl field")); 21 | 
// } 22 | // break; 23 | case "text_and_image": 24 | if (!input.text || !input.imageUrls || input.imageUrls.length === 0) { 25 | return res.error( 26 | new Error( 27 | "Text and image input requires both text and imageUrls fields" 28 | ) 29 | ); 30 | } 31 | break; 32 | // case "audio": 33 | // if (!input.audioUrl) { 34 | // return res.error(new Error("Audio input requires audioUrl field")); 35 | // } 36 | // break; 37 | // case "video": 38 | // if (!input.videoUrl) { 39 | // return res.error(new Error("Video input requires videoUrl field")); 40 | // } 41 | // break; 42 | } 43 | 44 | await next(); 45 | }; 46 | -------------------------------------------------------------------------------- /src/utils/initDb.ts: -------------------------------------------------------------------------------- 1 | import { exec } from "child_process"; 2 | import { promisify } from "util"; 3 | import * as fs from "fs"; 4 | import * as path from "path"; 5 | 6 | const execAsync = promisify(exec); 7 | 8 | export async function initializeDatabase() { 9 | try { 10 | const dbProvider = process.env.DATABASE_PROVIDER || "sqlite"; 11 | const isProduction = process.env.NODE_ENV === "production"; 12 | 13 | if (dbProvider === "sqlite") { 14 | const dbPath = path.join(__dirname, "../../prisma/dev.db"); 15 | const migrationPath = path.join(__dirname, "../../prisma/migrations"); 16 | const dbExists = fs.existsSync(dbPath); 17 | const migrationsExist = fs.existsSync(migrationPath); 18 | 19 | if (!migrationsExist && !isProduction) { 20 | // Only use migrate dev in development 21 | console.log("Creating initial migration..."); 22 | await execAsync("npx prisma migrate dev --name init"); 23 | } else { 24 | // Use migrate deploy in production or when migrations exist 25 | console.log("Deploying existing migrations..."); 26 | await execAsync("npx prisma migrate deploy"); 27 | } 28 | } else { 29 | console.log("Running database migrations..."); 30 | await execAsync("npx prisma migrate deploy"); 31 | } 32 | 33 
| console.log("Generating Prisma Client..."); 34 | await execAsync("npx prisma generate"); 35 | 36 | console.log("Database initialization complete."); 37 | } catch (error) { 38 | console.error("Failed to initialize database:", error); 39 | throw error; 40 | } 41 | } 42 | 43 | // Run if called directly 44 | if (require.main === module) { 45 | initializeDatabase().catch(console.error); 46 | } 47 | -------------------------------------------------------------------------------- /prisma/schema.prisma: -------------------------------------------------------------------------------- 1 | // Database configuration 2 | datasource db { 3 | provider = "sqlite" // Set to "postgresql" for postgres 4 | url = env("DATABASE_URL") 5 | } 6 | 7 | // Client generator configuration 8 | generator client { 9 | provider = "prisma-client-js" 10 | // Enable native database types for better performance 11 | previewFeatures = ["nativeTypes"] 12 | } 13 | 14 | model Memory { 15 | id String @id @default(uuid()) 16 | userId String 17 | agentId String 18 | roomId String 19 | content String // Keep as String for SQLite 20 | type String 21 | generator String // "llm" or "external" 22 | createdAt DateTime @default(now()) 23 | 24 | @@index([roomId]) 25 | @@index([userId, agentId]) 26 | @@index([type]) 27 | } 28 | 29 | model Tweet { 30 | id String @id 31 | text String 32 | userId String 33 | username String 34 | conversationId String? 35 | inReplyToId String? 36 | createdAt DateTime @default(now()) 37 | permanentUrl String? 38 | likeCount Int? @default(0) 39 | retweetCount Int? @default(0) 40 | replyCount Int? @default(0) 41 | viewCount Int? @default(0) 42 | 43 | @@index([userId]) 44 | @@index([conversationId]) 45 | @@index([createdAt]) 46 | } 47 | 48 | model DiscordMessage { 49 | id String @id 50 | content String 51 | userId String 52 | username String 53 | channelId String 54 | guildId String? 
55 | createdAt DateTime @default(now()) 56 | 57 | @@index([userId]) 58 | @@index([channelId]) 59 | } 60 | -------------------------------------------------------------------------------- /src/middleware/load-memories.ts: -------------------------------------------------------------------------------- 1 | import { PrismaClient } from "@prisma/client"; 2 | import { AgentMiddleware, Memory } from "../types"; 3 | 4 | const prisma = new PrismaClient(); 5 | 6 | type DbMemory = Awaited>; 7 | 8 | interface LoadMemoriesOptions { 9 | limit?: number; 10 | type?: string; 11 | } 12 | 13 | export function createLoadMemoriesMiddleware( 14 | options: LoadMemoriesOptions = {} 15 | ): AgentMiddleware { 16 | const { limit = 100 } = options; 17 | 18 | return async (req, res, next) => { 19 | try { 20 | const memories = await prisma.memory.findMany({ 21 | where: { 22 | // roomId: req.input.roomId, 23 | userId: req.input.userId, 24 | }, 25 | orderBy: { 26 | createdAt: "desc", 27 | }, 28 | take: limit, 29 | }); 30 | 31 | req.memories = memories 32 | .map((memory: NonNullable) => { 33 | try { 34 | return { 35 | id: memory.id, 36 | userId: memory.userId, 37 | agentId: memory.agentId, 38 | roomId: memory.roomId, 39 | type: memory.type, 40 | createdAt: memory.createdAt, 41 | generator: memory.generator, 42 | content: JSON.parse(memory.content), 43 | } as Memory; 44 | } catch (e) { 45 | console.log("Failed to load a memory"); 46 | return undefined; 47 | } 48 | }) 49 | .filter((memory): memory is Memory => memory !== undefined); 50 | 51 | await next(); 52 | } catch (error) { 53 | await res.error( 54 | new Error(`Failed to load memories: ${(error as Error).message}`) 55 | ); 56 | } 57 | }; 58 | } 59 | 60 | export const loadMemories = createLoadMemoriesMiddleware(); 61 | -------------------------------------------------------------------------------- /clients/utils/request-queue.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Utility class for 
rate-limiting and queuing requests 3 | */ 4 | export class RequestQueue { 5 | private queue: Array<() => Promise>; 6 | private processing: boolean; 7 | 8 | constructor() { 9 | this.queue = []; 10 | this.processing = false; 11 | } 12 | 13 | /** 14 | * Adds a request to the queue 15 | * @param request - Async function to execute 16 | * @returns Promise resolving to the request result 17 | */ 18 | async add(request: () => Promise): Promise { 19 | return new Promise((resolve, reject) => { 20 | this.queue.push(async () => { 21 | try { 22 | const result = await request(); 23 | resolve(result); 24 | } catch (error) { 25 | reject(error); 26 | } 27 | }); 28 | this.processQueue(); 29 | }); 30 | } 31 | 32 | /** 33 | * Processes queued requests with rate limiting 34 | */ 35 | private async processQueue(): Promise { 36 | if (this.processing || this.queue.length === 0) return; 37 | 38 | this.processing = true; 39 | while (this.queue.length > 0) { 40 | const request = this.queue.shift(); 41 | if (request) { 42 | try { 43 | await request(); 44 | await this.delay(1000); // Rate limiting delay 45 | } catch (error) { 46 | console.error("Error processing request:", error); 47 | await this.delay(2000); // Backoff on error 48 | } 49 | } 50 | } 51 | this.processing = false; 52 | } 53 | 54 | /** 55 | * Utility delay function 56 | * @param ms - Milliseconds to delay 57 | */ 58 | private delay(ms: number): Promise { 59 | return new Promise(resolve => setTimeout(resolve, ms)); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "liz", 3 | "version": "1.0.0", 4 | "description": "Express-style agent framework for processing agent interactions through middleware chains", 5 | "main": "dist/index.js", 6 | "types": "dist/index.d.ts", 7 | "scripts": { 8 | "build": "tsc && cd clients/twitter && tsc && cd ../discord && tsc", 9 | "discord": 
"ts-node scripts/test-discord.ts", 10 | "test:server": "ts-node scripts/test-server.ts", 11 | "prestart": "npm run build && npm run db:init", 12 | "start": "node dist/example/example.js", 13 | "predev": "npm run build && npm run db:init", 14 | "dev": "ts-node dist/example/example.js", 15 | "dev:watch": "ts-node src/example/example.ts", 16 | "twitter": "ts-node src/example/example-twitter.ts", 17 | "test": "jest", 18 | "db:init": "node dist/utils/initDb.js", 19 | "db:reset": "rm -f prisma/dev.db && npm run db:init", 20 | "prisma:generate": "prisma generate", 21 | "prisma:migrate": "prisma migrate dev", 22 | "prisma:studio": "prisma studio" 23 | }, 24 | "keywords": [ 25 | "agent", 26 | "framework", 27 | "middleware", 28 | "llm", 29 | "ai" 30 | ], 31 | "author": "", 32 | "license": "MIT", 33 | "dependencies": { 34 | "@prisma/client": "^5.7.0", 35 | "axios": "^1.7.9", 36 | "discord.js": "^14.17.3", 37 | "dotenv": "^16.4.7", 38 | "express": "^4.21.2", 39 | "node-fetch": "^3.3.2", 40 | "openai": "^4.0.0", 41 | "zod": "^3.22.4" 42 | }, 43 | "devDependencies": { 44 | "@types/express": "^4.0.0", 45 | "@types/jest": "^29.5.11", 46 | "@types/node": "^20.10.4", 47 | "@typescript-eslint/eslint-plugin": "^6.15.0", 48 | "@typescript-eslint/parser": "^6.15.0", 49 | "eslint": "^8.56.0", 50 | "eslint-config-prettier": "^9.1.0", 51 | "eslint-plugin-prettier": "^5.0.1", 52 | "jest": "^29.7.0", 53 | "nodemon": "^3.0.2", 54 | "prettier": "^3.1.1", 55 | "prisma": "^5.7.0", 56 | "ts-jest": "^29.1.1", 57 | "ts-node": "^10.9.2", 58 | "typescript": "^5.3.3" 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/middleware/wrap-context.ts: -------------------------------------------------------------------------------- 1 | import { AgentMiddleware, Memory } from "../types"; 2 | import { LLMUtils } from "../utils/llm"; 3 | const llmUtils = new LLMUtils(); 4 | 5 | function formatMemories(memories: Memory[] | undefined): string { 6 | if 
(!memories || memories.length === 0) { 7 | return "No previous conversation history."; 8 | } 9 | 10 | return memories 11 | .reverse() 12 | .map((memory) => { 13 | const content = memory.content; 14 | if (memory.generator === "external") { 15 | return `[${memory.createdAt.toISOString()}]User ${memory.userId}: ${ 16 | content.text 17 | } ${ 18 | content.imageDescriptions 19 | ? `Description of attached images:\n${content.imageDescriptions}` 20 | : "" 21 | }`; 22 | } else if (memory.generator === "llm") { 23 | return `[${memory.createdAt.toISOString()}] You: ${content.text} 24 | ${ 25 | content.imageDescriptions 26 | ? `Description of attached images:\n${content.imageDescriptions}` 27 | : "" 28 | }`; 29 | } 30 | }) 31 | .filter(Boolean) 32 | .join("\n\n"); 33 | } 34 | 35 | export const wrapContext: AgentMiddleware = async (req, res, next) => { 36 | try { 37 | const memories = formatMemories(req.memories); 38 | const agentContext = req.agent.getAgentContext(); 39 | const currentInput = req.input.text; 40 | let imageDescriptions: string | undefined; 41 | if (req.input.imageUrls && req.input.imageUrls.length > 0) { 42 | try { 43 | imageDescriptions = await llmUtils.getImageDescriptions( 44 | req.input.imageUrls 45 | ); 46 | } catch (error) { 47 | console.warn("Failed to get image descriptions:", error); 48 | } 49 | } 50 | req.input.imageDescriptions = imageDescriptions 51 | ? `Description of attached images:\n${imageDescriptions}` 52 | : undefined; 53 | req.context = ` 54 | 55 | ${memories} 56 | 57 | 58 | 59 | ${agentContext} 60 | 61 | 62 | 63 | TEXT: ${currentInput} 64 | ${imageDescriptions ? 
`\nIMAGES:\n${imageDescriptions}` : ""} 65 | 66 | `.trim(); 67 | 68 | await next(); 69 | } catch (error) { 70 | await res.error( 71 | new Error(`Failed to wrap context: ${(error as Error).message}`) 72 | ); 73 | } 74 | }; 75 | -------------------------------------------------------------------------------- /src/types/index.ts: -------------------------------------------------------------------------------- 1 | export enum InputType { 2 | TEXT = "text", 3 | IMAGE = "image", 4 | TEXT_AND_IMAGE = "text_and_image", 5 | AUDIO = "audio", 6 | VIDEO = "video", 7 | } 8 | 9 | export enum InputSource { 10 | NETWORK = "network", 11 | TWITTER = "twitter", 12 | DISCORD = "discord", 13 | SMS = "sms", 14 | TELEGRAM = "telegram", 15 | } 16 | 17 | export enum LLMSize { 18 | SMALL = "small", // gpt-4o-mini 19 | LARGE = "large", // gpt-4o 20 | } 21 | 22 | export interface InputObject { 23 | source: InputSource; 24 | userId: string; 25 | agentId: string; 26 | roomId: string; 27 | type: InputType; 28 | text?: string; 29 | imageUrls?: string[]; 30 | audioUrl?: string; 31 | videoUrl?: string; 32 | [key: string]: any; 33 | } 34 | 35 | export interface Character { 36 | agentId: string; 37 | name: string; 38 | system: string; 39 | bio: string[]; 40 | lore: string[]; 41 | messageExamples: Array< 42 | Array<{ 43 | user: string; 44 | content: { 45 | text: string; 46 | }; 47 | }> 48 | >; 49 | postExamples: string[]; 50 | topics: string[]; 51 | style: { 52 | all: string[]; 53 | chat: string[]; 54 | post: string[]; 55 | }; 56 | adjectives: string[]; 57 | routes: Route[]; 58 | } 59 | 60 | export interface Route { 61 | name: string; 62 | description: string; 63 | handler: ( 64 | context: string, 65 | req: AgentRequest, 66 | res: AgentResponse 67 | ) => Promise; 68 | } 69 | 70 | export interface AgentRequest { 71 | input: InputObject; 72 | agent: Agent; 73 | context?: string; 74 | memories?: Memory[]; 75 | [key: string]: any; 76 | } 77 | 78 | export interface AgentResponse { 79 | send: (content: any) 
=> Promise; 80 | error: (error: any) => Promise; 81 | [key: string]: any; 82 | } 83 | 84 | export interface Memory { 85 | id: string; 86 | userId: string; 87 | agentId: string; 88 | roomId: string; 89 | content: any; 90 | type: string; 91 | generator: string; // "external" or "llm" 92 | createdAt: Date; 93 | } 94 | 95 | export type AgentMiddleware = ( 96 | req: AgentRequest, 97 | res: AgentResponse, 98 | next: () => Promise 99 | ) => Promise; 100 | 101 | export interface Agent { 102 | getAgentContext(): string; 103 | getRoutes(): Route[]; 104 | getSystemPrompt(): string; 105 | addRoute(route: Route): void; 106 | getAgentId(): string; 107 | } 108 | -------------------------------------------------------------------------------- /src/looker/looker.ts: -------------------------------------------------------------------------------- 1 | import { LLMUtils } from "../utils/llm"; 2 | import { prisma } from "../utils/db"; 3 | 4 | /** 5 | * Interface for Looker configuration 6 | */ 7 | interface LookerConfig { 8 | modelName: string; 9 | temperature: number; 10 | maxTokens?: number; 11 | characterId?: string; 12 | } 13 | 14 | /** 15 | * Looker class for analyzing and generating insights from data 16 | */ 17 | export class Looker { 18 | private llmUtils: LLMUtils; 19 | private config: LookerConfig; 20 | 21 | /** 22 | * Creates a new Looker instance 23 | * @param config Configuration for the Looker 24 | */ 25 | constructor(config: LookerConfig) { 26 | this.llmUtils = new LLMUtils(); 27 | this.config = { 28 | modelName: config.modelName || "anthropic/claude-3.5-sonnet", 29 | temperature: config.temperature || 0.7, 30 | maxTokens: config.maxTokens, 31 | characterId: config.characterId, 32 | }; 33 | } 34 | 35 | /** 36 | * Analyzes text content and generates insights 37 | * @param content Text content to analyze 38 | * @returns Analysis results 39 | */ 40 | async analyzeContent(content: string): Promise { 41 | const prompt = ` 42 | 43 | Analyze the following content and provide 
insights about the key themes, 44 | sentiment, and notable patterns. Be concise and focus on the most important aspects. 45 | 46 | 47 | Content to analyze: 48 | ${content} 49 | `; 50 | 51 | // Call LLM with prompt and model name 52 | return this.llmUtils.getTextFromLLM(prompt, this.config.modelName); 53 | } 54 | 55 | /** 56 | * Retrieves character information if a characterId is configured 57 | * @returns Character information or null if not configured 58 | */ 59 | async getCharacterInfo(): Promise { 60 | if (!this.config.characterId) { 61 | return null; 62 | } 63 | 64 | // This is a placeholder for future implementation 65 | // Will connect to database to retrieve character information 66 | return { 67 | id: this.config.characterId, 68 | name: "Default Character", 69 | traits: ["analytical", "helpful", "insightful"], 70 | }; 71 | } 72 | 73 | /** 74 | * Summarizes a collection of tweets 75 | * @param tweetIds Array of tweet IDs to summarize 76 | * @returns Summary of the tweets 77 | */ 78 | async summarizeTweets(tweetIds: string[]): Promise { 79 | // Placeholder implementation 80 | // Will be expanded to fetch tweets and generate summaries 81 | return `Analyzed ${tweetIds.length} tweets`; 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/framework/index.ts: -------------------------------------------------------------------------------- 1 | import express from "express"; 2 | import { 3 | AgentMiddleware, 4 | AgentRequest, 5 | AgentResponse, 6 | InputObject, 7 | Agent, 8 | } from "../types"; 9 | 10 | export class AgentFramework { 11 | private middlewares: AgentMiddleware[] = []; 12 | private errorHandlers: Array< 13 | (error: any, req: AgentRequest, res: AgentResponse) => Promise 14 | > = []; 15 | 16 | use(middleware: AgentMiddleware): this { 17 | this.middlewares.push(middleware); 18 | return this; 19 | } 20 | 21 | onError( 22 | handler: ( 23 | error: any, 24 | req: AgentRequest, 25 | res: AgentResponse 26 | ) 
=> Promise 27 | ): this { 28 | this.errorHandlers.push(handler); 29 | return this; 30 | } 31 | 32 | private createResponse( 33 | req: AgentRequest, 34 | expressRes: express.Response 35 | ): AgentResponse { 36 | const res: AgentResponse = { 37 | send: async (content: any) => { 38 | expressRes.json(content); 39 | }, 40 | json: async (content: any) => { 41 | expressRes.json(content); 42 | }, 43 | error: async (error: any) => { 44 | // Handle error based on input source 45 | console.error("Error:", error); 46 | await this.handleError(error, req, res); 47 | }, 48 | }; 49 | return res; 50 | } 51 | 52 | private async handleError( 53 | error: any, 54 | req: AgentRequest, 55 | res: AgentResponse 56 | ): Promise { 57 | for (const handler of this.errorHandlers) { 58 | try { 59 | await handler(error, req, res); 60 | } catch (handlerError) { 61 | console.error("Error in error handler:", handlerError); 62 | } 63 | } 64 | } 65 | 66 | private async executeMiddleware( 67 | index: number, 68 | req: AgentRequest, 69 | res: AgentResponse 70 | ): Promise { 71 | if (index >= this.middlewares.length) { 72 | return; 73 | } 74 | 75 | const middleware = this.middlewares[index]; 76 | const next = async () => { 77 | await this.executeMiddleware(index + 1, req, res); 78 | }; 79 | 80 | try { 81 | await middleware(req, res, next); 82 | } catch (error) { 83 | await res.error(error); 84 | } 85 | } 86 | 87 | async process( 88 | input: InputObject, 89 | agent: Agent, 90 | expressRes: express.Response 91 | ): Promise { 92 | const req: AgentRequest = { 93 | input, 94 | agent, 95 | }; 96 | 97 | const res = this.createResponse(req, expressRes); 98 | 99 | try { 100 | await this.executeMiddleware(0, req, res); 101 | } catch (error) { 102 | await res.error(error); 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/agent/index.ts: -------------------------------------------------------------------------------- 1 | import { Agent, Character, 
Route } from "../types"; 2 | 3 | export class BaseAgent implements Agent { 4 | private character: Character; 5 | private routes: Route[]; 6 | 7 | constructor(character: Character) { 8 | this.character = character; 9 | this.routes = character.routes; 10 | } 11 | 12 | private getRandomElements(arr: T[], count: number): T[] { 13 | const shuffled = [...arr].sort(() => Math.random() - 0.5); 14 | return shuffled.slice(0, Math.min(count, arr.length)); 15 | } 16 | 17 | private formatMessageExamples( 18 | examples: Character["messageExamples"] 19 | ): string { 20 | return this.getRandomElements(examples, 3) 21 | .map((conversation) => 22 | conversation.map((msg) => `${msg.user}: ${msg.content.text}`).join("\n") 23 | ) 24 | .join("\n\n"); 25 | } 26 | 27 | public getAgentId(): string { 28 | return this.character.agentId; 29 | } 30 | 31 | public getSystemPrompt(): string { 32 | return this.character.system; 33 | } 34 | 35 | public addRoute(route: Route): void { 36 | if (this.routes.some((r) => r.name === route.name)) { 37 | throw new Error(`Route with name '${route.name}' already exists`); 38 | } 39 | this.routes.push(route); 40 | } 41 | 42 | public getAgentContext(): string { 43 | const bioContext = this.getRandomElements(this.character.bio, 3).join("\n"); 44 | const loreContext = this.getRandomElements(this.character.lore, 3).join( 45 | "\n" 46 | ); 47 | const messageContext = this.formatMessageExamples( 48 | this.character.messageExamples 49 | ); 50 | const postContext = this.getRandomElements( 51 | this.character.postExamples, 52 | 3 53 | ).join("\n"); 54 | const topicContext = this.getRandomElements(this.character.topics, 3).join( 55 | "\n" 56 | ); 57 | const styleAllContext = this.getRandomElements( 58 | this.character.style.all, 59 | 3 60 | ).join("\n"); 61 | const styleChatContext = this.getRandomElements( 62 | this.character.style.chat, 63 | 3 64 | ).join("\n"); 65 | const stylePostContext = this.getRandomElements( 66 | this.character.style.post, 67 | 3 68 | 
).join("\n"); 69 | const adjectiveContext = this.getRandomElements( 70 | this.character.adjectives, 71 | 3 72 | ).join(", "); 73 | 74 | return ` 75 | 76 | ${this.getSystemPrompt()} 77 | 78 | 79 | 80 | ${bioContext} 81 | 82 | 83 | 84 | ${loreContext} 85 | 86 | 87 | 88 | ${messageContext} 89 | 90 | 91 | 92 | ${postContext} 93 | 94 | 95 | 96 | ${topicContext} 97 | 98 | 99 | 100 | 101 | ${styleAllContext} 102 | 103 | 104 | 105 | ${styleChatContext} 106 | 107 | 108 | 109 | 110 | ${adjectiveContext} 111 | 112 | `.trim(); 113 | } 114 | 115 | public getRoutes(): Route[] { 116 | return this.routes; 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/middleware/router.ts: -------------------------------------------------------------------------------- 1 | import { AgentMiddleware, Route } from "../types"; 2 | import { LLMUtils } from "../utils/llm"; 3 | import { z } from "zod"; 4 | import { LLMSize } from "../types"; 5 | 6 | // Constants 7 | const CONFIDENCE_THRESHOLD = 0.7; 8 | 9 | /** 10 | * Schema for route selection response from LLM 11 | */ 12 | const routeSchema = z.object({ 13 | selectedRoute: z.string(), 14 | confidence: z.number().min(0).max(1), 15 | reasoning: z.string(), 16 | }); 17 | 18 | /** 19 | * Router middleware that uses LLM to determine the appropriate route handler 20 | * based on the incoming request and available routes 21 | */ 22 | export const router: AgentMiddleware = async (req, res, next) => { 23 | try { 24 | // Initialize LLM utilities and get available routes 25 | const llmUtils = new LLMUtils(); 26 | const routes = req.agent.getRoutes(); 27 | 28 | // Format route descriptions for the LLM prompt 29 | const routeDescriptions = routes 30 | .map((route: Route) => `"${route.name}": ${route.description}`) 31 | .join("\n"); 32 | 33 | const prompt = ` 34 | 35 | ${req.context} 36 | 37 | 38 | 39 | You are functioning as a request router for an AI agent with the following system prompt: 40 | 41 | 
${req.agent.getSystemPrompt()} 42 | 43 | Your task is to analyze incoming messages and route them to the most appropriate handler based on the available routes below. Consider the agent's purpose and capabilities when making this decision. 44 | 45 | Available Routes: 46 | ${routeDescriptions} 47 | 48 | Based on the agent's system description and the available routes, select the most appropriate route to handle this interaction. 49 | 50 | Respond with a JSON object containing: 51 | - selectedRoute: The name of the selected route 52 | - confidence: A number between 0 and 1 indicating confidence in the selection 53 | - reasoning: A brief explanation of why this route was selected 54 | 55 | `.trim(); 56 | 57 | const routeDecision = await llmUtils.getObjectFromLLM( 58 | prompt, 59 | routeSchema, 60 | LLMSize.LARGE 61 | ); 62 | 63 | const handler = routes.find((r) => r.name === routeDecision.selectedRoute); 64 | if (!handler) { 65 | return res.error( 66 | new Error(`No handler for route: ${routeDecision.selectedRoute}`) 67 | ); 68 | } 69 | 70 | // Check if confidence level meets threshold 71 | if (routeDecision.confidence < CONFIDENCE_THRESHOLD) { 72 | console.warn( 73 | `Low confidence (${routeDecision.confidence}) for route: ${routeDecision.selectedRoute}` 74 | ); 75 | console.warn(`Reasoning: ${routeDecision.reasoning}`); 76 | } 77 | 78 | // Execute the selected route handler 79 | try { 80 | // Call the handler with context and request/response objects 81 | await handler.handler(req.context || "", req, res); 82 | await next(); 83 | } catch (error) { 84 | // Handle errors from the route handler 85 | await res.error( 86 | new Error( 87 | `Route handler error (${routeDecision.selectedRoute}): ${ 88 | (error as Error).message 89 | }` 90 | ) 91 | ); 92 | } 93 | } catch (error) { 94 | await res.error(new Error(`Router error: ${(error as Error).message}`)); 95 | } 96 | }; 97 | -------------------------------------------------------------------------------- 
/clients/discord/base.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from "events"; 2 | import { Client, GatewayIntentBits, Message, Partials } from "discord.js"; 3 | import { RequestQueue } from "../utils/request-queue"; 4 | 5 | export interface DiscordConfig { 6 | botToken: string; 7 | agentId: string; 8 | retryLimit: number; 9 | pollingInterval: number; 10 | dryRun?: boolean; 11 | } 12 | 13 | /** 14 | * Base Discord client class handling core Discord.js functionality 15 | * Similar to TwitterBase, provides basic Discord operations 16 | */ 17 | export class DiscordBase extends EventEmitter { 18 | protected discordClient: Client; 19 | protected requestQueue: RequestQueue; 20 | protected config: DiscordConfig; 21 | protected isInitialized: boolean = false; 22 | 23 | constructor(agent: any, config: DiscordConfig) { 24 | super(); 25 | this.config = config; 26 | this.requestQueue = new RequestQueue(); 27 | 28 | // Initialize Discord client with required intents 29 | this.discordClient = new Client({ 30 | intents: [ 31 | GatewayIntentBits.DirectMessages, 32 | GatewayIntentBits.DirectMessageReactions, 33 | GatewayIntentBits.MessageContent 34 | ], 35 | partials: [Partials.Channel, Partials.Message] // Required for DM handling 36 | }); 37 | } 38 | 39 | /** 40 | * Initialize Discord client and handle connection 41 | * @throws {Error} If login fails after maximum retries 42 | */ 43 | async init(): Promise { 44 | console.log("Initializing Discord client..."); 45 | let retries = this.config.retryLimit; 46 | 47 | while (retries > 0) { 48 | try { 49 | if (this.isInitialized) { 50 | console.log("Already logged in"); 51 | break; 52 | } 53 | 54 | await this.discordClient.login(this.config.botToken); 55 | 56 | // Wait for ready event 57 | await new Promise((resolve) => { 58 | this.discordClient.once('ready', () => { 59 | console.log(`Logged in as ${this.discordClient.user?.tag}`); 60 | this.isInitialized = true; 61 | 
resolve(); 62 | }); 63 | }); 64 | 65 | break; 66 | } catch (error) { 67 | console.error(`Login attempt failed: ${(error as Error).message}`); 68 | retries--; 69 | 70 | if (retries > 0) { 71 | console.log(`Retrying... (${retries} attempts left)`); 72 | await new Promise(resolve => setTimeout(resolve, 2000)); 73 | } 74 | } 75 | } 76 | 77 | if (!this.isInitialized) { 78 | throw new Error("Failed to login after maximum retries"); 79 | } 80 | } 81 | 82 | /** 83 | * Send a direct message to a user 84 | * @param userId User's Discord ID 85 | * @param content Message content 86 | * @returns The sent message or null if failed 87 | */ 88 | protected async sendDirectMessage(userId: string, content: string): Promise { 89 | return this.requestQueue.add(async () => { 90 | try { 91 | const user = await this.discordClient.users.fetch(userId); 92 | const message = await user.send(content); 93 | return message; 94 | } catch (error) { 95 | console.error(`Error sending DM to ${userId}:`, error); 96 | return null; 97 | } 98 | }); 99 | } 100 | 101 | /** 102 | * Utility method to delay execution 103 | * @param ms Milliseconds to delay 104 | */ 105 | protected delay(ms: number): Promise { 106 | return new Promise(resolve => setTimeout(resolve, ms)); 107 | } 108 | 109 | /** 110 | * Clean up resources and disconnect 111 | */ 112 | async destroy(): Promise { 113 | this.discordClient.destroy(); 114 | this.isInitialized = false; 115 | console.log("Discord client destroyed"); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /scripts/test-discord.ts: -------------------------------------------------------------------------------- 1 | import { DiscordClient } from "../clients/discord/client"; 2 | import dotenv from 'dotenv'; 3 | import path from 'path'; 4 | 5 | // Load environment variables from .env file 6 | dotenv.config({ path: path.resolve(process.cwd(), '.env') }); 7 | 8 | // Configure logging 9 | const DEBUG = process.env.DEBUG === 
'true'; 10 | function log(message: string, ...args: any[]) { 11 | if (DEBUG) { 12 | console.log(`[DEBUG] ${message}`, ...args); 13 | } 14 | } 15 | 16 | const config = { 17 | botToken: process.env.DISCORD_BOT_TOKEN!, 18 | agentId: "test-agent", 19 | retryLimit: 3, 20 | pollingInterval: 1, 21 | dryRun: false 22 | }; 23 | 24 | log("Environment variables loaded"); 25 | console.log("Bot token:", process.env.DISCORD_BOT_TOKEN ? "Found" : "Missing"); 26 | console.log("Test user ID:", process.env.DISCORD_TEST_USER_ID ? "Found" : "Missing"); 27 | 28 | async function main() { 29 | console.log("Starting Discord client test..."); 30 | 31 | // Create mock agent 32 | const mockAgent = { 33 | getAgentId: () => "test-agent", 34 | getAgentContext: () => "Test agent context", 35 | getRoutes: () => [], 36 | getSystemPrompt: () => "Test system prompt", 37 | addRoute: () => {} 38 | }; 39 | 40 | const client = new DiscordClient(mockAgent, config); 41 | 42 | // Test sending multiple DMs to verify rate limiting 43 | setTimeout(async () => { 44 | try { 45 | console.log("Testing Discord client functionality..."); 46 | 47 | console.log("Waiting for Discord ID to test messaging functionality..."); 48 | // Note: Replace USER_ID with the actual Discord user ID when provided 49 | const targetUserId = process.env.DISCORD_TEST_USER_ID; 50 | 51 | if (!targetUserId) { 52 | console.log("No test user ID provided. Please set DISCORD_TEST_USER_ID environment variable."); 53 | return; 54 | } 55 | 56 | try { 57 | // Test 1: Basic message sending 58 | console.log("Test 1: Sending basic message..."); 59 | await client.sendMessage(targetUserId, "Hello! 
This is a test message from the Discord bot."); 60 | console.log("Basic message sent successfully"); 61 | 62 | // Test 2: Rate limiting (single message for now) 63 | console.log("Test 2: Testing rate limiting..."); 64 | await client.sendMessage(targetUserId, "This is a follow-up message (should be delayed by 1000ms)"); 65 | console.log("Rate-limited message sent successfully"); 66 | 67 | // Test 3: Message with formatting 68 | console.log("Test 3: Testing message with formatting..."); 69 | await client.sendMessage( 70 | targetUserId, 71 | "This message has **bold text**, *italics*, and `code blocks`.\n" + 72 | "It also has multiple lines and a [link](https://example.com)." 73 | ); 74 | console.log("Formatted message sent successfully"); 75 | } catch (error) { 76 | console.error("Error during message tests:", error); 77 | } 78 | } catch (error) { 79 | console.error("Error in Discord client tests:", error); 80 | // Type guard for Discord API errors 81 | if (error && typeof error === 'object' && 'code' in error) { 82 | const discordError = error as { code: number }; 83 | if (discordError.code === 50035) { 84 | console.error("Invalid user ID format. 
Make sure to use Discord's snowflake ID format."); 85 | } 86 | } 87 | } 88 | }, 5000); 89 | 90 | try { 91 | await client.start(); 92 | console.log("Discord client started successfully"); 93 | 94 | // Keep the process running 95 | process.on('SIGINT', async () => { 96 | console.log("Stopping Discord client..."); 97 | await client.stop(); 98 | process.exit(0); 99 | }); 100 | } catch (error) { 101 | console.error("Error testing Discord client:", error); 102 | process.exit(1); 103 | } 104 | } 105 | 106 | main(); 107 | -------------------------------------------------------------------------------- /src/utils/memory.ts: -------------------------------------------------------------------------------- 1 | import { prisma } from "./db"; 2 | 3 | /** 4 | * Interface representing Twitter data structure 5 | */ 6 | export interface TweetData { 7 | id: string; 8 | text: string; 9 | userId: string; 10 | username: string; 11 | conversationId?: string; 12 | inReplyToId?: string; 13 | permanentUrl?: string; 14 | } 15 | 16 | /** 17 | * Creates a new Twitter memory entry in the database 18 | */ 19 | export async function createTwitterMemory( 20 | userId: string, 21 | agentId: string, 22 | roomId: string, 23 | message: string, 24 | generator: string = "llm" 25 | ) { 26 | await prisma.memory.create({ 27 | data: { 28 | userId, 29 | agentId, 30 | roomId, 31 | type: "tweet", 32 | generator: generator, 33 | content: JSON.stringify({ text: message }), 34 | }, 35 | }); 36 | } 37 | 38 | /** 39 | * Checks if a tweet with the given ID exists in the database 40 | */ 41 | export async function doesTweetExist(tweetId: string): Promise { 42 | const count = await prisma.tweet.count({ 43 | where: { id: tweetId }, 44 | }); 45 | return count > 0; 46 | } 47 | 48 | export async function storeTweetIfNotExists( 49 | tweet: TweetData 50 | ): Promise { 51 | const exists = await doesTweetExist(tweet.id); 52 | 53 | if (!exists) { 54 | await prisma.tweet.create({ 55 | data: { 56 | id: tweet.id, 57 | text: 
tweet.text, 58 | userId: tweet.userId, 59 | username: tweet.username, 60 | conversationId: tweet.conversationId, 61 | inReplyToId: tweet.inReplyToId, 62 | permanentUrl: tweet.permanentUrl, 63 | }, 64 | }); 65 | return true; // Indicates we stored a new tweet 66 | } 67 | 68 | return false; // Indicates tweet already existed 69 | } 70 | 71 | export async function getTweetById(tweetId: string) { 72 | return prisma.tweet.findUnique({ 73 | where: { id: tweetId }, 74 | }); 75 | } 76 | 77 | export async function getTweetThread(conversationId: string) { 78 | return prisma.tweet.findMany({ 79 | where: { conversationId }, 80 | orderBy: { createdAt: "asc" }, 81 | }); 82 | } 83 | 84 | export async function getRecentTweets(userId: string, limit: number = 10) { 85 | return prisma.tweet.findMany({ 86 | where: { userId }, 87 | orderBy: { createdAt: "desc" }, 88 | take: limit, 89 | }); 90 | } 91 | 92 | /** 93 | * Interface representing Discord message data structure 94 | */ 95 | export interface DiscordMessageData { 96 | id: string; 97 | content: string; 98 | userId: string; 99 | username: string; 100 | channelId: string; 101 | guildId?: string; 102 | } 103 | 104 | export async function createDiscordMemory( 105 | userId: string, 106 | agentId: string, 107 | roomId: string, 108 | message: string, 109 | generator: string = "llm" 110 | ) { 111 | await prisma.memory.create({ 112 | data: { 113 | userId, 114 | agentId, 115 | roomId, 116 | type: "discord", 117 | generator: generator, 118 | content: JSON.stringify({ text: message }), 119 | }, 120 | }); 121 | } 122 | 123 | export async function doesDiscordMessageExist( 124 | messageId: string 125 | ): Promise { 126 | const count = await prisma.discordMessage.count({ 127 | where: { id: messageId }, 128 | }); 129 | return count > 0; 130 | } 131 | 132 | export async function storeDiscordMessageIfNotExists( 133 | message: DiscordMessageData 134 | ): Promise { 135 | const exists = await doesDiscordMessageExist(message.id); 136 | 137 | if 
(!exists) { 138 | await prisma.discordMessage.create({ 139 | data: { 140 | id: message.id, 141 | content: message.content, 142 | userId: message.userId, 143 | username: message.username, 144 | channelId: message.channelId, 145 | guildId: message.guildId, 146 | }, 147 | }); 148 | return true; // Indicates we stored a new message 149 | } 150 | 151 | return false; // Indicates message already existed 152 | } 153 | 154 | export async function getDiscordMessageById(messageId: string) { 155 | return prisma.discordMessage.findUnique({ 156 | where: { id: messageId }, 157 | }); 158 | } 159 | 160 | export async function getRecentDiscordMessages( 161 | userId: string, 162 | limit: number = 10 163 | ) { 164 | return prisma.discordMessage.findMany({ 165 | where: { userId }, 166 | orderBy: { createdAt: "desc" }, 167 | take: limit, 168 | }); 169 | } 170 | -------------------------------------------------------------------------------- /clients/twitter/utils.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Builds a thread of tweets from a given tweet by traversing up through reply chains 3 | * @param {Object} tweet - The tweet object to start building the thread from 4 | * @param {TwitterBase} client - The Twitter client instance 5 | * @param {number} [maxReplies=5] - Maximum number of replies to include in the thread 6 | * @returns {Promise>} Array of tweets in chronological order 17 | */ 18 | async function buildConversationThread(tweet, client, maxReplies = 5) { 19 | const thread = []; 20 | const visited = new Set(); 21 | 22 | async function processThread(currentTweet, depth = 0) { 23 | if (!currentTweet || depth >= maxReplies || visited.has(currentTweet.id)) { 24 | return; 25 | } 26 | 27 | visited.add(currentTweet.id); 28 | thread.unshift(currentTweet); 29 | 30 | if (currentTweet.inReplyToStatusId) { 31 | try { 32 | const parentTweet = await client.getTweet(currentTweet.inReplyToStatusId); 33 | if (parentTweet) { 34 | await 
/**
 * Splits a long text into multiple tweet-sized chunks
 * @param {string} text - The text content to split
 * @param {number} [maxLength=280] - Maximum length of each tweet
 * @returns {string[]} Array of tweet-sized text chunks
 *
 * Fixes over the previous version:
 * - A trailing fragment with no terminal punctuation is no longer dropped
 *   (the old regex only matched sentences ending in . ! or ?).
 * - A single word longer than maxLength is hard-split, so no chunk ever
 *   exceeds maxLength (the old code emitted over-length chunks for it).
 */
function splitTweetContent(text, maxLength = 280) {
  if (text.length <= maxLength) return [text];

  const tweets = [];
  let currentTweet = '';

  // Flush the accumulator (trimmed) if it holds anything.
  const flush = () => {
    if (currentTweet) tweets.push(currentTweet.trim());
    currentTweet = '';
  };

  // Append one word, hard-splitting it if it alone exceeds maxLength.
  const addWord = (word) => {
    let rest = word;
    while (rest.length > maxLength) {
      flush();
      tweets.push(rest.slice(0, maxLength));
      rest = rest.slice(maxLength);
    }
    if (!rest) return;
    if (!currentTweet) {
      currentTweet = rest;
    } else if ((currentTweet + ' ' + rest).length <= maxLength) {
      currentTweet += ' ' + rest;
    } else {
      flush();
      currentTweet = rest;
    }
  };

  // Match punctuated sentences plus any unterminated trailing fragment.
  const sentences = text.match(/[^.!?]+[.!?]+|[^.!?]+$/g) || [text];

  for (const sentence of sentences) {
    if ((currentTweet + sentence).length <= maxLength) {
      currentTweet += sentence;
    } else if (sentence.length <= maxLength) {
      flush();
      currentTweet = sentence;
    } else {
      // Sentence itself is too long: fall back to word-level packing.
      flush();
      for (const word of sentence.split(' ')) addWord(word);
    }
  }

  flush();
  return tweets;
}
client.sendTweet(part, lastTweetId); 109 | if (tweet) { 110 | tweets.push(tweet); 111 | lastTweetId = tweet.id; 112 | } else { 113 | break; 114 | } 115 | await new Promise(resolve => setTimeout(resolve, 1000)); 116 | } 117 | 118 | return tweets; 119 | } 120 | 121 | module.exports = { 122 | buildConversationThread, 123 | splitTweetContent, 124 | sendThreadedTweet 125 | }; -------------------------------------------------------------------------------- /src/example/example.ts: -------------------------------------------------------------------------------- 1 | import express, { Express, Request, Response } from "express"; 2 | import { AgentFramework } from "../framework"; 3 | import { standardMiddleware } from "../middleware"; 4 | import { Character, InputObject, InputSource, InputType } from "../types"; 5 | import { BaseAgent } from "../agent"; 6 | import { prisma } from "../utils/db"; 7 | import readline from "readline"; 8 | import axios from "axios"; 9 | import { routes } from "../routes"; 10 | // @ts-ignore 11 | import { TwitterClient } from "../../clients/twitter"; 12 | import dotenv from "dotenv"; 13 | 14 | dotenv.config(); // Load environment variables 15 | const PORT = process.env.SERVER_PORT; 16 | 17 | // Initialize Express and framework 18 | const app: Express = express(); 19 | app.use(express.json()); 20 | const framework = new AgentFramework(); 21 | standardMiddleware.forEach((middleware) => framework.use(middleware)); 22 | 23 | // Define Stern character 24 | const sternCharacter: Character = { 25 | name: "Stern", 26 | agentId: "stern", 27 | system: `You are Stern, a no-nonsense business advisor known for direct, practical advice.`, 28 | bio: [ 29 | "Stern is a direct and efficient business consultant with decades of experience.", 30 | ], 31 | lore: [ 32 | "Started as a factory floor manager before rising to consultant status.", 33 | ], 34 | messageExamples: [ 35 | [ 36 | { user: "client1", content: { text: "How can I improve my business?" 
} }, 37 | { 38 | user: "Stern", 39 | content: { text: "Specifics. What are your current metrics?" }, 40 | }, 41 | ], 42 | ], 43 | postExamples: ["Here's a 5-step plan to optimize your operations..."], 44 | topics: ["business", "strategy", "efficiency"], 45 | style: { 46 | all: ["direct", "professional"], 47 | chat: ["analytical"], 48 | post: ["structured"], 49 | }, 50 | adjectives: ["efficient", "practical"], 51 | routes: [], 52 | }; 53 | 54 | // Initialize agent 55 | const stern = new BaseAgent(sternCharacter); 56 | const agents = [stern]; 57 | 58 | // Add the default routes 59 | routes.forEach((r) => stern.addRoute(r)); 60 | 61 | // Express endpoint for agent input 62 | app.post("/agent/input", (req: Request, res: Response) => { 63 | try { 64 | const bodyInput = req.body.input; 65 | const agentId = bodyInput.agentId; 66 | const agent = agents.find((agent) => agent.getAgentId() === agentId); 67 | 68 | if (!agent) { 69 | return res.status(404).json({ error: "Agent not found" }); 70 | } 71 | 72 | // Construct an InputObject for the framework 73 | const input: InputObject = { 74 | source: InputSource.NETWORK, 75 | userId: bodyInput.userId, 76 | agentId: agent.getAgentId(), 77 | roomId: bodyInput.roomId || `${agentId}_${bodyInput.userId}`, 78 | type: 79 | bodyInput.type === "text" ? 
InputType.TEXT : InputType.TEXT_AND_IMAGE, 80 | text: bodyInput.text, 81 | imageUrls: bodyInput.imageUrls, 82 | }; 83 | 84 | framework.process(input, agent, res); 85 | } catch (error) { 86 | console.error("Server error:", error); 87 | return res.status(500).json({ error: "Internal server error" }); 88 | } 89 | }); 90 | 91 | // CLI for local testing 92 | async function startCLI() { 93 | const rl = readline.createInterface({ 94 | input: process.stdin, 95 | output: process.stdout, 96 | }); 97 | 98 | console.log("\nStern Business Advisor CLI"); 99 | console.log("========================="); 100 | 101 | async function prompt() { 102 | rl.question("\nYou: ", async (text) => { 103 | try { 104 | const response = await axios.post("http://localhost:3000/agent/input", { 105 | input: { 106 | agentId: "stern", 107 | userId: "cli_user", 108 | text: text, 109 | }, 110 | }); 111 | 112 | const data = response.data; 113 | console.log("\nStern:", data); 114 | prompt(); 115 | } catch (error) { 116 | console.error("\nError:", error); 117 | prompt(); 118 | } 119 | }); 120 | } 121 | 122 | prompt(); 123 | } 124 | 125 | let server: any; 126 | 127 | // Initialize and start Twitter client 128 | async function startTwitterClient() { 129 | // Gather config from .env or fallback 130 | const username = process.env.TWITTER_USERNAME || ""; 131 | const password = process.env.TWITTER_PASSWORD || ""; 132 | const email = process.env.TWITTER_EMAIL || ""; 133 | const twoFactorSecret = process.env.TWITTER_2FA_SECRET || ""; 134 | const dryRun = process.env.TWITTER_DRY_RUN === "true"; 135 | const postIntervalHours = process.env.TWITTER_POST_INTERVAL_HOURS 136 | ? parseInt(process.env.TWITTER_POST_INTERVAL_HOURS, 10) 137 | : 4; 138 | const pollingInterval = process.env.TWITTER_POLLING_INTERVAL 139 | ? 
parseInt(process.env.TWITTER_POLLING_INTERVAL, 10) 140 | : 5; 141 | 142 | const config = { 143 | username, 144 | password, 145 | email, 146 | twoFactorSecret: twoFactorSecret || undefined, 147 | retryLimit: 3, 148 | postIntervalHours, 149 | enableActions: false, 150 | pollingInterval, 151 | dryRun, 152 | }; 153 | 154 | const twitterClient = new TwitterClient(stern, config); 155 | await twitterClient.start(); // Start intervals for checking mentions & posting 156 | } 157 | 158 | async function start() { 159 | server = app.listen(PORT, () => { 160 | console.log(`Server running on http://localhost:${PORT}`); 161 | startCLI(); 162 | 163 | // Start Twitter client after server is up 164 | startTwitterClient().catch((err) => { 165 | console.error("Error starting Twitter client:", err); 166 | }); 167 | }); 168 | } 169 | 170 | if (require.main === module) { 171 | start().catch(console.error); 172 | } 173 | -------------------------------------------------------------------------------- /clients/twitter/client.js: -------------------------------------------------------------------------------- 1 | const { TwitterBase } = require("./base"); 2 | const { buildConversationThread, sendThreadedTweet } = require("./utils"); 3 | const axios = require("axios"); 4 | const { storeTweetIfNotExists } = require("../../dist/utils/memory"); 5 | require("dotenv").config(); 6 | 7 | const PORT = process.env.SERVER_PORT; 8 | class TwitterClient extends TwitterBase { 9 | constructor(agent, config) { 10 | super(agent, config); 11 | this.postInterval = null; 12 | this.checkInterval = null; 13 | this.dryRun = config.dryRun; 14 | } 15 | 16 | async start() { 17 | await this.init(); 18 | 19 | if (this.config.postIntervalHours > 0) { 20 | const intervalMs = this.config.postIntervalHours * 60 * 60 * 1000; 21 | this.postInterval = setInterval(() => this.generateAndPost(), intervalMs); 22 | console.log( 23 | `Posting loop started. 
Will post every ${this.config.postIntervalHours} hours` 24 | ); 25 | } 26 | 27 | this.checkInterval = setInterval( 28 | () => this.checkInteractions(), 29 | 60 * 1000 * this.config.pollingInterval 30 | ); 31 | console.log("Twitter client started. Monitoring for interactions."); 32 | } 33 | 34 | async stop() { 35 | if (this.postInterval) { 36 | clearInterval(this.postInterval); 37 | this.postInterval = null; 38 | } 39 | if (this.checkInterval) { 40 | clearInterval(this.checkInterval); 41 | this.checkInterval = null; 42 | } 43 | console.log("Twitter client stopped"); 44 | } 45 | 46 | async generateAndPost() { 47 | try { 48 | console.log("Requesting new tweet content from the server..."); 49 | 50 | const responseText = await this.fetchTweetContent({ 51 | agentId: this.agent.getAgentId(), 52 | userId: "twitter_client", 53 | roomId: "twitter", 54 | text: " Generate a new tweet to post on your timeline ", 55 | type: "text", 56 | }); 57 | 58 | console.log("Server responded with tweet text:", responseText); 59 | if (this.dryRun) return; 60 | const tweets = await sendThreadedTweet(this, responseText); 61 | 62 | if (tweets.length > 0) { 63 | console.log("Posted tweet:", tweets.map((t) => t.text).join("\n")); 64 | 65 | // Store each tweet in the thread 66 | for (const tweet of tweets) { 67 | await storeTweetIfNotExists({ 68 | id: tweet.id, 69 | text: tweet.text, 70 | userId: this.config.username, 71 | username: this.config.username, 72 | conversationId: tweet.conversationId, 73 | permanentUrl: tweet.permanentUrl, 74 | imageUrls: tweet.imageUrls, 75 | }); 76 | } 77 | } 78 | 79 | return tweets; 80 | } catch (error) { 81 | console.error("Error generating/posting tweet:", error); 82 | return []; 83 | } 84 | } 85 | 86 | async checkInteractions() { 87 | try { 88 | const mentions = await this.getMentions(); 89 | for (const mention of mentions) { 90 | if (this.lastCheckedTweetId && mention.id <= this.lastCheckedTweetId) { 91 | continue; 92 | } 93 | await this.handleMention(mention); 
94 | this.lastCheckedTweetId = mention.id; 95 | } 96 | } catch (error) { 97 | console.error("Error checking interactions:", error); 98 | } 99 | } 100 | 101 | async handleMention(tweet) { 102 | try { 103 | const tweetStored = await storeTweetIfNotExists({ 104 | id: tweet.id, 105 | text: tweet.text, 106 | userId: tweet.userId, 107 | username: tweet.username, 108 | conversationId: tweet.conversationId, 109 | inReplyToId: tweet.inReplyToStatusId, 110 | permanentUrl: tweet.permanentUrl, 111 | imageUrls: tweet.imageUrls, 112 | }); 113 | 114 | if (!tweetStored) { 115 | console.log("Tweet already processed, skipping:", tweet.id); 116 | return []; 117 | } 118 | console.log( 119 | "Handling mention:", 120 | `@${tweet.username} ${tweet.text} IMAGES: ${tweet.imageUrls?.join(" ")}` 121 | ); 122 | 123 | const roomId = tweet.conversationId || "twitter"; 124 | const promptText = `@${tweet.username}:\n${tweet.text}`; 125 | 126 | const responseText = await this.fetchTweetContent({ 127 | agentId: this.agent.getAgentId(), 128 | userId: `tw_user_${tweet.userId}`, 129 | roomId, 130 | text: promptText, 131 | imageUrls: tweet.imageUrls, 132 | type: tweet.imageUrls.length ? 
"text_and_image" : "text", 133 | }); 134 | console.log(responseText); 135 | if (this.dryRun) return; 136 | 137 | const tweets = await sendThreadedTweet(this, responseText, tweet.id); 138 | 139 | if (tweets.length > 0) { 140 | console.log( 141 | "Replied to mention:", 142 | tweets.map((t) => t.text).join("\n") 143 | ); 144 | for (const replyTweet of tweets) { 145 | await storeTweetIfNotExists({ 146 | id: replyTweet.id, 147 | text: replyTweet.text, 148 | userId: this.config.username, 149 | username: this.config.username, 150 | conversationId: tweet.conversationId, 151 | inReplyToId: tweet.id, 152 | permanentUrl: replyTweet.permanentUrl, 153 | imageUrls: replyTweet.imageUrls, 154 | }); 155 | } 156 | } 157 | 158 | return tweets; 159 | } catch (error) { 160 | console.error("Error handling mention:", error); 161 | return []; 162 | } 163 | } 164 | 165 | async fetchTweetContent(payload) { 166 | const url = `http://localhost:${PORT}/agent/input`; 167 | const body = { 168 | input: { 169 | agentId: payload.agentId, 170 | userId: payload.userId, 171 | roomId: payload.roomId, 172 | text: payload.text, 173 | type: payload.type, 174 | imageUrls: payload.imageUrls, 175 | }, 176 | }; 177 | 178 | try { 179 | const response = await axios.post(url, body, { 180 | headers: { "Content-Type": "application/json" }, 181 | }); 182 | 183 | const data = response.data; 184 | 185 | if (typeof data === "string") { 186 | return data; 187 | } else if (data.error) { 188 | throw new Error(`Server error: ${data.error}`); 189 | } else { 190 | return JSON.stringify(data); 191 | } 192 | } catch (error) { 193 | throw new Error(`Failed to fetch tweet content: ${error.message}`); 194 | } 195 | } 196 | 197 | async like(tweetId) { 198 | return this.likeTweet(tweetId); 199 | } 200 | } 201 | 202 | module.exports = { TwitterClient }; 203 | -------------------------------------------------------------------------------- /clients/discord/client.ts: 
-------------------------------------------------------------------------------- 1 | import { Message, MessageOptions, MessagePayload } from "discord.js"; 2 | import { DiscordBase, DiscordConfig } from "./base"; 3 | import axios from "axios"; 4 | import { InputSource, InputType } from "../../src/types"; 5 | import { createDiscordMemory } from "../../src/utils/memory"; 6 | 7 | export class DiscordClient extends DiscordBase { 8 | private checkInterval: NodeJS.Timeout | null = null; 9 | 10 | constructor(agent: any, config: DiscordConfig) { 11 | super(agent, config); 12 | this.checkInterval = null; 13 | } 14 | 15 | /** 16 | * Start the Discord client and begin listening for messages 17 | * @returns Promise that resolves when the client is ready 18 | */ 19 | async start(): Promise { 20 | await this.init(); 21 | 22 | // Set up message listener for DMs 23 | this.discordClient.on("messageCreate", async (message: Message) => { 24 | // Ignore messages from self or other bots 25 | if (message.author.bot) return; 26 | 27 | // Only handle DM messages 28 | if (message.channel.isDMBased()) { 29 | await this.handleDirectMessage(message); 30 | } 31 | }); 32 | 33 | // Set up polling interval if configured 34 | if (this.config.pollingInterval > 0) { 35 | const intervalMs = this.config.pollingInterval * 60 * 1000; 36 | this.checkInterval = setInterval(() => this.checkPendingTasks(), intervalMs); 37 | console.log(`Polling loop started. Will check every ${this.config.pollingInterval} minutes`); 38 | } 39 | 40 | console.log("Discord client started. 
Monitoring for DMs."); 41 | } 42 | 43 | /** 44 | * Stop the Discord client and clean up resources 45 | */ 46 | async stop(): Promise { 47 | if (this.checkInterval) { 48 | clearInterval(this.checkInterval); 49 | this.checkInterval = null; 50 | } 51 | await this.destroy(); 52 | console.log("Discord client stopped"); 53 | } 54 | 55 | /** 56 | * Handle incoming direct messages 57 | * @param message The Discord message object 58 | */ 59 | private async handleDirectMessage(message: Message): Promise { 60 | try { 61 | console.log( 62 | "Handling DM from:", 63 | `${message.author.tag} (${message.author.id}):`, 64 | message.content 65 | ); 66 | 67 | const responseText = await this.fetchAgentResponse({ 68 | agentId: this.config.agentId, 69 | userId: `discord_user_${message.author.id}`, 70 | roomId: `discord_dm_${message.author.id}`, 71 | text: message.content, 72 | type: InputType.TEXT, 73 | }); 74 | 75 | if (responseText && !this.config.dryRun) { 76 | await this.sendDirectMessage(message.author.id, responseText); 77 | console.log("Sent response:", responseText); 78 | } 79 | } catch (error) { 80 | console.error("Error handling direct message:", error); 81 | // Optionally notify user of error 82 | if (!this.config.dryRun && message.channel.isTextBased() && message.channel.isDMBased()) { 83 | await message.reply("Sorry, I encountered an error processing your message."); 84 | } 85 | } 86 | } 87 | 88 | /** 89 | * Fetch response from agent endpoint 90 | * @param payload Input payload for agent 91 | * @returns Response text from agent 92 | */ 93 | private async fetchAgentResponse(payload: { 94 | agentId: string; 95 | userId: string; 96 | roomId: string; 97 | text: string; 98 | type: InputType; 99 | }): Promise { 100 | const url = "http://localhost:3000/agent/input"; 101 | const body = { 102 | input: { 103 | ...payload, 104 | source: InputSource.DISCORD, 105 | }, 106 | }; 107 | 108 | try { 109 | const response = await axios.post(url, body, { 110 | headers: { "Content-Type": 
"application/json" }, 111 | }); 112 | 113 | const data = response.data; 114 | if (typeof data === "string") { 115 | return data; 116 | } else if (data.error) { 117 | throw new Error(`Server error: ${data.error}`); 118 | } else { 119 | return JSON.stringify(data); 120 | } 121 | } catch (error) { 122 | throw new Error(`Failed to fetch agent response: ${(error as Error).message}`); 123 | } 124 | } 125 | 126 | /** 127 | * Check for any pending tasks (placeholder for future implementation) 128 | */ 129 | private async checkPendingTasks(): Promise { 130 | // Implement any periodic checks or maintenance tasks here 131 | console.log("Checking for pending tasks..."); 132 | } 133 | 134 | /** 135 | * Find a user by their username 136 | * @param username Discord username to search for 137 | * @returns Discord user ID if found 138 | */ 139 | async findUserByUsername(username: string): Promise { 140 | try { 141 | // First try to find in cache 142 | const cachedUser = this.discordClient.users.cache.find( 143 | u => u.username.toLowerCase() === username.toLowerCase() 144 | ); 145 | 146 | if (cachedUser) { 147 | return cachedUser.id; 148 | } 149 | 150 | // If not in cache, we'll need the user's ID 151 | // For now, since we don't have a way to search by username, 152 | // we'll need to request the ID from the user 153 | throw new Error(`Unable to find user ${username}. 
Please provide the user's Discord ID directly.`); 154 | } catch (error) { 155 | console.error(`Error finding user ${username}:`, error); 156 | throw error; 157 | } 158 | } 159 | 160 | /** 161 | * Send a message to a Discord user 162 | * @param userIdOrUsername User's Discord ID or username 163 | * @param content Message content or options 164 | * @returns Promise that resolves when the message is sent 165 | */ 166 | async sendMessage( 167 | userIdOrUsername: string, 168 | content: string | MessagePayload | MessageOptions 169 | ): Promise { 170 | try { 171 | let userId = userIdOrUsername; 172 | 173 | // If not a snowflake ID, try to find user by username 174 | if (!/^\d+$/.test(userIdOrUsername)) { 175 | userId = await this.findUserByUsername(userIdOrUsername); 176 | } 177 | 178 | // Store message in memory for context 179 | if (typeof content === 'string') { 180 | await createDiscordMemory( 181 | `discord_user_${userId}`, 182 | this.config.agentId, 183 | `discord_dm_${userId}`, 184 | `Sent: ${content}`, 185 | "external" 186 | ); 187 | } 188 | 189 | const message = await this.sendDirectMessage(userId, content); 190 | if (message) { 191 | console.log(`Message sent to ${userIdOrUsername} (${userId}): ${content}`); 192 | } else { 193 | throw new Error("Failed to send message"); 194 | } 195 | } catch (error) { 196 | console.error(`Error sending message to ${userIdOrUsername}:`, error); 197 | throw error; 198 | } 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Liz 2 | 3 |
<img src="assets/liz.jpg" alt="Introducing: Liz" />
<h3>A Framework for AI Agents by Akrasia Labs</h3>
</div>

9 | 10 | Liz is a lightweight framework for building AI agents, reimagined for developers who demand power and simplicity. Inspired by Eliza from AI16Z, but rebuilt from the ground up with a focus on developer experience and control. 11 | 12 | ## Built for Developers Who Want 13 | 14 | - **Direct LLM Control**: Full access to prompts and model interactions 15 | - **Zero Magic**: Minimal abstractions for maximum understanding 16 | - **Ultimate Flexibility**: Build exactly what you need, how you need it 17 | 18 | ## Quick Start 19 | 20 | 1. **Clone and Install** 21 | 22 | ```bash 23 | git clone https://github.com/Akrasia0/liz 24 | cd liz 25 | pnpm install 26 | ``` 27 | 28 | 2. **Configure Environment** 29 | 30 | ```bash 31 | cp .env.example .env 32 | ``` 33 | 34 | Required environment variables: 35 | 36 | ```bash 37 | # Database - Choose one: 38 | DATABASE_URL="postgresql://user:password@localhost:5432/dbname" 39 | # or for SQLite: 40 | DATABASE_URL="file:./prisma/dev.db" 41 | 42 | # LLM APIs 43 | OPENAI_API_KEY="your-openai-key" 44 | OPENROUTER_API_KEY="your-openrouter-key" 45 | 46 | # Application 47 | APP_URL="http://localhost:3000" 48 | ``` 49 | 50 | 3. **Initialize Database** 51 | 52 | ```bash 53 | npm run init-db 54 | ``` 55 | 56 | 4. 
**Start Development** 57 | ```bash 58 | npm run dev 59 | ``` 60 | 61 | ## Express-Style Architecture 62 | 63 | We use Express-style middleware for a clear, linear processing flow: 64 | 65 | ```typescript 66 | // Example middleware setup 67 | const framework = new AgentFramework(); 68 | 69 | // Add standard middleware 70 | framework.use(validateInput); 71 | framework.use(loadMemories); 72 | framework.use(wrapContext); 73 | framework.use(createMemoryFromInput); 74 | framework.use(router); 75 | ``` 76 | 77 | ### Creating an Agent 78 | 79 | ```typescript 80 | const myAgent: Character = { 81 | name: "Assistant", 82 | agentId: "unique_id", 83 | system: "You are a helpful assistant.", 84 | bio: ["Your agent's backstory"], 85 | lore: ["Additional background"], 86 | messageExamples: [], // Example conversations 87 | postExamples: [], // Example social posts 88 | topics: ["expertise1", "expertise2"], 89 | style: { 90 | all: ["consistent", "helpful"], 91 | chat: ["conversational"], 92 | post: ["engaging"], 93 | }, 94 | adjectives: ["friendly", "knowledgeable"], 95 | }; 96 | 97 | const agent = new BaseAgent(myAgent); 98 | ``` 99 | 100 | ### Adding Routes 101 | 102 | ```typescript 103 | agent.addRoute({ 104 | name: "conversation", 105 | description: "Handle natural conversation", 106 | handler: async (context, req, res) => { 107 | const response = await llmUtils.getTextFromLLM( 108 | context, 109 | "anthropic/claude-3-sonnet" 110 | ); 111 | await res.send(response); 112 | }, 113 | }); 114 | ``` 115 | 116 | ### Twitter Integration 117 | 118 | ```typescript 119 | const twitter = new TwitterClient(agent, { 120 | username: process.env.TWITTER_USERNAME, 121 | password: process.env.TWITTER_PASSWORD, 122 | email: process.env.TWITTER_EMAIL, 123 | retryLimit: 3, 124 | postIntervalHours: 4, 125 | pollingInterval: 5, // minutes 126 | dryRun: false, 127 | }); 128 | 129 | await twitter.start(); 130 | ``` 131 | 132 | ## Core Components 133 | 134 | ### Looker Module 135 | 136 | The Looker 
module provides content analysis and character management capabilities: 137 | 138 | ```typescript 139 | // Initialize the Looker 140 | const looker = new Looker({ 141 | modelName: "anthropic/claude-3.5-sonnet", 142 | temperature: 0.7, 143 | characterId: "default-character", 144 | }); 145 | 146 | // Analyze content 147 | const insights = await looker.analyzeContent(textContent); 148 | 149 | // Get character information 150 | const character = await looker.getCharacterInfo(); 151 | 152 | // Summarize tweets 153 | const summary = await looker.summarizeTweets(tweetIds); 154 | ``` 155 | 156 | Key features: 157 | 158 | - Content analysis for understanding themes and sentiment 159 | - Character configuration for consistent personality 160 | - Tweet summarization for better context management 161 | - Extensible design for custom analysis pipelines 162 | 163 | ### Memory System 164 | 165 | The memory system uses Prisma with SQLite (or PostgreSQL) to maintain conversation context: 166 | 167 | ```typescript 168 | interface Memory { 169 | id: string; 170 | userId: string; 171 | agentId: string; 172 | roomId: string; 173 | content: any; 174 | type: string; 175 | generator: string; // "external" or "llm" 176 | createdAt: Date; 177 | } 178 | ``` 179 | 180 | Key features: 181 | 182 | - Automatic context loading for each request 183 | - Memory creation for both user inputs and agent responses 184 | - Indexed by room, user, and agent IDs 185 | - Configurable memory limits and types 186 | 187 | ### LLM Integration 188 | 189 | Supports multiple LLM providers through a unified interface: 190 | 191 | ```typescript 192 | const llmUtils = new LLMUtils(); 193 | 194 | // Text generation 195 | const response = await llmUtils.getTextFromLLM( 196 | prompt, 197 | "anthropic/claude-3-sonnet" 198 | ); 199 | 200 | // Structured output 201 | const result = await llmUtils.getObjectFromLLM(prompt, schema, LLMSize.LARGE); 202 | 203 | // Image analysis 204 | const description = await 
llmUtils.getImageDescriptions(imageUrls); 205 | ``` 206 | 207 | ### Twitter Capabilities 208 | 209 | - **Automated Posting**: Configurable intervals for regular content 210 | - **Mention Monitoring**: Real-time interaction handling 211 | - **Thread Management**: Automatic thread building and response chaining 212 | - **Rate Limiting**: Built-in rate limiting and retry mechanisms 213 | - **Memory Integration**: Conversational context across interactions 214 | 215 | ## Docker Support 216 | 217 | ```bash 218 | # Build and run with Docker Compose 219 | docker-compose up --build 220 | ``` 221 | 222 | Environment configuration in docker-compose.yml: 223 | 224 | ```yaml 225 | services: 226 | app: 227 | build: . 228 | ports: 229 | - "3000:3000" 230 | environment: 231 | - TEE_MODE=DOCKER 232 | - OPENAI_API_KEY=${OPENAI_API_KEY} 233 | - OPENROUTER_API_KEY=${OPENROUTER_API_KEY} 234 | - TWITTER_USERNAME=${TWITTER_USERNAME} 235 | - TWITTER_PASSWORD=${TWITTER_PASSWORD} 236 | - TWITTER_EMAIL=${TWITTER_EMAIL} 237 | volumes: 238 | - ./prisma/dev.db:/app/prisma/dev.db 239 | ``` 240 | 241 | ## Project Structure 242 | 243 | ``` 244 | src/ 245 | ├── middleware/ # Pipeline steps 246 | │ ├── validate-input.ts # Input validation 247 | │ ├── load-memories.ts # Context loading 248 | │ ├── wrap-context.ts # Request wrapping 249 | │ ├── create-memory.ts # Memory creation 250 | │ └── router.ts # Route handling 251 | ├── agent/ # Core agent logic 252 | ├── framework/ # Express-style system 253 | ├── looker/ # Content analysis system 254 | │ ├── looker.ts # Analysis utilities 255 | │ └── character.json # Character configuration 256 | ├── types/ # TypeScript definitions 257 | ├── utils/ # Helper functions 258 | │ ├── llm.ts # LLM interactions 259 | │ ├── memory.ts # Memory management 260 | │ ├── db.ts # Database utilities 261 | │ └── initDb.ts # DB initialization 262 | └── example/ # Implementation examples 263 | 264 | clients/ 265 | └── twitter/ # Twitter integration 266 | ├── client.js # Main 
client class 267 | ├── base.js # Core functionality 268 | └── utils.js # Helper functions 269 | ``` 270 | 271 | ## Available Scripts 272 | 273 | ```bash 274 | # Build the project 275 | npm run build 276 | 277 | # Start production 278 | npm start 279 | 280 | # Development with auto-reload 281 | npm run dev 282 | 283 | # Test Twitter integration 284 | npm run twitter 285 | 286 | # Database management 287 | npm run db:init # Initialize database 288 | npm run db:reset # Reset database 289 | npm run prisma:studio # Database UI 290 | ``` 291 | 292 | ## Our Philosophy 293 | 294 | We believe the best way to build AI agents is to work closely with the prompts and build a set of composable units that can be strung together to make powerful agentic loops. Our approach is informed by Anthropic's research on constructing reliable AI systems. 295 | 296 | ## Contributing 297 | 298 | While Liz is meant to be forked and modified, we welcome contributions to the base template: 299 | 300 | 1. Fork the repository 301 | 2. Create your feature branch 302 | 3. Commit your changes 303 | 4. Push to the branch 304 | 5. Open a Pull Request 305 | 306 | ## License 307 | 308 | MIT 309 | 310 | --- 311 | 312 | Visit [akrasia.ai/liz](https://akrasia.ai/liz) to learn more. 
313 | -------------------------------------------------------------------------------- /clients/twitter/base.js: -------------------------------------------------------------------------------- 1 | const { EventEmitter } = require("events"); 2 | const { Scraper } = require("agent-twitter-client"); 3 | 4 | // Twitter client configuration 5 | 6 | /** 7 | * @typedef {Object} Tweet 8 | * @property {string} id - Tweet ID 9 | * @property {string} name - Author's display name 10 | * @property {string} username - Author's username 11 | * @property {string} text - Tweet content 12 | * @property {number} timestamp - Unix timestamp in seconds 13 | * @property {string} userId - Author's user ID 14 | * @property {string} conversationId - Conversation thread ID 15 | * @property {string} inReplyToStatusId - ID of parent tweet if reply 16 | * @property {string} permanentUrl - Permanent URL to tweet 17 | * @property {string[]} [imageUrls] - Optional array of image URLs 18 | * @property {Object} [metrics] - Optional engagement metrics 19 | */ 20 | 21 | class RequestQueue { 22 | constructor() { 23 | this.queue = []; 24 | this.processing = false; 25 | } 26 | 27 | /** 28 | * Adds a request to the queue 29 | * @param {Function} request - Async function to execute 30 | * @returns {Promise} Result of the request 31 | */ 32 | async add(request) { 33 | return new Promise((resolve, reject) => { 34 | this.queue.push(async () => { 35 | try { 36 | const result = await request(); 37 | resolve(result); 38 | } catch (error) { 39 | reject(error); 40 | } 41 | }); 42 | this.processQueue(); 43 | }); 44 | } 45 | 46 | /** 47 | * Processes queued requests with rate limiting 48 | * @returns {Promise} 49 | */ 50 | async processQueue() { 51 | // Skip if already processing or queue is empty 52 | if (this.processing || this.queue.length === 0) return; 53 | 54 | this.processing = true; 55 | while (this.queue.length > 0) { 56 | const request = this.queue.shift(); 57 | try { 58 | await request(); 59 | // 
Standard rate limiting delay 60 | await this.delay(1000); 61 | } catch (error) { 62 | console.error("Error processing request:", error); 63 | // Increased backoff on error 64 | await this.delay(2000); 65 | } 66 | } 67 | this.processing = false; 68 | } 69 | 70 | /** 71 | * Utility delay function 72 | * @param {number} ms - Milliseconds to delay 73 | * @returns {Promise} 74 | */ 75 | delay(ms) { 76 | return new Promise((resolve) => setTimeout(resolve, ms)); 77 | } 78 | } 79 | 80 | class TwitterBase extends EventEmitter { 81 | /** 82 | * @param {Object} agent - Agent instance 83 | * @param {Object} config - Twitter configuration 84 | */ 85 | constructor(agent, config) { 86 | super(); 87 | this.agent = agent; 88 | this.config = config; 89 | this.twitterClient = new Scraper(); 90 | this.requestQueue = new RequestQueue(); 91 | this.lastCheckedTweetId = null; 92 | } 93 | 94 | /** 95 | * Initialize the Twitter client and login 96 | * @returns {Promise} 97 | * @throws {Error} If login fails after maximum retries 98 | */ 99 | async init() { 100 | console.log("Initializing Twitter client..."); 101 | let retries = this.config.retryLimit; 102 | 103 | while (retries > 0) { 104 | try { 105 | if (await this.twitterClient.isLoggedIn()) { 106 | console.log("Already logged in"); 107 | break; 108 | } 109 | 110 | await this.twitterClient.login( 111 | this.config.username, 112 | this.config.password, 113 | this.config.email, 114 | this.config.twoFactorSecret 115 | ); 116 | 117 | if (await this.twitterClient.isLoggedIn()) { 118 | console.log("Successfully logged in"); 119 | break; 120 | } 121 | } catch (error) { 122 | console.error(`Login attempt failed: ${error.message}`); 123 | } 124 | 125 | retries--; 126 | console.log(`Retrying... 
(${retries} attempts left)`); 127 | await new Promise((resolve) => setTimeout(resolve, 2000)); 128 | } 129 | 130 | if (retries === 0) { 131 | throw new Error("Failed to login after maximum retries"); 132 | } 133 | } 134 | 135 | /** 136 | * Fetch a single tweet by ID 137 | * @param {string} tweetId - ID of tweet to fetch 138 | * @returns {Promise} Tweet object or null if not found 139 | */ 140 | async getTweet(tweetId) { 141 | try { 142 | const tweet = await this.requestQueue.add(() => 143 | this.twitterClient.getTweet(tweetId) 144 | ); 145 | 146 | // Get image URLs if present 147 | const imageUrls = []; 148 | if (tweet.media) { 149 | for (const mediaItem of tweet.media) { 150 | if (mediaItem.type === "photo") { 151 | imageUrls.push(mediaItem.url); 152 | } 153 | } 154 | } 155 | 156 | return { 157 | id: tweet.id, 158 | name: tweet.name, 159 | username: tweet.username, 160 | text: tweet.text, 161 | timestamp: tweet.timestamp, 162 | userId: tweet.userId, 163 | conversationId: tweet.conversationId, 164 | inReplyToStatusId: tweet.inReplyToStatusId, 165 | permanentUrl: tweet.permanentUrl, 166 | imageUrls: imageUrls.length > 0 ? 
imageUrls : undefined, 167 | }; 168 | } catch (error) { 169 | console.error("Error fetching tweet:", error); 170 | return null; 171 | } 172 | } 173 | 174 | /** 175 | * Fetch home timeline tweets 176 | * @param {number} [count=20] - Number of tweets to fetch 177 | * @returns {Promise} Array of tweets 178 | */ 179 | async fetchHomeTimeline(count = 20) { 180 | try { 181 | const timeline = await this.requestQueue.add(() => 182 | this.twitterClient.fetchHomeTimeline(count) 183 | ); 184 | 185 | return timeline.map((tweet) => ({ 186 | id: tweet.rest_id, 187 | name: tweet.core?.user_results?.result?.legacy?.name, 188 | username: tweet.core?.user_results?.result?.legacy?.screen_name, 189 | text: tweet.legacy?.full_text, 190 | timestamp: new Date(tweet.legacy?.created_at).getTime() / 1000, 191 | userId: tweet.legacy?.user_id_str, 192 | conversationId: tweet.legacy?.conversation_id_str, 193 | inReplyToStatusId: tweet.legacy?.in_reply_to_status_id_str, 194 | permanentUrl: `https://twitter.com/${tweet.core?.user_results?.result?.legacy?.screen_name}/status/${tweet.rest_id}`, 195 | })); 196 | } catch (error) { 197 | console.error("Error fetching timeline:", error); 198 | return []; 199 | } 200 | } 201 | 202 | /** 203 | * Send a new tweet 204 | * @param {string} text - Tweet content 205 | * @param {string} [replyToId] - Optional ID of tweet to reply to 206 | * @returns {Promise} Posted tweet or null if failed 207 | */ 208 | async sendTweet(text, replyToId) { 209 | try { 210 | const result = await this.requestQueue.add(() => 211 | this.twitterClient.sendTweet(text, replyToId) 212 | ); 213 | 214 | const response = await result.json(); 215 | if (!response?.data?.create_tweet?.tweet_results?.result) { 216 | throw new Error("Invalid response from Twitter"); 217 | } 218 | 219 | const tweet = response.data.create_tweet.tweet_results.result; 220 | return { 221 | id: tweet.rest_id, 222 | text: tweet.legacy.full_text, 223 | timestamp: new Date(tweet.legacy.created_at).getTime() / 1000, 224 
| userId: tweet.legacy.user_id_str, 225 | conversationId: tweet.legacy.conversation_id_str, 226 | permanentUrl: `https://twitter.com/${this.config.username}/status/${tweet.rest_id}`, 227 | }; 228 | } catch (error) { 229 | console.error("Error sending tweet:", error); 230 | return null; 231 | } 232 | } 233 | 234 | /** 235 | * Like a tweet 236 | * @param {string} tweetId - ID of tweet to like 237 | * @returns {Promise} Success status 238 | */ 239 | async likeTweet(tweetId) { 240 | try { 241 | await this.requestQueue.add(() => this.twitterClient.likeTweet(tweetId)); 242 | return true; 243 | } catch (error) { 244 | console.error("Error liking tweet:", error); 245 | return false; 246 | } 247 | } 248 | 249 | /** 250 | * Search for tweets 251 | * @param {string} query - Search query 252 | * @param {number} [count=20] - Number of tweets to fetch 253 | * @returns {Promise} Array of matching tweets 254 | */ 255 | async searchTweets(query, count = 20) { 256 | try { 257 | const results = await this.requestQueue.add(() => 258 | this.twitterClient.fetchSearchTweets(query, count, "Latest") 259 | ); 260 | return results.tweets.map((tweet) => { 261 | const imageUrls = []; 262 | for (const photoItem of tweet.photos || []) { 263 | imageUrls.push(photoItem.url); 264 | } 265 | 266 | let cleanedText = tweet.text; 267 | if (imageUrls.length > 0) { 268 | const urlPattern = /https:\/\/t\.co\/\w+/g; 269 | cleanedText = cleanedText.replace(urlPattern, ""); 270 | } 271 | 272 | return { 273 | id: tweet.id, 274 | name: tweet.name, 275 | username: tweet.username, 276 | text: cleanedText, 277 | timestamp: tweet.timestamp, 278 | userId: tweet.userId, 279 | conversationId: tweet.conversationId, 280 | inReplyToStatusId: tweet.inReplyToStatusId, 281 | permanentUrl: tweet.permanentUrl, 282 | imageUrls: imageUrls.length > 0 ? 
imageUrls : undefined, 283 | }; 284 | }); 285 | } catch (error) { 286 | console.error("Error searching tweets:", error); 287 | return []; 288 | } 289 | } 290 | 291 | /** 292 | * Get mentions of the authenticated user 293 | * @param {number} [count=5] - Number of mentions to fetch 294 | * @returns {Promise} Array of mentions 295 | */ 296 | async getMentions(count = 10) { 297 | return this.searchTweets(`@${this.config.username}`, count); 298 | } 299 | } 300 | 301 | module.exports = { TwitterBase }; 302 | -------------------------------------------------------------------------------- /src/utils/llm.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * LLM Utilities Module 3 | * 4 | * This module provides a unified interface for interacting with various LLM providers, 5 | * including OpenAI and OpenRouter. It supports text generation, structured output, 6 | * boolean responses, and image analysis capabilities. 7 | */ 8 | 9 | import OpenAI from "openai"; 10 | import axios from "axios"; 11 | import { z } from "zod"; 12 | import { zodResponseFormat } from "openai/helpers/zod"; 13 | import { LLMSize } from "../types"; 14 | import { ChatCompletionContentPartImage } from "openai/resources/chat/completions"; 15 | 16 | /** 17 | * Interface for OpenRouter API responses 18 | */ 19 | interface OpenRouterResponse { 20 | choices: Array<{ 21 | message: { 22 | content: string; 23 | }; 24 | }>; 25 | } 26 | 27 | /** 28 | * Zod schema for boolean responses with explanations 29 | */ 30 | const booleanSchema = z.object({ 31 | result: z.boolean(), 32 | explanation: z.string(), 33 | }); 34 | 35 | /** 36 | * LLMUtils class provides methods for interacting with language models 37 | * 38 | * Note: We use JSON responses only from OpenAI because the other SDKs are unreliable. 
39 | */ 40 | export class LLMUtils { 41 | private openai: OpenAI; 42 | private openrouterApiKey: string; 43 | 44 | constructor() { 45 | const openaiApiKey = process.env.OPENAI_API_KEY; 46 | const openrouterApiKey = process.env.OPENROUTER_API_KEY; 47 | if (!openaiApiKey) { 48 | throw new Error("OPENAI_API_KEY environment variable is required"); 49 | } 50 | if (!openrouterApiKey) { 51 | throw new Error("OPENROUTER_API_KEY environment variable is required"); 52 | } 53 | this.openai = new OpenAI({ apiKey: openaiApiKey }); 54 | this.openrouterApiKey = openrouterApiKey; 55 | } 56 | 57 | /** 58 | * Gets a boolean response from the LLM with explanation 59 | * 60 | * @param prompt The prompt to send to the LLM 61 | * @param size The size of the model to use (LARGE or SMALL) 62 | * @returns A boolean result based on the LLM's analysis 63 | */ 64 | async getBooleanFromLLM(prompt: string, size: LLMSize): Promise { 65 | const model = size === LLMSize.LARGE ? "gpt-4o" : "gpt-4o-mini"; 66 | const response = await this.openai.beta.chat.completions.parse({ 67 | model, 68 | messages: [ 69 | { 70 | role: "user", 71 | content: [ 72 | { 73 | type: "text", 74 | text: `${prompt}\n\nRespond with true or false. 
Include a brief explanation of your reasoning.`,
						},
					],
				},
			],
			response_format: zodResponseFormat(booleanSchema, "booleanSchema"),
		});

		if (!response.choices[0]?.message?.content) {
			throw new Error("Invalid response format from OpenAI");
		}

		const analysis = JSON.parse(response.choices[0].message.content);
		return analysis.result;
	}

	/**
	 * Gets a structured object response from the LLM based on a Zod schema
	 *
	 * @param prompt The prompt to send to the LLM
	 * @param schema The Zod schema that defines the expected response structure
	 * @param size The size of the model to use (LARGE or SMALL)
	 * @returns A typed object matching the provided schema
	 * @throws If OpenAI returns no content, or if the content fails schema validation
	 */
	async getObjectFromLLM<T>(
		prompt: string,
		schema: z.ZodSchema<T>,
		size: LLMSize
	): Promise<T> {
		const model = size === LLMSize.LARGE ? "gpt-4o" : "gpt-4o-mini";

		const response = await this.openai.beta.chat.completions.parse({
			model,
			messages: [
				{
					role: "user",
					content: [
						{
							type: "text",
							text: prompt,
						},
					],
				},
			],
			response_format: zodResponseFormat(schema, "customSchema"),
		});

		if (!response.choices[0]?.message?.content) {
			throw new Error("Invalid response format from OpenAI");
		}

		// Validate the parsed JSON against the schema so callers get a guaranteed
		// shape (consistent with getObjectFromLLMWithImages below).
		return schema.parse(JSON.parse(response.choices[0].message.content));
	}

	/**
	 * Gets a free-form text response from the LLM via OpenRouter
	 *
	 * @param prompt The prompt to send to the LLM
	 * @param model The model identifier (e.g., "anthropic/claude-3.5-sonnet")
	 * @returns The generated text response
	 * @throws If OpenRouter returns an unexpected response shape
	 */
	async getTextFromLLM(prompt: string, model: string): Promise<string> {
		const response = await axios.post(
			"https://openrouter.ai/api/v1/chat/completions",
			{
				model,
				messages: [
					{
						role: "user",
						content: prompt,
					},
				],
			},
			{
				headers: {
					Authorization: `Bearer ${this.openrouterApiKey}`,
					"Content-Type": "application/json",
					"HTTP-Referer": process.env.APP_URL || "http://localhost:3000",
				},
			}
		);

		if (!response.data?.choices?.[0]?.message?.content) {
			throw new Error("Invalid response format from OpenRouter");
		}

		return response.data.choices[0].message.content;
	}

	/**
	 * Streams the LLM response in real-time using SSE from OpenRouter.
	 *
	 * @param prompt The user prompt string.
	 * @param model The model to use (e.g., "gpt-4o" or "gpt-4o-mini").
	 * @param onToken Callback that receives each partial token as it arrives.
	 * @returns A Promise that resolves once the stream is completed.
	 */
	async getTextFromLLMStream(
		prompt: string,
		model: string,
		onToken: (token: string) => void
	): Promise<void> {
		try {
			const response = await axios.post(
				"https://openrouter.ai/api/v1/chat/completions",
				{
					model,
					messages: [
						{
							role: "user",
							content: prompt,
						},
					],
					// Enable streaming
					stream: true,
				},
				{
					headers: {
						Authorization: `Bearer ${this.openrouterApiKey}`,
						"Content-Type": "application/json",
						"HTTP-Referer": process.env.APP_URL || "http://localhost:3000",
					},
					// Needed to parse the SSE stream
					responseType: "stream",
				}
			);

			return new Promise((resolve, reject) => {
				// Listen for data events on the response stream
				response.data.on("data", (chunk: Buffer) => {
					parseSSEChunk(chunk, onToken);
				});

				// The stream has ended
				response.data.on("end", () => {
					resolve();
				});

				// Handle errors
				response.data.on("error", (error: unknown) => {
					reject(error);
				});
			});
		} catch (error) {
			if (axios.isAxiosError(error)) {
				throw new Error(`OpenRouter API error: ${error.message}`);
			}
			throw error;
		}
	}

	/**
	 * Gets a structured object response from the LLM, with images attached to
	 * the prompt as base64 data URLs.
	 *
	 * @param prompt The prompt to send to the LLM
	 * @param schema The Zod schema that defines the expected response structure
	 * @param imageUrls URLs of the images to attach (unfetchable/unsupported images are skipped)
	 * @param size The size of the model to use (LARGE or SMALL)
	 * @returns A typed object matching the provided schema
	 * @throws If no image could be processed, or the response is invalid
	 */
	async getObjectFromLLMWithImages<T>(
		prompt: string,
		schema: z.ZodSchema<T>,
		imageUrls: string[],
		size: LLMSize
	): Promise<T> {
		const base64Images = await convertUrlsToBase64(imageUrls);
		if (base64Images.length === 0) {
			throw new Error("Failed to process images");
		}

		const model = size === LLMSize.LARGE ? "gpt-4o" : "gpt-4o-mini";

		const response = await this.openai.beta.chat.completions.parse({
			model,
			messages: [
				{
					role: "user",
					content: [
						{
							type: "text",
							text: prompt,
						},
						...base64Images.map(
							(image): ChatCompletionContentPartImage => ({
								type: "image_url",
								image_url: {
									url: `data:${image.contentType};base64,${image.base64}`,
								},
							})
						),
					],
				},
			],
			response_format: zodResponseFormat(schema, "customSchema"),
		});

		if (!response.choices[0]?.message?.content) {
			throw new Error("Invalid response format from OpenAI");
		}

		return schema.parse(JSON.parse(response.choices[0].message.content));
	}

	/**
	 * Gets a boolean decision from the LLM, with images attached to the prompt.
	 *
	 * @param prompt The prompt to send to the LLM
	 * @param imageUrls URLs of the images to attach (unfetchable/unsupported images are skipped)
	 * @param size The size of the model to use (LARGE or SMALL)
	 * @returns The boolean result extracted from the structured response
	 * @throws If no image could be processed, or the response is invalid
	 */
	async getBooleanFromLLMWithImages(
		prompt: string,
		imageUrls: string[],
		size: LLMSize
	): Promise<boolean> {
		const base64Images = await convertUrlsToBase64(imageUrls);
		if (base64Images.length === 0) {
			throw new Error("Failed to process images");
		}

		const model = size === LLMSize.LARGE ? "gpt-4o" : "gpt-4o-mini";

		const response = await this.openai.beta.chat.completions.parse({
			model,
			messages: [
				{
					role: "user",
					content: [
						{
							type: "text",
							text: prompt,
						},
						...base64Images.map(
							(image): ChatCompletionContentPartImage => ({
								type: "image_url",
								image_url: {
									url: `data:${image.contentType};base64,${image.base64}`,
								},
							})
						),
					],
				},
			],
			response_format: zodResponseFormat(booleanSchema, "booleanSchema"),
		});

		if (!response.choices[0]?.message?.content) {
			throw new Error("Invalid response format from OpenAI");
		}

		const analysis = JSON.parse(response.choices[0].message.content);
		return analysis.result;
	}

	/**
	 * Gets a free-form text response from the LLM via OpenRouter, with images
	 * attached to the prompt as base64 data URLs.
	 *
	 * @param prompt The prompt to send to the LLM
	 * @param imageUrls URLs of the images to attach (unfetchable/unsupported images are skipped)
	 * @param model The model identifier (e.g., "openai/gpt-4o")
	 * @returns The generated text response
	 * @throws If no image could be processed, or OpenRouter returns an unexpected response
	 */
	async getTextWithImageFromLLM(
		prompt: string,
		imageUrls: string[],
		model: string
	): Promise<string> {
		const base64Images = await convertUrlsToBase64(imageUrls);
		if (base64Images.length === 0) {
			throw new Error("Failed to process images");
		}

		try {
			const response = await axios.post(
				"https://openrouter.ai/api/v1/chat/completions",
				{
					model,
					messages: [
						{
							role: "user",
							content: [
								{
									type: "text",
									text: prompt,
								},
								...base64Images.map(
									(image): ChatCompletionContentPartImage => ({
										type: "image_url",
										image_url: {
											url: `data:${image.contentType};base64,${image.base64}`,
										},
									})
								),
							],
						},
					],
					max_tokens: 1000,
				},
				{
					headers: {
						Authorization: `Bearer ${this.openrouterApiKey}`,
						"Content-Type": "application/json",
						"HTTP-Referer": process.env.APP_URL || "http://localhost:3000",
					},
				}
			);

			if (!response.data?.choices?.[0]?.message?.content) {
				throw new Error("Invalid response format from OpenRouter");
			}

			return response.data.choices[0].message.content;
		} catch (error) {
			if (axios.isAxiosError(error)) {
				// Use error.message for a readable message (consistent with the
				// other OpenRouter methods in this class).
				throw new Error(`OpenRouter API error: ${error.message}`);
			}
			throw error;
		}
	}

	/**
	 * Streams the LLM response in real-time using SSE from OpenRouter,
	 * including base64-encoded images in the request.
	 *
	 * @param prompt The user prompt string.
	 * @param imageUrls Array of URLs for the images you want to attach.
	 * @param model The model to use (e.g., "gpt-4o" or "gpt-4o-mini").
	 * @param onToken Callback that receives each partial token as it arrives.
	 * @returns A Promise that resolves once the stream is completed.
	 */
	async getTextWithImageFromLLMStream(
		prompt: string,
		imageUrls: string[],
		model: string,
		onToken: (token: string) => void
	): Promise<void> {
		const base64Images = await convertUrlsToBase64(imageUrls);
		if (base64Images.length === 0) {
			throw new Error("Failed to process images");
		}

		try {
			const response = await axios.post(
				"https://openrouter.ai/api/v1/chat/completions",
				{
					model,
					messages: [
						{
							role: "user",
							content: [
								{
									type: "text",
									text: prompt,
								},
								...base64Images.map((image) => ({
									type: "image_url",
									image_url: {
										url: `data:${image.contentType};base64,${image.base64}`,
									},
								})),
							],
						},
					],
					stream: true,
					max_tokens: 1000,
				},
				{
					headers: {
						Authorization: `Bearer ${this.openrouterApiKey}`,
						"Content-Type": "application/json",
						"HTTP-Referer": process.env.APP_URL || "http://localhost:3000",
					},
					responseType: "stream",
				}
			);

			return new Promise((resolve, reject) => {
				response.data.on("data", (chunk: Buffer) => {
					parseSSEChunk(chunk, onToken);
				});

				response.data.on("end", () => {
					resolve();
				});

				response.data.on("error", (error: unknown) => {
					reject(error);
				});
			});
		} catch (error) {
			if (axios.isAxiosError(error)) {
				throw new Error(`OpenRouter API error: ${error.message}`);
			}
			throw error;
		}
	}

	/**
	 * Asks a vision-capable model for a short textual description of the
	 * provided image(s).
	 *
	 * @param imageUrls URLs of the images to describe (at least one required)
	 * @param model The OpenRouter model identifier (defaults to "openai/gpt-4o")
	 * @returns A concise natural-language description of the image(s)
	 * @throws If no images are provided, none could be processed, or the API call fails
	 */
	async getImageDescriptions(
		imageUrls: string[],
		model: string = "openai/gpt-4o"
	): Promise<string> {
		if (!imageUrls || imageUrls.length === 0)
			throw new Error("No images provided");

		const base64Images = await convertUrlsToBase64(imageUrls);
		if (base64Images.length === 0) {
			throw new Error("Failed to process images");
		}

		try {
			const response = await axios.post(
				"https://openrouter.ai/api/v1/chat/completions",
				{
					model,
					messages: [
						{
							role: "user",
							content: [
								{
									type: "text",
									text: "Describe the image(s) in a couple of concise sentences that capture the most important elements of the image:",
								},
								...base64Images.map((image) => ({
									type: "image_url",
									image_url: {
										url: `data:${image.contentType};base64,${image.base64}`,
									},
								})),
							],
						},
					],
					max_tokens: 1000,
				},
				{
					headers: {
						Authorization: `Bearer ${this.openrouterApiKey}`,
						"Content-Type": "application/json",
						"HTTP-Referer": process.env.APP_URL || "http://localhost:3000",
					},
				}
			);

			if (!response.data?.choices?.[0]?.message?.content) {
				throw new Error("Invalid response format from OpenRouter");
			}

			return response.data.choices[0].message.content;
		} catch (error) {
			if (axios.isAxiosError(error)) {
				throw new Error(
					`OpenRouter API error: ${error.response?.statusText || error.message}`
				);
			}
			throw error;
		}
	}
}

// A fetched image converted to base64, paired with its MIME content type so it
// can be embedded as a data URL.
interface Base64Image {
	base64: string;
	contentType: string;
}

/**
 * Fetches an image URL and converts it to base64.
 *
 * @param url The image URL to fetch
 * @returns The base64 payload and content type, or null if the fetch failed or
 *          the content type is not a supported image format
 */
async function fetchImageAsBase64(url: string): Promise<Base64Image | null> {
	try {
		const response = await axios.get(url, {
			responseType: "arraybuffer",
		});
		const contentType = response.headers["content-type"];
		if (
			!contentType ||
			!["image/jpeg", "image/png", "image/gif", "image/webp"].includes(
				contentType
			)
		) {
			console.warn(`Unsupported image type: ${contentType}, url: ${url}`);
			return null;
		}
		return {
			base64: Buffer.from(response.data, "binary").toString("base64"),
			contentType,
		};
	} catch (error) {
		console.error("Error fetching image:", error);
		return null;
	}
}

/**
 * Converts a list of image URLs to base64, silently dropping any that fail to
 * fetch or have an unsupported content type.
 *
 * @param imageUrls The image URLs to convert
 * @returns The successfully converted images (possibly empty)
 */
async function convertUrlsToBase64(
	imageUrls: string[]
): Promise<Base64Image[]> {
	const base64Images: Base64Image[] = [];
	for (const url of imageUrls) {
		const result = await fetchImageAsBase64(url);
		if (result) {
			base64Images.push(result);
		}
	}
	return base64Images;
}

/**
 * Parses a data stream of SSE lines from OpenRouter.
 *
 * NOTE(review): this parser is stateless per chunk — an SSE "data:" line split
 * across two network chunks will fail JSON.parse and be dropped (logged below).
 * A stateful line buffer across chunks would be needed to handle that case;
 * confirm whether partial-line chunks occur in practice before relying on this
 * for lossless streaming.
 *
 * @param chunk The chunk of data (a portion of the SSE event stream).
 * @param onToken Callback to handle each token (partial text).
 */
function parseSSEChunk(chunk: Buffer, onToken: (token: string) => void) {
	const raw = chunk.toString("utf-8");
	const lines = raw.split("\n");

	for (const line of lines) {
		if (!line || line.trim().length === 0) {
			continue;
		}
		// "data: [DONE]" indicates the end of the stream
		if (line.trim() === "data: [DONE]") {
			return; // you could handle a cleanup or a "done" signal here if needed
		}
		if (line.startsWith("data: ")) {
			// Each line after "data:" should be valid JSON, e.g.:
			// data: {"id":"...","object":"...","created":...,"choices":[...]...}
			const jsonString = line.substring("data: ".length).trim();
			try {
				const parsed = JSON.parse(jsonString);
				if (parsed.choices && parsed.choices.length > 0) {
					// The partial token usually appears in choices[0].delta.content
					const token = parsed.choices[0].delta?.content;
					if (token) {
						onToken(token);
					}
				}
			} catch (err) {
				// If some lines are not valid JSON, you can handle or ignore them
				console.error("Failed to parse SSE line:", line, err);
			}
		}
	}
}