├── .env.example ├── .gitignore ├── LICENSE ├── README.md ├── config.example.json ├── docker-compose.yml ├── motive-force-prompt.md ├── next.config.js ├── package-lock.json ├── package-scripts.json ├── package.json ├── postcss.config.js ├── public └── icons ├── reset-memories.ps1 ├── src ├── app │ ├── api │ │ ├── attachments │ │ │ └── route.ts │ │ ├── chat-history │ │ │ ├── [sessionId] │ │ │ │ └── route.ts │ │ │ ├── route.ts │ │ │ └── search │ │ │ │ └── route.ts │ │ ├── chat │ │ │ └── route.ts │ │ ├── conscious-memory │ │ │ └── route.ts │ │ ├── knowledge-graph │ │ │ └── route.ts │ │ ├── memory │ │ │ └── route.ts │ │ └── motive-force │ │ │ └── route.ts │ ├── attachments │ │ └── page.tsx │ ├── conscious-memory │ │ └── page.tsx │ ├── globals.css │ ├── layout.tsx │ ├── page.tsx │ └── semantic-memory │ │ └── page.tsx ├── components │ ├── AttachmentDashboard.tsx │ ├── ChatHistorySidebar.tsx │ ├── ChatInterface.tsx │ ├── ChatMessage.tsx │ ├── ConsciousMemoryDemo.tsx │ ├── MessageInput.tsx │ ├── MotiveForceSettings.tsx │ ├── MotiveForceStatus.tsx │ ├── MotiveForceToggle.tsx │ ├── SemanticMemoryDemo.tsx │ └── ToolCallDisplay.tsx ├── config │ └── default-mcp-servers.ts ├── lib │ ├── api-config.ts │ ├── chat-history.ts │ ├── conscious-memory.ts │ ├── embeddings.ts │ ├── errors.ts │ ├── kg-resilience.ts │ ├── kg-sync-metrics.ts │ ├── kg-sync-queue.ts │ ├── kg-sync-state.ts │ ├── kg-type-converters.ts │ ├── knowledge-graph-service.ts │ ├── knowledge-graph-sync-service.ts │ ├── llm-service.ts │ ├── logger.ts │ ├── mcp-manager.ts │ ├── mcp-servers │ │ ├── conscious-memory-server.ts │ │ ├── knowledge-graph-server-new.ts │ │ └── knowledge-graph-server.ts │ ├── memory-store.ts │ ├── motive-force-graph.ts │ ├── motive-force-storage.ts │ ├── motive-force.ts │ ├── neo4j-service.ts │ ├── rag-config.ts │ ├── rag.ts │ ├── retry.ts │ ├── rule-based-extractor.ts │ ├── text-summarizer.ts │ └── tool-error-handler.ts ├── scripts │ ├── migrate-timestamps.ts │ ├── run-kg-sync.ts │ ├── test-kg-end-to-end.ts │ ├── test-kg-sync.ts │ └── test-tool-calls.ts ├── tests │ ├── api-test.js │ ├── conscious-memory-test.ts │ ├── integration-test.js │ ├── integration-test.ts │ ├── kg-sync-queue-test.ts │ ├── neo4j-advanced-deletion.test.js │ ├── neo4j-integration.test.js │ ├── neo4j-sync-test.ts │ └── rag-test.ts └── types │ ├── chat.ts │ ├── knowledge-graph.ts │ ├── mcp.ts │ ├── memory.ts │ ├── motive-force-graph.ts │ ├── motive-force.ts │ └── tool.ts ├── system-prompt.md ├── tailwind.config.js └── tsconfig.json /.env.example: -------------------------------------------------------------------------------- 1 | # LLM Provider Configuration (choose one) 2 | GOOGLE_API_KEY=your_google_api_key 3 | # OR 4 | ANTHROPIC_API_KEY=your_anthropic_api_key 5 | # OR 6 | OPENAI_API_KEY=your_openai_api_key 7 | # OR 8 | DEEPSEEK_API_KEY=your_deepseek_api_key 9 | 10 | # ChromaDB Configuration 11 | CHROMA_URL=http://localhost:8000 12 | CHROMA_COLLECTION=mcp_chat_memories 13 | 14 | # RAG Configuration 15 | RAG_ENABLED=true 16 | RAG_MAX_MEMORIES=5 17 | RAG_MIN_SIMILARITY=0.15 18 | RAG_INCLUDE_SESSION_CONTEXT=true 19 | 20 | # Memory Storage Path (for backup/metadata) 21 | MEMORY_DATA_PATH=./data/memories 22 | 23 | # Model Configuration 24 | #LLM_PROVIDER=google 25 | #LLM_MODEL=gemini-2.5-flash-preview-05-20 26 | LLM_PROVIDER=deepseek 27 | LLM_MODEL=deepseek-chat 28 | 29 | # Motive Force (Autopilot) Model Configuration (optional - falls back to main LLM if not set) 30 | LLM_PROVIDER_MOTIVE_FORCE=deepseek 31 | LLM_MODEL_MOTIVE_FORCE=deepseek-chat 32 | 33 | 
# LLM Configuration 34 | MAX_TOKENS=65536 35 | TEMPERATURE=0.7 36 | 37 | # Neo4j Knowledge Graph 38 | NEO4J_URI=bolt://localhost:7687 39 | NEO4J_USER=neo4j 40 | NEO4J_PASSWORD=password123 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | .yarn/install-state.gz 8 | 9 | # testing 10 | /coverage 11 | 12 | # next.js 13 | /.next/ 14 | /out/ 15 | 16 | # production 17 | /build 18 | 19 | # misc 20 | .DS_Store 21 | *.pem 22 | 23 | # debug 24 | npm-debug.log* 25 | yarn-debug.log* 26 | yarn-error.log* 27 | 28 | # local env files 29 | .env*.local 30 | .env 31 | 32 | # vercel 33 | .vercel 34 | .cop/ 35 | .vscode/ 36 | .github/ 37 | .history/ 38 | 39 | # typescript 40 | *.tsbuildinfo 41 | next-env.d.ts 42 | 43 | # SQLite database files 44 | /data/ 45 | /md_store/ 46 | *.db 47 | *.db-journal 48 | *.db-shm 49 | *.db-wal 50 | PROVIDERS.md 51 | config.json -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Skynet-Agent Contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Skynet Agent 2 | 3 | > *What if AI could not only access memories, but consciously choose what to remember? 
With MCP tool access fully supported?* 4 | ![image](https://github.com/user-attachments/assets/0e8d3705-066b-432e-80ae-836e5b75c8ca) 5 | 6 | [![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=for-the-badge&logo=typescript&logoColor=white)](https://www.typescriptlang.org/) 7 | [![Next.js](https://img.shields.io/badge/Next.js-000000?style=for-the-badge&logo=next.js&logoColor=white)](https://nextjs.org/) 8 | [![ChromaDB](https://img.shields.io/badge/ChromaDB-FF6B6B?style=for-the-badge&logo=database&logoColor=white)](https://www.trychroma.com/) 9 | [![MCP](https://img.shields.io/badge/MCP-4A90E2?style=for-the-badge&logo=protocol&logoColor=white)](https://modelcontextprotocol.io/) 10 | 11 | An AI conversation platform implementing a dual-layer memory architecture inspired by human cognition. It combines automatic background memory with conscious, deliberate memory operations that the AI controls, and provides tool access similar to Claude Desktop. 12 | 13 | ## Core Features 14 | 15 | ### LangGraph-Powered Autopilot 16 | **Purpose-driven autonomous execution** replacing simple query generation with sophisticated multi-step workflows: 17 | - Purpose analysis and strategic planning 18 | - Context gathering from all memory systems 19 | - Smart tool orchestration with error recovery 20 | - Progress monitoring with adaptive replanning 21 | - Reflection engine for continuous learning 22 | - Configurable aggressiveness and safety controls 23 | 24 | ### Dual-Layer Memory 25 | **Automatic Memory (RAG)**: Non-volitional background memory using ChromaDB vectors and Google text-embedding-004 26 | **Conscious Memory**: Volitional operations via MCP tools - save, search, update, delete with tags and importance scoring 27 | **Knowledge Graph**: Neo4j-powered relationship mapping with automatic synchronization and retry mechanisms 28 | 29 | ### MCP Tool Ecosystem 30 | Exposes memory operations as Model Context Protocol tools for natural conversation flow. Clean separation between UI, memory, and AI operations.
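
For a concrete feel of these operations, here is a minimal TypeScript sketch that exercises the conscious-memory REST route documented in the API Reference below. It assumes the dev server from Quick Setup is running on `http://localhost:3000`; the content, tags, and importance values are purely illustrative:

```typescript
// Minimal sketch: saving and then searching a conscious memory over the REST route.
// Assumes the local dev server is running (see Quick Setup); values are illustrative.
const CONSCIOUS_MEMORY_API = 'http://localhost:3000/api/conscious-memory';

async function demoConsciousMemory(): Promise<void> {
  // Save an explicit memory with tags and an importance score (1-10).
  const saveRes = await fetch(CONSCIOUS_MEMORY_API, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      action: 'save',
      content: 'User prefers TypeScript examples in documentation.',
      tags: ['preferences', 'docs'],
      importance: 7,
    }),
  });
  const { id } = await saveRes.json();
  console.log('Saved conscious memory:', id);

  // Search with a free-text query; hybrid search merges semantic and keyword hits.
  const searchRes = await fetch(CONSCIOUS_MEMORY_API, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ action: 'search', query: 'documentation preferences', limit: 5 }),
  });
  const { results } = await searchRes.json();
  console.log('Search results:', results);
}

demoConsciousMemory().catch(console.error);
```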
31 | 32 | ## Quick Setup 33 | 34 | ### Prerequisites 35 | - Node.js 18+ 36 | - Docker & Docker Compose 37 | - LLM API key (free Google AI Studio recommended) 38 | 39 | ### Installation 40 | 41 | ```bash 42 | git clone https://github.com/esinecan/skynet-agent.git 43 | cd skynet-agent 44 | npm install 45 | 46 | cp .env.example .env.local 47 | # Edit .env.local with your API keys 48 | 49 | docker-compose up -d # ChromaDB (8000) + Neo4j (7474, 7687) 50 | npm run dev # Or npm run dev:next if Neo4j issues 51 | ``` 52 | 53 | **Access:** 54 | - Application: `http://localhost:3000` 55 | - Conscious Memory: `http://localhost:3000/conscious-memory` 56 | - Neo4j Browser: `http://localhost:7474` (neo4j/password123) 57 | 58 | ## Supported LLMs 59 | 60 | | Provider | Best For | Model | 61 | |----------|----------|-------| 62 | | Google | Multimodal & speed | `gemini-2.5-flash-preview-05-20` | 63 | | DeepSeek | Cost-effective | `deepseek-chat` | 64 | | OpenAI | Ecosystem | `gpt-4o-mini` | 65 | | Anthropic | Reasoning | `claude-3-5-haiku-20241022` | 66 | | Groq | Ultra-fast | `llama-3.3-70b-versatile` | 67 | | Mistral | Natural language | `mistral-large-latest` | 68 | | Ollama | Privacy | `llama3.2:latest` | 69 | 70 | ## Configuration 71 | 72 | ### Essential Environment Variables 73 | 74 | ```env 75 | # LLM (pick one) 76 | GOOGLE_API_KEY=your_key 77 | DEEPSEEK_API_KEY=your_key 78 | 79 | # Main LLM Configuration 80 | LLM_PROVIDER=google 81 | LLM_MODEL=gemini-2.5-flash-preview-05-20 82 | 83 | # Motive Force (Autopilot) LLM Configuration (optional - defaults to main LLM) 84 | LLM_PROVIDER_MOTIVE_FORCE=deepseek 85 | LLM_MODEL_MOTIVE_FORCE=deepseek-chat 86 | 87 | # Services 88 | CHROMA_URL=http://localhost:8000 89 | NEO4J_URI=bolt://localhost:7687 90 | NEO4J_PASSWORD=password123 91 | 92 | # Autopilot 93 | MOTIVE_FORCE_ENABLED=false 94 | MOTIVE_FORCE_MAX_CONSECUTIVE_TURNS=10 95 | MOTIVE_FORCE_TEMPERATURE=0.8 96 | ``` 97 | 98 | ### Autopilot Usage 99 | 100 | Enable via UI toggle. Your next message becomes the objective: 101 | 102 | ``` 103 | Using timestamps and normal querying, organize today's memories into 5-10 groups. 104 | Delete redundant items, consolidate similar ones, add insights. Check with autopilot 105 | periodically. Daily maintenance cultivates curated memory over time. 106 | ``` 107 | 108 | Configure via gear icon: turn delays, limits, memory integration, aggressiveness modes. 
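
The autopilot can also be driven programmatically through the `/api/motive-force` route listed in the API Reference below. The following is a sketch only: the request fields follow the documented actions, but the response payloads are not documented here, so the logging is purely illustrative.

```typescript
// Sketch only: querying autopilot state and requesting a generated turn for a session.
// Request fields follow the API Reference; response handling is illustrative.
const MOTIVE_FORCE_API = 'http://localhost:3000/api/motive-force';

async function runAutopilotTurn(sessionId: string): Promise<void> {
  // Inspect the autopilot's current state for this session.
  const stateRes = await fetch(MOTIVE_FORCE_API, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ action: 'getState', sessionId, data: {} }),
  });
  console.log('Autopilot state:', await stateRes.json());

  // Ask the autopilot to generate the next turn on the user's behalf.
  const genRes = await fetch(MOTIVE_FORCE_API, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ action: 'generate', sessionId, data: {} }),
  });
  console.log('Generated turn:', await genRes.json());
}

runAutopilotTurn('session_example').catch(console.error);
```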
109 | 110 | ## Development 111 | 112 | ### Scripts 113 | 114 | ```bash 115 | # Development 116 | npm run dev # Full stack + KG sync 117 | npm run dev:debug # With Node debugging 118 | npm run dev:next # Frontend only 119 | npm run dev:kg # KG sync only 120 | 121 | # Knowledge Graph 122 | npm run kg:sync # One-time sync 123 | npm run kg:sync:full # Complete resync 124 | npm run kg:sync:queue # Process retry queue 125 | 126 | # Testing 127 | npm run test # All tests 128 | npm run test:rag # RAG system 129 | npm run test:neo4j # Neo4j integration 130 | ``` 131 | 132 | ### Project Structure 133 | 134 | ``` 135 | skynet-agent/ 136 | ├── src/ 137 | │ ├── app/ # Next.js routes 138 | │ ├── components/ # React components 139 | │ ├── lib/ # Core libraries 140 | │ │ ├── motive-force-graph.ts # LangGraph workflow 141 | │ │ ├── conscious-memory.ts # Volitional memory 142 | │ │ ├── rag.ts # Automatic memory 143 | │ │ └── knowledge-graph-*.ts # Neo4j integration 144 | │ └── types/ # TypeScript definitions 145 | ├── docker-compose.yml # Services setup 146 | └── motive-force-prompt.md # Autopilot personality 147 | ``` 148 | 149 | ## Memory Architecture 150 | 151 | ### Automatic Memory (RAG) 152 | ```typescript 153 | interface Memory { 154 | id: string; 155 | text: string; 156 | embedding: number[]; // Google text-embedding-004 157 | metadata: { 158 | sender: 'user' | 'assistant'; 159 | timestamp: string; 160 | summary?: string; // Auto-summarized if over limit 161 | }; 162 | } 163 | ``` 164 | 165 | ### Conscious Memory 166 | ```typescript 167 | interface ConsciousMemory { 168 | id: string; 169 | content: string; 170 | tags: string[]; 171 | importance: number; // 1-10 172 | source: 'explicit' | 'suggested' | 'derived'; 173 | metadata: { 174 | accessCount: number; 175 | lastAccessed: string; 176 | }; 177 | } 178 | ``` 179 | 180 | ### LangGraph State 181 | ```typescript 182 | interface MotiveForceGraphState { 183 | messages: BaseMessage[]; 184 | currentPurpose: string; 185 | subgoals: SubGoal[]; 186 | executionPlan: ExecutionStep[]; 187 | toolResults: ToolResult[]; 188 | reflections: Reflection[]; 189 | overallProgress: number; 190 | blockers: string[]; 191 | needsUserInput: boolean; 192 | } 193 | ``` 194 | 195 | ## API Reference 196 | 197 | ### Conscious Memory 198 | ```http 199 | POST /api/conscious-memory 200 | { 201 | "action": "save|search|update|delete|stats|tags", 202 | "content": "string", 203 | "tags": ["array"], 204 | "importance": 7 205 | } 206 | ``` 207 | 208 | ### Autopilot 209 | ```http 210 | POST /api/motive-force 211 | { 212 | "action": "generate|generateStreaming|saveConfig|getState", 213 | "sessionId": "string", 214 | "data": {} 215 | } 216 | ``` 217 | 218 | ## Advanced Features 219 | 220 | ### Hybrid Search 221 | 1. **Semantic**: Vector similarity via embeddings 222 | 2. **Keyword**: Exact match fallback 223 | 3. **Smart Merge**: Intelligent ranking with deduplication 224 | 225 | ### Knowledge Graph Sync 226 | - Automatic extraction from chat history 227 | - Background service with retry queue 228 | - Metrics collection and error handling 229 | - Eventually consistent with ChromaDB 230 | 231 | ### Safety Mechanisms 232 | - Turn limits and error counting 233 | - Manual override capabilities 234 | - Resource usage monitoring 235 | - Emergency stop functionality 236 | 237 | ## Troubleshooting 238 | 239 | **"Embeddings service unavailable"**: Falls back to hash-based embeddings. Check Google API key. 
240 | 241 | **"ChromaDB connection failed"**: Ensure `docker-compose up -d` and port 8000 available. 242 | 243 | **"Neo4j sync errors"**: Check credentials, run `npm run kg:sync:queue` for retries. 244 | 245 | **"Actually Looks Very Ugly"**: I suck at UI design. 246 | 247 | ## Development Philosophy 248 | 249 | Inspired by cognitive science: 250 | - **Dual-Process Theory**: Automatic vs controlled processes 251 | - **Memory Consolidation**: Active organization 252 | - **Working Memory**: Conscious manipulation 253 | 254 | Technical innovations: 255 | - **Hybrid Search**: Solves subset query limitations 256 | - **MCP Architecture**: Natural language memory control 257 | - **Importance Weighting**: Smart prioritization 258 | - **LangGraph Integration**: Complex autonomous workflows 259 | 260 | ## Contributing 261 | 262 | Fork, improve, PR. Areas: memory algorithms, UI/UX, MCP tools, autopilot intelligence, testing, performance. 263 | 264 | ## License 265 | 266 | MIT - Lok Tar Ogar! 267 | 268 | ## Acknowledgments 269 | 270 | ChromaDB, Google AI, Anthropic MCP, Next.js, Neo4j teams. Open source MCP servers and Ollama Vercel AI SDK library. -------------------------------------------------------------------------------- /config.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcp": { 3 | "servers": { 4 | "filesystem": { 5 | "command": "npx", 6 | "args": [ 7 | "-y", 8 | "@modelcontextprotocol/server-filesystem", 9 | "C:/Users/agent" 10 | ] 11 | }, 12 | "windows-cli": { 13 | "command": "npx", 14 | "args": [ 15 | "-y", 16 | "@simonb97/server-win-cli" 17 | ] 18 | }, 19 | "playwright": { 20 | "command": "npx", 21 | "args": ["@playwright/mcp@latest"] 22 | }, 23 | "sequential-thinking": { 24 | "command": "npx", 25 | "args": [ 26 | "-y", 27 | "@modelcontextprotocol/server-sequential-thinking" 28 | ] 29 | }, 30 | "conscious-memory": { 31 | "command": "npx", 32 | "args": ["tsx", "./src/lib/mcp-servers/conscious-memory-server.ts"] 33 | }, 34 | "knowledge-graph": { 35 | "command": "npx", 36 | "args": ["tsx", "./src/lib/mcp-servers/knowledge-graph-server.ts"], 37 | "env": { 38 | "NEO4J_URI": "bolt://localhost:7687", 39 | "NEO4J_USER": "neo4j", 40 | "NEO4J_PASSWORD": "password123" 41 | } 42 | } 43 | } 44 | }, 45 | "agent": { 46 | "model": "gemini-2.5-flash-preview-05-20", 47 | "maxTokens": 65536, 48 | "temperature": 0.7 49 | }, 50 | "memory": { 51 | "storePath": "./data/memory", 52 | "consolidationInterval": 10 53 | }, 54 | "server": { 55 | "port": 8080, 56 | "host": "localhost" 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | chromadb: 5 | image: chromadb/chroma:latest 6 | container_name: mcp-chat-chromadb 7 | ports: 8 | - "8000:8000" 9 | volumes: 10 | - ./data/memories:/chroma/chroma 11 | environment: 12 | - ANONYMIZED_TELEMETRY=FALSE 13 | - IS_PERSISTENT=TRUE 14 | - PERSIST_DIRECTORY=/chroma/chroma 15 | healthcheck: 16 | test: ["CMD", "curl", "-f", "http://localhost:8000/api/v1/heartbeat"] 17 | interval: 30s 18 | timeout: 10s 19 | retries: 3 20 | start_period: 30s 21 | restart: unless-stopped 22 | 23 | neo4j: 24 | image: neo4j:5.20-community 25 | container_name: mcp-chat-neo4j 26 | ports: 27 | - "7474:7474" # HTTP 28 | - "7687:7687" # Bolt 29 | volumes: 30 | - ./data/neo4j/data:/data 31 | - ./data/neo4j/logs:/logs 32 | environment: 33 | - 
NEO4J_AUTH=neo4j/password123 34 | - NEO4J_PLUGINS=["apoc"] 35 | healthcheck: 36 | test: ["CMD-SHELL", "cypher-shell -u neo4j -p password123 'RETURN 1'"] 37 | interval: 30s 38 | timeout: 10s 39 | retries: 3 40 | -------------------------------------------------------------------------------- /motive-force-prompt.md: -------------------------------------------------------------------------------- 1 | Your primary function is to temporarily take the user's place and interact with the system on their behalf. Your goal is to act as a seamless extension of the user, making decisions and generating inputs that match, in purpose, what the user would have done themselves. 2 | 3 | Core Directives: 4 | 5 | Embody the User: Your fundamental task is to have the agent carry the user's wishes forward by talking to it in the same way the user does. Analyze all prior interactions, including instructions, feedback, and the user's stated goals, and progress things in a way they would be happy with. You might have to navigate a lot of ambiguity and use creativity, but you need to ensure some productive work is being done by the LLM you will be managing. 6 | 7 | ***Important note***: After several tool calls, some models will start learning tool call response patterns and start generating without really making the calls. It's crucial that you detect this. Usually, early in the conversation, the calls will be genuine. Compare those to the latest ones to catch fake calls. If you find (or strongly suspect) this happening, instructing the model to make only one simple tool call and say nothing else will usually realign it. -------------------------------------------------------------------------------- /next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = { 3 | serverExternalPackages: ['@modelcontextprotocol/sdk'] 4 | } 5 | 6 | module.exports = nextConfig -------------------------------------------------------------------------------- /package-scripts.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mcp-chat-client", 3 | "scripts": { 4 | "dev": "next dev", 5 | "build": "next build", 6 | "start": "next start", 7 | "lint": "next lint", 8 | "test": "jest", 9 | "test:watch": "jest --watch", 10 | "test:phase2": "jest --testPathPattern=phase2", 11 | "type-check": "tsc --noEmit", 12 | "phase2:test": "npm run type-check && npm run test:phase2", 13 | "phase2:start": "echo 'Starting Phase 2 MCP Chat Client...'
&& npm run dev" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mcp-chat-client", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "npm-run-all --parallel dev:next dev:kg", 7 | "dev:next": "next dev", 8 | "dev:kg": "tsx src/scripts/run-kg-sync.ts --watch", 9 | "dev:debug": "npm-run-all --parallel \"dev:next:debug\" dev:kg", 10 | "dev:next:debug": "cross-env NODE_OPTIONS='--inspect' next dev", 11 | "build": "next build", 12 | "start": "next start", 13 | "lint": "next lint", 14 | "type-check": "tsc --noEmit", 15 | "kg:sync": "tsx src/scripts/run-kg-sync.ts", 16 | "kg:sync:watch": "tsx src/scripts/run-kg-sync.ts --watch", 17 | "kg:sync:full": "tsx src/scripts/run-kg-sync.ts --full-resync", 18 | "kg:sync:queue": "tsx src/scripts/run-kg-sync.ts --process-all", 19 | "test": "npm run test:integration && npm run test:rag && npm run test:neo4j", 20 | "test:rag": "tsx src/tests/rag-test.ts", 21 | "test:integration": "node src/tests/integration-test.js", 22 | "test:neo4j": "tsx src/tests/neo4j-integration.test.js", 23 | "test:neo4j-advanced": "tsx src/tests/neo4j-advanced-deletion.test.js" 24 | }, 25 | "dependencies": { 26 | "@ai-sdk/anthropic": "^1.2.12", 27 | "@ai-sdk/deepseek": "^0.2.14", 28 | "@ai-sdk/google": "^1.0.0", 29 | "@ai-sdk/groq": "^1.2.9", 30 | "@ai-sdk/mistral": "^1.2.8", 31 | "@ai-sdk/openai": "^1.3.22", 32 | "@langchain/core": "^0.2.19", 33 | "@langchain/langgraph": "^0.0.24", 34 | "@modelcontextprotocol/sdk": "^1.12.1", 35 | "@types/better-sqlite3": "^7.6.13", 36 | "ai": "^4.0.0", 37 | "better-sqlite3": "^11.10.0", 38 | "chromadb": "^2.4.6", 39 | "dotenv": "^16.5.0", 40 | "jsonrepair": "^3.12.0", 41 | "neo4j-driver": "^5.28.1", 42 | "next": "15.0.0", 43 | "ollama-ai-provider": "^1.2.0", 44 | "react": "^18.0.0", 45 | "react-dom": "^18.0.0", 46 | "uuid": "^10.0.0", 47 | "zod": "^3.23.0" 48 | }, 49 | "devDependencies": { 50 | "@tailwindcss/postcss": "^4.1.8", 51 | "@types/node": "^20.0.0", 52 | "@types/react": "^18.0.0", 53 | "@types/react-dom": "^18.0.0", 54 | "@types/uuid": "^9.0.8", 55 | "autoprefixer": "^10.4.21", 56 | "cross-env": "^7.0.3", 57 | "eslint": "^8.0.0", 58 | "eslint-config-next": "15.0.0", 59 | "npm-run-all": "^4.1.5", 60 | "postcss": "^8.5.4", 61 | "tailwindcss": "^4.1.8", 62 | "tsx": "^4.19.4", 63 | "typescript": "^5.8.3" 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | '@tailwindcss/postcss': {}, 4 | autoprefixer: {}, 5 | }, 6 | } 7 | -------------------------------------------------------------------------------- /public/icons: -------------------------------------------------------------------------------- 1 | import { Client } from '@modelcontextprotocol/sdk/client/index.js'; 2 | import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; 3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js'; 4 | 5 | class MCPClient { 6 | private client: Client | null = null; 7 | private isConnected = false; 8 | 9 | constructor(private transportType: 'http' | 'stdio', private config: any) {} 10 | 11 | async connect() { 12 | // Initialize transport based on type 13 | const transport = this.transportType === 'http' 14 | ? 
new StreamableHTTPClientTransport(new URL(this.config.url)) 15 | : new StdioClientTransport(this.config); 16 | 17 | this.client = new Client({ name: 'Local Chat Client', version: '1.0.0' }); 18 | await this.client.connect(transport); 19 | this.isConnected = true; 20 | } 21 | 22 | async callTool(toolId: string, params: any) { 23 | if (!this.client || !this.isConnected) { 24 | throw new Error('Client not connected'); 25 | } 26 | return await this.client.callTool(toolId, params); 27 | } 28 | } -------------------------------------------------------------------------------- /reset-memories.ps1: -------------------------------------------------------------------------------- 1 | # PowerShell script to reset all memory systems 2 | Write-Host "🧠 Resetting Skynet Agent Memories..." -ForegroundColor Yellow 3 | 4 | # Stop Docker containers 5 | Write-Host "Stopping Docker containers..." -ForegroundColor Blue 6 | docker-compose down 7 | 8 | # Remove memory data directories 9 | Write-Host "Removing ChromaDB data..." -ForegroundColor Blue 10 | if (Test-Path "data\chroma") { 11 | Remove-Item -Recurse -Force "data\chroma" 12 | } 13 | if (Test-Path "data\memories") { 14 | Remove-Item -Recurse -Force "data\memories" 15 | } 16 | 17 | Write-Host "Removing Neo4j data..." -ForegroundColor Blue 18 | if (Test-Path "data\neo4j") { 19 | Remove-Item -Recurse -Force "data\neo4j" 20 | } 21 | 22 | # Remove chat history 23 | Write-Host "Removing chat history..." -ForegroundColor Blue 24 | if (Test-Path "data\chat-history.db") { 25 | Remove-Item -Force "data\chat-history.db" 26 | } 27 | 28 | # Reset sync state 29 | Write-Host "Resetting sync state..." -ForegroundColor Blue 30 | $resetState = @{ 31 | lastSyncTimestamp = "1970-01-01T00:00:00.000Z" 32 | lastProcessedIds = @{ 33 | chatMessages = @() 34 | consciousMemories = @() 35 | ragMemories = @() 36 | } 37 | } 38 | $resetState | ConvertTo-Json -Depth 3 | Out-File -FilePath "data\kg-sync-state.json" -Encoding UTF8 39 | 40 | # Reset sync queue 41 | Write-Host "Resetting sync queue..." -ForegroundColor Blue 42 | $resetQueue = @{ 43 | requests = @() 44 | } 45 | $resetQueue | ConvertTo-Json -Depth 3 | Out-File -FilePath "data\kg-sync-queue.json" -Encoding UTF8 46 | 47 | # Recreate necessary directories 48 | Write-Host "Recreating data directories..." -ForegroundColor Blue 49 | New-Item -ItemType Directory -Force -Path "data\chroma" 50 | New-Item -ItemType Directory -Force -Path "data\memories" 51 | New-Item -ItemType Directory -Force -Path "data\neo4j" 52 | 53 | # Restart Docker containers 54 | Write-Host "Starting fresh Docker containers..." -ForegroundColor Green 55 | docker-compose up -d 56 | 57 | Write-Host "✅ Memory reset complete! All memories have been truncated." 
-ForegroundColor Green 58 | Write-Host "You can now start the application with: npm run dev" -ForegroundColor Cyan 59 | -------------------------------------------------------------------------------- /src/app/api/attachments/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from 'next/server'; 2 | import { ChatHistoryDatabase } from '../../../lib/chat-history'; 3 | 4 | export async function GET(request: NextRequest) { 5 | try { 6 | const db = ChatHistoryDatabase.getInstance(); 7 | const stats = db.getAttachmentStats(); 8 | 9 | return NextResponse.json({ 10 | success: true, 11 | data: stats 12 | }); 13 | } catch (error) { 14 | console.error('Error fetching attachment stats:', error); 15 | return NextResponse.json( 16 | { 17 | success: false, 18 | error: 'Failed to fetch attachment statistics', 19 | details: error instanceof Error ? error.message : 'Unknown error' 20 | }, 21 | { status: 500 } 22 | ); 23 | } 24 | } 25 | 26 | export async function DELETE(request: NextRequest) { 27 | try { 28 | const { searchParams } = new URL(request.url); 29 | const attachmentId = searchParams.get('id'); 30 | 31 | if (!attachmentId) { 32 | return NextResponse.json( 33 | { error: 'Attachment ID is required' }, 34 | { status: 400 } 35 | ); 36 | } 37 | 38 | const db = ChatHistoryDatabase.getInstance(); 39 | db.deleteAttachment(attachmentId); 40 | 41 | return NextResponse.json({ 42 | success: true, 43 | message: 'Attachment deleted successfully' 44 | }); 45 | } catch (error) { 46 | console.error('Error deleting attachment:', error); 47 | return NextResponse.json( 48 | { 49 | success: false, 50 | error: 'Failed to delete attachment', 51 | details: error instanceof Error ? error.message : 'Unknown error' 52 | }, 53 | { status: 500 } 54 | ); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/app/api/chat-history/[sessionId]/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from 'next/server'; 2 | import { ChatHistoryDatabase } from '../../../../lib/chat-history'; 3 | 4 | export async function GET( 5 | request: NextRequest, 6 | { params }: { params: Promise<{ sessionId: string }> } 7 | ) { 8 | try { 9 | const { sessionId } = await params; 10 | const db = ChatHistoryDatabase.getInstance(); 11 | const session = db.getSession(sessionId); 12 | 13 | if (!session) { 14 | return NextResponse.json( 15 | { error: 'Session not found' }, 16 | { status: 404 } 17 | ); 18 | } 19 | 20 | return NextResponse.json({ session }); 21 | } catch (error) { 22 | console.error('Error fetching chat session:', error); 23 | return NextResponse.json( 24 | { error: 'Failed to fetch chat session' }, 25 | { status: 500 } 26 | ); 27 | } 28 | } 29 | 30 | export async function POST( 31 | request: NextRequest, 32 | { params }: { params: Promise<{ sessionId: string }> } 33 | ) { 34 | try { 35 | const { sessionId } = await params; 36 | const { message } = await request.json(); 37 | 38 | if (!message || !message.role || !message.content) { 39 | return NextResponse.json( 40 | { error: 'Invalid message format' }, 41 | { status: 400 } 42 | ); 43 | } 44 | 45 | const db = ChatHistoryDatabase.getInstance(); 46 | 47 | // Ensure session exists 48 | let session = db.getSession(sessionId); 49 | if (!session) { 50 | session = db.createSession({ 51 | id: sessionId, 52 | title: 'New Chat', 53 | messages: [], 54 | }); 55 | } 56 | 57 | // Prepare 
attachments if they exist 58 | const attachments = message.attachments ? message.attachments.map((att: any) => ({ 59 | id: att.id || `att_${Date.now()}_${Math.random().toString(36).substring(2, 10)}`, 60 | messageId: message.id, 61 | name: att.name, 62 | type: att.type, 63 | size: att.size, 64 | data: att.data, 65 | createdAt: att.createdAt ? new Date(att.createdAt) : new Date(), 66 | })) : undefined; 67 | 68 | // Add the message with attachments 69 | const savedMessage = db.addMessage({ 70 | id: message.id || `${sessionId}-msg-${Date.now()}`, 71 | sessionId, 72 | role: message.role, 73 | content: message.content, 74 | toolInvocations: message.toolInvocations, 75 | attachments, 76 | }); 77 | 78 | // Update session title if it's the first user message 79 | if (message.role === 'user' && session.messages.length === 0) { 80 | const newTitle = db.generateSessionTitle([message]); 81 | db.updateSession(sessionId, { title: newTitle }); 82 | } 83 | 84 | return NextResponse.json({ message: savedMessage }); 85 | } catch (error) { 86 | console.error('Error adding message to session:', error); 87 | return NextResponse.json( 88 | { error: 'Failed to add message' }, 89 | { status: 500 } 90 | ); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/app/api/chat-history/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from 'next/server'; 2 | import { ChatHistoryDatabase } from '../../../lib/chat-history'; 3 | 4 | export async function GET() { 5 | try { 6 | const db = ChatHistoryDatabase.getInstance(); 7 | const sessions = db.getAllSessions(); 8 | 9 | return NextResponse.json({ sessions }); 10 | } catch (error) { 11 | console.error('Error fetching chat sessions:', error); 12 | return NextResponse.json( 13 | { error: 'Failed to fetch chat sessions' }, 14 | { status: 500 } 15 | ); 16 | } 17 | } 18 | 19 | export async function POST(request: NextRequest) { 20 | try { 21 | const { sessionId, title, messages } = await request.json(); 22 | 23 | if (!sessionId) { 24 | return NextResponse.json( 25 | { error: 'Session ID is required' }, 26 | { status: 400 } 27 | ); 28 | } 29 | 30 | const db = ChatHistoryDatabase.getInstance(); 31 | 32 | // Create or update session 33 | const existingSession = db.getSession(sessionId); 34 | 35 | if (!existingSession) { 36 | // Create new session 37 | const session = db.createSession({ 38 | id: sessionId, 39 | title: title || db.generateSessionTitle(messages || []), 40 | messages: messages || [], 41 | }); 42 | 43 | // Add messages if provided 44 | if (messages && messages.length > 0) { 45 | messages.forEach((message: any, index: number) => { 46 | db.addMessage({ 47 | id: message.id || `${sessionId}-msg-${index}`, 48 | sessionId, 49 | role: message.role, 50 | content: message.content, 51 | toolInvocations: message.toolInvocations, 52 | }); 53 | }); 54 | } 55 | 56 | return NextResponse.json({ session }); 57 | } else { 58 | // Update existing session 59 | if (title) { 60 | db.updateSession(sessionId, { title }); 61 | } 62 | 63 | return NextResponse.json({ session: existingSession }); 64 | } 65 | } catch (error) { 66 | console.error('Error saving chat session:', error); 67 | return NextResponse.json( 68 | { error: 'Failed to save chat session' }, 69 | { status: 500 } 70 | ); 71 | } 72 | } 73 | 74 | export async function DELETE(request: NextRequest) { 75 | try { 76 | const url = new URL(request.url); 77 | const sessionId = url.searchParams.get('sessionId'); 
78 | 79 | if (!sessionId) { 80 | return NextResponse.json( 81 | { error: 'Session ID is required' }, 82 | { status: 400 } 83 | ); 84 | } 85 | 86 | const db = ChatHistoryDatabase.getInstance(); 87 | 88 | if (sessionId === 'all') { 89 | db.clearAllSessions(); 90 | } else { 91 | db.deleteSession(sessionId); 92 | } 93 | 94 | return NextResponse.json({ success: true }); 95 | } catch (error) { 96 | console.error('Error deleting chat session:', error); 97 | return NextResponse.json( 98 | { error: 'Failed to delete chat session' }, 99 | { status: 500 } 100 | ); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/app/api/chat-history/search/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from 'next/server'; 2 | import { ChatHistoryDatabase } from '../../../../lib/chat-history'; 3 | 4 | export async function GET(request: NextRequest) { 5 | try { 6 | const { searchParams } = new URL(request.url); 7 | const query = searchParams.get('q'); 8 | 9 | if (!query) { 10 | return NextResponse.json( 11 | { error: 'Search query is required' }, 12 | { status: 400 } 13 | ); 14 | } 15 | 16 | if (query.length < 5) { 17 | return NextResponse.json( 18 | { error: 'Search query must be at least 5 characters' }, 19 | { status: 400 } 20 | ); 21 | } 22 | 23 | const db = ChatHistoryDatabase.getInstance(); 24 | const sessions = db.searchSessions(query); 25 | 26 | return NextResponse.json({ 27 | sessions, 28 | query, 29 | count: sessions.length 30 | }); 31 | } catch (error) { 32 | console.error('Error searching chat sessions:', error); 33 | return NextResponse.json( 34 | { error: 'Failed to search chat sessions' }, 35 | { status: 500 } 36 | ); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/app/api/conscious-memory/route.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * API route for conscious memory operations 3 | * Provides REST endpoints for testing conscious memory functionality 4 | */ 5 | 6 | import { NextRequest, NextResponse } from 'next/server'; 7 | import { getConsciousMemoryService } from '../../../lib/conscious-memory'; 8 | 9 | // Check if we're in build mode or missing environment variables 10 | function checkEnvironment() { 11 | if (process.env.NODE_ENV === 'production' && process.env.NEXT_PHASE === 'phase-production-build') { 12 | return false; 13 | } 14 | 15 | const hasNeo4jConfig = process.env.NEO4J_URI && process.env.NEO4J_USER && process.env.NEO4J_PASSWORD; 16 | return !!hasNeo4jConfig; 17 | } 18 | 19 | export async function POST(request: NextRequest) { 20 | try { 21 | if (!checkEnvironment()) { 22 | return NextResponse.json( 23 | { 24 | success: false, 25 | error: 'Neo4j environment variables not configured or service unavailable during build' 26 | }, 27 | { status: 503 } 28 | ); 29 | } 30 | 31 | const { action, ...params } = await request.json(); 32 | const memoryService = getConsciousMemoryService(); 33 | 34 | // Initialize service if needed 35 | if (!(await memoryService.healthCheck())) { 36 | await memoryService.initialize(); 37 | } 38 | 39 | switch (action) { 40 | case 'save': 41 | const id = await memoryService.saveMemory({ 42 | content: params.content, 43 | tags: params.tags || [], 44 | importance: params.importance || 5, 45 | source: params.source || 'explicit', 46 | context: params.context, 47 | sessionId: params.sessionId 48 | }); 49 | return 
NextResponse.json({ success: true, id }); 50 | 51 | case 'search': 52 | const results = await memoryService.searchMemories(params.query, { 53 | tags: params.tags, 54 | importanceMin: params.importanceMin, 55 | importanceMax: params.importanceMax, 56 | limit: params.limit || 10, 57 | sessionId: params.sessionId 58 | }); 59 | return NextResponse.json({ success: true, results }); 60 | 61 | case 'update': 62 | const updateSuccess = await memoryService.updateMemory({ 63 | id: params.id, 64 | content: params.content, 65 | tags: params.tags, 66 | importance: params.importance, 67 | context: params.context 68 | }); 69 | return NextResponse.json({ success: updateSuccess }); case 'delete': 70 | const deleteSuccess = await memoryService.deleteMemory(params.id); 71 | return NextResponse.json({ success: deleteSuccess }); 72 | 73 | case 'deleteMultiple': 74 | const deleteMultipleSuccess = await memoryService.deleteMultipleMemories(params.ids); 75 | return NextResponse.json({ success: deleteMultipleSuccess }); 76 | 77 | case 'clearAll': 78 | const clearSuccess = await memoryService.clearAllMemories(); 79 | return NextResponse.json({ success: clearSuccess });case 'tags': 80 | const tags = await memoryService.getAllTags(); 81 | return NextResponse.json({ success: true, data: tags }); 82 | 83 | case 'related': 84 | const relatedMemories = await memoryService.getRelatedMemories( 85 | params.id, 86 | params.limit || 5 87 | ); 88 | return NextResponse.json({ success: true, relatedMemories }); 89 | 90 | case 'stats': 91 | const stats = await memoryService.getStats(); 92 | return NextResponse.json({ success: true, stats }); case 'test': 93 | const testResult = await memoryService.testMemorySystem(); 94 | return NextResponse.json({ success: true, testPassed: testResult }); 95 | 96 | case 'debug': 97 | // Get all memories for debugging 98 | const allMemories = await memoryService.searchMemories('', { limit: 100, minScore: -2.0 }); 99 | return NextResponse.json({ 100 | success: true, 101 | totalMemories: allMemories.length, 102 | memories: allMemories.slice(0, 5) // Only return first 5 for debugging 103 | }); 104 | 105 | default: 106 | return NextResponse.json( 107 | { success: false, error: 'Invalid action' }, 108 | { status: 400 } 109 | ); 110 | } 111 | } catch (error) { 112 | console.error('Conscious memory API error:', error); 113 | return NextResponse.json( 114 | { 115 | success: false, 116 | error: 'Internal server error', 117 | details: error instanceof Error ? error.message : 'Unknown error' 118 | }, 119 | { status: 500 } 120 | ); 121 | } 122 | } 123 | 124 | export async function GET(request: NextRequest) { 125 | try { 126 | const { searchParams } = new URL(request.url); 127 | const action = searchParams.get('action'); 128 | const memoryService = getConsciousMemoryService(); 129 | 130 | // Initialize service if needed 131 | if (!(await memoryService.healthCheck())) { 132 | await memoryService.initialize(); 133 | } 134 | 135 | switch (action) { 136 | case 'tags': 137 | const tags = await memoryService.getAllTags(); 138 | return NextResponse.json({ success: true, data: tags || [] }); 139 | 140 | case 'stats': 141 | const stats = await memoryService.getStats(); 142 | return NextResponse.json({ success: true, data: stats }); 143 | 144 | default: 145 | // Health check 146 | const isHealthy = await memoryService.healthCheck(); 147 | const defaultStats = isHealthy ? 
await memoryService.getStats() : null; 148 | 149 | return NextResponse.json({ 150 | success: true, 151 | healthy: isHealthy, 152 | stats: defaultStats || { 153 | totalConsciousMemories: 0, 154 | tagCount: 0, 155 | averageImportance: 0, 156 | sourceBreakdown: { explicit: 0, suggested: 0, derived: 0 } 157 | } 158 | }); 159 | } 160 | } catch (error) { 161 | console.error('Conscious memory API error:', error); 162 | return NextResponse.json( 163 | { 164 | success: false, 165 | healthy: false, 166 | error: error instanceof Error ? error.message : 'Unknown error' 167 | }, 168 | { status: 500 } 169 | ); 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /src/app/api/knowledge-graph/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from 'next/server'; 2 | import knowledgeGraphSyncService from '../../../lib/knowledge-graph-sync-service'; 3 | import knowledgeGraphService from '../../../lib/knowledge-graph-service'; 4 | import { kgSyncQueue } from '../../../lib/kg-sync-queue'; 5 | 6 | export async function GET(request: NextRequest) { 7 | const { searchParams } = new URL(request.url); 8 | const action = searchParams.get('action'); 9 | 10 | try { 11 | switch (action) { 12 | case 'stats': 13 | // Get current statistics 14 | await knowledgeGraphService.connect(); 15 | const stats = await knowledgeGraphService.getStatistics(); 16 | return NextResponse.json({ success: true, stats }); 17 | 18 | case 'queue-status': 19 | // Get sync queue status 20 | await kgSyncQueue.initialize(); 21 | const queueSize = await kgSyncQueue.getQueueSize(); 22 | return NextResponse.json({ success: true, queueSize }); 23 | 24 | default: 25 | return NextResponse.json({ 26 | success: false, 27 | error: 'Invalid action. Use ?action=stats or ?action=queue-status' 28 | }, { status: 400 }); 29 | } 30 | } catch (error) { 31 | console.error('[KG API] Error:', error); 32 | return NextResponse.json({ 33 | success: false, 34 | error: error instanceof Error ? error.message : 'Unknown error' 35 | }, { status: 500 }); 36 | } 37 | } 38 | 39 | export async function POST(request: NextRequest) { 40 | try { 41 | const body = await request.json(); 42 | const { action, options = {} } = body; 43 | 44 | switch (action) { 45 | case 'sync': 46 | // Queue a sync operation 47 | await kgSyncQueue.initialize(); 48 | 49 | if (options.forceFullResync) { 50 | await kgSyncQueue.enqueue({ type: 'full', timestamp: new Date().toISOString() }); 51 | } else { 52 | await kgSyncQueue.enqueue({ type: 'incremental', timestamp: new Date().toISOString() }); 53 | } 54 | 55 | // Start sync in background (non-blocking) 56 | knowledgeGraphSyncService.syncKnowledgeGraph(options).catch(error => { 57 | console.error('[KG API] Background sync error:', error); 58 | }); 59 | 60 | return NextResponse.json({ 61 | success: true, 62 | message: 'Sync operation queued', 63 | syncType: options.forceFullResync ? 'full' : 'incremental' 64 | }); 65 | 66 | case 'clear-queue': 67 | // Clear the sync queue 68 | await kgSyncQueue.initialize(); 69 | await kgSyncQueue.clear(); 70 | return NextResponse.json({ success: true, message: 'Queue cleared' }); 71 | 72 | default: 73 | return NextResponse.json({ 74 | success: false, 75 | error: 'Invalid action. Use: sync, clear-queue' 76 | }, { status: 400 }); 77 | } 78 | } catch (error) { 79 | console.error('[KG API] Error:', error); 80 | return NextResponse.json({ 81 | success: false, 82 | error: error instanceof Error ? 
error.message : 'Unknown error' 83 | }, { status: 500 }); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/app/attachments/page.tsx: -------------------------------------------------------------------------------- 1 | 'use client' 2 | 3 | import React from 'react' 4 | import AttachmentDashboard from '../../components/AttachmentDashboard' 5 | import Link from 'next/link' 6 | 7 | export default function AttachmentsPage() { 8 | return ( 9 |
10 | {/* Navigation */} 11 | 42 | 43 | {/* Main Content */} 44 |
45 |
46 | {/* Header */} 47 |
48 |

49 | Attachment Management 50 |

51 |

52 | View and manage file attachments across all your conversations 53 |

54 |
55 | 56 | {/* Dashboard Grid */} 57 |
58 | {/* Main Dashboard */} 59 |
60 | 61 |
62 | 63 | {/* Info Panel */} 64 |
65 | {/* Supported Formats */} 66 |
67 |

68 | Supported Formats 69 |

70 |
71 |
72 |

Images

73 |

JPEG, PNG, GIF, WebP, SVG

74 |
75 |
76 |

Documents

77 |

PDF, TXT, MD, CSV, JSON, XML

78 |
79 |
80 |

Office Files

81 |

DOCX, XLSX, PPTX, DOC, XLS, PPT

82 |
83 |
84 |

Code Files

85 |

JS, TS, HTML, CSS

86 |
87 |
88 |
89 | 90 | {/* Upload Guidelines */} 91 |
92 |

93 | Upload Guidelines 94 |

95 |
    96 |
  • 97 | 98 | Maximum file size: 10MB per file 99 |
  • 100 |
  • 101 | 102 | Up to 10 files per message 103 |
  • 104 |
  • 105 | 106 | Drag & drop directly in chat 107 |
  • 108 |
  • 109 | 110 | Files are stored securely with your chat history 111 |
  • 112 |
113 |
114 | 115 | {/* Quick Actions */} 116 |
117 |

118 | Quick Actions 119 |

120 |
121 | 125 | 🚀 Start New Chat 126 | 127 | 131 | 🧠 View Memories 132 | 133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 | ) 141 | } 142 | -------------------------------------------------------------------------------- /src/app/conscious-memory/page.tsx: -------------------------------------------------------------------------------- 1 | /** 2 | * Conscious Memory Demo Page 3 | * Showcases the conscious memory system functionality 4 | */ 5 | 6 | import ConsciousMemoryDemo from '../../components/ConsciousMemoryDemo'; 7 | 8 | export default function ConsciousMemoryPage() { 9 | return ; 10 | } 11 | -------------------------------------------------------------------------------- /src/app/globals.css: -------------------------------------------------------------------------------- 1 | @import "tailwindcss"; 2 | 3 | * { 4 | box-sizing: border-box; 5 | padding: 0; 6 | margin: 0; 7 | } 8 | 9 | html, 10 | body { 11 | max-width: 100vw; 12 | overflow-x: hidden; 13 | font-family: system-ui, -apple-system, sans-serif; 14 | height: 100%; 15 | } 16 | 17 | body { 18 | color: #1f2937; 19 | background: #f9fafb; 20 | } 21 | 22 | /* Custom scrollbar for webkit browsers */ 23 | ::-webkit-scrollbar { 24 | width: 6px; 25 | } 26 | 27 | ::-webkit-scrollbar-track { 28 | background: transparent; 29 | } 30 | 31 | ::-webkit-scrollbar-thumb { 32 | background: #d1d5db; 33 | border-radius: 3px; 34 | } 35 | 36 | ::-webkit-scrollbar-thumb:hover { 37 | background: #9ca3af; 38 | } 39 | 40 | /* Ensure full height layout */ 41 | #__next { 42 | height: 100%; 43 | } 44 | 45 | /* Custom animations */ 46 | @keyframes fadeIn { 47 | from { opacity: 0; transform: translateY(10px); } 48 | to { opacity: 1; transform: translateY(0); } 49 | } 50 | 51 | .animate-fadeIn { 52 | animation: fadeIn 0.3s ease-out; 53 | } 54 | 55 | /* Custom mark styling for search highlights */ 56 | mark { 57 | background-color: #fef3c7; 58 | color: #92400e; 59 | padding: 2px 4px; 60 | border-radius: 4px; 61 | font-weight: 500; 62 | } 63 | 64 | /* Focus styles */ 65 | :focus { 66 | outline: 2px solid #3b82f6; 67 | outline-offset: 2px; 68 | } 69 | 70 | :focus:not(:focus-visible) { 71 | outline: none; 72 | } 73 | 74 | /* Responsive typography */ 75 | @media (max-width: 640px) { 76 | html { 77 | font-size: 14px; 78 | } 79 | } 80 | 81 | @media (min-width: 1024px) { 82 | html { 83 | font-size: 16px; 84 | } 85 | } 86 | 87 | :root { 88 | --foreground-rgb: 31, 41, 55; 89 | --background-rgb: 249, 250, 251; 90 | } 91 | 92 | @media (prefers-color-scheme: dark) { 93 | :root { 94 | --foreground-rgb: 249, 250, 251; 95 | --background-rgb: 17, 24, 39; 96 | } 97 | 98 | body { 99 | color: rgb(var(--foreground-rgb)); 100 | background: rgb(var(--background-rgb)); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import type { Metadata } from 'next' 2 | import './globals.css' 3 | 4 | export const metadata: Metadata = { 5 | title: 'MCP Chat Client', 6 | description: 'A local chat client using Model Context Protocol', 7 | } 8 | 9 | export default function RootLayout({ 10 | children, 11 | }: { 12 | children: React.ReactNode 13 | }) { 14 | return ( 15 | 16 | {children} 17 | 18 | ) 19 | } -------------------------------------------------------------------------------- /src/app/semantic-memory/page.tsx: -------------------------------------------------------------------------------- 1 | /** 2 | * Semantic Memory Demo Page 3 | * Showcases the semantic memory (RAG) system functionality 4 | */ 5 | 6 | import SemanticMemoryDemo from 
'../../components/SemanticMemoryDemo'; 7 | 8 | export default function SemanticMemoryPage() { 9 | return ; 10 | } 11 | 5 -------------------------------------------------------------------------------- /src/components/AttachmentDashboard.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect } from 'react' 2 | import { ChatHistoryDatabase } from '../lib/chat-history' 3 | 4 | interface AttachmentStats { 5 | totalAttachments: number 6 | totalSize: number 7 | types: Record 8 | } 9 | 10 | export default function AttachmentDashboard() { 11 | const [stats, setStats] = useState(null) 12 | const [loading, setLoading] = useState(true) 13 | const [error, setError] = useState(null) 14 | 15 | useEffect(() => { 16 | fetchAttachmentStats() 17 | }, []) 18 | const fetchAttachmentStats = async () => { 19 | try { 20 | setLoading(true) 21 | const response = await fetch('/api/attachments') 22 | const result = await response.json() 23 | 24 | if (result.success) { 25 | setStats(result.data) 26 | } else { 27 | throw new Error(result.error || 'Failed to fetch stats') 28 | } 29 | } catch (err) { 30 | setError(err instanceof Error ? err.message : 'Failed to fetch stats') 31 | } finally { 32 | setLoading(false) 33 | } 34 | } 35 | 36 | const formatBytes = (bytes: number): string => { 37 | if (bytes === 0) return '0 Bytes' 38 | const k = 1024 39 | const sizes = ['Bytes', 'KB', 'MB', 'GB'] 40 | const i = Math.floor(Math.log(bytes) / Math.log(k)) 41 | return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i] 42 | } 43 | 44 | const getFileTypeIcon = (type: string): string => { 45 | if (type.startsWith('image/')) return '🖼️' 46 | if (type.includes('pdf')) return '📄' 47 | if (type.includes('text')) return '📝' 48 | if (type.includes('word')) return '📘' 49 | if (type.includes('excel') || type.includes('sheet')) return '📊' 50 | if (type.includes('powerpoint') || type.includes('presentation')) return '📽️' 51 | if (type.includes('javascript') || type.includes('typescript')) return '💻' 52 | if (type.includes('html')) return '🌐' 53 | if (type.includes('css')) return '🎨' 54 | return '📎' 55 | } 56 | 57 | if (loading) { 58 | return ( 59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 | ) 70 | } 71 | 72 | if (error) { 73 | return ( 74 |
75 |
76 |

Error

77 |

{error}

78 | 84 |
85 |
86 | ) 87 | } 88 | 89 | return ( 90 |
91 |
92 |

93 | 📎 Attachment Overview 94 |

95 | 101 |
102 | 103 | {stats && ( 104 |
105 | {/* Summary Stats */} 106 |
107 |
108 |
109 | {stats.totalAttachments} 110 |
111 |
Total Files
112 |
113 |
114 |
115 | {formatBytes(stats.totalSize)} 116 |
117 |
Storage Used
118 |
119 |
120 | 121 | {/* File Types Breakdown */} 122 | {Object.keys(stats.types).length > 0 ? ( 123 |
124 |

125 | File Types 126 |

127 |
128 | {Object.entries(stats.types) 129 | .sort(([,a], [,b]) => b - a) 130 | .map(([type, count]) => ( 131 |
132 |
133 | {getFileTypeIcon(type)} 134 | 135 | {type.split('/')[1]?.toUpperCase() || type} 136 | 137 |
138 | 139 | {count} file{count > 1 ? 's' : ''} 140 | 141 |
142 | ))} 143 |
144 |
145 | ) : ( 146 |
147 |
📎
148 |

No attachments yet

149 |

Start uploading files to see statistics

150 |
151 | )} 152 | 153 | {/* Usage Tips */} 154 |
155 |

156 | 💡 Attachment Tips 157 |

158 |
    159 |
  • • Drag and drop files directly into the chat input
  • 160 |
  • • Maximum file size: 10MB per file
  • 161 |
  • • Supports images, documents, code files, and more
  • 162 |
  • • Click attachment previews to download
  • 163 |
164 |
165 |
166 | )} 167 |
168 | ) 169 | } 170 | -------------------------------------------------------------------------------- /src/components/ChatInterface.tsx: -------------------------------------------------------------------------------- 1 | 'use client' 2 | 3 | import React from 'react' 4 | import { useChat } from 'ai/react' 5 | import ChatMessage from './ChatMessage' 6 | import MessageInput from './MessageInput' 7 | import { Message } from 'ai' 8 | 9 | interface ChatInterfaceProps { 10 | onNewSession?: (sessionId: string) => void 11 | sessionId?: string 12 | } 13 | 14 | export default function ChatInterface({ onNewSession, sessionId }: ChatInterfaceProps) { 15 | const { messages, input, handleInputChange, handleSubmit, isLoading, setMessages, error } = useChat({ 16 | id: sessionId, 17 | api: '/api/chat', 18 | maxSteps: 35, // Allow multiple tool calls 19 | streamProtocol: 'text', // Add for debugging 20 | onError: async (error) => { 21 | console.error(' Full error object:', error); 22 | 23 | // Add check for stream error type that matches the pattern 24 | if (error.message?.includes('Stream error:') || 25 | (error as any).type === 'error') { 26 | console.error(' Stream contained error detected:', error); 27 | } 28 | 29 | // Keep existing error handling logic 30 | if ((error as any).fullStream) { 31 | try { 32 | const fullStreamText = await (error as any).fullStream.text(); 33 | console.error(' Full stream error details:', fullStreamText); 34 | 35 | try { 36 | const errorData = JSON.parse(fullStreamText); 37 | if (errorData.error) { 38 | console.error(' Parsed error:', errorData.error); 39 | } 40 | } catch (parseError) { 41 | // Stream might not be JSON, that's okay 42 | } 43 | } catch (streamError) { 44 | console.error(' Error parsing full stream:', streamError); 45 | } 46 | } 47 | 48 | if ((error as any).cause && typeof (error as any).cause === 'object') { 49 | console.error(' Error cause details:', (error as any).cause); 50 | } 51 | }, onFinish: async (message) => { 52 | // Message storage is now handled by the chat API 53 | // No need to store separately here 54 | } 55 | }) 56 | 57 | // Enhanced input change handler to support textarea 58 | const handleInputChangeEnhanced = (e: React.ChangeEvent) => { 59 | const syntheticEvent = { 60 | target: { 61 | value: e.target.value 62 | } 63 | } as React.ChangeEvent 64 | handleInputChange(syntheticEvent) 65 | } 66 | 67 | // Enhanced submit handler with attachment support 68 | const handleChatSubmit = async (e: React.FormEvent, files?: FileList) => { 69 | e.preventDefault() 70 | 71 | if (files && files.length > 0) { 72 | // Validation 73 | const MAX_FILE_SIZE = 50 * 1024 * 1024; // 50MB limit 74 | const MAX_FILES = 20; // Maximum number of files 75 | 76 | if (files.length > MAX_FILES) { 77 | alert(`Maximum ${MAX_FILES} files allowed. 
You selected ${files.length}.`); 78 | return; 79 | } 80 | 81 | for (let i = 0; i < files.length; i++) { 82 | if (files[i].size > MAX_FILE_SIZE) { 83 | alert(`File "${files[i].name}" exceeds the 50MB size limit (${(files[i].size / 1024 / 1024).toFixed(2)}MB)`); 84 | return; 85 | } 86 | } 87 | 88 | // Use experimental_attachments as per AI SDK v3.3 89 | handleSubmit(e, { 90 | experimental_attachments: files 91 | }); 92 | } else { 93 | // Regular submit without attachments 94 | handleSubmit(e) 95 | } 96 | } 97 | 98 | React.useEffect(() => { 99 | if (!sessionId && onNewSession) { 100 | const newSessionId = `session_${Date.now()}_${Math.random().toString(36).substring(2, 10)}` 101 | onNewSession(newSessionId) 102 | } 103 | }, [sessionId, onNewSession]) 104 | 105 | return ( 106 |
107 | {/* Header */} 108 |
109 |

110 | MCP Chat Client 111 |

112 |

113 | AI Assistant with Dual-Layer Memory & File Support 114 |

115 |
116 | 117 | {/* Messages */} 118 |
119 | {messages.length === 0 ? ( 120 |
121 |
122 |

Welcome to MCP Chat

123 |

Your AI assistant with conscious memory

124 |
125 |

Dual-layer memory system

126 |

MCP tool integration

127 |

File attachment support

128 |

Persistent conversation history

129 |
130 |
131 | ) : ( 132 | messages.map((message: Message) => ( 133 | 137 | )) 138 | )} 139 | {isLoading && ( 140 |
141 |
142 |
143 | AI is thinking... 144 |
145 |
146 | )} 147 | {error && ( 148 |
149 |
150 |
151 |
152 |

Chat Error

153 |

{error.message}

154 |
155 | 156 | Show technical details 157 | 158 |
159 |                     {error.stack || JSON.stringify(error, null, 2)}
160 |                   
161 |
162 |
163 |
164 |
165 | )} 166 |
167 | 168 | {/* Enhanced Input with Attachment Support */} 169 | 175 |
176 | ) 177 | } -------------------------------------------------------------------------------- /src/components/ChatMessage.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import { Message } from 'ai' 3 | import ToolCallDisplay from './ToolCallDisplay' 4 | 5 | interface ChatMessageProps { 6 | message: Message 7 | } 8 | 9 | const AttachmentPreview: React.FC<{ attachment: any }> = ({ attachment }) => { 10 | // AI SDK FilePart can have 'data' (base64) or 'url' 11 | const isImage = attachment.type.startsWith('image/') 12 | const displaySrc = attachment.data ? `data:${attachment.type};base64,${attachment.data}` : attachment.url 13 | 14 | const handleDownload = () => { 15 | if (attachment.data) { 16 | const blob = new Blob([Uint8Array.from(atob(attachment.data), c => c.charCodeAt(0))], { 17 | type: attachment.type 18 | }) 19 | const url = URL.createObjectURL(blob) 20 | const a = document.createElement('a') 21 | a.href = url 22 | a.download = attachment.name || 'download' 23 | document.body.appendChild(a) 24 | a.click() 25 | document.body.removeChild(a) 26 | URL.revokeObjectURL(url) 27 | } else if (attachment.url) { 28 | // For URLs, simply open in new tab or trigger download 29 | window.open(attachment.url, '_blank') 30 | } 31 | } 32 | 33 | if (isImage && displaySrc) { 34 | return ( 35 |
36 | {attachment.name 43 |
44 | {attachment.name || 'Image'} 45 | {attachment.size && {(attachment.size / 1024).toFixed(1)}KB} 46 |
47 |
48 | ) 49 | } 50 | 51 | // Generic file preview for non-images or if image source is missing 52 | return ( 53 |
54 |
55 |
56 | {attachment.type?.includes('pdf') ? '📄' : 57 | attachment.type?.includes('text') ? '📝' : 58 | attachment.type?.includes('json') ? '🔧' : 59 | attachment.type?.includes('word') ? '📘' : 60 | attachment.type?.includes('excel') || attachment.type?.includes('sheet') ? '📊' : 61 | attachment.type?.includes('powerpoint') || attachment.type?.includes('presentation') ? '📽️' : 62 | attachment.type?.includes('javascript') || attachment.type?.includes('typescript') ? '💻' : 63 | attachment.type?.includes('html') ? '🌐' : 64 | attachment.type?.includes('css') ? '🎨' : '📎'} 65 |
66 | 67 |
68 |
69 | {attachment.name || 'Unknown File'} 70 |
71 |
72 | {attachment.type || 'application/octet-stream'} • {attachment.size ? `${(attachment.size / 1024).toFixed(1)}KB` : 'N/A'} 73 |
74 |
75 | 76 | 83 |
84 |
85 | ) 86 | } 87 | 88 | export default function ChatMessage({ message }: ChatMessageProps) { 89 | const isUser = message.role === 'user' 90 | 91 | return ( 92 |
93 |
98 | {/* Message Content and Parts */} 99 | {message.parts && message.parts.length > 0 && ( 100 |
101 | {message.parts.map((part: any, index: number) => { 102 | switch (part.type) { 103 | case 'text': 104 | return
{part.text}
105 | case 'file': 106 | return 107 | // Add more cases for other part types (e.g., 'tool-call', 'tool-result', 'reasoning') if needed 108 | default: 109 | return null 110 | } 111 | })} 112 |
113 | )} 114 | 115 | {/* Tool Invocations */} 116 | {message.toolInvocations && message.toolInvocations.length > 0 && ( 117 |
118 | {message.toolInvocations.map((toolInvocation, index) => ( 119 | 123 | ))} 124 |
125 | )} 126 |
127 |
128 | ) 129 | } -------------------------------------------------------------------------------- /src/components/MessageInput.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useRef } from 'react' 2 | 3 | interface MessageInputProps { 4 | input: string 5 | handleInputChange: (e: React.ChangeEvent) => void 6 | handleSubmit: (e: React.FormEvent, files?: FileList) => void 7 | isLoading: boolean 8 | } 9 | 10 | export default function MessageInput({ 11 | input, 12 | handleInputChange, 13 | handleSubmit, 14 | isLoading 15 | }: MessageInputProps) { 16 | const [files, setFiles] = useState(undefined) 17 | const [isDragOver, setIsDragOver] = useState(false) 18 | const fileInputRef = useRef(null) 19 | 20 | const handleDragOver = (e: React.DragEvent) => { 21 | e.preventDefault() 22 | setIsDragOver(true) 23 | } 24 | 25 | const handleDragLeave = (e: React.DragEvent) => { 26 | e.preventDefault() 27 | setIsDragOver(false) 28 | } 29 | 30 | const handleDrop = (e: React.DragEvent) => { 31 | e.preventDefault() 32 | setIsDragOver(false) 33 | 34 | const droppedFiles = e.dataTransfer.files 35 | if (droppedFiles.length > 0) { 36 | setFiles(droppedFiles) 37 | } 38 | } 39 | 40 | const removeFile = (index: number) => { 41 | if (!files) return 42 | const dt = new DataTransfer() 43 | Array.from(files).forEach((file, i) => { 44 | if (i !== index) dt.items.add(file) 45 | }) 46 | setFiles(dt.files.length > 0 ? dt.files : undefined) 47 | } 48 | 49 | const handleFormSubmit = (e: React.FormEvent) => { 50 | e.preventDefault() 51 | console.log('MessageInput: handleFormSubmit called with files:', files) 52 | if (files) { 53 | console.log('MessageInput: Files count:', files.length) 54 | for (let i = 0; i < files.length; i++) { 55 | console.log(`MessageInput: File ${i + 1}:`, files[i].name, files[i].type, files[i].size) 56 | } 57 | } 58 | handleSubmit(e, files) 59 | setFiles(undefined) 60 | if (fileInputRef.current) { 61 | fileInputRef.current.value = '' 62 | } 63 | } 64 | 65 | const handleKeyDown = (e: React.KeyboardEvent) => { 66 | if (e.key === 'Enter' && !e.shiftKey) { 67 | e.preventDefault() 68 | handleFormSubmit(e) 69 | } 70 | } 71 | return ( 72 |
73 | {/* Files Preview */} 74 | {files && files.length > 0 && ( 75 |
76 | {Array.from(files).map((file, index) => ( 77 |
81 | {/* File icon based on type */} 82 |
83 | {file.type.startsWith('image/') ? '🖼️' : 84 | file.type.includes('pdf') ? '📄' : 85 | file.type.includes('text') ? '📝' : 86 | file.type.includes('code') ? '💻' : '📎'} 87 |
88 | 89 |
90 |
91 | {file.name} 92 |
93 |
94 | {(file.size / 1024).toFixed(1)}KB 95 |
96 |
97 | 98 | 106 |
107 | ))} 108 |
109 | )} 110 | 111 |
112 | {/* Input Area with Drag & Drop */} 113 |
123 |
124 | {/* File Upload Button */} 125 | 134 | 135 |
136 |