├── .gitignore ├── data └── memory.db ├── tsconfig.json ├── package.json ├── README.md └── src └── index.ts /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | build/ 3 | *.log 4 | .env* -------------------------------------------------------------------------------- /data/memory.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vic563/Memgpt-MCP-Server/HEAD/data/memory.db -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2022", 4 | "module": "Node16", 5 | "moduleResolution": "Node16", 6 | "outDir": "./build", 7 | "rootDir": "./src", 8 | "strict": true, 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true 12 | }, 13 | "include": ["src/**/*"], 14 | "exclude": ["node_modules"] 15 | } 16 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "letta-memgpt", 3 | "version": "0.1.0", 4 | "description": "MCP server that provides conversation memory for LLM chat providers", 5 | "private": true, 6 | "type": "module", 7 | "bin": { 8 | "letta-memgpt": "./build/index.js" 9 | }, 10 | "files": [ 11 | "build" 12 | ], 13 | "scripts": { 14 | "build": "tsc && node -e \"require('fs').chmodSync('build/index.js', '755')\"", 15 | "prepare": "npm run build", 16 | "watch": "tsc --watch", 17 | "inspector": "npx @modelcontextprotocol/inspector build/index.js" 18 | }, 19 | "dependencies": { 20 | "@modelcontextprotocol/sdk": "0.6.0", 21 | "@types/sqlite3": "^3.1.11", 22 | "axios": "^1.7.9", 23 | "dotenv": "^16.4.7", 24 | "sqlite3": "^5.1.7" 25 | }, 26 | "devDependencies": { 27 | "@types/node": "^20.17.11", 28 | "typescript": "^5.3.3" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MemGPT MCP Server 2 | 3 | A TypeScript-based MCP server that implements a memory system for LLMs. It provides tools for chatting with different LLM providers while maintaining conversation history. 
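Each exchange is persisted to a local SQLite database (`data/memory.db`). The record below is illustrative only (the values are made up); the fields match the `Memory` interface and the `memory` table schema in `src/index.ts`:

```json
{
  "id": 1,
  "userId": "default_user",
  "prompt": "What is the capital of France?",
  "response": "The capital of France is Paris.",
  "timestamp": "2024-01-01 12:00:00",
  "provider": "openai"
}
```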
4 | 5 | ## Features 6 | 7 | ### Tools 8 | - `chat` - Send a message to the current LLM provider 9 | - Takes a message parameter 10 | - Supports multiple providers (OpenAI, Anthropic, OpenRouter, Ollama) 11 | 12 | - `get_memory` - Retrieve conversation history 13 | - Optional `limit` parameter to specify number of memories to retrieve 14 | - Pass `limit: null` for unlimited memory retrieval 15 | - Returns memories in chronological order with timestamps 16 | 17 | - `clear_memory` - Clear conversation history 18 | - Removes all stored memories 19 | 20 | - `use_provider` - Switch between different LLM providers 21 | - Supports OpenAI, Anthropic, OpenRouter, and Ollama 22 | - Persists provider selection 23 | 24 | - `use_model` - Switch to a different model for the current provider 25 | - Supports provider-specific models: 26 | - Anthropic Claude Models: 27 | - Claude 3 Series: 28 | - `claude-3-haiku`: Fastest response times, ideal for tasks like customer support and content moderation 29 | - `claude-3-sonnet`: Balanced performance for general-purpose use 30 | - `claude-3-opus`: Advanced model for complex reasoning and high-performance tasks 31 | - Claude 3.5 Series: 32 | - `claude-3.5-haiku`: Enhanced speed and cost-effectiveness 33 | - `claude-3.5-sonnet`: Superior performance with computer interaction capabilities 34 | - OpenAI: 'gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo' 35 | - OpenRouter: Any model in 'provider/model' format (e.g., 'openai/gpt-4', 'anthropic/claude-2') 36 | - Ollama: Any locally available model (e.g., 'llama2', 'codellama') 37 | - Persists model selection 38 | 39 | ## Development 40 | 41 | Install dependencies: 42 | ```bash 43 | npm install 44 | ``` 45 | 46 | Build the server: 47 | ```bash 48 | npm run build 49 | ``` 50 | 51 | For development with auto-rebuild: 52 | ```bash 53 | npm run watch 54 | ``` 55 | 56 | ## Installation 57 | 58 | To use with Claude Desktop, add the server config: 59 | 60 | On MacOS: `~/Library/Application Support/Claude/claude_desktop_config.json` 61 | On Windows: `%APPDATA%/Claude/claude_desktop_config.json` 62 | 63 | ```json 64 | { 65 | "mcpServers": { 66 | "letta-memgpt": { 67 | "command": "/path/to/memgpt-server/build/index.js", 68 | "env": { 69 | "OPENAI_API_KEY": "your-openai-key", 70 | "ANTHROPIC_API_KEY": "your-anthropic-key", 71 | "OPENROUTER_API_KEY": "your-openrouter-key" 72 | } 73 | } 74 | } 75 | } 76 | ``` 77 | 78 | ### Environment Variables 79 | - `OPENAI_API_KEY` - Your OpenAI API key 80 | - `ANTHROPIC_API_KEY` - Your Anthropic API key 81 | - `OPENROUTER_API_KEY` - Your OpenRouter API key 82 | 83 | ### Debugging 84 | 85 | Since MCP servers communicate over stdio, debugging can be challenging. We recommend using the [MCP Inspector](https://github.com/modelcontextprotocol/inspector): 86 | 87 | ```bash 88 | npm run inspector 89 | ``` 90 | 91 | The Inspector will provide a URL to access debugging tools in your browser. 
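## Example Tool Calls

How a call is issued depends on your MCP client; the JSON below shows illustrative `tools/call` parameters (tool name plus arguments, one call per line) matching the input schemas defined in `src/index.ts`:

```json
{ "name": "use_provider", "arguments": { "provider": "ollama" } }
{ "name": "use_model", "arguments": { "model": "claude-3.5-sonnet" } }
{ "name": "chat", "arguments": { "message": "Remember that my favorite color is green." } }
{ "name": "get_memory", "arguments": { "limit": null } }
{ "name": "clear_memory", "arguments": {} }
```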
92 | 93 | ## Recent Updates 94 | 95 | ### Claude 3 and 3.5 Series Support (March 2024) 96 | - Added support for latest Claude models: 97 | - Claude 3 Series (Haiku, Sonnet, Opus) 98 | - Claude 3.5 Series (Haiku, Sonnet) 99 | 100 | ### Unlimited Memory Retrieval 101 | - Added support for retrieving unlimited conversation history 102 | - Use `{ "limit": null }` with the `get_memory` tool to retrieve all stored memories 103 | - Use `{ "limit": n }` to retrieve the n most recent memories 104 | - Default limit is 10 if not specified 105 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'; 3 | import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; 4 | import { 5 | CallToolRequestSchema, 6 | ErrorCode, 7 | ListToolsRequestSchema, 8 | McpError, 9 | } from '@modelcontextprotocol/sdk/types.js'; 10 | import sqlite3 from 'sqlite3'; 11 | import { Database } from 'sqlite3'; 12 | import axios from 'axios'; 13 | import * as dotenv from 'dotenv'; 14 | import { promisify } from 'util'; 15 | 16 | dotenv.config(); 17 | 18 | interface Memory { 19 | id: number; 20 | userId: string; 21 | prompt: string; 22 | response: string; 23 | timestamp: string; 24 | provider: string; 25 | } 26 | 27 | class LettaMemGPTServer { 28 | private server: Server; 29 | private db!: Database; 30 | private openaiKey: string; 31 | private anthropicKey: string; 32 | private openrouterKey: string; 33 | 34 | private async initialize() { 35 | const dbPath = '/Users/victor/Documents/Cline/MCP/letta-server/data/memory.db'; 36 | try { 37 | const fs = await import('fs/promises'); 38 | await fs.mkdir('/Users/victor/Documents/Cline/MCP/letta-server/data', { recursive: true }); 39 | } catch (error) { 40 | // Ignore if directory already exists 41 | } 42 | this.db = new sqlite3.Database(dbPath); 43 | await this.initializeDatabase(); 44 | } 45 | 46 | constructor() { 47 | this.server = new Server( 48 | { 49 | name: 'letta-memgpt', 50 | version: '0.1.0', 51 | }, 52 | { 53 | capabilities: { 54 | tools: {}, 55 | }, 56 | } 57 | ); 58 | 59 | // Initialize API keys from environment variables 60 | this.openaiKey = process.env.OPENAI_API_KEY || ''; 61 | this.anthropicKey = process.env.ANTHROPIC_API_KEY || ''; 62 | this.openrouterKey = process.env.OPENROUTER_API_KEY || ''; 63 | 64 | this.setupToolHandlers(); 65 | 66 | // Error handling 67 | this.server.onerror = (error) => console.error('[MCP Error]', error); 68 | process.on('SIGINT', async () => { 69 | await this.cleanup(); 70 | process.exit(0); 71 | }); 72 | } 73 | 74 | private DEFAULT_USER = 'default_user'; 75 | private currentProvider = 'openai'; 76 | private currentModel = { 77 | openai: 'gpt-3.5-turbo', 78 | openrouter: 'openai/gpt-3.5-turbo', 79 | anthropic: 'claude-2', 80 | ollama: 'llama3.3:latest' 81 | }; 82 | private settingsPath = '/Users/victor/Library/Application Support/Cursor/User/globalStorage/rooveterinaryinc.roo-cline/settings/cline_mcp_settings.json'; 83 | 84 | private async updateSettingsFile(provider: string) { 85 | try { 86 | const fs = await import('fs/promises'); 87 | const settingsContent = await fs.readFile(this.settingsPath, 'utf-8'); 88 | const settings = JSON.parse(settingsContent); 89 | 90 | if (settings.mcpServers['letta-memgpt']) { 91 | settings.mcpServers['letta-memgpt'].defaultProvider = provider; 92 | await 
fs.writeFile(this.settingsPath, JSON.stringify(settings, null, 2)); 93 | } 94 | } catch (error: any) { 95 | console.error('Failed to update settings file:', error?.message); 96 | } 97 | } 98 | 99 | private async readSettingsFile() { 100 | try { 101 | const fs = await import('fs/promises'); 102 | const settingsContent = await fs.readFile(this.settingsPath, 'utf-8'); 103 | const settings = JSON.parse(settingsContent); 104 | 105 | if (settings.mcpServers['letta-memgpt']?.defaultProvider) { 106 | this.currentProvider = settings.mcpServers['letta-memgpt'].defaultProvider; 107 | } 108 | } catch (error: any) { 109 | console.error('Failed to read settings file:', error?.message); 110 | } 111 | } 112 | 113 | private async initializeDatabase() { 114 | await this.readSettingsFile(); 115 | await new Promise((resolve, reject) => { 116 | this.db.serialize(() => { 117 | // Create memory table 118 | this.db.run(` 119 | CREATE TABLE IF NOT EXISTS memory ( 120 | id INTEGER PRIMARY KEY AUTOINCREMENT, 121 | userId TEXT DEFAULT 'default_user', 122 | prompt TEXT NOT NULL, 123 | response TEXT NOT NULL, 124 | timestamp DATETIME DEFAULT CURRENT_TIMESTAMP, 125 | provider TEXT NOT NULL 126 | ) 127 | `); 128 | 129 | // Create settings table 130 | this.db.run(` 131 | CREATE TABLE IF NOT EXISTS settings ( 132 | key TEXT PRIMARY KEY, 133 | value TEXT NOT NULL, 134 | lastUpdated DATETIME DEFAULT CURRENT_TIMESTAMP 135 | ) 136 | `, (err) => { 137 | if (err) reject(err); 138 | else resolve(); 139 | }); 140 | }); 141 | }); 142 | 143 | // Initialize or get current provider 144 | await new Promise((resolve, reject) => { 145 | this.db.get( 146 | 'SELECT value FROM settings WHERE key = ?', 147 | ['current_provider'], 148 | (err, row: any) => { 149 | if (err) { 150 | reject(err); 151 | } else if (row) { 152 | this.currentProvider = row.value; 153 | resolve(); 154 | } else { 155 | this.db.run( 156 | 'INSERT INTO settings (key, value) VALUES (?, ?)', 157 | ['current_provider', this.currentProvider], 158 | (err) => { 159 | if (err) reject(err); 160 | else resolve(); 161 | } 162 | ); 163 | } 164 | } 165 | ); 166 | }); 167 | } 168 | 169 | private async cleanup() { 170 | await promisify(this.db.close.bind(this.db))(); 171 | await this.server.close(); 172 | } 173 | 174 | private setupToolHandlers() { 175 | this.server.setRequestHandler(ListToolsRequestSchema, async () => ({ 176 | tools: [ 177 | { 178 | name: 'chat', 179 | description: 'Send a message to the current LLM provider', 180 | inputSchema: { 181 | type: 'object', 182 | properties: { 183 | message: { 184 | type: 'string', 185 | description: 'The message to send to the LLM', 186 | } 187 | }, 188 | required: ['message'], 189 | }, 190 | }, 191 | { 192 | name: 'get_memory', 193 | description: 'Retrieve conversation history', 194 | inputSchema: { 195 | type: 'object', 196 | properties: { 197 | limit: { 198 | type: 'number', 199 | description: 'Maximum number of memories to retrieve', 200 | }, 201 | }, 202 | }, 203 | }, 204 | { 205 | name: 'clear_memory', 206 | description: 'Clear conversation history', 207 | inputSchema: { 208 | type: 'object', 209 | properties: {}, 210 | }, 211 | }, 212 | { 213 | name: 'use_provider', 214 | description: 'Switch to a different LLM provider', 215 | inputSchema: { 216 | type: 'object', 217 | properties: { 218 | provider: { 219 | type: 'string', 220 | description: 'The LLM provider to use (openai, anthropic, openrouter, or ollama)', 221 | }, 222 | }, 223 | required: ['provider'], 224 | }, 225 | }, 226 | { 227 | name: 'use_model', 228 | description: 
'Switch to a different model for the current provider', 229 | inputSchema: { 230 | type: 'object', 231 | properties: { 232 | model: { 233 | type: 'string', 234 | description: 'The model to use. For OpenAI: gpt-4o, gpt-4o-mini, gpt-4-turbo. For OpenRouter: openai/gpt-4, anthropic/claude-2, etc. For Ollama: llama2, codellama, etc.', 235 | }, 236 | }, 237 | required: ['model'], 238 | }, 239 | }, 240 | ], 241 | })); 242 | 243 | this.server.setRequestHandler(CallToolRequestSchema, async (request) => { 244 | switch (request.params.name) { 245 | case 'chat': 246 | return this.handleChat(request.params.arguments); 247 | case 'get_memory': 248 | return this.handleGetMemory(request.params.arguments); 249 | case 'clear_memory': 250 | return this.handleClearMemory(); 251 | case 'use_provider': 252 | return this.handleUseProvider(request.params.arguments); 253 | case 'use_model': 254 | return this.handleUseModel(request.params.arguments); 255 | default: 256 | throw new McpError( 257 | ErrorCode.MethodNotFound, 258 | `Unknown tool: ${request.params.name}` 259 | ); 260 | } 261 | }); 262 | } 263 | 264 | private async handleUseProvider(args: any) { 265 | if (!args.provider) { 266 | throw new McpError( 267 | ErrorCode.InvalidParams, 268 | 'Missing required parameter: provider' 269 | ); 270 | } 271 | 272 | const validProviders = ['openai', 'anthropic', 'openrouter', 'ollama']; 273 | if (!validProviders.includes(args.provider)) { 274 | throw new McpError( 275 | ErrorCode.InvalidParams, 276 | `Invalid provider. Must be one of: ${validProviders.join(', ')}` 277 | ); 278 | } 279 | 280 | // Update provider in memory, database, and settings file 281 | this.currentProvider = args.provider; 282 | 283 | // Update database 284 | await new Promise<void>((resolve, reject) => { 285 | this.db.run( 286 | 'UPDATE settings SET value = ?, lastUpdated = CURRENT_TIMESTAMP WHERE key = ?', 287 | [args.provider, 'current_provider'], 288 | (err) => { 289 | if (err) reject(err); 290 | else resolve(); 291 | } 292 | ); 293 | }); 294 | 295 | // Update settings file 296 | await this.updateSettingsFile(args.provider); 297 | 298 | return { 299 | content: [ 300 | { 301 | type: 'text', 302 | text: `Now using ${args.provider} as the LLM provider (saved to settings)`, 303 | }, 304 | ], 305 | }; 306 | } 307 | 308 | private async handleChat(args: any) { 309 | if (!args.message) { 310 | throw new McpError( 311 | ErrorCode.InvalidParams, 312 | 'Missing required parameter: message' 313 | ); 314 | } 315 | 316 | let response: string; 317 | try { 318 | switch (this.currentProvider) { 319 | case 'openai': 320 | response = await this.queryOpenAI(args.message); 321 | break; 322 | case 'anthropic': 323 | response = await this.queryAnthropic(args.message); 324 | break; 325 | case 'openrouter': 326 | response = await this.queryOpenRouter(args.message); 327 | break; 328 | case 'ollama': 329 | response = await this.queryOllama(args.message); 330 | break; 331 | default: 332 | throw new McpError( 333 | ErrorCode.InvalidParams, 334 | `Unsupported provider: ${this.currentProvider}` 335 | ); 336 | } 337 | 338 | // Store the interaction in memory 339 | await this.storeMemory(args.message, response, this.currentProvider); 340 | 341 | return { 342 | content: [ 343 | { 344 | type: 'text', 345 | text: response, 346 | }, 347 | ], 348 | }; 349 | } catch (error: any) { 350 | throw new McpError( 351 | ErrorCode.InternalError, 352 | `Error querying ${this.currentProvider}: ${error?.message || 'Unknown error'}` 353 | ); 354 | } 355 | } 356 | 357 | private async handleGetMemory(args: any) { 358 
| const memories: Memory[] = await new Promise((resolve, reject) => { 359 | const query = args.limit === null 360 | ? 'SELECT * FROM memory ORDER BY timestamp DESC' 361 | : 'SELECT * FROM memory ORDER BY timestamp DESC LIMIT ?'; 362 | const params = args.limit === null ? [] : [args.limit || 10]; 363 | this.db.all( 364 | query, 365 | params, 366 | (err, rows) => { 367 | if (err) reject(err); 368 | else resolve(rows as Memory[]); 369 | } 370 | ); 371 | }); 372 | 373 | return { 374 | content: [ 375 | { 376 | type: 'text', 377 | text: JSON.stringify(memories, null, 2), 378 | }, 379 | ], 380 | }; 381 | } 382 | 383 | private async handleClearMemory() { 384 | await new Promise((resolve, reject) => { 385 | this.db.run('DELETE FROM memory', [], (err) => { 386 | if (err) reject(err); 387 | else resolve(); 388 | }); 389 | }); 390 | 391 | return { 392 | content: [ 393 | { 394 | type: 'text', 395 | text: 'Memory cleared', 396 | }, 397 | ], 398 | }; 399 | } 400 | 401 | private async handleUseModel(args: any) { 402 | if (!args.model) { 403 | throw new McpError( 404 | ErrorCode.InvalidParams, 405 | 'Missing required parameter: model' 406 | ); 407 | } 408 | 409 | try { 410 | // Validate model based on current provider 411 | switch (this.currentProvider) { 412 | case 'openai': 413 | const openaiModels = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo']; 414 | if (!openaiModels.includes(args.model)) { 415 | throw new Error(`Invalid OpenAI model. Available models: ${openaiModels.join(', ')}`); 416 | } 417 | break; 418 | 419 | case 'openrouter': 420 | // OpenRouter supports various models from different providers 421 | if (!args.model.includes('/')) { 422 | throw new Error('OpenRouter model must be in format: provider/model (e.g., openai/gpt-4, anthropic/claude-2)'); 423 | } 424 | break; 425 | 426 | case 'ollama': 427 | try { 428 | const modelResponse = await axios.get('http://localhost:11434/api/tags'); 429 | const availableModels = modelResponse.data.models || []; 430 | if (!availableModels.some((m: any) => m.name === args.model)) { 431 | throw new Error(`Model ${args.model} not found. Available models: ${availableModels.map((m: any) => m.name).join(', ')}`); 432 | } 433 | } catch (error: any) { 434 | if (error.code === 'ECONNREFUSED') { 435 | throw new Error('Ollama is not running. Please start Ollama first (https://ollama.ai)'); 436 | } 437 | throw error; 438 | } 439 | break; 440 | 441 | case 'anthropic': 442 | const anthropicModels = [ 443 | 'claude-3-haiku', 444 | 'claude-3-sonnet', 445 | 'claude-3-opus', 446 | 'claude-3.5-haiku', 447 | 'claude-3.5-sonnet' 448 | ]; 449 | if (!anthropicModels.includes(args.model)) { 450 | throw new Error(`Invalid Anthropic model. 
Available models: ${anthropicModels.join(', ')}`); 451 | } 452 | break; 453 | 454 | default: 455 | throw new Error(`Cannot set model for provider: ${this.currentProvider}`); 456 | } 457 | 458 | // Update current model 459 | this.currentModel[this.currentProvider as keyof typeof this.currentModel] = args.model; 460 | 461 | // Update settings file 462 | const fs = await import('fs/promises'); 463 | const settingsContent = await fs.readFile(this.settingsPath, 'utf-8'); 464 | const settings = JSON.parse(settingsContent); 465 | 466 | if (settings.mcpServers['letta-memgpt']) { 467 | if (!settings.mcpServers['letta-memgpt'].models) { 468 | settings.mcpServers['letta-memgpt'].models = {}; 469 | } 470 | settings.mcpServers['letta-memgpt'].models[this.currentProvider] = args.model; 471 | await fs.writeFile(this.settingsPath, JSON.stringify(settings, null, 2)); 472 | } 473 | 474 | return { 475 | content: [ 476 | { 477 | type: 'text', 478 | text: `Now using ${args.model} with ${this.currentProvider}`, 479 | }, 480 | ], 481 | }; 482 | } catch (error: any) { 483 | throw new McpError( 484 | ErrorCode.InternalError, 485 | error.message || 'Failed to set model' 486 | ); 487 | } 488 | } 489 | 490 | private async storeMemory( 491 | prompt: string, 492 | response: string, 493 | provider: string 494 | ) { 495 | await new Promise<void>((resolve, reject) => { 496 | this.db.run( 497 | 'INSERT INTO memory (userId, prompt, response, provider) VALUES (?, ?, ?, ?)', 498 | [this.DEFAULT_USER, prompt, response, provider], 499 | (err) => { 500 | if (err) reject(err); 501 | else resolve(); 502 | } 503 | ); 504 | }); 505 | } 506 | 507 | private async queryOpenAI(message: string): Promise<string> { 508 | if (!this.openaiKey) { 509 | throw new Error('OpenAI API key not configured'); 510 | } 511 | 512 | const response = await axios.post( 513 | 'https://api.openai.com/v1/chat/completions', 514 | { 515 | model: this.currentModel.openai, 516 | messages: [{ role: 'user', content: message }], 517 | }, 518 | { 519 | headers: { 520 | 'Authorization': `Bearer ${this.openaiKey}`, 521 | 'Content-Type': 'application/json', 522 | }, 523 | } 524 | ); 525 | 526 | return response.data.choices[0].message.content; 527 | } 528 | 529 | private async queryAnthropic(message: string): Promise<string> { 530 | if (!this.anthropicKey) { 531 | throw new Error('Anthropic API key not configured'); 532 | } 533 | 534 | const response = await axios.post( 535 | 'https://api.anthropic.com/v1/messages', 536 | { 537 | model: this.currentModel.anthropic, 538 | messages: [{ role: 'user', content: message }], max_tokens: 1024, // max_tokens is required by the Messages API; 1024 is an arbitrary cap 539 | }, 540 | { 541 | headers: { 542 | 'x-api-key': this.anthropicKey, 'anthropic-version': '2023-06-01', 543 | 'Content-Type': 'application/json', 544 | }, 545 | } 546 | ); 547 | 548 | return response.data.content[0].text; 549 | } 550 | 551 | private async queryOpenRouter(message: string): Promise<string> { 552 | if (!this.openrouterKey) { 553 | throw new Error('OpenRouter API key not configured'); 554 | } 555 | 556 | const response = await axios.post( 557 | 'https://openrouter.ai/api/v1/chat/completions', 558 | { 559 | model: this.currentModel.openrouter, 560 | messages: [{ role: 'user', content: message }], 561 | }, 562 | { 563 | headers: { 564 | 'Authorization': `Bearer ${this.openrouterKey}`, 565 | 'Content-Type': 'application/json', 566 | }, 567 | } 568 | ); 569 | 570 | return response.data.choices[0].message.content; 571 | } 572 | 573 | private async queryOllama(message: string): Promise<string> { 574 | try { 575 | // Check if Ollama is running and model exists 576 | try { 577 | const modelResponse = await axios.get('http://localhost:11434/api/tags'); 
578 | const availableModels = modelResponse.data.models || []; 579 | if (!availableModels.some((m: any) => m.name === this.currentModel.ollama)) { 580 | throw new Error(`Model ${this.currentModel.ollama} not found. Available models: ${availableModels.map((m: any) => m.name).join(', ')}`); 581 | } 582 | } catch (error: any) { 583 | if (error.code === 'ECONNREFUSED') { 584 | throw new Error('Ollama is not running. Please start Ollama first (https://ollama.ai)'); 585 | } 586 | throw error; 587 | } 588 | 589 | // Use axios to get the streaming response 590 | const response = await axios.post('http://localhost:11434/api/generate', { 591 | model: this.currentModel.ollama, 592 | prompt: message, 593 | stream: false // Disable streaming to get complete response 594 | }); 595 | 596 | // Extract the response text 597 | if (response.data && response.data.response) { 598 | return response.data.response; 599 | } 600 | 601 | throw new Error('No response received from Ollama'); 602 | } catch (error: any) { 603 | throw new Error(`Ollama error: ${error?.message || 'Unknown error'}`); 604 | } 605 | } 606 | 607 | async run() { 608 | await this.initialize(); 609 | const transport = new StdioServerTransport(); 610 | await this.server.connect(transport); 611 | console.error('Letta MemGPT MCP server running on stdio'); 612 | } 613 | } 614 | 615 | const server = new LettaMemGPTServer(); 616 | server.run().catch(console.error); 617 | --------------------------------------------------------------------------------
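A note on portability: `src/index.ts` hard-codes both `dbPath` and `settingsPath` to absolute paths under `/Users/victor/...`, so the server as written only runs on that one machine. Below is a minimal sketch of one way to make those locations configurable; the environment variable names `MEMGPT_DB_PATH` and `MEMGPT_SETTINGS_PATH` are invented for this example and are not part of the project.

```typescript
// Sketch only: portable path resolution for the MemGPT MCP server.
// MEMGPT_DB_PATH and MEMGPT_SETTINGS_PATH are hypothetical variable names.
import * as path from 'path';

// SQLite database location: environment override if set, otherwise
// ./data/memory.db relative to the process working directory (mirrors the
// repository's data/ folder).
export function resolveDbPath(): string {
  return process.env.MEMGPT_DB_PATH ?? path.join(process.cwd(), 'data', 'memory.db');
}

// Optional MCP client settings file: returning undefined lets the caller
// skip settings-file persistence entirely instead of writing to a
// user-specific path.
export function resolveSettingsPath(): string | undefined {
  return process.env.MEMGPT_SETTINGS_PATH;
}
```

With defaults like these, `initialize()` could call `resolveDbPath()` instead of the literal string, and `updateSettingsFile()` / `readSettingsFile()` could return early when `resolveSettingsPath()` is undefined.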