├── patterns.txt ├── .env.example ├── deploy └── e2b │ ├── scripts │ ├── chrome-devtools-wrapper.sh │ ├── setup-desktop-configs.sh │ └── create-startup-script.sh │ ├── __init__.py │ ├── MANIFEST.in │ ├── requirements.txt │ ├── chrome-devtools-wrapper.sh │ ├── .gitignore │ ├── servers.json │ ├── template.py │ ├── pyproject.toml │ ├── e2b.Dockerfile.minimal │ ├── build.py │ ├── e2b.Dockerfile │ ├── view_sandbox_logs.py │ ├── README.md │ ├── nginx.conf │ ├── e2b.Dockerfile.simple │ ├── startup.sh │ └── sandbox_deploy.py ├── tsconfig.json ├── src ├── utils │ ├── logger.ts │ └── tunnel.ts ├── server │ ├── n8n-mcp-wrapper.ts │ └── http-server.ts ├── index.ts ├── stream │ ├── session-manager.ts │ └── stream-session.ts ├── config │ └── config.ts └── client │ └── mcp-client-manager.ts ├── .gitignore ├── mcp-servers.json ├── mcp-servers.example.json ├── LICENSE ├── package.json └── README.md /patterns.txt: -------------------------------------------------------------------------------- 1 | ghp_-->REMOVED_GITHUB_TOKEN 2 | sk--->REMOVED_OPENAI_KEY 3 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Server Configuration 2 | PORT=3000 3 | NODE_ENV=development 4 | ACCESS_TOKEN= 5 | NGROK_AUTH_TOKEN= 6 | # Logging 7 | LOG_LEVEL=INFO -------------------------------------------------------------------------------- /deploy/e2b/scripts/chrome-devtools-wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Wrapper to ensure chrome-devtools-mcp connects to the running Chrome instance 3 | set -euo pipefail 4 | 5 | REMOTE_URL=${CHROME_REMOTE_DEBUGGING_URL:-http://127.0.0.1:9222} 6 | ARGS=("$@") 7 | 8 | # Pass HTTP debugging endpoint to MCP as per README (--browserUrl) 9 | exec chrome-devtools-mcp --browserUrl "$REMOTE_URL" "${ARGS[@]}" 10 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2018", 4 | "module": "Node16", 5 | "moduleResolution": "Node16", 6 | "declaration": true, 7 | "declarationMap": true, 8 | "sourceMap": true, 9 | "outDir": "./dist", 10 | "strict": true, 11 | "esModuleInterop": true, 12 | "forceConsistentCasingInFileNames": true, 13 | "resolveJsonModule": true, 14 | "isolatedModules": true, 15 | "skipLibCheck": true 16 | }, 17 | "include": ["src/**/*"], 18 | "exclude": ["node_modules", "dist"] 19 | } -------------------------------------------------------------------------------- /deploy/e2b/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | E2B MCP Sandbox packaging root. 3 | This package includes scripts and resources (startup.sh, configs) for runtime use. 4 | """ 5 | """ 6 | E2B MCP Sandbox Manager 7 | 8 | Provides SandboxConfig and E2BSandboxManager for managing E2B sandboxes 9 | with MCP servers and browser automation support. 
10 | """ 11 | from sandbox_deploy import ( 12 | E2BSandboxManager, 13 | SandboxConfig, 14 | CommandExitException, 15 | ) 16 | 17 | __version__ = "0.1.0" 18 | __all__ = [ 19 | "E2BSandboxManager", 20 | "SandboxConfig", 21 | "CommandExitException", 22 | ] 23 | -------------------------------------------------------------------------------- /deploy/e2b/MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Include documentation 2 | include README.md 3 | include INSTALL.md 4 | include USAGE_IN_OTHER_PROJECTS.md 5 | include LICENSE 6 | 7 | # Include configuration and script files 8 | include startup.sh 9 | include chrome-devtools-wrapper.sh 10 | include servers.json 11 | include nginx.conf 12 | include quick_install.sh 13 | include example_usage.py 14 | include e2b.Dockerfile 15 | include e2b.Dockerfile.minimal 16 | include e2b.Dockerfile.simple 17 | recursive-include scripts * 18 | 19 | # Include config directory if present 20 | recursive-include config * 21 | 22 | # Exclude Python cache and build artifacts 23 | global-exclude __pycache__ 24 | global-exclude *.py[cod] 25 | global-exclude .DS_Store 26 | global-exclude *.so 27 | global-exclude *.egg-info 28 | -------------------------------------------------------------------------------- /src/utils/logger.ts: -------------------------------------------------------------------------------- 1 | import winston from 'winston'; 2 | import { Config } from '../config/config.js'; 3 | 4 | export function createLogger(config: Config) { 5 | return winston.createLogger({ 6 | level: config.logging.level, 7 | format: winston.format.combine( 8 | // winston.format.timestamp(), 9 | winston.format.json() 10 | ), 11 | transports: [ 12 | new winston.transports.Console({ 13 | format: winston.format.combine( 14 | winston.format.colorize(), 15 | winston.format.simple() 16 | ), 17 | }), 18 | new winston.transports.File({ 19 | filename: 'error.log', 20 | level: 'error' 21 | }), 22 | new winston.transports.File({ 23 | filename: 'combined.log' 24 | }) 25 | ], 26 | }); 27 | } 28 | 29 | export type Logger = ReturnType; -------------------------------------------------------------------------------- /deploy/e2b/requirements.txt: -------------------------------------------------------------------------------- 1 | # Minimal dependencies for E2B sandbox manager + MCP GUI bootstrap 2 | # Install with: pip install -r deploy/e2b/requirements.txt 3 | 4 | # Core E2B async SDK (preferred) 5 | e2b>=0.12.0 6 | 7 | # HTTP client used for readiness/keepalive probes (gracefully optional, but recommended) 8 | httpx>=0.27.0 9 | 10 | # Optional: legacy interpreter SDK fallback (only if you rely on older templates) 11 | # e2b-code-interpreter>=0.0.15 12 | 13 | # Development / Testing (uncomment as needed) 14 | # pytest>=8.0.0 15 | # mypy>=1.10.0 16 | # types-requests 17 | 18 | # NOTE: Removed large web stack (Flask, Celery, Redis, etc.) because this repo's 19 | # deploy/e2b tooling only launches a browser + MCP bridge, not a web API backend. 
20 | 21 | # E2B MCP Sandbox Requirements 22 | e2b-code-interpreter>=0.0.10 23 | aiohttp>=3.9.0 24 | python-dotenv>=1.0.0 25 | requests>=2.28.0 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | node_modules/ 3 | npm-debug.log 4 | yarn-debug.log 5 | yarn-error.log 6 | 7 | # Build output 8 | dist/ 9 | build/ 10 | 11 | # Environment variables 12 | .env 13 | .env.local 14 | .env.*.local 15 | 16 | # IDE and editor files 17 | .idea/ 18 | .vscode/ 19 | *.swp 20 | *.swo 21 | .DS_Store 22 | 23 | # Logs 24 | logs/ 25 | *.log 26 | combined.log 27 | error.log 28 | 29 | # Testing 30 | coverage/ 31 | .nyc_output/ 32 | 33 | # Temporary files 34 | tmp/ 35 | temp/ 36 | .tmp/ 37 | 38 | # Debug files 39 | .node-version 40 | .npm/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # TypeScript cache 49 | *.tsbuildinfo 50 | 51 | **/*.key 52 | **/*.pem 53 | .token 54 | **/credentials/* 55 | 56 | literal:old_text=new_text 57 | 58 | # MCP server configuration 59 | mcp-servers.json 60 | 61 | # AGENTS 62 | AGENTS.md 63 | CLAUDE.md -------------------------------------------------------------------------------- /mcp-servers.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "fetch": { 4 | "command": "uvx", 5 | "args": ["mcp-server-fetch"], 6 | "description": "HTTP/HTTPS content fetcher" 7 | }, 8 | "chrome-devtools-headful": { 9 | "command": "npx", 10 | "args": [ 11 | "chrome-devtools-mcp@latest", 12 | "--headless=false", 13 | "--isolated=false", 14 | "--browserUrl=http://127.0.0.1:9222", 15 | "--logFile=/home/user/mcp-debug.log", 16 | "--viewport=1280x800" 17 | ] 18 | }, 19 | "playwright-headful": { 20 | "command": "npx", 21 | "args": [ 22 | "@playwright/mcp@latest", 23 | "--browser=chrome", 24 | "--no-sandbox", 25 | "--viewport-size=1280x800", 26 | "--cdp-endpoint=http://127.0.0.1:9222/" 27 | ], 28 | "env": { 29 | "DISPLAY": ":99" 30 | } 31 | } 32 | } 33 | } -------------------------------------------------------------------------------- /mcp-servers.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "fetch": { 4 | "command": "uvx", 5 | "args": ["mcp-server-fetch"], 6 | "description": "HTTP/HTTPS content fetcher" 7 | }, 8 | "chrome-devtools-headful": { 9 | "command": "npx", 10 | "args": [ 11 | "chrome-devtools-mcp@latest", 12 | "--headless=false", 13 | "--isolated=false", 14 | "--browserUrl=http://127.0.0.1:9222", 15 | "--logFile=/home/user/mcp-debug.log", 16 | "--viewport=1280x800" 17 | ] 18 | }, 19 | "playwright-headful": { 20 | "command": "npx", 21 | "args": [ 22 | "@playwright/mcp@latest", 23 | "--browser=chrome", 24 | "--no-sandbox", 25 | "--viewport-size=1280x800", 26 | "--cdp-endpoint=http://127.0.0.1:9222/" 27 | ], 28 | "env": { 29 | "DISPLAY": ":99" 30 | } 31 | } 32 | } 33 | } -------------------------------------------------------------------------------- /deploy/e2b/chrome-devtools-wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Wrapper to ensure chrome-devtools-mcp connects to the running Chrome instance 3 | set -euo pipefail 4 | 5 | REMOTE_URL=${CHROME_REMOTE_DEBUGGING_URL:-http://127.0.0.1:9222} 6 | ARGS=("$@") 7 | 8 | resolve_ws_url() { 9 | local url="$1" 10 | if [[ "$url" =~ ^https?:// ]]; 
then 11 | local json 12 | if ! json=$(curl -fsS "$url/json/version"); then 13 | echo "Failed to query Chrome debugger version info from $url" >&2 14 | return 1 15 | fi 16 | local ws 17 | ws=$(python3 - <<'PYJSON' 18 | import json, sys 19 | try: 20 | data = json.load(sys.stdin) 21 | ws = data.get("webSocketDebuggerUrl") 22 | if not ws: 23 | raise SystemExit(1) 24 | print(ws) 25 | except Exception: 26 | raise SystemExit(1) 27 | PYJSON 28 | <<<"$json" 2>/dev/null) || { 29 | echo "Failed to parse webSocketDebuggerUrl from Chrome response" >&2 30 | return 1 31 | } 32 | echo "$ws" 33 | else 34 | echo "$url" 35 | fi 36 | } 37 | 38 | WS_URL=$(resolve_ws_url "$REMOTE_URL") 39 | exec chrome-devtools-mcp --browser "$WS_URL" "${ARGS[@]}" 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Quentin @ ConsoleX.ai 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
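For reference, the WebSocket lookup that deploy/e2b/chrome-devtools-wrapper.sh performs above (curl against Chrome's /json/version endpoint plus an inline python3 snippet) can be sketched as a small standalone helper. This is illustrative only; the helper name is ours, and the default URL simply mirrors the script's CHROME_REMOTE_DEBUGGING_URL fallback.

import json
import urllib.request

def resolve_ws_url(base_url: str = "http://127.0.0.1:9222") -> str:
    # Chrome's HTTP debugging endpoint reports its WebSocket debugger URL
    # under "webSocketDebuggerUrl" in the /json/version response.
    with urllib.request.urlopen(f"{base_url}/json/version") as resp:
        data = json.load(resp)
    return data["webSocketDebuggerUrl"]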
-------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mcp-bridge", 3 | "version": "2.0.0", 4 | "description": "A bridge service for MCP Server communication", 5 | "type": "module", 6 | "main": "dist/index.js", 7 | "scripts": { 8 | "build": "tsc", 9 | "start": "node dist/index.js", 10 | "start:tunnel": "node dist/index.js --tunnel", 11 | "dev": "tsc --watch & node --watch dist/index.js", 12 | "dev:tunnel": "tsc --watch & node --watch dist/index.js --tunnel", 13 | "lint": "eslint src/", 14 | "test": "jest" 15 | }, 16 | "dependencies": { 17 | "@modelcontextprotocol/sdk": "^1.0.3", 18 | "@types/js-yaml": "^4.0.9", 19 | "dotenv": "^16.0.3", 20 | "esbuild": ">=0.25.0", 21 | "express": "^4.19.2", 22 | "js-yaml": "^4.1.0", 23 | "n8n-mcp": "^2.29.0", 24 | "ngrok": "^5.0.0-beta.2", 25 | "winston": "^3.8.2" 26 | }, 27 | "devDependencies": { 28 | "@types/express": "^4.17.21", 29 | "@types/jest": "^29.5.12", 30 | "@types/node": "^20.11.28", 31 | "esbuild": ">=0.25.0", 32 | "eslint": "^9.8.0", 33 | "jest": "^29.7.0", 34 | "ts-jest": "^29.2.4", 35 | "tsx": "^4.16.5", 36 | "typescript": "^5.4.2" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /deploy/e2b/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | # PyInstaller 28 | *.manifest 29 | *.spec 30 | 31 | # Installer logs 32 | pip-log.txt 33 | pip-delete-this-directory.txt 34 | 35 | # Unit test / coverage reports 36 | htmlcov/ 37 | .tox/ 38 | .nox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *.cover 45 | *.py,cover 46 | .hypothesis/ 47 | .pytest_cache/ 48 | 49 | # Type checkers 50 | .mypy_cache/ 51 | .dmypy.json 52 | dmypy.json 53 | .pytype/ 54 | 55 | # Cython debug symbols 56 | cython_debug/ 57 | 58 | # Jupyter / IPython 59 | .ipynb_checkpoints 60 | profile_default/ 61 | ipython_config.py 62 | 63 | # Virtual environments 64 | venv/ 65 | .venv/ 66 | ENV/ 67 | env/ 68 | 69 | # Environment files 70 | .env 71 | .env.* 72 | *.env 73 | 74 | # Logs and temporary files 75 | logs/ 76 | *.log 77 | tmp/ 78 | temp/ 79 | .DS_Store 80 | 81 | -------------------------------------------------------------------------------- /deploy/e2b/servers.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "fetch": { 4 | "command": "uvx", 5 | "args": ["mcp-server-fetch"], 6 | "description": "HTTP/HTTPS content fetcher" 7 | }, 8 | "chrome-devtools-headful": { 9 | "command": "npx", 10 | "args": [ 11 | "chrome-devtools-mcp@latest", 12 | "--headless=false", 13 | "--isolated=false", 14 | "--browserUrl=http://127.0.0.1:9222", 15 | "--logFile=/home/user/mcp-debug.log", 16 | "--viewport=1280x800" 17 | ] 18 | }, 19 | "playwright-headful": { 20 | "command": "npx", 21 | "args": [ 22 | "@playwright/mcp@latest", 23 | "--browser=chrome", 24 | "--no-sandbox", 25 | "--viewport-size=1280x800", 26 | "--cdp-endpoint=http://127.0.0.1:9222/" 27 | ], 28 | "env": { 29 | "DISPLAY": ":99" 30 | } 31 
| }, 32 | "n8n-mcp": { 33 | "command": "npx", 34 | "args": ["n8n-mcp"], 35 | "env": { 36 | "MCP_MODE": "stdio", 37 | "LOG_LEVEL": "error", 38 | "DISABLE_CONSOLE_OUTPUT": "true", 39 | "N8N_API_URL": "${N8N_API_URL}", 40 | "N8N_API_KEY": "${N8N_API_KEY}" 41 | } 42 | } 43 | } 44 | } -------------------------------------------------------------------------------- /deploy/e2b/template.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Union 3 | 4 | from e2b import AsyncTemplate 5 | 6 | TEMPLATE_DIR = Path(__file__).resolve().parent 7 | 8 | def make_template(dockerfile: Union[str, Path] = None) -> AsyncTemplate: 9 | """Factory to build an AsyncTemplate from a chosen Dockerfile. 10 | 11 | Args: 12 | dockerfile: Optional path (absolute or relative) to a Dockerfile. If omitted, 13 | defaults to the full featured 'e2b.Dockerfile'. A common alternative is 14 | 'e2b.Dockerfile.minimal'. 15 | 16 | Returns: 17 | AsyncTemplate instance prepared from the specified Dockerfile. 18 | """ 19 | if dockerfile is None: 20 | dockerfile = TEMPLATE_DIR / "e2b.Dockerfile" 21 | else: 22 | dockerfile = Path(dockerfile) 23 | if not dockerfile.is_absolute(): 24 | dockerfile = (TEMPLATE_DIR / dockerfile).resolve() 25 | if not dockerfile.exists(): # Fail early with a clearer message 26 | raise FileNotFoundError(f"Dockerfile not found: {dockerfile}") 27 | return AsyncTemplate().from_dockerfile(str(dockerfile)) 28 | 29 | # Note: 30 | # Avoid creating a default template at import time. 31 | # Calling make_template() here would parse the full Dockerfile immediately, 32 | # and the upstream parser prints "Unsupported instruction: COMMENT" for each 33 | # comment line it encounters. Build scripts import this module, so such side 34 | # effects would show up before the actual build starts. Callers should invoke 35 | # make_template(...) explicitly with their desired Dockerfile variant. 
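# For reference, a caller-side sketch (this mirrors what build.py below does;
# the alias shown is just the dev/minimal default that build.py would generate):
#
#     tmpl = make_template("e2b.Dockerfile.minimal")
#     await AsyncTemplate.build(tmpl, alias="mcp-dev-minimal")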
36 | -------------------------------------------------------------------------------- /deploy/e2b/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "e2b-mcp-sandbox" 7 | version = "0.2.1" 8 | description = "E2B Sandbox Manager for MCP-enabled Web Sandbox" 9 | readme = "README.md" 10 | authors = [ 11 | {name = "EvalsOne", email = "your-email@example.com"} 12 | ] 13 | license = {text = "MIT"} 14 | classifiers = [ 15 | "Development Status :: 3 - Alpha", 16 | "Intended Audience :: Developers", 17 | "Programming Language :: Python :: 3", 18 | "Programming Language :: Python :: 3.8", 19 | "Programming Language :: Python :: 3.9", 20 | "Programming Language :: Python :: 3.10", 21 | "Programming Language :: Python :: 3.11", 22 | "Programming Language :: Python :: 3.12", 23 | ] 24 | requires-python = ">=3.8" 25 | dependencies = [ 26 | "e2b>=0.17.0", 27 | "httpx>=0.24.0", 28 | ] 29 | 30 | [project.optional-dependencies] 31 | dev = [ 32 | "pytest>=7.0.0", 33 | "pytest-asyncio>=0.21.0", 34 | ] 35 | 36 | [project.urls] 37 | Homepage = "https://github.com/EvalsOne/MCP-connect" 38 | Repository = "https://github.com/EvalsOne/MCP-connect" 39 | Issues = "https://github.com/EvalsOne/MCP-connect/issues" 40 | 41 | [project.scripts] 42 | e2b-mcp-sandbox = "deploy.e2b.sandbox_deploy:main" 43 | 44 | [tool.setuptools] 45 | packages = ["deploy.e2b"] 46 | package-dir = {"deploy.e2b" = "."} 47 | py-modules = ["sandbox_deploy"] 48 | 49 | [tool.setuptools.package-data] 50 | "deploy.e2b" = [ 51 | "startup.sh", 52 | "chrome-devtools-wrapper.sh", 53 | "servers.json", 54 | "nginx.conf", 55 | "e2b.Dockerfile", 56 | "e2b.Dockerfile.minimal", 57 | "e2b.Dockerfile.simple", 58 | "scripts/*", 59 | "config/**", 60 | ] 61 | -------------------------------------------------------------------------------- /src/server/n8n-mcp-wrapper.ts: -------------------------------------------------------------------------------- 1 | import { spawn } from 'child_process'; 2 | 3 | /** 4 | * Wrapper for n8n-mcp to ensure that only JSON lines are written to stdout. 5 | * Any non-JSON output (e.g. banners, help text) is redirected to stderr 6 | * so that the MCP stdio transport sees a clean JSON-RPC stream. 7 | */ 8 | const child = spawn('npx', ['n8n-mcp'], { 9 | stdio: ['pipe', 'pipe', 'inherit'], 10 | env: process.env, 11 | }); 12 | 13 | process.stdin.pipe(child.stdin); 14 | 15 | let buffer = ''; 16 | if (child.stdout) { 17 | child.stdout.setEncoding('utf8'); 18 | child.stdout.on('data', (chunk: string) => { 19 | buffer += chunk; 20 | 21 | let newlineIndex: number; 22 | while ((newlineIndex = buffer.indexOf('\n')) !== -1) { 23 | const line = buffer.slice(0, newlineIndex); 24 | buffer = buffer.slice(newlineIndex + 1); 25 | 26 | const trimmed = line.trim(); 27 | if (!trimmed) { 28 | continue; 29 | } 30 | 31 | // Heuristic: JSON-RPC messages should start with { or [ 32 | if (trimmed.startsWith('{') || trimmed.startsWith('[')) { 33 | process.stdout.write(trimmed + '\n'); 34 | } else { 35 | // Avoid flooding logs; just emit a short notice to stderr 36 | console.error( 37 | '[n8n-mcp-wrapper] filtered non-JSON stdout:', 38 | trimmed.length > 120 ? 
`${trimmed.slice(0, 117)}...` : trimmed, 39 | ); 40 | } 41 | } 42 | }); 43 | } 44 | 45 | child.on('exit', (code, signal) => { 46 | if (code !== null) { 47 | process.exit(code); 48 | } else if (signal) { 49 | process.kill(process.pid, signal); 50 | } else { 51 | process.exit(0); 52 | } 53 | }); 54 | 55 | child.on('error', (error) => { 56 | console.error('[n8n-mcp-wrapper] failed to start n8n-mcp:', error); 57 | process.exit(1); 58 | }); 59 | 60 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import * as dotenv from 'dotenv'; 2 | import path from 'path'; 3 | import { fileURLToPath } from 'url'; 4 | import { HttpServer } from './server/http-server.js'; 5 | import { MCPClientManager } from './client/mcp-client-manager.js'; 6 | import { Config, loadConfig } from './config/config.js'; 7 | import { Logger, createLogger } from './utils/logger.js'; 8 | 9 | const __filename = fileURLToPath(import.meta.url); 10 | const __dirname = path.dirname(__filename); 11 | 12 | const result = dotenv.config({ 13 | path: path.resolve(__dirname, '../.env'), 14 | override: true 15 | }); 16 | 17 | if (result.error) { 18 | console.error('Error loading .env file:', result.error); 19 | process.exit(1); 20 | } 21 | 22 | async function main() { 23 | const config = loadConfig(); 24 | const logger = createLogger(config); 25 | const mcpClient = new MCPClientManager(logger); 26 | const server = new HttpServer(config, logger, mcpClient); 27 | 28 | // Handle process termination 29 | async function shutdown() { 30 | logger.info('Shutting down...'); 31 | try { 32 | await mcpClient.stop(); 33 | } catch (error) { 34 | logger.error('Error during shutdown:', error); 35 | } finally { 36 | process.exit(0); 37 | } 38 | } 39 | 40 | // Handle different termination signals 41 | process.on('SIGTERM', shutdown); 42 | process.on('SIGINT', shutdown); 43 | process.on('uncaughtException', async (error) => { 44 | logger.error('Uncaught exception:', error); 45 | await shutdown(); 46 | }); 47 | process.on('unhandledRejection', async (error) => { 48 | logger.error('Unhandled rejection:', error); 49 | await shutdown(); 50 | }); 51 | 52 | // Start the server 53 | server.start(); 54 | } 55 | 56 | main().catch((error) => { 57 | console.error('Failed to start server:', error); 58 | process.exit(1); 59 | }); -------------------------------------------------------------------------------- /deploy/e2b/e2b.Dockerfile.minimal: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | ENV DEBIAN_FRONTEND=noninteractive 3 | RUN apt-get update && apt-get install -y \ 4 | curl \ 5 | wget \ 6 | git \ 7 | ca-certificates \ 8 | sudo \ 9 | python3 \ 10 | python3-pip \ 11 | nginx \ 12 | && rm -rf /var/lib/apt/lists/* 13 | RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \ 14 | && apt-get install -y nodejs \ 15 | && npm install -g npm@latest 16 | RUN set -euo pipefail; \ 17 | if id -u user >/dev/null 2>&1; then \ 18 | echo "User 'user' already exists, skipping creation"; \ 19 | else \ 20 | useradd -m -s /bin/bash -u 1000 user; \ 21 | fi; \ 22 | if ! grep -q '^user .*NOPASSWD:ALL' /etc/sudoers; then \ 23 | echo 'user ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers; \ 24 | fi 25 | USER user 26 | WORKDIR /home/user 27 | RUN set -euo pipefail; \ 28 | echo "Installing uv (primary method)"; \ 29 | if ! 
curl -fsSL https://astral.sh/uv/install.sh | sh -s --; then \
30 |         echo "Primary uv installation failed; retrying (no flags)..."; \
31 |         sleep 2; \
32 |         curl -fsSL https://astral.sh/uv/install.sh | sh -s -- || { echo 'uv install failed'; exit 1; }; \
33 |     fi; \
34 |     test -x "$HOME/.local/bin/uv" || { echo 'uv binary missing after install'; exit 1; }
35 | USER root
36 | RUN if [ -f /home/user/.local/bin/uv ]; then install -m 0755 /home/user/.local/bin/uv /usr/local/bin/uv; fi \
37 |     && if [ -f /home/user/.local/bin/uvx ]; then install -m 0755 /home/user/.local/bin/uvx /usr/local/bin/uvx; else ln -sf /usr/local/bin/uv /usr/local/bin/uvx; fi
38 | USER user
39 | RUN git clone https://github.com/EvalsOne/MCP-connect.git /home/user/mcp-connect && \
40 |     cd /home/user/mcp-connect && \
41 |     npm install && \
42 |     npm run build
43 | 
44 | RUN mkdir -p /home/user/.config/mcp
45 | USER root
46 | COPY nginx.conf /etc/nginx/sites-available/default
47 | COPY startup.sh /home/user/startup.sh
48 | RUN chmod +x /home/user/startup.sh && \
49 |     chown user:user /home/user/startup.sh
50 | ENV HOST=127.0.0.1
51 | ENV HEADLESS=1
52 | USER user
53 | WORKDIR /home/user
54 | CMD /home/user/startup.sh
55 | 
-------------------------------------------------------------------------------- /src/stream/session-manager.ts: --------------------------------------------------------------------------------
1 | import { StreamSession } from './stream-session.js';
2 | import type { StreamableServerConfig } from '../config/config.js';
3 | import type { Logger } from '../utils/logger.js';
4 | 
5 | interface SessionRecord {
6 |   serverId: string;
7 |   session: StreamSession;
8 | }
9 | 
10 | export class StreamSessionManager {
11 |   private readonly sessions = new Map<string, SessionRecord>();
12 |   private readonly logger: Logger;
13 |   private readonly ttlMs: number;
14 | 
15 |   constructor(logger: Logger, ttlMs: number) {
16 |     this.logger = logger;
17 |     this.ttlMs = ttlMs;
18 |   }
19 | 
20 |   public async createSession(serverId: string, config: StreamableServerConfig): Promise<StreamSession> {
21 |     const session = new StreamSession(this.logger, config);
22 |     const sessionId = session.id;
23 |     this.sessions.set(sessionId, { serverId, session });
24 |     session.on('close', () => {
25 |       this.sessions.delete(sessionId);
26 |     });
27 |     session.on('error', () => {
28 |       // Errors are already logged by the session. Ensure the entry eventually clears.
29 |       if (!this.sessions.has(sessionId)) {
30 |         return;
31 |       }
32 |       // No immediate deletion; allow client to handle recovery.
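      // (Idle entries are eventually removed by reapExpiredSessions() below once ttlMs elapses.)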
33 |     });
34 |     await session.ensureStarted();
35 |     this.logger.info(`Created stream session ${sessionId} for server ${serverId}`);
36 |     return session;
37 |   }
38 | 
39 |   public getSession(sessionId: string, serverId?: string): StreamSession | undefined {
40 |     const record = this.sessions.get(sessionId);
41 |     if (!record) {
42 |       return undefined;
43 |     }
44 | 
45 |     if (serverId && record.serverId !== serverId) {
46 |       return undefined;
47 |     }
48 | 
49 |     return record.session;
50 |   }
51 | 
52 |   public async closeSession(sessionId: string): Promise<void> {
53 |     const record = this.sessions.get(sessionId);
54 |     if (!record) {
55 |       return;
56 |     }
57 | 
58 |     this.sessions.delete(sessionId);
59 |     await record.session.close();
60 |   }
61 | 
62 |   public reapExpiredSessions(): void {
63 |     const now = Date.now();
64 |     for (const [sessionId, record] of this.sessions.entries()) {
65 |       if (now - record.session.lastUsed > this.ttlMs) {
66 |         this.logger.info(`Closing idle session ${sessionId}`);
67 |         void this.closeSession(sessionId).catch((error) => {
68 |           this.logger.error(`Failed to close session ${sessionId}:`, error);
69 |         });
70 |       }
71 |     }
72 |   }
73 | 
74 |   public async closeAll(): Promise<void> {
75 |     const ids = Array.from(this.sessions.keys());
76 |     await Promise.all(ids.map((id) => this.closeSession(id)));
77 |   }
78 | }
79 | 
-------------------------------------------------------------------------------- /src/utils/tunnel.ts: --------------------------------------------------------------------------------
1 | import ngrok from 'ngrok';
2 | import { Logger } from './logger.js';
3 | import { exec } from 'child_process';
4 | import { promisify } from 'util';
5 | 
6 | const execAsync = promisify(exec);
7 | 
8 | export class TunnelManager {
9 |   private logger: Logger;
10 |   private url: string | null = null;
11 |   private retryCount = 0;
12 |   private readonly maxRetries = 3;
13 | 
14 |   constructor(logger: Logger) {
15 |     this.logger = logger;
16 |   }
17 | 
18 |   async createTunnel(port: number): Promise<string | null> {
19 |     if (!process.env.NGROK_AUTH_TOKEN) {
20 |       this.logger.error('NGROK_AUTH_TOKEN is not set in environment variables');
21 |       return null;
22 |     }
23 | 
24 |     try {
25 |       // Clean up existing ngrok processes
26 |       this.logger.info('Cleaning up existing ngrok processes...');
27 |       await ngrok.kill();
28 |       await new Promise(resolve => setTimeout(resolve, 2000));
29 | 
30 |       // Reset authtoken
31 |       this.logger.info('Setting up ngrok authentication...');
32 |       await ngrok.authtoken(process.env.NGROK_AUTH_TOKEN);
33 | 
34 |       // Use the simplest configuration to connect
35 |       this.logger.info(`========================================`);
36 |       this.logger.info(`Starting ngrok tunnel for port ${port}...`);
37 |       this.url = await ngrok.connect({
38 |         addr: port,
39 |         authtoken: process.env.NGROK_AUTH_TOKEN
40 |       });
41 | 
42 |       if (this.url) {
43 |         this.logger.info('Ngrok tunnel established successfully');
44 |         return this.url;
45 |       }
46 | 
47 |       throw new Error('Failed to get tunnel URL');
48 | 
49 |     } catch (error) {
50 |       this.logger.error('Tunnel creation failed:', error);
51 | 
52 |       if (this.retryCount < this.maxRetries) {
53 |         this.retryCount++;
54 |         this.logger.info(`Retrying... 
(${this.retryCount}/${this.maxRetries})`); 55 | await new Promise(resolve => setTimeout(resolve, 2000)); 56 | return this.createTunnel(port); 57 | } 58 | 59 | // If all attempts fail, use the command line method 60 | try { 61 | this.logger.info('Attempting to start ngrok using CLI...'); 62 | const { stdout } = await execAsync(`npx ngrok http ${port} --log=stdout`); 63 | this.logger.debug('Ngrok CLI output:', stdout); 64 | 65 | const match = stdout.match(/https:\/\/[a-zA-Z0-9-]+\.ngrok\.io/); 66 | if (match) { 67 | this.url = match[0]; 68 | this.logger.info(`Tunnel established via CLI: ${this.url}`); 69 | return this.url; 70 | } 71 | } catch (cliError) { 72 | this.logger.error('CLI fallback also failed:', cliError); 73 | } 74 | 75 | return null; 76 | } 77 | } 78 | 79 | async disconnect(): Promise { 80 | if (this.url) { 81 | try { 82 | await ngrok.disconnect(this.url); 83 | await ngrok.kill(); 84 | this.logger.info('Ngrok tunnel disconnected'); 85 | this.url = null; 86 | } catch (error: any) { 87 | this.logger.error('Error disconnecting ngrok tunnel:', error); 88 | this.url = null; 89 | await ngrok.kill().catch(() => {}); 90 | throw error; 91 | } 92 | } 93 | } 94 | } -------------------------------------------------------------------------------- /src/stream/stream-session.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from 'events'; 2 | import { randomUUID } from 'crypto'; 3 | import { getDefaultEnvironment, StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js'; 4 | import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js'; 5 | import type { StreamableServerConfig } from '../config/config.js'; 6 | import type { Logger } from '../utils/logger.js'; 7 | 8 | export class StreamSession extends EventEmitter { 9 | public readonly id: string; 10 | private readonly transport: StdioClientTransport; 11 | private readonly logger: Logger; 12 | private readonly serverConfig: StreamableServerConfig; 13 | private started = false; 14 | private closed = false; 15 | private stderrAttached = false; 16 | private _lastUsed = Date.now(); 17 | 18 | constructor(logger: Logger, serverConfig: StreamableServerConfig, sessionId?: string) { 19 | super(); 20 | this.logger = logger; 21 | this.serverConfig = serverConfig; 22 | this.id = sessionId ?? randomUUID(); 23 | 24 | const mergedEnv = { 25 | ...getDefaultEnvironment(), 26 | ...(serverConfig.env ?? {}), 27 | }; 28 | 29 | const n8nUrl = mergedEnv['N8N_API_URL']; 30 | const n8nKey = mergedEnv['N8N_API_KEY']; 31 | if (n8nUrl || n8nKey) { 32 | const maskedKey = 33 | typeof n8nKey === 'string' && n8nKey.length > 4 34 | ? `***${n8nKey.slice(-4)}` 35 | : n8nKey 36 | ? '***' 37 | : ''; 38 | this.logger.info( 39 | `Stream session ${this.id} env snapshot: N8N_API_URL=${n8nUrl ?? ''}, N8N_API_KEY=${maskedKey}`, 40 | ); 41 | } 42 | 43 | this.transport = new StdioClientTransport({ 44 | command: serverConfig.command, 45 | args: serverConfig.args ?? [], 46 | env: mergedEnv, 47 | stderr: 'pipe', 48 | }); 49 | 50 | this.transport.onmessage = (message) => { 51 | this._lastUsed = Date.now(); 52 | this.emit('message', message as JSONRPCMessage); 53 | }; 54 | 55 | this.transport.onclose = () => { 56 | this.closed = true; 57 | this.emit('close'); 58 | }; 59 | 60 | this.transport.onerror = (error) => { 61 | const err = error instanceof Error ? 
error : new Error(String(error)); 62 | this.logger.error(`Session ${this.id} transport error:`, err); 63 | this.emit('error', err); 64 | }; 65 | } 66 | 67 | public get lastUsed(): number { 68 | return this._lastUsed; 69 | } 70 | 71 | public async ensureStarted(): Promise { 72 | if (this.started || this.closed) { 73 | return; 74 | } 75 | 76 | await this.transport.start(); 77 | this.started = true; 78 | this._lastUsed = Date.now(); 79 | 80 | if (!this.stderrAttached) { 81 | const stderr = this.transport.stderr; 82 | if (stderr) { 83 | const readable = stderr as unknown as NodeJS.ReadableStream; 84 | if (typeof readable.setEncoding === 'function') { 85 | readable.setEncoding('utf8'); 86 | } 87 | readable.on('data', (chunk: string) => { 88 | const output = chunk.trim(); 89 | if (output.length > 0) { 90 | this.logger.debug(`Session ${this.id} stderr: ${output}`); 91 | } 92 | }); 93 | } 94 | this.stderrAttached = true; 95 | } 96 | } 97 | 98 | public async send(message: JSONRPCMessage): Promise { 99 | if (this.closed) { 100 | throw new Error(`Session ${this.id} is closed`); 101 | } 102 | 103 | await this.ensureStarted(); 104 | await this.transport.send(message); 105 | this._lastUsed = Date.now(); 106 | } 107 | 108 | public async close(): Promise { 109 | if (this.closed) { 110 | return; 111 | } 112 | 113 | this.closed = true; 114 | await this.transport.close(); 115 | this.emit('close'); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /deploy/e2b/build.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import argparse 3 | from typing import Any, Optional 4 | 5 | from e2b import AsyncTemplate 6 | from template import make_template 7 | 8 | 9 | def parse_args(argv: Optional[list[str]] = None) -> argparse.Namespace: 10 | parser = argparse.ArgumentParser(description="Build E2B template (unified dev/prod)") 11 | parser.add_argument( 12 | "--mode", 13 | choices=["dev", "prod"], 14 | default="dev", 15 | help="Select build mode: dev or prod (affects default alias and log verbosity)", 16 | ) 17 | parser.add_argument( 18 | "--variant", 19 | choices=["full", "simple", "minimal"], 20 | default="full", 21 | help=( 22 | "Convenience selector for template Dockerfile: " 23 | "full=e2b.Dockerfile (GUI + noVNC), simple=e2b.Dockerfile.simple (headless Chrome), " 24 | "minimal=e2b.Dockerfile.minimal (no X/noVNC, fastest)" 25 | ), 26 | ) 27 | parser.add_argument( 28 | "--dockerfile", 29 | default=None, 30 | help="Relative or absolute path to Dockerfile (overrides --variant if provided)", 31 | ) 32 | parser.add_argument( 33 | "--alias", 34 | default=None, 35 | help="Template alias to register (default varies by --mode and --variant)", 36 | ) 37 | parser.add_argument("--cpu", type=int, default=2, help="CPU count to allocate during build (default: 2)") 38 | parser.add_argument("--memory-mb", type=int, default=2048, help="Memory in MB to allocate during build (default: 2048)") 39 | parser.add_argument("--skip-cache", action="store_true", help="Skip build cache (default: off)") 40 | parser.add_argument("--verbose", action="store_true", help="Show verbose build logs (default: normal)") 41 | parser.add_argument("--quiet", action="store_true", help="Only show errors (default: normal)") 42 | return parser.parse_args(argv) 43 | 44 | 45 | async def main(args: Optional[argparse.Namespace] = None) -> None: 46 | args = args or parse_args() 47 | 48 | variant_to_df = { 49 | "full": "e2b.Dockerfile", 50 | "simple": 
"e2b.Dockerfile.simple", 51 | "minimal": "e2b.Dockerfile.minimal", 52 | } 53 | chosen_df = args.dockerfile or variant_to_df[args.variant] 54 | tmpl = make_template(chosen_df) 55 | 56 | if not args.alias: 57 | prefix = "mcp-dev-" if args.mode == "dev" else "mcp-prod-" 58 | variant_to_alias = { 59 | "full": f"{prefix}gui", 60 | "simple": f"{prefix}simple", 61 | "minimal": f"{prefix}minimal", 62 | } 63 | args.alias = variant_to_alias[args.variant] 64 | 65 | def log_handler(entry: Any) -> None: 66 | msg = (entry.message or "").strip() 67 | # Filter noisy Dockerfile COMMENT warnings 68 | if msg.startswith("Unsupported instruction: COMMENT"): 69 | return 70 | # Logging behavior: quiet -> only errors; verbose -> all; normal -> info+warn+error 71 | level = (entry.level or "").lower() 72 | if args.quiet: 73 | if level in ("error", "fatal"): 74 | print(f"[{entry.timestamp.isoformat()}] {entry.level.upper()}: {msg}") 75 | return 76 | if args.verbose: 77 | print(f"[{entry.timestamp.isoformat()}] {entry.level.upper()}: {msg}") 78 | return 79 | if level in ("info", "warn", "error", "fatal"): 80 | print(f"[{entry.timestamp.isoformat()}] {entry.level.upper()}: {msg}") 81 | 82 | await AsyncTemplate.build( 83 | tmpl, 84 | alias=args.alias, 85 | on_build_logs=log_handler, 86 | cpu_count=args.cpu, 87 | memory_mb=args.memory_mb, 88 | skip_cache=args.skip_cache, 89 | ) 90 | 91 | print("✅ Template built successfully!") 92 | print(f"🏷️ Template Alias: {args.alias}") 93 | print(f"📄 Dockerfile: {chosen_df}") 94 | print("\nTo list templates:") 95 | print(" e2b template list") 96 | print("To show a specific template:") 97 | print(f" e2b template show {args.alias}") 98 | print("\nUse the resulting template ID with sandbox_deploy.py --template-id ") 99 | 100 | 101 | if __name__ == "__main__": 102 | asyncio.run(main()) 103 | 104 | -------------------------------------------------------------------------------- /deploy/e2b/scripts/setup-desktop-configs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup desktop environment configuration files for Fluxbox, PCManFM, and tint2 3 | set -e 4 | 5 | TEMPL=/opt/mcp-desktop 6 | install -d -o 1000 -g 1000 "$TEMPL/fluxbox" "$TEMPL/fluxbox/styles" "$TEMPL/tint2" "$TEMPL/pcmanfm/default" 7 | 8 | cat > "$TEMPL/fluxbox/init" <<'FILE' 9 | session.configVersion: 13 10 | session.screen0.toolbar.tools: RootMenu, WorkspaceName, Iconbar, Clock 11 | session.screen0.toolbar.autoHide: false 12 | session.screen0.toolbar.placement: BottomCenter 13 | session.screen0.toolbar.widthPercent: 100 14 | session.screen0.toolbar.height: 28 15 | session.screen0.toolbar.layer: Dock 16 | session.screen0.toolbar.visible: false 17 | session.screen0.toolbar.maxOver: False 18 | session.screen0.toolbar.alpha: 255 19 | session.screen0.iconbar.mode: Workspace 20 | session.screen0.iconbar.focused: true 21 | session.screen0.iconbar.unfocused: true 22 | session.screen0.menu.delay: 150 23 | session.screen0.rootCommand: pcmanfm --desktop --profile=default 24 | session.styleFile: ~/.fluxbox/styles/MCP-Grey 25 | session.screen0.workspaceNames: Workspace 1, Workspace 2, Workspace 3, Workspace 4 26 | session.autoRaiseDelay: 250 27 | session.slitlistFile: ~/.fluxbox/slitlist 28 | session.appsFile: ~/.fluxbox/apps 29 | session.tabsAttachArea: 0 30 | session.tabFocusModel: Follow 31 | session.focusTabMinWidth: 0 32 | session.clickRaises: True 33 | session.focusModel: ClickFocus 34 | session.clientMenu.usePixmap: true 35 | session.tabPadding: 0 36 | 
session.ignoreBorder: false 37 | session.styleOverlay: ~/.fluxbox/overlay 38 | FILE 39 | 40 | cat > "$TEMPL/fluxbox/overlay" <<'FILE' 41 | window.focus.alpha: 255 42 | window.unfocus.alpha: 220 43 | toolbar.alpha: 255 44 | menu.alpha: 255 45 | FILE 46 | 47 | cat > "$TEMPL/fluxbox/menu" <<'FILE' 48 | [begin] (MCP) 49 | [exec] (Chrome) {/usr/bin/google-chrome --no-sandbox} 50 | [exec] (Terminal) {x-terminal-emulator} 51 | [exec] (File Manager) {pcmanfm} 52 | [separator] 53 | [exit] (Logout) 54 | [end] 55 | FILE 56 | 57 | cat > "$TEMPL/fluxbox/keys" <<'FILE' 58 | OnDesktop Mouse1 :HideMenus 59 | Mod1 Tab :NextWindow {static groups} 60 | Mod1 Shift Tab :PrevWindow {static groups} 61 | Mod4 d :RootMenu 62 | Mod4 Return :ExecCommand x-terminal-emulator 63 | Mod4 c :ExecCommand google-chrome --no-sandbox 64 | FILE 65 | 66 | cat > "$TEMPL/fluxbox/styles/MCP-Grey" <<'FILE' 67 | ! minimal neutral style 68 | toolbar: flat gradient vertical 69 | toolbar.color: #2b303b 70 | toolbar.colorTo: #232832 71 | toolbar.borderColor: #1c1f26 72 | toolbar.borderWidth: 1 73 | window.title.focus: flat gradient vertical 74 | window.title.focus.color: #3b4252 75 | window.title.focus.colorTo: #2e3440 76 | window.title.unfocus: flat gradient vertical 77 | window.title.unfocus.color: #434c5e 78 | window.title.unfocus.colorTo: #3b4252 79 | window.button.focus: flat solid 80 | window.button.focus.color: #d8dee9 81 | window.button.unfocus: flat solid 82 | window.button.unfocus.color: #a7adba 83 | menu.frame: flat solid 84 | menu.frame.color: #2e3440 85 | menu.title: flat solid 86 | menu.title.color: #3b4252 87 | handle: flat solid 88 | handle.color: #2e3440 89 | borderColor: #1c1f26 90 | borderWidth: 2 91 | FILE 92 | 93 | :>"$TEMPL/fluxbox/slitlist" 94 | :>"$TEMPL/fluxbox/apps" 95 | 96 | cat > "$TEMPL/pcmanfm/default/desktop-items-0.conf" <<'FILE' 97 | [*] 98 | wallpaper_mode=color 99 | wallpaper= 100 | desktop_bg=#1d1f21 101 | desktop_shadow=#000000 102 | desktop_font=Sans 11 103 | desktop_folder=$HOME/Desktop 104 | show_desktop_bg=1 105 | show_trash=1 106 | show_mounts=1 107 | show_documents=1 108 | show_wm_menu=0 109 | sort=name;ascending; 110 | FILE 111 | 112 | cat > "$TEMPL/tint2/tint2rc" <<'FILE' 113 | # Minimal tint2 panel 114 | panel_items = LTS 115 | panel_monitor = all 116 | panel_position = bottom center horizontal 117 | panel_size = 100% 30 118 | panel_margin = 0 0 119 | panel_padding = 8 4 8 120 | panel_background_id = 0 121 | wm_menu = 1 122 | panel_layer = top 123 | panel_dock = 1 124 | 125 | rounded = 6 126 | border_width = 1 127 | border_color = #1c1f26 100 128 | background_color = #2b303b 95 129 | background_color_hover = #343b48 95 130 | 131 | launcher_padding = 4 4 4 132 | launcher_item_size = 28 133 | launcher_icon_theme = Adwaita 134 | 135 | # Taskbar 136 | task_text = 1 137 | urgent_nb_of_blink = 7 138 | mouse_middle = none 139 | mouse_right = close 140 | mouse_scroll_up = toggle 141 | mouse_scroll_down = iconify 142 | 143 | time1_format = %H:%M 144 | clock_font_color = #eceff4 100 145 | clock_padding = 8 0 146 | 147 | backgrounds = 1 148 | background_id = 0 149 | background_color = #2b303b 95 150 | border_color = #1c1f26 100 151 | border_width = 1 152 | border_radius = 6 153 | FILE 154 | 155 | chown -R 1000:1000 "$TEMPL" 156 | -------------------------------------------------------------------------------- /deploy/e2b/e2b.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | ENV 
LANG=en_US.UTF-8 5 | ENV LANGUAGE=en_US:en 6 | ENV LC_ALL=en_US.UTF-8 7 | 8 | RUN apt-get update && apt-get install -y \ 9 | curl \ 10 | wget \ 11 | git \ 12 | gnupg \ 13 | ca-certificates \ 14 | lsb-release \ 15 | sudo \ 16 | python3 \ 17 | python3-pip \ 18 | build-essential \ 19 | software-properties-common \ 20 | nginx \ 21 | openssl \ 22 | xvfb \ 23 | x11vnc \ 24 | fluxbox \ 25 | pcmanfm \ 26 | lxmenu-data \ 27 | tint2 \ 28 | feh \ 29 | novnc \ 30 | websockify 31 | 32 | COPY scripts/setup-desktop-configs.sh /tmp/setup-desktop-configs.sh 33 | RUN bash /tmp/setup-desktop-configs.sh \ 34 | && rm -rf /var/lib/apt/lists/* /tmp/setup-desktop-configs.sh 35 | 36 | RUN apt-get update && apt-get install -y --no-install-recommends \ 37 | locales tzdata \ 38 | procps psmisc lsof net-tools iproute2 iputils-ping dnsutils jq \ 39 | x11-xserver-utils xauth xclip xsel \ 40 | fontconfig fonts-dejavu fonts-noto-cjk fonts-noto-color-emoji fonts-liberation \ 41 | && sed -i 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen \ 42 | && locale-gen \ 43 | && update-locale LANG=en_US.UTF-8 \ 44 | && rm -rf /var/lib/apt/lists/* 45 | 46 | RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \ 47 | && apt-get install -y nodejs \ 48 | && npm install -g npm@latest 49 | 50 | RUN set -euo pipefail; \ 51 | if id -u user >/dev/null 2>&1; then \ 52 | echo "User 'user' already exists, skipping creation"; \ 53 | else \ 54 | useradd -m -s /bin/bash -u 1000 user; \ 55 | fi; \ 56 | if ! grep -q '^user .*NOPASSWD:ALL' /etc/sudoers; then \ 57 | echo 'user ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers; \ 58 | fi 59 | 60 | USER user 61 | RUN set -euo pipefail; \ 62 | echo "Installing uv (primary method)"; \ 63 | if ! curl -fsSL https://astral.sh/uv/install.sh | sh -s --; then \ 64 | echo "Primary uv installation failed; retrying (no flags)..."; \ 65 | sleep 2; \ 66 | curl -fsSL https://astral.sh/uv/install.sh | sh -s -- || { echo 'uv install failed'; exit 1; }; \ 67 | fi; \ 68 | test -x "$HOME/.local/bin/uv" || { echo 'uv binary missing after install'; exit 1; } 69 | USER root 70 | RUN if [ -f /home/user/.local/bin/uv ]; then install -m 0755 /home/user/.local/bin/uv /usr/local/bin/uv; fi \ 71 | && if [ -f /home/user/.local/bin/uvx ]; then install -m 0755 /home/user/.local/bin/uvx /usr/local/bin/uvx; else ln -sf /usr/local/bin/uv /usr/local/bin/uvx; fi 72 | 73 | RUN wget --progress=dot:giga -O /tmp/google-chrome.deb https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb \ 74 | && apt-get update \ 75 | && apt-get install -y /tmp/google-chrome.deb \ 76 | && rm -f /tmp/google-chrome.deb \ 77 | && rm -rf /var/lib/apt/lists/* 78 | 79 | RUN apt-get update && apt-get install -y \ 80 | libnss3 \ 81 | libxss1 \ 82 | libasound2 \ 83 | libxtst6 \ 84 | xdg-utils \ 85 | xdotool \ 86 | wmctrl \ 87 | libgbm1 \ 88 | libxshmfence1 \ 89 | && rm -rf /var/lib/apt/lists/* 90 | 91 | RUN apt-get update && apt-get install -y --no-install-recommends \ 92 | dbus-x11 \ 93 | libatk-bridge2.0-0 libgtk-3-0 libx11-xcb1 libxcb-dri3-0 libxcomposite1 \ 94 | libxrandr2 libxdamage1 libxkbcommon0 libcups2 libcairo2 \ 95 | libpango-1.0-0 libpangocairo-1.0-0 libatspi2.0-0 \ 96 | mesa-utils libgl1 libgl1-mesa-dri libgles2 libegl1 libglx-mesa0 \ 97 | ffmpeg \ 98 | gstreamer1.0-tools gstreamer1.0-plugins-base gstreamer1.0-plugins-good \ 99 | gstreamer1.0-plugins-bad gstreamer1.0-plugins-ugly gstreamer1.0-libav \ 100 | && rm -rf /var/lib/apt/lists/* 101 | 102 | 103 | USER user 104 | WORKDIR /home/user 105 | 106 | RUN git clone 
https://github.com/EvalsOne/MCP-connect.git /home/user/mcp-connect && \
107 |     cd /home/user/mcp-connect && \
108 |     npm install && \
109 |     npm run build
110 | 
111 | 
112 | USER root
113 | RUN npm install -g chrome-devtools-mcp@latest
114 | USER user
115 | 
116 | # RUN mkdir -p /home/user/.config/mcp
117 | # COPY mcp-servers.json /home/user/.config/mcp/servers.json
118 | # RUN chown user:user /home/user/.config/mcp/servers.json
119 | 
120 | COPY scripts/chrome-devtools-wrapper.sh /home/user/chrome-devtools-wrapper.sh
121 | RUN chmod +x /home/user/chrome-devtools-wrapper.sh && chown user:user /home/user/chrome-devtools-wrapper.sh
122 | 
123 | USER root
124 | 
125 | COPY nginx.conf /etc/nginx/sites-available/default
126 | 
127 | COPY startup.sh /home/user/startup.sh
128 | RUN chmod +x /home/user/startup.sh && chown user:user /home/user/startup.sh
129 | 
130 | RUN mkdir -p /home/user/app && \
131 |     chown -R user:user /home/user/app
132 | 
133 | ENV HOST=127.0.0.1
134 | 
135 | 
136 | USER user
137 | WORKDIR /home/user
138 | 
139 | CMD /bin/sh -c 'echo MCP template ready'
140 | 
-------------------------------------------------------------------------------- /src/config/config.ts: --------------------------------------------------------------------------------
1 | import dotenv from 'dotenv';
2 | import path from 'path';
3 | import fs from 'fs';
4 | import yaml from 'js-yaml';
5 | import { fileURLToPath } from 'url';
6 | 
7 | const __filename = fileURLToPath(import.meta.url);
8 | const __dirname = path.dirname(__filename);
9 | 
10 | // Load environment variables
11 | dotenv.config();
12 | 
13 | export interface StreamableServerConfig {
14 |   command: string;
15 |   args?: string[];
16 |   env?: Record<string, string>;
17 |   description?: string;
18 |   timeout?: number;
19 |   retries?: number;
20 | }
21 | 
22 | export interface Config {
23 |   server: {
24 |     port: number;
25 |   };
26 |   security: {
27 |     authToken: string;
28 |     allowedOrigins: string[];
29 |   };
30 |   logging: {
31 |     level: string;
32 |   };
33 |   streamable: {
34 |     sessionTtlMs: number;
35 |     servers: Record<string, StreamableServerConfig>;
36 |   };
37 | }
38 | 
39 | function validateConfig(config: Config): void {
40 |   if (!config.server.port) {
41 |     throw new Error('PORT is required');
42 |   }
43 | 
44 |   if (Number.isNaN(config.streamable.sessionTtlMs) || config.streamable.sessionTtlMs <= 0) {
45 |     throw new Error('STREAM_SESSION_TTL_MS must be a positive integer');
46 |   }
47 | }
48 | 
49 | /**
50 |  * Resolve environment variable references in a string
51 |  * Example: "postgresql://${DB_USER}:${DB_PASS}@localhost"
52 |  */
53 | function resolveEnvVars(value: string): string {
54 |   return value.replace(/\$\{([^}]+)\}/g, (_, envVar) => {
55 |     return process.env[envVar] || '';
56 |   });
57 | }
58 | 
59 | /**
60 |  * Recursively resolve environment variables in an object
61 |  */
62 | function resolveEnvVarsInObject(obj: any): any {
63 |   if (typeof obj === 'string') {
64 |     return resolveEnvVars(obj);
65 |   }
66 |   if (Array.isArray(obj)) {
67 |     return obj.map(resolveEnvVarsInObject);
68 |   }
69 |   if (obj && typeof obj === 'object') {
70 |     const result: any = {};
71 |     for (const [key, value] of Object.entries(obj)) {
72 |       result[key] = resolveEnvVarsInObject(value);
73 |     }
74 |     return result;
75 |   }
76 |   return obj;
77 | }
78 | 
79 | function parseServers(): Record<string, StreamableServerConfig> {
80 |   // Priority 1: Load from JSON file
81 |   const configPaths = [
82 |     path.resolve(process.cwd(), 'mcp-servers.json'),
83 |   ];
84 | 
85 |   for (const configPath of configPaths) {
86 |     if (fs.existsSync(configPath)) {
87 |       try {
88 |         const content = fs.readFileSync(configPath, 'utf8');
89 |         let parsed: any;
90 | 
91 |         parsed = JSON.parse(content);
92 | 
93 |         // Support both "mcpServers" (MCP standard) and "servers" (legacy) keys
94 |         const servers = parsed.mcpServers || parsed.servers || parsed;
95 | 
96 |         // Validate and resolve env vars
97 |         const resolvedServers: Record<string, StreamableServerConfig> = {};
98 |         for (const [key, value] of Object.entries(servers)) {
99 |           if (!value || typeof value !== 'object') {
100 |             throw new Error(`Invalid server definition for key "${key}"`);
101 |           }
102 |           const config = value as any;
103 |           if (!config.command) {
104 |             throw new Error(`Missing command for server "${key}"`);
105 |           }
106 | 
107 |           // Resolve environment variable references
108 |           resolvedServers[key] = resolveEnvVarsInObject(config);
109 |         }
110 | 
111 |         console.log(`✓ Loaded MCP servers from ${configPath}`);
112 |         return resolvedServers;
113 |       } catch (error) {
114 |         throw new Error(`Failed to parse ${configPath}: ${String(error)}`);
115 |       }
116 |     }
117 |   }
118 | 
119 |   // Priority 2: Fallback to MCP_SERVERS environment variable (legacy)
120 |   const raw = process.env.MCP_SERVERS;
121 |   if (!raw) {
122 |     console.log('⚠ No MCP server configuration found. Create mcp-servers.json or set MCP_SERVERS env var.');
123 |     return {};
124 |   }
125 | 
126 |   try {
127 |     const parsed = JSON.parse(raw) as Record<string, StreamableServerConfig>;
128 |     Object.entries(parsed).forEach(([key, value]) => {
129 |       if (!value || typeof value !== 'object') {
130 |         throw new Error(`Invalid server definition for key "${key}"`);
131 |       }
132 |       if (!value.command) {
133 |         throw new Error(`Missing command for server "${key}"`);
134 |       }
135 |     });
136 |     console.log('✓ Loaded MCP servers from MCP_SERVERS environment variable');
137 |     return resolveEnvVarsInObject(parsed);
138 |   } catch (error) {
139 |     throw new Error(`Failed to parse MCP_SERVERS: ${String(error)}`);
140 |   }
141 | }
142 | 
143 | function parseAllowedOrigins(): string[] {
144 |   const raw = process.env.ALLOWED_ORIGINS;
145 |   if (!raw) {
146 |     return [];
147 |   }
148 | 
149 |   return raw
150 |     .split(',')
151 |     .map((origin) => origin.trim())
152 |     .filter((origin) => origin.length > 0);
153 | }
154 | 
155 | export function loadConfig(): Config {
156 |   const config: Config = {
157 |     server: {
158 |       port: parseInt(process.env.PORT || '3000', 10),
159 |     },
160 |     security: {
161 |       authToken: process.env.AUTH_TOKEN || process.env.ACCESS_TOKEN || '',
162 |       allowedOrigins: parseAllowedOrigins(),
163 |     },
164 |     logging: {
165 |       level: (process.env.LOG_LEVEL || 'info').toLowerCase(),
166 |     },
167 |     streamable: {
168 |       sessionTtlMs: parseInt(process.env.STREAM_SESSION_TTL_MS || `${5 * 60 * 1000}`, 10),
169 |       servers: parseServers(),
170 |     },
171 |   };
172 | 
173 |   validateConfig(config);
174 |   return config;
175 | }
176 | 
-------------------------------------------------------------------------------- /src/client/mcp-client-manager.ts: --------------------------------------------------------------------------------
1 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
2 | import { getDefaultEnvironment, StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
3 | import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js';
4 | import { WebSocketClientTransport } from '@modelcontextprotocol/sdk/client/websocket.js';
5 | import { Transport } from '@modelcontextprotocol/sdk/shared/transport.js';
6 | import {
7 |   CallToolResultSchema,
8 |   ClientCapabilities,
9 |   CompleteResultSchema,
10 |   EmptyResultSchema,
11 |   GetPromptResultSchema,
12 |   Implementation,
13 |   ListPromptsResultSchema,
14 |   ListResourcesResultSchema,
15 |   ListResourceTemplatesResultSchema,
16 |   ListToolsResultSchema,
17 |   ReadResourceResultSchema,
18 |   LATEST_PROTOCOL_VERSION,
19 |   CompatibilityCallToolResultSchema
20 | } from '@modelcontextprotocol/sdk/types.js';
21 | import { Logger } from '../utils/logger.js';
22 | 
23 | export class MCPClientManager {
24 |   private clients: Map<string, Client> = new Map();
25 |   private transports: Map<string, Transport> = new Map();
26 |   private readonly logger: Logger;
27 |   private readonly clientInfo: Implementation = {
28 |     name: "mcp-bridge",
29 |     version: "1.0.0"
30 |   };
31 |   private readonly capabilities: ClientCapabilities = {
32 |     // Surface legacy capabilities via the experimental shim to satisfy the v1.1 MCP schema.
33 |     experimental: {
34 |       legacyCapabilities: {
35 |         prompts: true,
36 |         tools: true,
37 |         resources: {
38 |           subscribe: true
39 |         },
40 |         logging: true
41 |       }
42 |     }
43 |   };
44 | 
45 |   constructor(logger: Logger) {
46 |     this.logger = logger;
47 |   }
48 | 
49 |   public async createClient(serverPath: string, args?: string[], env?: Record<string, string>): Promise<string> {
50 |     const clientId = `client_${Date.now()}`;
51 |     this.logger.info(`Creating client ${clientId} for ${serverPath}`);
52 |     try {
53 |       let transport;
54 | 
55 |       // Check if serverPath is a URL
56 |       let url: URL | undefined = undefined;
57 |       try {
58 |         url = new URL(serverPath);
59 |       } catch {
60 |         // Not a URL, treat as command path
61 |       }
62 | 
63 |       if (url?.protocol === "http:" || url?.protocol === "https:") {
64 |         transport = new SSEClientTransport(url);
65 |       } else if (url?.protocol === "ws:" || url?.protocol === "wss:") {
66 |         transport = new WebSocketClientTransport(url);
67 |       } else {
68 |         transport = new StdioClientTransport({
69 |           command: serverPath,
70 |           args: args || [],
71 |           env: {
72 |             ...getDefaultEnvironment(),
73 |             ...(env || {})
74 |           }
75 |         });
76 |       }
77 | 
78 |       const client = new Client(this.clientInfo, {
79 |         capabilities: this.capabilities
80 |       });
81 | 
82 |       await client.connect(transport);
83 | 
84 |       this.clients.set(clientId, client);
85 |       this.transports.set(clientId, transport);
86 | 
87 |       return clientId;
88 |     } catch (error) {
89 |       this.logger.error(`Failed to create client for ${serverPath}:`, error);
90 |       throw error;
91 |     }
92 |   }
93 | 
94 |   public async executeRequest(clientId: string, method: string, params: any): Promise<any> {
95 |     const client = this.clients.get(clientId);
96 |     if (!client) {
97 |       throw new Error(`Client ${clientId} not found`);
98 |     }
99 | 
100 |     try {
101 |       this.logger.info(`Executing method: ${method}`);
102 |       switch (method) {
103 |         case 'completion/complete':
104 |           return await client.complete(params);
105 | 
106 |         case 'prompts/get':
107 |           return await client.getPrompt(params);
108 | 
109 |         case 'prompts/list':
110 |           return await client.listPrompts(params);
111 | 
112 |         case 'resources/list':
113 |           return await client.listResources(params);
114 | 
115 |         case 'resources/templates/list':
116 |           return await client.listResourceTemplates(params);
117 | 
118 |         case 'resources/read':
119 |           return await client.readResource(params);
120 | 
121 |         case 'resources/subscribe':
122 |           return await client.subscribeResource(params);
123 | 
124 |         case 'resources/unsubscribe':
125 |           return await client.unsubscribeResource(params);
126 | 
127 |         case 'tools/call':
128 |           this.logger.info(`Calling tool: ${JSON.stringify(params)}`);
129 |           return await client.callTool(
130 |             {
131 |               name: params.name,
132 |               arguments: params.arguments
133 |             },
134 |             params.resultSchema === 'compatibility' ? CompatibilityCallToolResultSchema : CallToolResultSchema
'compatibility' ? CompatibilityCallToolResultSchema : CallToolResultSchema 135 | ); 136 | 137 | case 'tools/list': 138 | return await client.listTools(params); 139 | 140 | case 'ping': 141 | return await client.ping(); 142 | 143 | default: 144 | throw new Error(`Unsupported method: ${JSON.stringify(method)}`); 145 | } 146 | } catch (error) { 147 | this.logger.error(`Request execution error:`, error); 148 | throw error; 149 | } 150 | } 151 | 152 | public async closeClient(clientId: string): Promise { 153 | const transport = this.transports.get(clientId); 154 | const client = this.clients.get(clientId); 155 | if (client && transport) { 156 | try { 157 | await client.close(); 158 | await transport.close(); 159 | } finally { 160 | this.transports.delete(clientId); 161 | this.clients.delete(clientId); 162 | } 163 | } 164 | } 165 | 166 | public async stop(): Promise { 167 | try { 168 | await this.cleanup(); 169 | } catch (error) { 170 | this.logger.error('Error during MCPClientManager stop:', error); 171 | throw error; 172 | } 173 | } 174 | 175 | private async cleanup(): Promise { 176 | for (const [clientId, _] of this.clients) { 177 | try { 178 | await this.closeClient(clientId); 179 | } catch (error) { 180 | this.logger.error(`Error closing client ${clientId}:`, error); 181 | } 182 | } 183 | this.clients.clear(); 184 | this.transports.clear(); 185 | } 186 | } -------------------------------------------------------------------------------- /deploy/e2b/view_sandbox_logs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Utility script to inspect log files inside an existing E2B sandbox.""" 3 | 4 | from __future__ import annotations 5 | 6 | import argparse 7 | import inspect 8 | import os 9 | import shlex 10 | import sys 11 | from typing import Optional 12 | 13 | SandboxType = None 14 | 15 | try: 16 | from e2b_code_interpreter import Sandbox as _CodeInterpreterSandbox 17 | 18 | SandboxType = _CodeInterpreterSandbox 19 | except Exception: # pragma: no cover - optional dependency 20 | _CodeInterpreterSandbox = None 21 | 22 | if SandboxType is None: 23 | try: 24 | from e2b import Sandbox as _CoreSandbox 25 | 26 | SandboxType = _CoreSandbox 27 | except Exception: # pragma: no cover - fail later with explicit message 28 | _CoreSandbox = None 29 | else: 30 | try: 31 | # code interpreter sandbox on older SDKs misses resume helpers; prefer core sandbox instead 32 | from e2b import Sandbox as _CoreSandbox # type: ignore 33 | 34 | if hasattr(_CoreSandbox, "_cls_connect") and not hasattr(SandboxType, "_cls_connect"): 35 | SandboxType = _CoreSandbox 36 | except Exception: 37 | _CoreSandbox = None 38 | 39 | if SandboxType is None: # pragma: no cover - defensive guard 40 | raise ImportError("Neither e2b_code_interpreter nor e2b Sandbox classes are available") 41 | 42 | try: # Align with e2b_sandbox_manager import path 43 | from e2b.sandbox.commands.command_handle import CommandExitException 44 | except Exception: # pragma: no cover - fallback when package layout changes 45 | try: 46 | from e2b_code_interpreter import CommandExitException # type: ignore 47 | except Exception: 48 | class CommandExitException(Exception): 49 | """Fallback placeholder not expected to be raised.""" 50 | 51 | pass 52 | 53 | 54 | DEFAULT_LOG_DIR = "/home/user" 55 | 56 | 57 | def build_parser() -> argparse.ArgumentParser: 58 | parser = argparse.ArgumentParser( 59 | description= 60 | "Print log file contents from an existing sandbox using the E2B API. 
" 61 | "Requires E2B_API_KEY to be configured.", 62 | ) 63 | parser.add_argument( 64 | "sandbox_id", 65 | help="ID of the sandbox to inspect (e.g. sandbox_20240101_120000).", 66 | ) 67 | parser.add_argument( 68 | "--path", 69 | default=f"{DEFAULT_LOG_DIR}/novnc.log", 70 | help="Absolute path of the log file inside the sandbox (default: /home/user/novnc.log).", 71 | ) 72 | parser.add_argument( 73 | "--lines", 74 | type=int, 75 | default=200, 76 | help="Number of trailing lines to display (default: 200).", 77 | ) 78 | parser.add_argument( 79 | "--list", 80 | action="store_true", 81 | help="List available *.log files in the log directory instead of printing a specific file.", 82 | ) 83 | parser.add_argument( 84 | "--log-dir", 85 | default=DEFAULT_LOG_DIR, 86 | help="Directory inside the sandbox to search when using --list (default: /home/user).", 87 | ) 88 | parser.add_argument( 89 | "--insecure", 90 | action="store_true", 91 | help="Connect over HTTP instead of HTTPS when resuming the sandbox.", 92 | ) 93 | parser.add_argument( 94 | "--exec", 95 | dest="exec_cmd", 96 | help="Run an arbitrary shell command inside the sandbox instead of reading logs.", 97 | ) 98 | return parser 99 | 100 | 101 | def ensure_api_key() -> None: 102 | if not os.environ.get("E2B_API_KEY"): 103 | sys.stderr.write("E2B_API_KEY environment variable is required to connect to the sandbox.\n") 104 | sys.exit(1) 105 | 106 | 107 | def connect_sandbox(sandbox_id: str, secure: bool) -> SandboxType: # type: ignore[valid-type] 108 | """Resume an existing sandbox regardless of SDK version.""" 109 | 110 | Sandbox = SandboxType # local alias for readability 111 | 112 | connect_kwargs = {} 113 | try: 114 | import inspect 115 | 116 | params = inspect.signature(Sandbox._cls_connect).parameters # type: ignore[attr-defined] 117 | if "secure" in params: 118 | connect_kwargs["secure"] = secure 119 | except (AttributeError, ValueError): 120 | pass 121 | 122 | if hasattr(Sandbox, "_cls_connect"): 123 | sandbox = Sandbox._cls_connect(sandbox_id=sandbox_id, **connect_kwargs) # type: ignore[attr-defined] 124 | sandbox.connect() 125 | return sandbox 126 | 127 | if hasattr(Sandbox, "connect"): 128 | try: 129 | params = inspect.signature(Sandbox.connect).parameters # type: ignore[attr-defined] 130 | connect_kwargs = {k: v for k, v in connect_kwargs.items() if k in params} 131 | except (AttributeError, ValueError): 132 | connect_kwargs = {} 133 | 134 | resumed = Sandbox.connect(sandbox_id=sandbox_id, **connect_kwargs) # type: ignore[misc] 135 | if isinstance(resumed, Sandbox): 136 | return resumed 137 | raise RuntimeError("This version of e2b SDK does not support resuming by sandbox_id.") 138 | 139 | 140 | def run_command(sandbox: Sandbox, command: str) -> str: 141 | try: 142 | result = sandbox.commands.run( 143 | command, 144 | background=False, 145 | cwd="/home/user", 146 | ) 147 | except CommandExitException as exc: # pragma: no cover - defensive logging helper 148 | stderr = getattr(exc, "stderr", "") or str(exc) 149 | raise RuntimeError(f"Command failed inside sandbox: {stderr}") from exc 150 | 151 | if result.exit_code != 0: 152 | raise RuntimeError( 153 | f"Command exited with {result.exit_code}: {result.stderr or 'no stderr available'}", 154 | ) 155 | 156 | return result.stdout 157 | 158 | 159 | def list_logs(sandbox: Sandbox, log_dir: str) -> str: 160 | escaped_dir = shlex.quote(log_dir) 161 | cmd = f"bash -lc 'set -e; ls -1 {escaped_dir}/*.log 2>/dev/null'" 162 | output = run_command(sandbox, cmd) 163 | return output.strip() or "" 164 | 
165 | 166 | def tail_log(sandbox: Sandbox, path: str, lines: int) -> str: 167 | escaped_path = shlex.quote(path) 168 | cmd = ( 169 | "bash -lc 'set -e; " 170 | f"if [ ! -f {escaped_path} ]; then echo " 171 | f"\"Log file not found: {escaped_path}\" >&2; exit 1; fi; " 172 | f"tail -n {lines} {escaped_path}'" 173 | ) 174 | try: 175 | return run_command(sandbox, cmd) 176 | except RuntimeError as exc: 177 | message = str(exc) 178 | if "Log file not found" in message: 179 | hint = ( 180 | "Log file not found. Use --list to see available logs or " 181 | "specify the correct path with --path." 182 | ) 183 | raise RuntimeError(f"{message}\n{hint}") from exc 184 | raise 185 | 186 | 187 | def main(argv: Optional[list[str]] = None) -> int: 188 | parser = build_parser() 189 | args = parser.parse_args(argv) 190 | 191 | ensure_api_key() 192 | 193 | sandbox = connect_sandbox(args.sandbox_id, secure=not args.insecure) 194 | 195 | if args.exec_cmd: 196 | output = run_command(sandbox, f"bash -lc {shlex.quote(args.exec_cmd)}") 197 | elif args.list: 198 | output = list_logs(sandbox, args.log_dir) 199 | else: 200 | output = tail_log(sandbox, args.path, args.lines) 201 | 202 | sys.stdout.write(output) 203 | if not output.endswith("\n"): 204 | sys.stdout.write("\n") 205 | return 0 206 | 207 | 208 | if __name__ == "__main__": # pragma: no cover - CLI entry point 209 | raise SystemExit(main()) 210 | -------------------------------------------------------------------------------- /deploy/e2b/README.md: -------------------------------------------------------------------------------- 1 | # 🌐 MCP Bridge — E2B Sandbox Deployment 2 | 3 | Run MCP Bridge in an isolated E2B cloud sandbox environment. 4 | 5 | ## ⚡ Quick Start 6 | 7 | ### 1) Install dependencies 8 | 9 | ```bash 10 | # set up and activate a virtual environment 11 | cd deploy/e2b 12 | python -m venv venv 13 | source venv/bin/activate 14 | 15 | # install dependencies 16 | pip install -r requirements.txt 17 | ``` 18 | 19 | ### 2) Set API key 20 | 21 | Sign up and get an E2B API key: https://e2b.dev/dashboard 22 | 23 | ```bash 24 | export E2B_API_KEY=your-api-key-here 25 | ``` 26 | 27 | ### 3) Build sandbox templates 28 | 29 | ```bash 30 | cd deploy/e2b 31 | 32 | python build.py --mode dev --variant full 33 | python build.py --mode prod --variant minimal --skip-cache 34 | ``` 35 | 36 | Parameters 37 | 38 | - `--variant` 39 | - What: quick template selection (maps to built-in Dockerfiles) 40 | - Options: `full` (GUI + noVNC), `simple` (no X desktop, headless Chrome), `minimal` (no X/Chrome/noVNC) 41 | - Default: `full` 42 | - Example: `--variant simple` 43 | 44 | - `--dockerfile` 45 | - What: path to a Dockerfile to build the template image; overrides `--variant` 46 | - Example: `--dockerfile e2b.Dockerfile.minimal` 47 | 48 | - `--alias` 49 | - What: assign a human-friendly alias to the built template.
Defaults based on `--variant` and environment: 50 | 51 | Default for development: 52 | - `full` → `mcp-dev-gui` 53 | - `simple` → `mcp-dev-simple` 54 | - `minimal` → `mcp-dev-minimal` 55 | 56 | Default for production: 57 | - `full` → `mcp-prod-gui` 58 | - `simple` → `mcp-prod-simple` 59 | - `minimal` → `mcp-prod-minimal` 60 | 61 | 62 | - You can also use custom alias, example: `--alias custom-alias` 63 | 64 | - `--cpu` 65 | - What: number of vCPUs allocated during build (int) 66 | - Default: `2` 67 | - Example: `--cpu 4` 68 | 69 | - `--memory-mb` 70 | - What: memory size in MB (int) 71 | - Default: `2048` 72 | - Example: `--memory-mb 4096` 73 | 74 | - `--skip-cache` 75 | - What: boolean flag; skip Docker cache to force rebuild of all layers. Dev defaults to cache, prod defaults to no cache. 76 | - Example: `--skip-cache` 77 | 78 | Examples 79 | 80 | ```bash 81 | python build.py --mode dev --variant simple --cpu 2 --memory-mb 2048 82 | python build.py --mode dev --variant minimal --cpu 1 --memory-mb 1024 --skip-cache 83 | python build.py --mode dev --alias mcp-dev-gui --cpu 4 --memory-mb 4096 84 | 85 | python build.py --mode prod --variant full --quiet 86 | python build.py --mode prod --variant simple --verbose 87 | ``` 88 | 89 | --- 90 | 91 | ## 💻 Use the Sandbox 92 | 93 | Run the prebuilt quickstart script: 94 | 95 | ```bash 96 | # Full mode (with desktop GUI and Chrome) 97 | python sandbox_deploy.py --template-id --auth-token 98 | ``` 99 | 100 | This script will: 101 | 1. Create an E2B sandbox 102 | 2. Start the MCP Bridge service 103 | 3. Auto-test health check and tool calls 104 | 4. Print sandbox info 105 | 5. Write a unified startup log to `/home/user/startup.log` (both GUI and headless templates) 106 | 107 | Parameters: 108 | 109 | These map to `deploy/e2b/sandbox_deploy.py` CLI options (the script falls back to env vars and exits if required values are missing): 110 | 111 | - `--template-id` 112 | - What: template ID or alias. Pass via CLI or set `E2B_TEMPLATE_ID`. 113 | - Required: yes (CLI or env). The script exits if missing. 114 | - Example: `--template-id mcp-xyz123` 115 | 116 | - `--sandbox-id` 117 | - What: logical sandbox name (for local management/display) 118 | - Default: `mcp_test_sandbox` 119 | - Example: `--sandbox-id demo1` 120 | 121 | - `--no-internet` 122 | - What: boolean flag; disable internet access in the sandbox (allow_internet_access=False) 123 | - Default: internet enabled unless specified 124 | 125 | - `--no-wait` 126 | - What: boolean flag; do not wait for `/health` readiness after creating the sandbox 127 | - Default: wait for readiness (checks `/health`) 128 | 129 | - `--timeout` 130 | - What: sandbox lifetime in seconds. Also settable via `E2B_SANDBOX_TIMEOUT`. 131 | - Default: `3600` (1 hour) 132 | - Example: `--timeout 7200` 133 | 134 | - `--headless` 135 | - What: lightweight headless mode; skip Xvfb/fluxbox/Chrome/VNC/noVNC, keep Nginx + MCP-connect 136 | - Default: off (GUI mode) 137 | - Note: templates or aliases containing `simple` or `minimal` are treated as headless by default. 138 | 139 | - `--auth-token` 140 | - What: sets the bridge API Bearer token. When omitted, the deploy flow prefers `E2B_MCP_AUTH_TOKEN` from the environment, then falls back to `AUTH_TOKEN` for compatibility. Requests must include `Authorization: Bearer ` when set. 141 | - Default: unset (server warns and allows unauthenticated access). For production, set a strong token. 
142 | 143 | - `--no-remote-fetch` 144 | - What: disable fetching latest `startup.sh`, `chrome-devtools-wrapper.sh`, and `mcp-servers.json` from a remote base inside the sandbox 145 | - Default: off (remote fetch enabled by config default) 146 | 147 | - `--remote-base` 148 | - What: remote base URL used when fetching assets (e.g. `https://raw.githubusercontent.com////deploy/e2b`) 149 | - Default: `https://raw.githubusercontent.com/EvalsOne/MCP-bridge/main/deploy/e2b` 150 | 151 | - `--probe-http` 152 | - What: also probe HTTP (port 80) `/health` alongside HTTPS during readiness and keepalive. 153 | - Default: off. By default, the manager only probes HTTPS to reduce noise when HTTP is not routed. 154 | 155 | Important environment variables 156 | 157 | - `E2B_API_KEY`: required; the script checks this and exits if missing. Example: 158 | 159 | ```bash 160 | export E2B_API_KEY='your-api-key-here' 161 | ``` 162 | 163 | - `E2B_MCP_AUTH_TOKEN`: preferred environment variable for securing the MCP bridge in E2B deployments. Example: 164 | 165 | ```bash 166 | export E2B_MCP_AUTH_TOKEN='your-secure-token' 167 | ``` 168 | 169 | - `AUTH_TOKEN`: legacy/generic alternative. Still accepted as a fallback if `E2B_MCP_AUTH_TOKEN` is not set. 170 | 171 | - `E2B_TEMPLATE_ID`: alternative to `--template-id` (CLI takes precedence) 172 | - `E2B_SANDBOX_TIMEOUT`: default timeout in seconds, same as `--timeout` 173 | 174 | Note: Remote asset fetch control (`fetch_remote`, `remote_base`) is now configured via `SandboxConfig` or the CLI flags above, not environment variables. 175 | 176 | Examples 177 | 178 | ```bash 179 | # Specify template and wait for readiness 180 | python sandbox_deploy.py --template-id mcp-xyz123 --sandbox-id demo1 181 | 182 | # With explicit auth token 183 | python sandbox_deploy.py --template-id mcp-xyz123 --auth-token 's3cr3t-token' 184 | 185 | # Read template ID from env, disable internet, do not wait for readiness 186 | export E2B_TEMPLATE_ID=mcp-xyz123 187 | python sandbox_deploy.py --no-internet --no-wait 188 | 189 | # Secure the bridge via environment variable (no CLI flag) 190 | export E2B_MCP_AUTH_TOKEN='s3cr3t-token' 191 | python sandbox_deploy.py --template-id mcp-xyz123 --sandbox-id demo1 192 | ``` 193 | 194 | --- 195 | 196 | ## 📁 Template Layout 197 | 198 | | File | Description | 199 | |------|-------------| 200 | | `template.py` | Sandbox template configuration | 201 | | `build_dev.py` | Dev template build script | 202 | | `build_prod.py` | Prod template build script | 203 | | `e2b.Dockerfile` | Full sandbox image definition (with pre-built Desktop GUI and Chrome browser) | 204 | | `e2b.Dockerfile.simple` | Simple image (with Chrome browser) | 205 | | `e2b.Dockerfile.minimal` | Minimal image (core only) | 206 | | `startup.sh` | Sandbox startup script | 207 | | `nginx.conf` | Nginx reverse proxy config | 208 | | `view_sandbox_logs.py` | Exec into sandbox for debug | 209 | | `sandbox_deploy.py` | Sandbox management tool | 210 | 211 | --- 212 | 213 | ## 🔍 Manage & Debug 214 | 215 | ### Manage sandbox instances 216 | 217 | New `sandbox_deploy.py` supports: 218 | 219 | ```bash 220 | # Create a sandbox (template ID or alias) 221 | python sandbox_deploy.py --template-id --sandbox-id demo1 222 | 223 | # Disable waiting for health / disable internet 224 | python sandbox_deploy.py --template-id --no-wait --no-internet 225 | ``` 226 | 227 | ### Exec into sandbox for debug 228 | 229 | ```bash 230 | python view_sandbox_logs.py --exec "" 231 | ``` 232 | 233 | --- 234 | 235 | ## 📖 More Resources 
236 | 237 | - **E2B docs**: https://e2b.dev/docs 238 | - **MCP protocol**: https://modelcontextprotocol.io 239 | 240 | --- 241 | 242 | 243 | **Enjoy running MCP Connect in the cloud!** 🎉 244 | -------------------------------------------------------------------------------- /deploy/e2b/nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | # Listen on 443 without SSL to avoid double TLS termination in E2B 3 | listen 443 default_server; 4 | listen [::]:443 default_server; 5 | 6 | server_name _; 7 | 8 | # Lightweight websocket readiness probe (no upstream hit) 9 | location = /ws-health { 10 | add_header Content-Type text/plain; 11 | return 200 'ok'; 12 | } 13 | 14 | location / { 15 | proxy_pass http://127.0.0.1:3000; 16 | proxy_http_version 1.1; 17 | proxy_set_header Upgrade $http_upgrade; 18 | proxy_set_header Connection "upgrade"; 19 | proxy_set_header Host $host; 20 | proxy_cache_bypass $http_upgrade; 21 | proxy_set_header X-Real-IP $remote_addr; 22 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 23 | proxy_set_header X-Forwarded-Proto $scheme; 24 | proxy_connect_timeout 7d; 25 | proxy_send_timeout 7d; 26 | proxy_read_timeout 7d; 27 | proxy_buffering off; 28 | } 29 | 30 | location /novnc/ { 31 | access_log off; 32 | proxy_pass http://127.0.0.1:6080/; 33 | proxy_http_version 1.1; 34 | proxy_set_header Upgrade $http_upgrade; 35 | proxy_set_header Connection "upgrade"; 36 | proxy_set_header Host $host; 37 | proxy_set_header Origin $scheme://$host; 38 | proxy_cache_bypass $http_upgrade; 39 | proxy_set_header X-Real-IP $remote_addr; 40 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 41 | proxy_set_header X-Forwarded-Proto $scheme; 42 | proxy_read_timeout 7d; 43 | proxy_send_timeout 7d; 44 | proxy_buffering off; 45 | } 46 | 47 | # Support relative websocket path used by noVNC when served under /novnc/ 48 | location /novnc/websockify { 49 | access_log off; 50 | proxy_pass http://127.0.0.1:6080/websockify; 51 | proxy_http_version 1.1; 52 | proxy_set_header Upgrade $http_upgrade; 53 | proxy_set_header Connection "upgrade"; 54 | proxy_set_header Host $host; 55 | proxy_set_header Origin $scheme://$host; 56 | proxy_cache_bypass $http_upgrade; 57 | proxy_set_header X-Real-IP $remote_addr; 58 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 59 | proxy_set_header X-Forwarded-Proto $scheme; 60 | proxy_read_timeout 7d; 61 | proxy_send_timeout 7d; 62 | proxy_buffering off; 63 | } 64 | 65 | location = /novnc/vnc.html { 66 | access_log off; 67 | if ($args = "") { 68 | return 302 /novnc/vnc.html?lang=en; 69 | } 70 | if ($args !~ "(^|&)lang=") { 71 | return 302 /novnc/vnc.html?$args&lang=en; 72 | } 73 | proxy_pass http://127.0.0.1:6080/vnc.html; 74 | proxy_http_version 1.1; 75 | proxy_set_header Upgrade $http_upgrade; 76 | proxy_set_header Connection "upgrade"; 77 | proxy_set_header Host $host; 78 | proxy_set_header Origin $scheme://$host; 79 | proxy_cache_bypass $http_upgrade; 80 | proxy_set_header X-Real-IP $remote_addr; 81 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 82 | proxy_set_header X-Forwarded-Proto $scheme; 83 | proxy_read_timeout 7d; 84 | proxy_send_timeout 7d; 85 | proxy_buffering off; 86 | } 87 | 88 | # WebSocket endpoint (noVNC/websockify). A direct HTTP GET (no Upgrade header) will return guidance instead of 404. 89 | location /websockify { 90 | access_log off; 91 | if ($http_upgrade = "") { 92 | add_header Content-Type text/plain; 93 | return 400 'This is a WebSocket endpoint. 
Open /novnc/vnc.html or connect with a WebSocket client (e.g. noVNC, wscat).\n'; 94 | } 95 | proxy_pass http://127.0.0.1:6080/websockify; 96 | proxy_http_version 1.1; 97 | proxy_set_header Upgrade $http_upgrade; 98 | proxy_set_header Connection "upgrade"; 99 | proxy_set_header Host $host; 100 | proxy_set_header Origin $scheme://$host; 101 | proxy_cache_bypass $http_upgrade; 102 | proxy_set_header X-Real-IP $remote_addr; 103 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 104 | proxy_set_header X-Forwarded-Proto $scheme; 105 | proxy_read_timeout 7d; 106 | proxy_send_timeout 7d; 107 | proxy_buffering off; 108 | } 109 | } 110 | 111 | server { 112 | listen 80 default_server; 113 | listen [::]:80 default_server; 114 | 115 | server_name _; 116 | 117 | # Lightweight websocket readiness probe (no upstream hit) 118 | location = /ws-health { 119 | add_header Content-Type text/plain; 120 | return 200 'ok'; 121 | } 122 | 123 | location / { 124 | proxy_pass http://127.0.0.1:3000; 125 | proxy_http_version 1.1; 126 | proxy_set_header Upgrade $http_upgrade; 127 | proxy_set_header Connection "upgrade"; 128 | proxy_set_header Host $host; 129 | proxy_cache_bypass $http_upgrade; 130 | proxy_set_header X-Real-IP $remote_addr; 131 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 132 | proxy_set_header X-Forwarded-Proto $scheme; 133 | proxy_connect_timeout 7d; 134 | proxy_send_timeout 7d; 135 | proxy_read_timeout 7d; 136 | proxy_buffering off; 137 | } 138 | 139 | location /novnc/ { 140 | access_log off; 141 | proxy_pass http://127.0.0.1:6080/; 142 | proxy_http_version 1.1; 143 | proxy_set_header Upgrade $http_upgrade; 144 | proxy_set_header Connection "upgrade"; 145 | proxy_set_header Host $host; 146 | proxy_set_header Origin $scheme://$host; 147 | proxy_cache_bypass $http_upgrade; 148 | proxy_set_header X-Real-IP $remote_addr; 149 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 150 | proxy_set_header X-Forwarded-Proto $scheme; 151 | proxy_read_timeout 7d; 152 | proxy_send_timeout 7d; 153 | proxy_buffering off; 154 | } 155 | 156 | # Support relative websocket path when vnc.html is served from /novnc/ 157 | location /novnc/websockify { 158 | access_log off; 159 | proxy_pass http://127.0.0.1:6080/websockify; 160 | proxy_http_version 1.1; 161 | proxy_set_header Upgrade $http_upgrade; 162 | proxy_set_header Connection "upgrade"; 163 | proxy_set_header Host $host; 164 | proxy_set_header Origin $scheme://$host; 165 | proxy_cache_bypass $http_upgrade; 166 | proxy_set_header X-Real-IP $remote_addr; 167 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 168 | proxy_set_header X-Forwarded-Proto $scheme; 169 | proxy_read_timeout 7d; 170 | proxy_send_timeout 7d; 171 | proxy_buffering off; 172 | } 173 | 174 | location = /novnc/vnc.html { 175 | access_log off; 176 | if ($args = "") { 177 | return 302 /novnc/vnc.html?lang=en; 178 | } 179 | if ($args !~ "(^|&)lang=") { 180 | return 302 /novnc/vnc.html?$args&lang=en; 181 | } 182 | proxy_pass http://127.0.0.1:6080/vnc.html; 183 | proxy_http_version 1.1; 184 | proxy_set_header Upgrade $http_upgrade; 185 | proxy_set_header Connection "upgrade"; 186 | proxy_set_header Host $host; 187 | proxy_set_header Origin $scheme://$host; 188 | proxy_cache_bypass $http_upgrade; 189 | proxy_set_header X-Real-IP $remote_addr; 190 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 191 | proxy_set_header X-Forwarded-Proto $scheme; 192 | proxy_read_timeout 7d; 193 | proxy_send_timeout 7d; 194 | proxy_buffering off; 195 | } 196 | 197 
| # WebSocket endpoint (noVNC/websockify). A direct HTTP GET (no Upgrade header) will return guidance instead of 404. 198 | location /websockify { 199 | access_log off; 200 | if ($http_upgrade = "") { 201 | add_header Content-Type text/plain; 202 | return 400 'This is a WebSocket endpoint. Open /novnc/vnc.html or connect with a WebSocket client (e.g. noVNC, wscat).\n'; 203 | } 204 | proxy_pass http://127.0.0.1:6080/websockify; 205 | proxy_http_version 1.1; 206 | proxy_set_header Upgrade $http_upgrade; 207 | proxy_set_header Connection "upgrade"; 208 | proxy_set_header Host $host; 209 | proxy_set_header Origin $scheme://$host; 210 | proxy_cache_bypass $http_upgrade; 211 | proxy_set_header X-Real-IP $remote_addr; 212 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 213 | proxy_set_header X-Forwarded-Proto $scheme; 214 | proxy_read_timeout 7d; 215 | proxy_send_timeout 7d; 216 | proxy_buffering off; 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP Connect 2 | 3 | ███╗ ███╗ ██████╗██████╗ ██████╗ ██████╗ ███╗ ██╗███╗ ██╗███████╗ ██████╗████████╗ 4 | ████╗ ████║██╔════╝██╔══██╗ ██╔════╝██╔═══██╗████╗ ██║████╗ ██║██╔════╝██╔════╝╚══██╔══╝ 5 | ██╔████╔██║██║ ██████╔╝ ██║ ██║ ██║██╔██╗ ██║██╔██╗ ██║█████╗ ██║ ██║ 6 | ██║╚██╔╝██║██║ ██╔═══╝ ██║ ██║ ██║██║╚██╗██║██║╚██╗██║██╔══╝ ██║ ██║ 7 | ██║ ╚═╝ ██║╚██████╗██║ ╚██████╗╚██████╔╝██║ ╚████║██║ ╚████║███████╗╚██████╗ ██║ 8 | ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═╝ 9 | 10 | 11 |
12 | 13 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 14 | [![Node.js Version](https://img.shields.io/badge/node-%3E%3D20.0.0-brightgreen)](https://nodejs.org/) 15 | [![TypeScript](https://img.shields.io/badge/TypeScript-5.4+-blue)](https://www.typescriptlang.org/) 16 | 17 | **Lightweight bridge that exposes local MCP servers as HTTP APIs** 18 | 19 |
20 | 21 | --- 22 | 23 | ## What Is MCP Connect? 24 | 25 | MCP Connect is an HTTP gateway that lets you call local MCP servers (that speak stdio) through Streamable HTTP or a classic request/response bridge. 26 | 27 | ### What's New 28 | 29 | - Added Streamable HTTP mode on top of the classic request/response bridge 30 | - New quick-deploy scripts and configs under `deploy/e2b` for launching in an E2B sandbox 31 | 32 | ## How It Works 33 | 34 | ``` 35 | +-----------------+ HTTP (JSON) +------------------+ stdio +------------------+ 36 | | | /bridge | | | | 37 | | Cloud AI tools | <------------------------> | Node.js Bridge | <------------> | MCP Server | 38 | | (Remote) | | (Local) | | (Local) | 39 | | | HTTP (SSE stream) | | | | 40 | | | /mcp/:serverId | | | | 41 | +-----------------+ Tunnels (optional) +------------------+ +------------------+ 42 | ``` 43 | 44 | **Key Features** 45 | 46 | | Feature | Description | 47 | |--------|-------------| 48 | | 🚀 Dual modes | Call local stdio MCP servers via Streamable HTTP or the classic HTTP bridge | 49 | | 🔄 Session management | Maintain conversational continuity with sessions | 50 | | 🔐 Security | Bearer token auth + CORS allowlist | 51 | | 🌐 Public access | Built-in Ngrok tunnel to expose endpoints externally | 52 | | ☁️ Cloud deploy | One-click deploy to E2B cloud sandbox | 53 | --- 54 | 55 | ## Quick Start 56 | 57 | ### Prerequisites 58 | 59 | - Node.js >= 22.0.0 (recommended) 60 | - npm or yarn 61 | 62 | ### 1) Install 63 | 64 | ```bash 65 | git clone https://github.com/EvalsOne/MCP-connect.git 66 | cd mcp-connect 67 | npm install 68 | ``` 69 | 70 | ### 2) Preparations 71 | 72 | **A. Set up initial environment variables** 73 | 74 | ```bash 75 | cp .env.example .env 76 | ``` 77 | 78 | **B. Configure MCP servers** 79 | 80 | For Streamable HTTP method, each MCP server needs to be configurated separately, edit file to add more configurations other than the existing sample servers. 81 | ```bash 82 | vim mcp-servers.json 83 | ``` 84 | 85 | ### 3) Run 86 | 87 | ```bash 88 | # Build and start 89 | npm run build 90 | npm start 91 | 92 | # Or use dev mode (hot reload) 93 | npm run dev 94 | 95 | # Enable Ngrok tunnel 96 | npm run start:tunnel 97 | ``` 98 | 99 | After you see the startup banner, visit http://localhost:3000/health to check server status. 100 | 101 | 102 | --- 103 | 104 | ## Usage 105 | 106 | ### Mode 1: Streamable HTTP bridge 107 | 108 | General-purpose and compatible with any MCP client that supports Streamable HTTP. 109 | 110 | In Streamable HTTP mode, each MCP server is assigned a unique route. Example: add the `fetch` MCP server in `mcp-servers.json`. 111 | 112 | ```json 113 | { 114 | "mcpServers": { 115 | "fetch": { 116 | "command": "uvx", 117 | "args": ["mcp-server-fetch"], 118 | "description": "HTTP/HTTPS content fetcher" 119 | } 120 | } 121 | } 122 | ``` 123 | 124 | Once started, access the `fetch` MCP server with your favorite MCP client (e.g. Claude Code, Cursor, Codex, GitHub Copilot...) 125 | 126 | ``` 127 | http://localhost:3000/mcp/fetch 128 | ``` 129 | 130 | Note: You must configure `mcp-servers.json` before starting the service, otherwise the server won't be available. 131 | 132 | --- 133 | 134 | ### Mode 2: Classic request/response bridge 135 | 136 | Non-standard invocation where you implement methods like `tools/list`, `tools/call`, etc. 
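Besides the `curl` examples below, the same request can be issued programmatically. A minimal Node/TypeScript sketch of the `tools/list` call (assuming Node 20+ with the built-in `fetch`; the token and port are placeholders that should match your `.env`):

```typescript
// Minimal sketch of a classic /bridge call from Node 20+ (built-in fetch).
// Token, port, and server command are placeholders; adjust to your setup.
async function listFetchTools(): Promise<unknown> {
  const res = await fetch('http://localhost:3000/bridge', {
    method: 'POST',
    headers: {
      'Authorization': 'Bearer your-secret-token-here', // only needed if ACCESS_TOKEN is set
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      serverPath: 'uvx',
      args: ['mcp-server-fetch'],
      method: 'tools/list',
      params: {},
    }),
  });
  if (!res.ok) {
    throw new Error(`Bridge request failed: ${res.status} ${res.statusText}`);
  }
  return res.json();
}

listFetchTools().then(console.log).catch(console.error);
```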
137 | 138 | Include ``Authorization: Bearer `` in request header if `ACCESS_TOKEN` is set in .env file 139 | 140 | #### Example 1: List available tools 141 | 142 | ```bash 143 | curl -X POST http://localhost:3000/bridge \ 144 | -H "Authorization: Bearer your-secret-token-here" \ 145 | -H "Content-Type: application/json" \ 146 | -d '{ 147 | "serverPath": "uvx", 148 | "args": ["mcp-server-fetch"], 149 | "method": "tools/list", 150 | "params": {} 151 | }' 152 | ``` 153 | 154 | #### Example 2: Call a tool 155 | 156 | ```bash 157 | curl -X POST http://localhost:3000/bridge \ 158 | -H "Authorization: Bearer your-secret-token-here" \ 159 | -H "Content-Type: application/json" \ 160 | -d '{ 161 | "serverPath": "uvx", 162 | "args": ["mcp-server-fetch"], 163 | "method": "tools/call", 164 | "params": { 165 | "name": "fetch", 166 | "arguments": { 167 | "url": "https://example.com" 168 | } 169 | } 170 | }' 171 | ``` 172 | 173 | ### Security 174 | 175 | #### Authentication 176 | MCP Connect uses a simple token-based authentication system. The token is stored in the .env file. If the token is set, MCP Connect will use it to authenticate the request. 177 | 178 | ```bash 179 | Authorization: Bearer 180 | ``` 181 | 182 | #### Allowed Origins 183 | In production, set `ALLOWED_ORIGINS` to restrict cross-origin requests: 184 | 185 | ```env 186 | ALLOWED_ORIGINS=https://yourdomain.com,https://app.yourdomain.com 187 | ``` 188 | 189 | If `ALLOWED_ORIGINS` is set, non-matching origins are rejected. 190 | 191 | --- 192 | 193 | ## API Reference 194 | 195 | ### `GET /health` 196 | 197 | Health check endpoint (no auth required) 198 | 199 | Response: 200 | ```json 201 | {"status": "ok"} 202 | ``` 203 | 204 | --- 205 | 206 | 207 | ### `POST /mcp/:serverId` 208 | 209 | Streaming HTTP mode 210 | 211 | Path params: 212 | - `serverId`: server ID defined in `MCP_SERVERS` 213 | 214 | Headers: 215 | - `Authorization: Bearer ` (if `ACCESS_TOKEN` is set) 216 | - `Accept: application/json, text/event-stream` (required) 217 | - `mcp-session-id: ` (optional, reuse existing session) 218 | 219 | Body: 220 | ```json 221 | [ 222 | {"jsonrpc":"2.0","id":"1","method":"tools/list","params":{}}, 223 | {"jsonrpc":"2.0","method":"notifications/initialized"} 224 | ] 225 | ``` 226 | 227 | --- 228 | 229 | ### `POST /bridge` 230 | 231 | Original request/response bridge mode 232 | 233 | Headers: 234 | - `Authorization: Bearer ` (if `ACCESS_TOKEN` is set) 235 | - `Content-Type: application/json` 236 | 237 | Body: 238 | ```json 239 | { 240 | "serverPath": "Executable command or URL (http/https/ws/wss)", 241 | "method": "JSON-RPC method name", 242 | "params": {}, 243 | "args": ["optional command-line args"], 244 | "env": {"OPTIONAL_ENV_VAR": "value"} 245 | } 246 | ``` 247 | 248 | Supported methods: 249 | - `tools/list`, `tools/call` 250 | - `prompts/list`, `prompts/get` 251 | - `resources/list`, `resources/read` 252 | - `resources/subscribe`, `resources/unsubscribe` 253 | - `completion/complete` 254 | - `logging/setLevel` 255 | 256 | --- 257 | 258 | ## Expose Publicly via Ngrok 259 | 260 | 1. Get a token: https://dashboard.ngrok.com/get-started/your-authtoken 261 | 262 | 2. Add to `.env`: 263 | ```env 264 | NGROK_AUTH_TOKEN=your-token-here 265 | ``` 266 | 267 | 3. Start the service: 268 | ```bash 269 | npm run start:tunnel 270 | ``` 271 | 272 | 4. 
The console will show public URLs: 273 | ``` 274 | Tunnel URL: https://abc123.ngrok.io 275 | MCP Bridge URL: https://abc123.ngrok.io/bridge 276 | ``` 277 | 278 | --- 279 | 280 | ## Quick Deploy to E2B Sandbox 281 | 282 | E2B provides isolated cloud sandboxes, ideal for running untrusted MCP servers safely. 283 | 284 | ### Step 1: Prepare E2B environment 285 | 286 | ```bash 287 | # Sign up at https://e2b.dev and get an API key 288 | pip install e2b 289 | export E2B_API_KEY=your-e2b-api-key 290 | ``` 291 | 292 | Optionally set a bearer token for the bridge (preferred env for E2B deployments): 293 | 294 | ```bash 295 | export E2B_MCP_AUTH_TOKEN=your-secure-token 296 | ``` 297 | 298 | ### Step 2: Build sandbox templates 299 | 300 | ```bash 301 | cd deploy/e2b 302 | python build.py 303 | ``` 304 | 305 | ### Step 3: Launch from template 306 | 307 | ```bash 308 | python sandbox_deploy.py --template-id mcp-dev-gui 309 | ``` 310 | 311 | See:[E2B deployment guide](deploy/e2b/README.md) for details. 312 | 313 | --- 314 | 315 | ## Logging & Monitoring 316 | 317 | ### Log files 318 | 319 | - `combined.log`: all levels 320 | - `error.log`: error level only 321 | 322 | ### Levels 323 | 324 | Control verbosity via `LOG_LEVEL`: 325 | 326 | ```env 327 | LOG_LEVEL=DEBUG # development 328 | LOG_LEVEL=INFO # production (default) 329 | LOG_LEVEL=WARN # warnings + errors 330 | ``` 331 | 332 | --- 333 | 334 | ## Development 335 | 336 | ### Project layout 337 | 338 | ``` 339 | src/ 340 | ├── server/ 341 | │ └── http-server.ts # HTTP server and routes 342 | ├── client/ 343 | │ └── mcp-client-manager.ts # MCP client manager 344 | ├── stream/ 345 | │ ├── session-manager.ts # session lifecycle 346 | │ └── stream-session.ts # SSE session implementation 347 | ├── config/ 348 | │ └── config.ts # config loading & validation 349 | ├── utils/ 350 | │ ├── logger.ts # Winston logger 351 | │ └── tunnel.ts # Ngrok tunnel management 352 | └── index.ts # entry point 353 | ``` 354 | 355 | --- 356 | 357 | ## Contributing 358 | 359 | Issues and PRs are welcome! 360 | 361 | --- 362 | 363 |
364 | 365 | **If this project helps you, please ⭐️ Star it!** 366 | 367 |
368 | -------------------------------------------------------------------------------- /deploy/e2b/scripts/create-startup-script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Create /home/user/startup.sh script that initializes all services 3 | set -e 4 | 5 | cat > /home/user/startup.sh <<'EOF' 6 | #!/bin/bash 7 | 8 | # E2B Custom Template Startup Script 9 | # Starts nginx, virtual display services (Xvfb + VNC + noVNC), Chrome, and MCP-connect 10 | 11 | set -euo pipefail 12 | 13 | # Persist all startup output to a unified log file 14 | exec >> /home/user/startup.log 2>&1 15 | 16 | LOG_DIR=/home/user 17 | 18 | log() { 19 | printf '%s %s\n' "$(date -u '+%Y-%m-%dT%H:%SZ')" "$*" 20 | } 21 | 22 | log "Starting E2B MCP Sandbox..." 23 | 24 | # Read token from namespaced env first, then generic; no hardcoded default 25 | AUTH_TOKEN=${E2B_MCP_AUTH_TOKEN:-${AUTH_TOKEN:-}} 26 | PORT=${PORT:-3000} 27 | HOST=${HOST:-127.0.0.1} 28 | DISPLAY=${DISPLAY:-:99} 29 | XVFB_DISPLAY=${XVFB_DISPLAY:-$DISPLAY} 30 | XVFB_RESOLUTION=${XVFB_RESOLUTION:-1920x1080x24} 31 | VNC_PORT=${VNC_PORT:-5900} 32 | NOVNC_PORT=${NOVNC_PORT:-6080} 33 | NOVNC_WEBROOT=${NOVNC_WEBROOT:-/usr/share/novnc} 34 | VNC_PASSWORD=${VNC_PASSWORD:-} 35 | 36 | RESOLUTION_META=${XVFB_RESOLUTION} 37 | XVFB_WIDTH=${XVFB_WIDTH:-${RESOLUTION_META%%x*}} 38 | HEIGHT_WITH_DEPTH=${RESOLUTION_META#*x} 39 | XVFB_HEIGHT=${XVFB_HEIGHT:-${HEIGHT_WITH_DEPTH%%x*}} 40 | 41 | export AUTH_TOKEN PORT HOST DISPLAY XVFB_DISPLAY XVFB_RESOLUTION XVFB_WIDTH XVFB_HEIGHT VNC_PORT NOVNC_PORT NOVNC_WEBROOT 42 | 43 | # Avoid printing the token value; only indicate whether it is set 44 | if [ -n "${AUTH_TOKEN}" ]; then TOKEN_STATUS="set"; else TOKEN_STATUS="unset"; fi 45 | log "Using AUTH_TOKEN=${TOKEN_STATUS} PORT=${PORT} HOST=${HOST} DISPLAY=${XVFB_DISPLAY}" 46 | 47 | # Prepare MCP-connect configuration 48 | cd /home/user/mcp-connect || { log "mcp-connect directory missing"; exit 1; } 49 | cat > .env </dev/null 2>&1 || true 87 | rm -f "/tmp/.X${DISPLAY_NUM}-lock" "${DISPLAY_SOCKET}" >/dev/null 2>&1 || true 88 | mkdir -p /tmp/.X11-unix 89 | chmod 1777 /tmp/.X11-unix || true 90 | 91 | log "Ensuring Xvfb is running on ${XVFB_DISPLAY}" 92 | nohup Xvfb "${XVFB_DISPLAY}" -screen 0 "${XVFB_RESOLUTION}" -nolisten tcp > "${LOG_DIR}/xvfb.log" 2>&1 & 93 | XVFB_PID=$! 94 | echo ${XVFB_PID} > "${LOG_DIR}/xvfb.pid" 95 | 96 | for attempt in $(seq 1 20); do 97 | if [ -S "${DISPLAY_SOCKET}" ]; then 98 | log "Xvfb socket ${DISPLAY_SOCKET} is ready" 99 | break 100 | fi 101 | log "Waiting for Xvfb socket ${DISPLAY_SOCKET} (attempt ${attempt})" 102 | sleep 0.5 103 | done 104 | 105 | if [ ! -S "${DISPLAY_SOCKET}" ]; then 106 | log "WARNING: Xvfb socket ${DISPLAY_SOCKET} did not appear" 107 | fi 108 | 109 | export DISPLAY="${XVFB_DISPLAY}" 110 | 111 | log "Preparing fluxbox configuration" 112 | FLUXBOX_DIR=/home/user/.fluxbox 113 | if [ ! 
-d "${FLUXBOX_DIR}" ]; then 114 | mkdir -p "${FLUXBOX_DIR}" 115 | OWNED=0 116 | for candidate in /etc/X11/fluxbox /usr/share/fluxbox; do 117 | if [ -d "$candidate" ]; then 118 | cp -r "$candidate"/* "${FLUXBOX_DIR}/" 2>/dev/null || true 119 | OWNED=1 120 | break 121 | fi 122 | done 123 | if [ "$OWNED" -eq 0 ]; then 124 | cat > "${FLUXBOX_DIR}/init" <<'FLUXINIT' 125 | session.menuFile: ~/.fluxbox/menu 126 | session.keyFile: ~/.fluxbox/keys 127 | session.slitlistFile: ~/.fluxbox/slitlist 128 | session.appsFile: ~/.fluxbox/apps 129 | session.screen0.rootCommand: true 130 | FLUXINIT 131 | fi 132 | touch "${FLUXBOX_DIR}/menu" "${FLUXBOX_DIR}/keys" "${FLUXBOX_DIR}/apps" "${FLUXBOX_DIR}/slitlist" 133 | fi 134 | 135 | log "Ensuring fluxbox window manager is running" 136 | pkill -x fluxbox >/dev/null 2>&1 || true 137 | nohup fluxbox > "${LOG_DIR}/fluxbox.log" 2>&1 & 138 | FLUXBOX_PID=$! 139 | echo ${FLUXBOX_PID} > "${LOG_DIR}/fluxbox.pid" 140 | 141 | if [ -n "${VNC_PASSWORD}" ]; then 142 | log "Configuring VNC password file" 143 | mkdir -p /home/user/.vnc 144 | x11vnc -storepasswd "${VNC_PASSWORD}" /home/user/.vnc/passwd >/dev/null 2>&1 145 | chmod 600 /home/user/.vnc/passwd 146 | X11VNC_AUTH_OPTS=("-rfbauth" "/home/user/.vnc/passwd") 147 | else 148 | log "VNC password not set; starting x11vnc without authentication" 149 | X11VNC_AUTH_OPTS=("-nopw") 150 | fi 151 | 152 | log "Ensuring x11vnc is running on port ${VNC_PORT}" 153 | pkill -f -- "x11vnc.*:${VNC_PORT}" >/dev/null 2>&1 || true 154 | # Guard against external envs that inject unsupported libvncserver flags 155 | unset X11VNC_OPTS X11VNC_OPTIONS X11VNC_ARGS X11VNC_QUALITY X11VNC_COMPRESSION TIGHT_QUALITY TIGHT_COMPRESSLEVEL VNC_QUALITY VNC_COMPRESSLEVEL || true 156 | nohup x11vnc -display "${XVFB_DISPLAY}" \ 157 | -rfbport "${VNC_PORT}" \ 158 | -localhost \ 159 | -forever \ 160 | -shared \ 161 | -ncache 0 \ 162 | "${X11VNC_AUTH_OPTS[@]}" \ 163 | -o "${LOG_DIR}/x11vnc.log" \ 164 | > /dev/null 2>&1 & 165 | X11VNC_PID=$! 166 | echo ${X11VNC_PID} > "${LOG_DIR}/x11vnc.pid" 167 | 168 | log "Ensuring noVNC web server is running on port ${NOVNC_PORT}" 169 | pkill -f -- "websockify.*${NOVNC_PORT}" >/dev/null 2>&1 || true 170 | nohup websockify --web="${NOVNC_WEBROOT}" \ 171 | "${NOVNC_PORT}" \ 172 | 127.0.0.1:"${VNC_PORT}" \ 173 | > "${LOG_DIR}/novnc.log" 2>&1 & 174 | NOVNC_PID=$! 175 | echo ${NOVNC_PID} > "${LOG_DIR}/novnc.pid" 176 | 177 | # Minimal readiness check + auto-retry x11vnc with safe flags 178 | for i in $(seq 1 10); do 179 | if nc -z 127.0.0.1 "${VNC_PORT}" >/dev/null 2>&1; then 180 | break 181 | fi 182 | sleep 0.5 183 | done 184 | if ! nc -z 127.0.0.1 "${VNC_PORT}" >/dev/null 2>&1; then 185 | echo "x11vnc not listening on ${VNC_PORT}, retrying with minimal flags" >> "${LOG_DIR}/x11vnc.log" 186 | pkill -f -- "x11vnc.*:${VNC_PORT}" >/dev/null 2>&1 || true 187 | sleep 0.5 188 | nohup x11vnc -display "${XVFB_DISPLAY}" \ 189 | -rfbport "${VNC_PORT}" \ 190 | -localhost \ 191 | -forever \ 192 | -shared \ 193 | "${X11VNC_AUTH_OPTS[@]}" \ 194 | -o "${LOG_DIR}/x11vnc.log" \ 195 | > /dev/null 2>&1 & 196 | X11VNC_PID=$! 
197 | echo ${X11VNC_PID} > "${LOG_DIR}/x11vnc.pid" 198 | fi 199 | 200 | # Chrome (non-headless) -------------------------------------------------------- 201 | log "Ensuring Chrome (DevTools) is running on display ${XVFB_DISPLAY}" 202 | if pgrep -f -- '--remote-debugging-port=9222' >/dev/null 2>&1; then 203 | log "Chrome DevTools already running" 204 | CHROME_PID=$(pgrep -f -- '--remote-debugging-port=9222' | head -n 1) 205 | else 206 | mkdir -p /home/user/.chrome-data 207 | export XDG_RUNTIME_DIR=/home/user/.xdg 208 | mkdir -p "${XDG_RUNTIME_DIR}" 209 | nohup google-chrome \ 210 | --no-sandbox \ 211 | --disable-dev-shm-usage \ 212 | --remote-debugging-port=9222 \ 213 | --remote-debugging-address=0.0.0.0 \ 214 | --disable-gpu \ 215 | --disable-features=VizDisplayCompositor \ 216 | --disable-software-rasterizer \ 217 | --no-first-run \ 218 | --window-size=${XVFB_WIDTH},${XVFB_HEIGHT} \ 219 | --user-data-dir=/home/user/.chrome-data \ 220 | about:blank \ 221 | > "${LOG_DIR}/chrome.log" 2>&1 & 222 | CHROME_PID=$! 223 | echo ${CHROME_PID} > "${LOG_DIR}/chrome.pid" 224 | fi 225 | 226 | # nginx proxy ------------------------------------------------------------------ 227 | log "Ensuring nginx reverse proxy is running" 228 | if pgrep -x nginx >/dev/null 2>&1; then 229 | log "nginx already running" 230 | NGINX_PID=$(pgrep -x nginx | head -n 1) 231 | else 232 | sudo nginx -g 'daemon off;' & 233 | NGINX_PID=$! 234 | fi 235 | 236 | sleep 2 237 | 238 | # MCP-connect ------------------------------------------------------------------ 239 | log "Ensuring MCP-connect server is running on port ${PORT}" 240 | if curl -s -o /dev/null -w '%{http_code}' "http://127.0.0.1:${PORT}/health" | grep -q '^200$'; then 241 | log "mcp-connect already healthy on port ${PORT}" 242 | MCP_PID="" 243 | else 244 | npm run start > "${LOG_DIR}/mcp.log" 2>&1 & 245 | MCP_PID=$! 246 | echo ${MCP_PID} > "${LOG_DIR}/mcp.pid" 247 | fi 248 | 249 | log "Waiting for mcp-connect to become healthy..." 250 | code="" 251 | for _ in $(seq 1 30); do 252 | code=$(curl -s -o /dev/null -w '%{http_code}' "http://127.0.0.1:${PORT}/health" || true) 253 | if [ "$code" = "200" ]; then 254 | log "mcp-connect is healthy (HTTP 200)" 255 | break 256 | fi 257 | sleep 2 258 | done 259 | 260 | if [ "$code" != "200" ]; then 261 | log "mcp-connect failed to become healthy (last code: $code)" 262 | log "--- tail chrome.log ---" 263 | tail -n 50 "${LOG_DIR}/chrome.log" 2>/dev/null || true 264 | log "--- tail nginx error.log ---" 265 | sudo tail -n 50 /var/log/nginx/error.log 2>/dev/null || true 266 | exit 1 267 | fi 268 | 269 | cleanup() { 270 | log "Shutting down services..." 
271 | for pid_var in MCP_PID NGINX_PID CHROME_PID NOVNC_PID X11VNC_PID FLUXBOX_PID XVFB_PID; do 272 | pid_value=${!pid_var:-} 273 | if [ -n "$pid_value" ] && kill -0 "$pid_value" >/dev/null 2>&1; then 274 | kill "$pid_value" 2>/dev/null || true 275 | fi 276 | done 277 | } 278 | 279 | trap cleanup SIGTERM SIGINT 280 | 281 | if [ -n "${MCP_PID:-}" ]; then 282 | wait "${MCP_PID}" 283 | else 284 | while true; do 285 | sleep 3600 286 | done 287 | fi 288 | EOF 289 | 290 | chmod +x /home/user/startup.sh 291 | chown user:user /home/user/startup.sh 292 | -------------------------------------------------------------------------------- /deploy/e2b/e2b.Dockerfile.simple: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | 5 | RUN apt-get update && apt-get install -y \ 6 | curl \ 7 | wget \ 8 | git \ 9 | gnupg \ 10 | ca-certificates \ 11 | lsb-release \ 12 | sudo \ 13 | python3 \ 14 | python3-pip \ 15 | build-essential \ 16 | software-properties-common \ 17 | nginx \ 18 | openssl \ 19 | && rm -rf /var/lib/apt/lists/* 20 | 21 | RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \ 22 | && apt-get install -y nodejs \ 23 | && npm install -g npm@latest 24 | 25 | RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ 26 | && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ 27 | && apt-get update \ 28 | && apt-get install -y google-chrome-stable \ 29 | && rm -rf /var/lib/apt/lists/* 30 | 31 | RUN apt-get update && apt-get install -y \ 32 | libnss3 \ 33 | libxss1 \ 34 | libasound2 \ 35 | libxtst6 \ 36 | xdg-utils \ 37 | libgbm1 \ 38 | libxshmfence1 \ 39 | && rm -rf /var/lib/apt/lists/* 40 | 41 | RUN set -euo pipefail; \ 42 | if id -u user >/dev/null 2>&1; then \ 43 | echo "User 'user' already exists, skipping creation"; \ 44 | else \ 45 | useradd -m -s /bin/bash -u 1000 user; \ 46 | fi; \ 47 | if ! grep -q '^user .*NOPASSWD:ALL' /etc/sudoers; then \ 48 | echo 'user ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers; \ 49 | fi 50 | 51 | USER user 52 | WORKDIR /home/user 53 | 54 | # Install uv (Astral) for the sandbox user and expose globally 55 | RUN set -euo pipefail; \ 56 | echo "Installing uv (primary method)"; \ 57 | if ! 
curl -fsSL https://astral.sh/uv/install.sh | sh -s --; then \ 58 | echo "Primary uv installation failed; retrying (no flags)..."; \ 59 | sleep 2; \ 60 | curl -fsSL https://astral.sh/uv/install.sh | sh -s -- || { echo 'uv install failed'; exit 1; }; \ 61 | fi; \ 62 | test -x "$HOME/.local/bin/uv" || { echo 'uv binary missing after install'; exit 1; } 63 | 64 | USER root 65 | RUN if [ -f /home/user/.local/bin/uv ]; then install -m 0755 /home/user/.local/bin/uv /usr/local/bin/uv; fi \ 66 | && if [ -f /home/user/.local/bin/uvx ]; then install -m 0755 /home/user/.local/bin/uvx /usr/local/bin/uvx; else ln -sf /usr/local/bin/uv /usr/local/bin/uvx; fi 67 | USER user 68 | 69 | RUN git clone https://github.com/EvalsOne/MCP-connect.git /home/user/mcp-connect && \ 70 | cd /home/user/mcp-connect && \ 71 | npm install && \ 72 | npm run build 73 | 74 | USER root 75 | RUN npm install -g chrome-devtools-mcp@latest 76 | USER user 77 | 78 | RUN mkdir -p /home/user/.config/mcp \ 79 | && printf '%s\n' \ 80 | '{' \ 81 | ' "servers": {' \ 82 | ' "chrome-devtools": {' \ 83 | ' "command": "chrome-devtools-mcp",' \ 84 | ' "args": [],' \ 85 | ' "env": {}' \ 86 | ' }' \ 87 | ' }' \ 88 | '}' \ 89 | > /home/user/.config/mcp/servers.json \ 90 | && chown user:user /home/user/.config/mcp/servers.json 91 | 92 | USER root 93 | 94 | RUN mkdir -p /etc/nginx/ssl && \ 95 | openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ 96 | -subj "/CN=localhost" \ 97 | -keyout /etc/nginx/ssl/server.key \ 98 | -out /etc/nginx/ssl/server.crt \ 99 | >/dev/null 2>&1 100 | 101 | RUN printf '%s\n' \ 102 | 'server {' \ 103 | ' listen 443 default_server;' \ 104 | ' listen [::]:443 default_server;' \ 105 | '' \ 106 | ' server_name _;' \ 107 | '' \ 108 | ' location = / {' \ 109 | ' default_type text/plain;' \ 110 | ' return 200 "MCP sandbox ready\\n";' \ 111 | ' }' \ 112 | '' \ 113 | ' location / {' \ 114 | ' proxy_pass http://127.0.0.1:3000/;' \ 115 | ' proxy_http_version 1.1;' \ 116 | ' proxy_set_header Upgrade $http_upgrade;' \ 117 | ' proxy_set_header Connection "upgrade";' \ 118 | ' proxy_set_header Host $host;' \ 119 | ' proxy_cache_bypass $http_upgrade;' \ 120 | ' proxy_set_header X-Real-IP $remote_addr;' \ 121 | ' proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;' \ 122 | ' proxy_set_header X-Forwarded-Proto $scheme;' \ 123 | ' proxy_connect_timeout 7d;' \ 124 | ' proxy_send_timeout 7d;' \ 125 | ' proxy_read_timeout 7d;' \ 126 | ' proxy_buffering off;' \ 127 | ' }' \ 128 | '' \ 129 | ' location /health {' \ 130 | ' access_log off;' \ 131 | ' return 200 "healthy\\n";' \ 132 | ' add_header Content-Type text/plain;' \ 133 | ' }' \ 134 | '}' \ 135 | '' \ 136 | 'server {' \ 137 | ' listen 80 default_server;' \ 138 | ' listen [::]:80 default_server;' \ 139 | '' \ 140 | ' server_name _;' \ 141 | '' \ 142 | ' location = / {' \ 143 | ' default_type text/plain;' \ 144 | ' return 200 "MCP sandbox ready\\n";' \ 145 | ' }' \ 146 | '' \ 147 | ' location / {' \ 148 | ' proxy_pass http://127.0.0.1:3000/;' \ 149 | ' proxy_http_version 1.1;' \ 150 | ' proxy_set_header Upgrade $http_upgrade;' \ 151 | ' proxy_set_header Connection "upgrade";' \ 152 | ' proxy_set_header Host $host;' \ 153 | ' proxy_cache_bypass $http_upgrade;' \ 154 | ' proxy_set_header X-Real-IP $remote_addr;' \ 155 | ' proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;' \ 156 | ' proxy_set_header X-Forwarded-Proto $scheme;' \ 157 | ' proxy_connect_timeout 7d;' \ 158 | ' proxy_send_timeout 7d;' \ 159 | ' proxy_read_timeout 7d;' \ 160 | ' proxy_buffering off;' \ 161 | ' }' 
\ 162 | '' \ 163 | ' location /health {' \ 164 | ' access_log off;' \ 165 | ' return 200 "healthy\\n";' \ 166 | ' add_header Content-Type text/plain;' \ 167 | ' }' \ 168 | '}' \ 169 | > /etc/nginx/sites-available/default 170 | 171 | RUN printf '%s\n' \ 172 | '#!/bin/bash' \ 173 | '' \ 174 | '# E2B Custom Template Startup Script' \ 175 | '# Starts nginx, headless Chrome, and MCP-connect services' \ 176 | '' \ 177 | 'set -euo pipefail' \ 178 | '' \ 179 | '# Persist all startup output to a unified log file' \ 180 | 'exec >> /home/user/startup.log 2>&1' \ 181 | '' \ 182 | 'echo "Starting E2B MCP Sandbox..."' \ 183 | '' \ 184 | 'AUTH_TOKEN="${E2B_MCP_AUTH_TOKEN:-${AUTH_TOKEN:-}}"' \ 185 | 'PORT="${PORT:-3000}"' \ 186 | 'HOST="${HOST:-127.0.0.1}"' \ 187 | '' \ 188 | 'export AUTH_TOKEN PORT HOST' \ 189 | '' \ 190 | 'if [ -n "${AUTH_TOKEN}" ]; then TOKEN_STATUS="set"; else TOKEN_STATUS="unset"; fi' \ 191 | 'echo "Using AUTH_TOKEN=${TOKEN_STATUS} PORT=${PORT} HOST=${HOST}"' \ 192 | '' \ 193 | '# Prepare MCP-connect configuration' \ 194 | 'cd /home/user/mcp-connect || { echo "mcp-connect directory missing"; exit 1; }' \ 195 | 'cat < .env' \ 196 | '# Quote values so dotenv won't treat # as comment' \ 197 | 'AUTH_TOKEN="${AUTH_TOKEN}"' \ 198 | 'PORT="${PORT}"' \ 199 | 'HOST="${HOST}"' \ 200 | 'LOG_LEVEL="info"' \ 201 | 'ENVFILE' \ 202 | '' \ 203 | 'echo "Node.js / npm versions:"' \ 204 | 'node -v || { echo "node not found"; }' \ 205 | 'npm -v || { echo "npm not found"; }' \ 206 | '' \ 207 | '# Ensure dependencies are installed' \ 208 | 'if [ -f package.json ]; then' \ 209 | ' if [ ! -d node_modules ]; then' \ 210 | ' echo "Installing npm dependencies..."' \ 211 | ' if command -v npm >/dev/null 2>&1; then' \ 212 | ' (npm ci || npm install) || { echo "npm install failed"; exit 1; }' \ 213 | ' else' \ 214 | ' echo "npm is not available"; exit 1' \ 215 | ' fi' \ 216 | ' else' \ 217 | ' echo "node_modules present; skipping npm install"' \ 218 | ' fi' \ 219 | 'else' \ 220 | ' echo "package.json missing; cannot install dependencies"; exit 1' \ 221 | 'fi' \ 222 | '' \ 223 | '# Launch headless Chrome for chrome-devtools integration (idempotent)' \ 224 | 'echo "Ensuring headless Chrome is running..."' \ 225 | 'if pgrep -f -- "--remote-debugging-port=9222" >/dev/null 2>&1; then' \ 226 | ' echo "Chrome DevTools already running"' \ 227 | 'else' \ 228 | ' mkdir -p /home/user/.chrome-data' \ 229 | ' google-chrome \\' \ 230 | ' --headless \\' \ 231 | ' --no-sandbox \\' \ 232 | ' --disable-dev-shm-usage \\' \ 233 | ' --remote-debugging-port=9222 \\' \ 234 | ' --remote-debugging-address=0.0.0.0 \\' \ 235 | ' --disable-gpu \\' \ 236 | ' --disable-features=VizDisplayCompositor \\' \ 237 | ' --disable-software-rasterizer \\' \ 238 | ' --user-data-dir=/home/user/.chrome-data \\' \ 239 | ' > /home/user/chrome.log 2>&1 &' \ 240 | ' CHROME_PID=$!' \ 241 | 'fi' \ 242 | '' \ 243 | '# Start nginx reverse proxy (idempotent)' \ 244 | 'echo "Ensuring nginx reverse proxy is running..."' \ 245 | 'if pgrep -x nginx >/dev/null 2>&1; then' \ 246 | ' echo "nginx already running"' \ 247 | 'else' \ 248 | ' sudo nginx -g "daemon off;" &' \ 249 | ' NGINX_PID=$!' 
\ 250 | 'fi' \ 251 | '' \ 252 | '# Allow services to warm up' \ 253 | 'sleep 2' \ 254 | '' \ 255 | '# Launch MCP-connect in foreground-like mode (idempotent)' \ 256 | 'echo "Ensuring MCP-connect server is running..."' \ 257 | 'if curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:${PORT}/health | grep -q "^200$"; then' \ 258 | ' echo "mcp-connect already healthy on port ${PORT}"' \ 259 | ' MCP_PID=$(pgrep -f "npm run start" || echo "")' \ 260 | 'else' \ 261 | ' npm run start &' \ 262 | ' MCP_PID=$!' \ 263 | 'fi' \ 264 | '' \ 265 | '# Health check loop for MCP-connect' \ 266 | 'echo "Waiting for mcp-connect to become healthy..."' \ 267 | 'code=""' \ 268 | 'for i in $(seq 1 30); do' \ 269 | ' code=$(curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:${PORT}/health || true)' \ 270 | ' if [ "$code" = "200" ]; then' \ 271 | ' echo "mcp-connect is healthy (HTTP 200)"' \ 272 | ' break' \ 273 | ' fi' \ 274 | ' sleep 2' \ 275 | 'done' \ 276 | '' \ 277 | 'if [ "$code" != "200" ]; then' \ 278 | ' echo "mcp-connect failed to become healthy (last code: $code)"' \ 279 | ' echo "--- tail chrome.log ---"' \ 280 | ' tail -n 50 /home/user/chrome.log 2>/dev/null || true' \ 281 | ' echo "--- tail nginx error.log ---"' \ 282 | ' sudo tail -n 50 /var/log/nginx/error.log 2>/dev/null || true' \ 283 | ' exit 1' \ 284 | 'fi' \ 285 | '' \ 286 | 'cleanup() {' \ 287 | ' echo "Shutting down services..."' \ 288 | ' if [ -n "${MCP_PID:-}" ] && ps -p ${MCP_PID} > /dev/null 2>&1; then' \ 289 | ' kill ${MCP_PID} 2>/dev/null || true' \ 290 | ' fi' \ 291 | ' if [ -n "${NGINX_PID:-}" ] && ps -p ${NGINX_PID} > /dev/null 2>&1; then' \ 292 | ' sudo kill ${NGINX_PID} 2>/dev/null || true' \ 293 | ' fi' \ 294 | ' if [ -n "${CHROME_PID:-}" ] && ps -p ${CHROME_PID} > /dev/null 2>&1; then' \ 295 | ' kill ${CHROME_PID} 2>/dev/null || true' \ 296 | ' fi' \ 297 | '}' \ 298 | '' \ 299 | 'trap cleanup EXIT SIGTERM SIGINT' \ 300 | '' \ 301 | 'wait ${MCP_PID}' \ 302 | > /home/user/startup.sh \ 303 | && chown user:user /home/user/startup.sh \ 304 | && chmod +x /home/user/startup.sh 305 | 306 | RUN mkdir -p /home/user/app && \ 307 | chown -R user:user /home/user/app 308 | 309 | ENV HOST=127.0.0.1 310 | ENV HEADLESS=1 311 | 312 | USER user 313 | WORKDIR /home/user 314 | 315 | CMD /bin/sh -c 'echo MCP template ready' 316 | -------------------------------------------------------------------------------- /deploy/e2b/startup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # E2B Custom Template Startup Script 4 | # Starts nginx, virtual display services (Xvfb + VNC + noVNC), Chrome, and MCP-connect 5 | 6 | set -euo pipefail 7 | 8 | LOG_DIR=/home/user 9 | STARTUP_VERSION="v2025-10-15-01" 10 | DESKTOP_DIR=/home/user/Desktop 11 | CONFIG_ROOT=/home/user/.config 12 | FLUXBOX_DIR=/home/user/.fluxbox 13 | PCMANFM_CONFIG_DIR=${CONFIG_ROOT}/pcmanfm/default 14 | TINT2_CONFIG_DIR=${CONFIG_ROOT}/tint2 15 | DESKTOP_TEMPLATE_DIR=/opt/mcp-desktop 16 | CHROME_LAUNCHER=/home/user/bin/chrome-devtools.sh 17 | 18 | # Persist all startup output to a unified log file 19 | exec >> "${LOG_DIR}/startup.log" 2>&1 20 | 21 | log() { 22 | printf '%s %s\n' "$(date -u '+%Y-%m-%dT%H:%M:%SZ')" "$*" 23 | } 24 | 25 | log "Starting E2B MCP Sandbox..." 
26 | log "Startup script version: ${STARTUP_VERSION}" 27 | 28 | # Read token from namespaced env first, then generic; no hardcoded default 29 | AUTH_TOKEN=${E2B_MCP_AUTH_TOKEN:-${AUTH_TOKEN:-}} 30 | PORT=${PORT:-3000} 31 | HOST=${HOST:-127.0.0.1} 32 | HEADLESS=${HEADLESS:-0} 33 | DISPLAY=${DISPLAY:-:99} 34 | XVFB_DISPLAY=${XVFB_DISPLAY:-$DISPLAY} 35 | XVFB_RESOLUTION=${XVFB_RESOLUTION:-1920x1080x24} 36 | VNC_PORT=${VNC_PORT:-5900} 37 | NOVNC_PORT=${NOVNC_PORT:-6080} 38 | NOVNC_WEBROOT=${NOVNC_WEBROOT:-/usr/share/novnc} 39 | VNC_PASSWORD=${VNC_PASSWORD:-} 40 | TINT2_ENABLED=${TINT2_ENABLED:-0} 41 | FLUXBOX_TOOLBAR=${FLUXBOX_TOOLBAR:-true} 42 | 43 | RESOLUTION_META=${XVFB_RESOLUTION} 44 | XVFB_WIDTH=${XVFB_WIDTH:-${RESOLUTION_META%%x*}} 45 | HEIGHT_WITH_DEPTH=${RESOLUTION_META#*x} 46 | XVFB_HEIGHT=${XVFB_HEIGHT:-${HEIGHT_WITH_DEPTH%%x*}} 47 | 48 | export AUTH_TOKEN PORT HOST DISPLAY XVFB_DISPLAY XVFB_RESOLUTION XVFB_WIDTH XVFB_HEIGHT VNC_PORT NOVNC_PORT NOVNC_WEBROOT 49 | 50 | # Avoid printing the token value; only indicate whether it is set 51 | if [ -n "${AUTH_TOKEN}" ]; then TOKEN_STATUS="set"; else TOKEN_STATUS="unset"; fi 52 | log "Using AUTH_TOKEN=${TOKEN_STATUS} PORT=${PORT} HOST=${HOST} DISPLAY=${XVFB_DISPLAY} HEADLESS=${HEADLESS}" 53 | 54 | prepare_mcp_env() { 55 | # Prepare MCP-connect configuration and ensure deps 56 | cd /home/user/mcp-connect || { log "mcp-connect directory missing"; exit 1; } 57 | cat < .env 58 | # Quote values so dotenv won't treat # as comment 59 | AUTH_TOKEN="${AUTH_TOKEN}" 60 | PORT="${PORT}" 61 | HOST="${HOST}" 62 | LOG_LEVEL="info" 63 | ENVFILE 64 | 65 | log "Node.js / npm versions:" 66 | node -v || log "node not found" 67 | npm -v || log "npm not found" 68 | 69 | FORCE_INSTALL=${NPM_CI_ALWAYS:-0} 70 | if [ -f package.json ]; then 71 | if [ "$FORCE_INSTALL" = "1" ]; then 72 | log "FORCE_INSTALL enabled: running npm ci" 73 | npm ci --no-audit || npm install --no-audit || { log "npm install failed"; exit 1; } 74 | elif [ ! -d node_modules ]; then 75 | log "node_modules missing: installing deps" 76 | npm ci --no-audit || npm install --no-audit || { log "npm install failed"; exit 1; } 77 | elif [ package-lock.json -nt node_modules ] || [ package.json -nt node_modules ]; then 78 | log "package files newer than node_modules: re-installing deps" 79 | npm ci --no-audit || npm install --no-audit || { log "npm install failed"; exit 1; } 80 | else 81 | log "node_modules present and up-to-date; skipping npm install" 82 | fi 83 | else 84 | log "package.json missing; cannot install dependencies" 85 | exit 1 86 | fi 87 | } 88 | 89 | # If HEADLESS is enabled, run a lightweight startup and skip GUI entirely 90 | if [ "${HEADLESS}" = "1" ] || [ "${HEADLESS}" = "true" ]; then 91 | log "HEADLESS mode: starting only nginx + mcp-connect" 92 | prepare_mcp_env 93 | 94 | # nginx proxy 95 | log "Ensuring nginx reverse proxy is running (headless)" 96 | if pgrep -x nginx >/dev/null 2>&1; then 97 | log "nginx already running" 98 | NGINX_PID=$(pgrep -x nginx | head -n 1) 99 | else 100 | sudo nginx -g 'daemon off;' & 101 | NGINX_PID=$! 102 | fi 103 | 104 | # MCP-connect 105 | log "Ensuring MCP-connect server is running on port ${PORT} (headless)" 106 | if curl -s -o /dev/null -w '%{http_code}' "http://127.0.0.1:${PORT}/health" | grep -q '^200$'; then 107 | log "mcp-connect already healthy on port ${PORT}" 108 | MCP_PID="" 109 | else 110 | npm run start > "${LOG_DIR}/mcp.log" 2>&1 & 111 | MCP_PID=$! 
112 | echo ${MCP_PID} > "${LOG_DIR}/mcp.pid" 113 | fi 114 | 115 | log "Waiting for mcp-connect to become healthy (headless)..." 116 | code="" 117 | for _ in $(seq 1 30); do 118 | code=$(curl -s -o /dev/null -w '%{http_code}' "http://127.0.0.1:${PORT}/health" || true) 119 | if [ "$code" = "200" ]; then 120 | log "mcp-connect is healthy (HTTP 200)" 121 | break 122 | fi 123 | sleep 2 124 | done 125 | 126 | if [ "$code" != "200" ]; then 127 | log "mcp-connect failed to become healthy (last code: $code)" 128 | log "--- tail nginx error.log ---" 129 | sudo tail -n 50 /var/log/nginx/error.log 2>/dev/null || true 130 | exit 1 131 | fi 132 | 133 | cleanup() { 134 | log "Shutting down services (headless)..." 135 | if [ -n "${MCP_PID:-}" ] && kill -0 "${MCP_PID}" >/dev/null 2>&1; then 136 | kill "${MCP_PID}" 2>/dev/null || true 137 | fi 138 | if [ -n "${NGINX_PID:-}" ] && kill -0 "${NGINX_PID}" >/dev/null 2>&1; then 139 | sudo kill "${NGINX_PID}" 2>/dev/null || true 140 | fi 141 | } 142 | trap cleanup SIGTERM SIGINT 143 | 144 | if [ -n "${MCP_PID:-}" ]; then 145 | wait "${MCP_PID}" 146 | else 147 | while true; do 148 | sleep 3600 149 | done 150 | fi 151 | exit 0 152 | fi 153 | 154 | # Prepare MCP env and dependencies (GUI mode) 155 | prepare_mcp_env 156 | 157 | # Virtual display components --------------------------------------------------- 158 | DISPLAY_NUM=${XVFB_DISPLAY#:} 159 | DISPLAY_NUM=${DISPLAY_NUM%%.*} 160 | DISPLAY_SOCKET="/tmp/.X11-unix/X${DISPLAY_NUM}" 161 | XAUTH_FILE=${XAUTH_FILE:-/home/user/.Xauthority} 162 | 163 | # Prepare Xauthority so both Xvfb and x11vnc can authenticate to the same display 164 | log "Preparing Xauthority for display ${XVFB_DISPLAY}" 165 | mkdir -p "$(dirname "${XAUTH_FILE}")" 166 | # Generate a 16-byte hex cookie (fallback chain: mcookie -> openssl -> /dev/urandom) 167 | COOKIE="$( (mcookie 2>/dev/null) || (openssl rand -hex 16 2>/dev/null) || (dd if=/dev/urandom bs=16 count=1 2>/dev/null | xxd -p -c 32) )" 168 | if [ -z "${COOKIE}" ]; then 169 | log "ERROR: failed to generate Xauthority cookie" 170 | exit 1 171 | fi 172 | # Ensure we replace any existing cookie for this display 173 | xauth -f "${XAUTH_FILE}" remove "${XVFB_DISPLAY}" >/dev/null 2>&1 || true 174 | xauth -f "${XAUTH_FILE}" add "${XVFB_DISPLAY}" . "${COOKIE}" || { log "failed to write cookie to ${XAUTH_FILE}"; exit 1; } 175 | chmod 600 "${XAUTH_FILE}" || true 176 | export XAUTHORITY="${XAUTH_FILE}" 177 | 178 | log "Cleaning up stale Xvfb state for display ${XVFB_DISPLAY}" 179 | pkill -f -- "Xvfb ${XVFB_DISPLAY}" >/dev/null 2>&1 || true 180 | rm -f "/tmp/.X${DISPLAY_NUM}-lock" "${DISPLAY_SOCKET}" >/dev/null 2>&1 || true 181 | mkdir -p /tmp/.X11-unix 182 | chmod 1777 /tmp/.X11-unix || true 183 | 184 | log "Ensuring Xvfb is running on ${XVFB_DISPLAY}" 185 | nohup Xvfb "${XVFB_DISPLAY}" -screen 0 "${XVFB_RESOLUTION}" -nolisten tcp -auth "${XAUTH_FILE}" > "${LOG_DIR}/xvfb.log" 2>&1 & 186 | XVFB_PID=$! 187 | echo ${XVFB_PID} > "${LOG_DIR}/xvfb.pid" 188 | 189 | for attempt in $(seq 1 20); do 190 | if [ -S "${DISPLAY_SOCKET}" ]; then 191 | log "Xvfb socket ${DISPLAY_SOCKET} is ready" 192 | break 193 | fi 194 | log "Waiting for Xvfb socket ${DISPLAY_SOCKET} (attempt ${attempt})" 195 | sleep 0.5 196 | done 197 | 198 | if [ ! 
-S "${DISPLAY_SOCKET}" ]; then 199 | log "WARNING: Xvfb socket ${DISPLAY_SOCKET} did not appear" 200 | fi 201 | 202 | export DISPLAY="${XVFB_DISPLAY}" 203 | 204 | log "Preparing desktop directories and templates" 205 | mkdir -p "${DESKTOP_DIR}" "${CONFIG_ROOT}" "${FLUXBOX_DIR}" "${PCMANFM_CONFIG_DIR}" "${TINT2_CONFIG_DIR}" "${CHROME_LAUNCHER%/*}" /home/user/.chrome-data 206 | 207 | cat <<'SCRIPT' > "${CHROME_LAUNCHER}" 208 | #!/bin/bash 209 | set -euo pipefail 210 | XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR:-/home/user/.xdg} 211 | mkdir -p "$XDG_RUNTIME_DIR" 212 | 213 | /usr/bin/google-chrome \ 214 | --disable-dev-shm-usage \ 215 | --remote-debugging-port=9222 \ 216 | --remote-debugging-address=127.0.0.1 \ 217 | --disable-gpu \ 218 | --disable-features=VizDisplayCompositor \ 219 | --disable-software-rasterizer \ 220 | --no-first-run \ 221 | --start-maximized \ 222 | --window-size=${XVFB_WIDTH},${XVFB_HEIGHT} \ 223 | --user-data-dir=/home/user/.chrome-data \ 224 | "$@" & 225 | CHROME_LAUNCH_PID=$! 226 | 227 | ( 228 | # Try up to 20 times to find and resize the Chrome window 229 | for i in $(seq 1 20); do 230 | WID=$(xdotool search --onlyvisible --class google-chrome 2>/dev/null | head -n 1 || true) 231 | if [ -n "${WID:-}" ]; then 232 | # Move to 0,0 and size to display geometry 233 | read SCREEN_W SCREEN_H < <(xdotool getdisplaygeometry) 234 | xdotool windowmove "$WID" 0 0 || true 235 | xdotool windowsize "$WID" "$SCREEN_W" "$SCREEN_H" || true 236 | # Prefer maximize (not fullscreen) so panels stay visible 237 | wmctrl -x -r google-chrome.Google-chrome -b add,maximized_vert,maximized_horz >/dev/null 2>&1 || true 238 | break 239 | fi 240 | sleep 0.5 241 | done 242 | ) & 243 | 244 | wait ${CHROME_LAUNCH_PID} 245 | SCRIPT 246 | chmod +x "${CHROME_LAUNCHER}" 247 | 248 | if [ -d "${DESKTOP_TEMPLATE_DIR}/fluxbox" ]; then 249 | cp -rf "${DESKTOP_TEMPLATE_DIR}/fluxbox/." "${FLUXBOX_DIR}/" 250 | fi 251 | if [ -d "${DESKTOP_TEMPLATE_DIR}/pcmanfm/default" ]; then 252 | cp -rf "${DESKTOP_TEMPLATE_DIR}/pcmanfm/default/." "${PCMANFM_CONFIG_DIR}/" 253 | fi 254 | if [ -d "${DESKTOP_TEMPLATE_DIR}/tint2" ]; then 255 | cp -rf "${DESKTOP_TEMPLATE_DIR}/tint2/." 
"${TINT2_CONFIG_DIR}/" 256 | fi 257 | 258 | # Ensure Fluxbox toolbar is hidden to avoid double panels when using tint2 259 | if [ -f "${FLUXBOX_DIR}/init" ]; then 260 | case "${FLUXBOX_TOOLBAR}" in 261 | 1|true|TRUE|True) 262 | if grep -q '^session.screen0.toolbar.visible:' "${FLUXBOX_DIR}/init" 2>/dev/null; then 263 | sed -i 's/^session.screen0.toolbar.visible:.*/session.screen0.toolbar.visible: true/' "${FLUXBOX_DIR}/init" || true 264 | else 265 | echo 'session.screen0.toolbar.visible: true' >> "${FLUXBOX_DIR}/init" 266 | fi 267 | ;; 268 | *) 269 | # default: hide toolbar to avoid duplicate panels; may be re-enabled later if tint2 fails 270 | if grep -q '^session.screen0.toolbar.visible:' "${FLUXBOX_DIR}/init" 2>/dev/null; then 271 | sed -i 's/^session.screen0.toolbar.visible:.*/session.screen0.toolbar.visible: false/' "${FLUXBOX_DIR}/init" || true 272 | else 273 | echo 'session.screen0.toolbar.visible: false' >> "${FLUXBOX_DIR}/init" 274 | fi 275 | ;; 276 | esac 277 | fi 278 | 279 | cat <<'DESKTOP_ENTRY' > "${DESKTOP_DIR}/Chrome.desktop" 280 | [Desktop Entry] 281 | Name=Chrome 282 | Exec=/home/user/bin/chrome-devtools.sh 283 | Icon=google-chrome 284 | Terminal=false 285 | Type=Application 286 | Categories=Network;WebBrowser; 287 | DESKTOP_ENTRY 288 | 289 | cat <<'DESKTOP_ENTRY' > "${DESKTOP_DIR}/Terminal.desktop" 290 | [Desktop Entry] 291 | Name=Terminal 292 | Exec=/usr/bin/x-terminal-emulator 293 | Icon=utilities-terminal 294 | Terminal=true 295 | Type=Application 296 | Categories=System; 297 | DESKTOP_ENTRY 298 | chmod +x "${DESKTOP_DIR}/"*.desktop 299 | 300 | log "Ensuring fluxbox window manager is running" 301 | pkill -x fluxbox >/dev/null 2>&1 || true 302 | nohup fluxbox > "${LOG_DIR}/fluxbox.log" 2>&1 & 303 | FLUXBOX_PID=$! 304 | echo ${FLUXBOX_PID} > "${LOG_DIR}/fluxbox.pid" 305 | 306 | log "Starting PCManFM desktop manager" 307 | pkill -f "pcmanfm --desktop" >/dev/null 2>&1 || true 308 | nohup pcmanfm --desktop --profile=default > "${LOG_DIR}/pcmanfm.log" 2>&1 & 309 | PCMANFM_PID=$! 310 | echo ${PCMANFM_PID} > "${LOG_DIR}/pcmanfm.pid" 311 | 312 | if [ "${TINT2_ENABLED}" = "1" ] || [ "${TINT2_ENABLED}" = "true" ]; then 313 | log "Starting tint2 panel" 314 | pkill -x tint2 >/dev/null 2>&1 || true 315 | nohup tint2 > "${LOG_DIR}/tint2.log" 2>&1 & 316 | TINT2_PID=$! 317 | echo ${TINT2_PID} > "${LOG_DIR}/tint2.pid" 318 | else 319 | log "TINT2_ENABLED=0; skipping tint2" 320 | TINT2_PID="" 321 | fi 322 | 323 | # If tint2 fails to start, re-enable Fluxbox toolbar as a fallback 324 | ( 325 | sleep 1 326 | if [ -n "${TINT2_PID}" ] && ! 
kill -0 ${TINT2_PID} >/dev/null 2>&1; then 327 | if [ -f "${FLUXBOX_DIR}/init" ] && [ "${FLUXBOX_TOOLBAR}" = "auto" ]; then 328 | if grep -q '^session.screen0.toolbar.visible:' "${FLUXBOX_DIR}/init" 2>/dev/null; then 329 | sed -i 's/^session.screen0.toolbar.visible:.*/session.screen0.toolbar.visible: true/' "${FLUXBOX_DIR}/init" || true 330 | else 331 | echo 'session.screen0.toolbar.visible: true' >> "${FLUXBOX_DIR}/init" 332 | fi 333 | pkill -HUP -x fluxbox >/dev/null 2>&1 || true 334 | fi 335 | fi 336 | ) & 337 | 338 | if [ -n "${VNC_PASSWORD}" ]; then 339 | log "Configuring VNC password file" 340 | mkdir -p /home/user/.vnc 341 | x11vnc -storepasswd "${VNC_PASSWORD}" /home/user/.vnc/passwd >/dev/null 2>&1 342 | chmod 600 /home/user/.vnc/passwd 343 | X11VNC_AUTH_OPTS=("-rfbauth" "/home/user/.vnc/passwd") 344 | else 345 | log "VNC password not set; starting x11vnc without authentication" 346 | X11VNC_AUTH_OPTS=("-nopw") 347 | fi 348 | 349 | # Dynamic tuning parameters ---------------------------------------------------- 350 | X11VNC_WAIT=${X11VNC_WAIT:-20} # milliseconds to wait between screen polls 351 | X11VNC_DEFER=${X11VNC_DEFER:-20} # defer update batching (ms) 352 | X11VNC_COMPRESSION=${X11VNC_COMPRESSION:-9} # deprecated in some builds; kept for env parity 353 | X11VNC_QUALITY=${X11VNC_QUALITY:-5} # deprecated in some builds; kept for env parity 354 | X11VNC_EXTRA=${X11VNC_EXTRA:-} # extra raw args, space separated 355 | 356 | X11VNC_TUNING_OPTS=( 357 | -wait "${X11VNC_WAIT}" \ 358 | -defer "${X11VNC_DEFER}" \ 359 | -noxdamage \ 360 | -ncache 0 \ 361 | ) 362 | 363 | if [ -n "${X11VNC_EXTRA}" ]; then 364 | # shellcheck disable=SC2206 365 | EXTRA_SPLIT=( ${X11VNC_EXTRA} ) 366 | X11VNC_TUNING_OPTS+=("${EXTRA_SPLIT[@]}") 367 | fi 368 | 369 | log "Ensuring x11vnc is running on port ${VNC_PORT}" 370 | pkill -f -- "x11vnc.*:${VNC_PORT}" >/dev/null 2>&1 || true 371 | # Log the effective x11vnc command (without revealing sensitive values) 372 | { 373 | printf '%s %s' "$(date -u '+%Y-%m-%dT%H:%M:%SZ')" "Launching x11vnc with: x11vnc -display ${XVFB_DISPLAY} -rfbport ${VNC_PORT} -localhost -forever -shared -auth ${XAUTH_FILE} "; 374 | printf '%s ' "${X11VNC_TUNING_OPTS[@]}" 2>/dev/null || true; 375 | printf '\n'; 376 | } >> "${LOG_DIR}/x11vnc.log" 2>/dev/null || true 377 | 378 | # Guard against external envs that inject unsupported libvncserver flags 379 | unset X11VNC_OPTS X11VNC_OPTIONS X11VNC_ARGS X11VNC_QUALITY X11VNC_COMPRESSION TIGHT_QUALITY TIGHT_COMPRESSLEVEL VNC_QUALITY VNC_COMPRESSLEVEL || true 380 | 381 | nohup x11vnc -display "${XVFB_DISPLAY}" \ 382 | -rfbport "${VNC_PORT}" \ 383 | -localhost \ 384 | -forever \ 385 | -shared \ 386 | -auth "${XAUTH_FILE}" \ 387 | "${X11VNC_TUNING_OPTS[@]}" \ 388 | "${X11VNC_AUTH_OPTS[@]}" \ 389 | -o "${LOG_DIR}/x11vnc.log" \ 390 | > /dev/null 2>&1 & 391 | X11VNC_PID=$! 392 | echo ${X11VNC_PID} > "${LOG_DIR}/x11vnc.pid" 393 | 394 | # Quick readiness probe for x11vnc port to avoid silent failures 395 | for i in $(seq 1 10); do 396 | if nc -z 127.0.0.1 "${VNC_PORT}" >/dev/null 2>&1; then 397 | break 398 | fi 399 | sleep 0.5 400 | done 401 | if ! 
nc -z 127.0.0.1 "${VNC_PORT}" >/dev/null 2>&1; then 402 | log "WARNING: x11vnc not listening on port ${VNC_PORT}" 403 | log "--- tail x11vnc.log ---" 404 | tail -n 50 "${LOG_DIR}/x11vnc.log" 2>/dev/null || true 405 | # Retry with minimal flags to avoid crashes from unsupported options 406 | pkill -f -- "x11vnc.*:${VNC_PORT}" >/dev/null 2>&1 || true 407 | sleep 0.5 408 | log "Retrying x11vnc with minimal flags..." 409 | nohup x11vnc -display "${XVFB_DISPLAY}" \ 410 | -rfbport "${VNC_PORT}" \ 411 | -localhost \ 412 | -forever \ 413 | -shared \ 414 | -auth "${XAUTH_FILE}" \ 415 | "${X11VNC_AUTH_OPTS[@]}" \ 416 | -o "${LOG_DIR}/x11vnc.log" \ 417 | > /dev/null 2>&1 & 418 | X11VNC_PID=$! 419 | echo ${X11VNC_PID} > "${LOG_DIR}/x11vnc.pid" 420 | sleep 1 421 | if nc -z 127.0.0.1 "${VNC_PORT}" >/dev/null 2>&1; then 422 | log "x11vnc recovered with minimal flags" 423 | else 424 | log "x11vnc still not listening on port ${VNC_PORT} after retry" 425 | fi 426 | fi 427 | 428 | log "Ensuring noVNC web server is running on port ${NOVNC_PORT}" 429 | pkill -f -- "websockify.*${NOVNC_PORT}" >/dev/null 2>&1 || true 430 | nohup websockify --web="${NOVNC_WEBROOT}" \ 431 | "${NOVNC_PORT}" \ 432 | 127.0.0.1:"${VNC_PORT}" \ 433 | > "${LOG_DIR}/novnc.log" 2>&1 & 434 | NOVNC_PID=$! 435 | echo ${NOVNC_PID} > "${LOG_DIR}/novnc.pid" 436 | 437 | log "Waiting for noVNC to become ready on port ${NOVNC_PORT}" 438 | NOVNC_READY=0 439 | for attempt in $(seq 1 10); do 440 | if curl -fsS "http://127.0.0.1:${NOVNC_PORT}/vnc.html" >/dev/null 2>&1; then 441 | log "noVNC is reachable (HTTP 200)" 442 | NOVNC_READY=1 443 | break 444 | fi 445 | log "noVNC not ready yet (attempt ${attempt}); retrying..." 446 | sleep 1 447 | done 448 | 449 | if [ "${NOVNC_READY}" -ne 1 ]; then 450 | log "WARNING: noVNC did not become reachable on port ${NOVNC_PORT}" 451 | fi 452 | 453 | # Chrome (non-headless) -------------------------------------------------------- 454 | log "Ensuring Chrome (DevTools) is running on display ${XVFB_DISPLAY}" 455 | if pgrep -f -- '--remote-debugging-port=9222' >/dev/null 2>&1; then 456 | log "Chrome DevTools already running" 457 | CHROME_PID=$(pgrep -f -- '--remote-debugging-port=9222' | head -n 1) 458 | else 459 | export XDG_RUNTIME_DIR=/home/user/.xdg 460 | mkdir -p "${XDG_RUNTIME_DIR}" 461 | nohup "${CHROME_LAUNCHER}" \ 462 | about:blank \ 463 | > "${LOG_DIR}/chrome.log" 2>&1 & 464 | CHROME_PID=$! 465 | echo ${CHROME_PID} > "${LOG_DIR}/chrome.pid" 466 | ( 467 | for i in $(seq 1 10); do 468 | if wmctrl -x -r google-chrome.Google-chrome -b add,maximized_vert,maximized_horz >/dev/null 2>&1; then 469 | break 470 | fi 471 | sleep 1 472 | done 473 | ) & 474 | fi 475 | 476 | # Mark GUI readiness flag after both noVNC and Chrome are up 477 | mkdir -p /home/user/.ready 478 | if [ "${NOVNC_READY}" = "1" ] && [ -n "${CHROME_PID:-}" ]; then 479 | log "GUI ready: creating flag /home/user/.ready/gui" 480 | touch /home/user/.ready/gui 481 | fi 482 | 483 | # nginx proxy ------------------------------------------------------------------ 484 | log "Ensuring nginx reverse proxy is running" 485 | if pgrep -x nginx >/dev/null 2>&1; then 486 | log "nginx already running" 487 | NGINX_PID=$(pgrep -x nginx | head -n 1) 488 | else 489 | sudo nginx -g 'daemon off;' & 490 | NGINX_PID=$! 
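# Routing sketch: judging by the public URLs built in deploy/e2b/sandbox_deploy.py,
# the proxy is expected to map roughly:
#   /bridge, /health       -> mcp-connect on 127.0.0.1:${PORT}
#   /novnc/, /websockify   -> noVNC/websockify on 127.0.0.1:${NOVNC_PORT}
# The authoritative routes live in deploy/e2b/nginx.conf (not reproduced here).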
491 | fi 492 | 493 | sleep 2 494 | 495 | # MCP-connect ------------------------------------------------------------------ 496 | log "Ensuring MCP-connect server is running on port ${PORT}" 497 | if curl -s -o /dev/null -w '%{http_code}' "http://127.0.0.1:${PORT}/health" | grep -q '^200$'; then 498 | log "mcp-connect already healthy on port ${PORT}" 499 | MCP_PID="" 500 | else 501 | npm run start > "${LOG_DIR}/mcp.log" 2>&1 & 502 | MCP_PID=$! 503 | echo ${MCP_PID} > "${LOG_DIR}/mcp.pid" 504 | fi 505 | 506 | log "Waiting for mcp-connect to become healthy..." 507 | code="" 508 | for _ in $(seq 1 30); do 509 | code=$(curl -s -o /dev/null -w '%{http_code}' "http://127.0.0.1:${PORT}/health" || true) 510 | if [ "$code" = "200" ]; then 511 | log "mcp-connect is healthy (HTTP 200)" 512 | break 513 | fi 514 | sleep 2 515 | done 516 | 517 | if [ "$code" != "200" ]; then 518 | log "mcp-connect failed to become healthy (last code: $code)" 519 | log "--- tail chrome.log ---" 520 | tail -n 50 "${LOG_DIR}/chrome.log" 2>/dev/null || true 521 | log "--- tail nginx error.log ---" 522 | sudo tail -n 50 /var/log/nginx/error.log 2>/dev/null || true 523 | exit 1 524 | fi 525 | 526 | cleanup() { 527 | log "Shutting down services..." 528 | for pid_var in MCP_PID NGINX_PID CHROME_PID NOVNC_PID X11VNC_PID FLUXBOX_PID PCMANFM_PID TINT2_PID XVFB_PID; do 529 | pid_value=${!pid_var:-} 530 | if [ -n "$pid_value" ] && kill -0 "$pid_value" >/dev/null 2>&1; then 531 | kill "$pid_value" 2>/dev/null || true 532 | fi 533 | done 534 | } 535 | 536 | trap cleanup SIGTERM SIGINT 537 | 538 | if [ -n "${MCP_PID:-}" ]; then 539 | wait "${MCP_PID}" 540 | else 541 | # Keep script alive so services stay up even if no MCP PID was captured 542 | while true; do 543 | sleep 3600 544 | done 545 | fi 546 | # Virtual display components --------------------------------------------------- 547 | # (GUI mode only; HEADLESS already exited above) 548 | -------------------------------------------------------------------------------- /src/server/http-server.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from 'events'; 2 | import express, { Request, Response } from 'express'; 3 | import { Config, StreamableServerConfig } from '../config/config.js'; 4 | import { Logger } from '../utils/logger.js'; 5 | import { MCPClientManager } from '../client/mcp-client-manager.js'; 6 | import { TunnelManager } from '../utils/tunnel.js'; 7 | import { StreamSessionManager } from '../stream/session-manager.js'; 8 | import type { StreamSession } from '../stream/stream-session.js'; 9 | import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js'; 10 | 11 | export class HttpServer { 12 | private app = express(); 13 | private readonly config: Config; 14 | private readonly logger: Logger; 15 | private readonly mcpClient: MCPClientManager; 16 | private readonly accessToken: string; 17 | private readonly allowedOrigins: string[]; 18 | private tunnelManager?: TunnelManager; 19 | private reconnectTimer: NodeJS.Timeout | null = null; 20 | private streamSessionCleanupTimer: NodeJS.Timeout | null = null; 21 | private bridgeCleanupTimer: NodeJS.Timeout | null = null; 22 | private clientCache: Map 26 | }> = new Map(); 27 | private readonly CLIENT_CACHE_TTL = 5 * 60 * 1000; // five minutes caching time 28 | private readonly CLEANUP_INTERVAL_DIVISOR = 3; // Run cleanup every TTL/3 29 | private readonly streamSessionManager: StreamSessionManager; 30 | private readonly streamableServers: Record; 31 | 32 | 
constructor(config: Config, logger: Logger, mcpClient: MCPClientManager) { 33 | this.config = config; 34 | this.logger = logger; 35 | this.mcpClient = mcpClient; 36 | 37 | EventEmitter.defaultMaxListeners = 15; 38 | 39 | this.accessToken = this.config.security.authToken; 40 | this.allowedOrigins = this.config.security.allowedOrigins; 41 | if (!this.accessToken) { 42 | this.logger.warn('No AUTH_TOKEN environment variable set. This is a security risk.'); 43 | } 44 | 45 | if (process.argv.includes('--tunnel')) { 46 | this.tunnelManager = new TunnelManager(logger); 47 | } 48 | 49 | this.streamableServers = this.config.streamable.servers; 50 | this.streamSessionManager = new StreamSessionManager( 51 | this.logger, 52 | this.config.streamable.sessionTtlMs 53 | ); 54 | 55 | this.setupMiddleware(); 56 | this.setupRoutes(); 57 | 58 | this.setupHeartbeat(); 59 | this.setupCleanupTimers(); 60 | } 61 | 62 | private setupHeartbeat() { 63 | this.reconnectTimer = setInterval(async () => { 64 | try { 65 | const response = await fetch(`http://localhost:${this.config.server.port}/health`); 66 | if (!response.ok) { 67 | throw new Error(`Health check failed with status: ${response.status}`); 68 | } 69 | } catch (error) { 70 | this.logger.warn('Health check failed, restarting server...', error); 71 | await this.start().catch(startError => { 72 | this.logger.error('Failed to restart server:', startError); 73 | }); 74 | } 75 | }, 30000); 76 | } 77 | 78 | private setupCleanupTimers() { 79 | // Check if cleanup is disabled via environment variable 80 | const disableBridgeCleanup = process.env.DISABLE_BRIDGE_CLEANUP === 'true'; 81 | 82 | if (!disableBridgeCleanup) { 83 | // Run bridge cleanup every TTL/3 to catch expired sessions more frequently 84 | // This reduces the window where an expired session might still be used 85 | const bridgeCleanupInterval = Math.floor(this.CLIENT_CACHE_TTL / this.CLEANUP_INTERVAL_DIVISOR); 86 | this.bridgeCleanupTimer = setInterval( 87 | () => this.cleanupClientCache(), 88 | bridgeCleanupInterval 89 | ); 90 | this.logger.info(`Bridge session cleanup enabled (interval: ${bridgeCleanupInterval}ms, TTL: ${this.CLIENT_CACHE_TTL}ms)`); 91 | } else { 92 | this.logger.warn('Bridge session cleanup is DISABLED - sessions will not be automatically cleaned up'); 93 | } 94 | } 95 | 96 | private setupMiddleware(): void { 97 | // JSON body parser 98 | this.app.use(express.json()); 99 | 100 | // Health check endpoint 101 | this.app.get('/health', (req: Request, res: Response) => { 102 | res.json({ status: 'ok' }); 103 | }); 104 | 105 | // Bearer Token Authentication middleware 106 | this.app.use((req: Request, res: Response, next) => { 107 | if (this.allowedOrigins.length > 0) { 108 | const origin = req.headers.origin; 109 | if (origin && !this.allowedOrigins.includes(origin)) { 110 | this.logger.warn(`Rejected request due to origin mismatch: ${origin}`); 111 | res.status(403).json({ error: 'Origin not allowed' }); 112 | return; 113 | } 114 | } 115 | 116 | const authHeader = req.headers.authorization; 117 | // If no auth header, check if access token is set 118 | if (this.accessToken) { 119 | if (!authHeader) { 120 | res.status(401).json({ error: 'Authorization header is required' }); 121 | return; 122 | } else { 123 | // If auth header is set, check if it's a valid Bearer token 124 | if (authHeader) { 125 | const [type, token] = authHeader.split(' '); 126 | if (type !== 'Bearer') { 127 | res.status(401).json({ error: 'Authorization type must be Bearer' }); 128 | return; 129 | } 130 | 131 | if 
(!token || token !== this.accessToken) { 132 | res.status(401).json({ error: 'Invalid access token' }); 133 | return; 134 | } 135 | } else { 136 | res.status(401).json({ error: 'Access token is required' }); 137 | return; 138 | } 139 | } 140 | } 141 | next(); 142 | }); 143 | 144 | // Error handling middleware 145 | this.app.use((err: Error, req: Request, res: Response, next: any) => { 146 | this.logger.error('Server error:', err); 147 | res.status(500).json({ error: 'Internal server error' }); 148 | }); 149 | } 150 | 151 | private maskSensitiveData(data: any): any { 152 | if (!data) return data; 153 | const masked = { ...data }; 154 | if (masked.env && typeof masked.env === 'object') { 155 | masked.env = Object.keys(masked.env).reduce((acc, key) => { 156 | acc[key] = '********'; 157 | return acc; 158 | }, {} as Record); 159 | } 160 | return masked; 161 | } 162 | 163 | /** 164 | * Mask sensitive values in headers for logging. 165 | * - Authorization 166 | * - Any header that looks like an API key 167 | * - X-MCP-ENV-* overrides (may contain secrets) 168 | */ 169 | private maskHeadersForLogging(headers: Record): Record { 170 | const masked: Record = {}; 171 | for (const [key, value] of Object.entries(headers)) { 172 | const lowerKey = key.toLowerCase(); 173 | const shouldMask = 174 | lowerKey === 'authorization' || 175 | lowerKey.startsWith('x-mcp-env-') || 176 | lowerKey.includes('api_key') || 177 | lowerKey.includes('apikey') || 178 | lowerKey.endsWith('-key'); 179 | 180 | if (shouldMask) { 181 | masked[key] = '********'; 182 | } else { 183 | masked[key] = value; 184 | } 185 | } 186 | return masked; 187 | } 188 | 189 | private setupRoutes(): void { 190 | // Bridge endpoint 191 | this.app.post('/bridge', async (req: Request, res: Response) => { 192 | let clientId: string | undefined; 193 | try { 194 | const { serverPath, method, params, args, env } = req.body; 195 | this.logger.info('Bridge request received:', this.maskSensitiveData(req.body)); 196 | this.logger.info('Bridge request headers:', this.maskHeadersForLogging(req.headers as any)); 197 | if (!serverPath || !method || !params) { 198 | res.status(400).json({ 199 | error: 'Invalid request body. Required: serverPath, method, params. Optional: args' 200 | }); 201 | return; 202 | } 203 | 204 | // Generate cache key 205 | const cacheKey = `${serverPath}-${JSON.stringify(args)}-${JSON.stringify(env)}`; 206 | const cachedClient = this.clientCache.get(cacheKey); 207 | 208 | if (cachedClient) { 209 | try { 210 | // Test if the connection is still valid 211 | await this.mcpClient.executeRequest(cachedClient.id, 'ping', {}); 212 | clientId = cachedClient.id; 213 | cachedClient.lastUsed = Date.now(); 214 | } catch (error) { 215 | // If the connection is invalid, delete the cache and create a new one 216 | await this.mcpClient.closeClient(cachedClient.id).catch(() => {}); 217 | this.clientCache.delete(cacheKey); 218 | clientId = await this.mcpClient.createClient(serverPath, args, env); 219 | this.clientCache.set(cacheKey, { 220 | id: clientId, 221 | lastUsed: Date.now(), 222 | env 223 | }); 224 | } 225 | } else { 226 | // Create new client 227 | clientId = await this.mcpClient.createClient(serverPath, args, env); 228 | this.clientCache.set(cacheKey, { 229 | id: clientId, 230 | lastUsed: Date.now(), 231 | env 232 | }); 233 | } 234 | 235 | // Execute request 236 | const response = await this.mcpClient.executeRequest(clientId, method, params); 237 | res.json(response); 238 | 239 | } catch (error) { 240 | const errorText = error instanceof Error ? 
error.message : String(error); 241 | this.logger.error('Error processing bridge request:', error); 242 | res.status(500).json({ error: `Failed to process request: ${errorText}` }); 243 | } 244 | }); 245 | 246 | this.app.post('/mcp/:serverId', (req: Request, res: Response) => { 247 | this.logger.info(`MCP request received for serverId: ${req.params.serverId}`); 248 | void this.handleStreamablePost(req, res); 249 | }); 250 | 251 | this.app.get('/mcp/:_serverId', (req: Request, res: Response) => { 252 | res.status(405).json({ error: 'GET not supported for MCP endpoint' }); 253 | }); 254 | 255 | this.app.delete('/mcp/:_serverId', async (req: Request, res: Response) => { 256 | const sessionIdHeader = req.header('mcp-session-id'); 257 | if (!sessionIdHeader) { 258 | res.status(400).json({ error: 'mcp-session-id header required' }); 259 | return; 260 | } 261 | 262 | await this.streamSessionManager.closeSession(sessionIdHeader).catch((error) => { 263 | this.logger.error('Failed to close session:', error); 264 | }); 265 | res.status(204).end(); 266 | }); 267 | } 268 | 269 | public async start(): Promise { 270 | const banner = ` 271 | ███╗ ███╗ ██████╗██████╗ ██████╗ ██████╗ ███╗ ██╗███╗ ██╗███████╗ ██████╗████████╗ 272 | ████╗ ████║██╔════╝██╔══██╗ ██╔════╝██╔═══██╗████╗ ██║████╗ ██║██╔════╝██╔════╝╚══██╔══╝ 273 | ██╔████╔██║██║ ██████╔╝ ██║ ██║ ██║██╔██╗ ██║██╔██╗ ██║█████╗ ██║ ██║ 274 | ██║╚██╔╝██║██║ ██╔═══╝ ██║ ██║ ██║██║╚██╗██║██║╚██╗██║██╔══╝ ██║ ██║ 275 | ██║ ╚═╝ ██║╚██████╗██║ ╚██████╗╚██████╔╝██║ ╚████║██║ ╚████║███████╗╚██████╗ ██║ 276 | ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═╝ 277 | `; 278 | 279 | return new Promise((resolve, reject) => { 280 | try { 281 | const server = this.app.listen(this.config.server.port, async () => { 282 | try { 283 | console.log('\x1b[36m%s\x1b[0m', banner); 284 | const localUrl = `http://localhost:${this.config.server.port}`; 285 | this.logger.info(`Server listening on port ${this.config.server.port}`); 286 | this.logger.info(`Local: ${localUrl}`); 287 | this.logger.info(`Health check URL: ${localUrl}/health`); 288 | this.logger.info(`MCP Bridge URL: ${localUrl}/bridge`); 289 | 290 | if (this.tunnelManager) { 291 | try { 292 | const url = await this.tunnelManager.createTunnel(this.config.server.port); 293 | if (url) { 294 | this.logger.info(`Tunnel URL: ${url}`); 295 | this.logger.info(`MCP Bridge URL: ${url}/bridge`); 296 | } 297 | } catch (error) { 298 | this.logger.error('Failed to create tunnel:', error); 299 | // Don't reject here as tunnel is optional 300 | } 301 | } 302 | resolve(); 303 | } catch (error) { 304 | this.logger.error('Error during server startup:', error); 305 | server.close(); 306 | reject(error); 307 | } 308 | }); 309 | 310 | server.on('error', (error: Error) => { 311 | this.logger.error('Server failed to start:', error); 312 | reject(error); 313 | }); 314 | } catch (error) { 315 | this.logger.error('Critical error during server initialization:', error); 316 | reject(error); 317 | } 318 | }); 319 | } 320 | 321 | public async stop(): Promise { 322 | if (this.reconnectTimer) { 323 | clearInterval(this.reconnectTimer); 324 | this.reconnectTimer = null; 325 | } 326 | 327 | if (this.bridgeCleanupTimer) { 328 | clearInterval(this.bridgeCleanupTimer); 329 | this.bridgeCleanupTimer = null; 330 | } 331 | 332 | // Close all cached clients 333 | const closePromises = Array.from(this.clientCache.values()).map(async (client) => { 334 | try { 335 | await this.mcpClient.closeClient(client.id); 336 | } catch (error) { 337 
| this.logger.error(`Error closing client ${client.id}:`, error); 338 | } 339 | }); 340 | 341 | await Promise.all(closePromises); 342 | this.clientCache.clear(); 343 | 344 | if (this.streamSessionCleanupTimer) { 345 | clearInterval(this.streamSessionCleanupTimer); 346 | this.streamSessionCleanupTimer = null; 347 | } 348 | 349 | await this.streamSessionManager.closeAll(); 350 | 351 | if (this.tunnelManager) { 352 | await this.tunnelManager.disconnect(); 353 | } 354 | } 355 | 356 | private getStreamableServer(serverId: string): StreamableServerConfig | undefined { 357 | return this.streamableServers[serverId]; 358 | } 359 | 360 | /** 361 | * Extract per-request environment overrides from headers. 362 | * Convention: X-MCP-ENV-FOO: bar -> env.FOO = "bar" 363 | */ 364 | private extractStreamEnvFromHeaders(req: Request): Record { 365 | const overrides: Record = {}; 366 | const prefix = 'x-mcp-env-'; 367 | 368 | for (const [headerKey, rawValue] of Object.entries(req.headers)) { 369 | const lowerKey = headerKey.toLowerCase(); 370 | if (!lowerKey.startsWith(prefix)) { 371 | continue; 372 | } 373 | 374 | // Strip prefix, normalise: hyphens -> underscores, upper-case for env 375 | const rawEnvName = lowerKey.slice(prefix.length); 376 | if (!rawEnvName) { 377 | continue; 378 | } 379 | 380 | const envKey = rawEnvName.replace(/-/g, '_').toUpperCase(); 381 | const value = Array.isArray(rawValue) ? rawValue[0] : rawValue; 382 | if (typeof value === 'string' && value.length > 0) { 383 | overrides[envKey] = value; 384 | } 385 | } 386 | 387 | return overrides; 388 | } 389 | 390 | private acceptHeaderSupportsStreaming(headerValue: string | undefined): boolean { 391 | if (!headerValue) { 392 | return false; 393 | } 394 | 395 | const values = headerValue 396 | .split(',') 397 | .map((value) => value.split(';')[0].trim().toLowerCase()) 398 | .filter((value) => value.length > 0); 399 | 400 | return values.includes('application/json') && values.includes('text/event-stream'); 401 | } 402 | 403 | private isJsonRpcRequest(message: JSONRPCMessage): message is Extract { 404 | return ( 405 | typeof (message as { method?: unknown }).method === 'string' && 406 | Object.prototype.hasOwnProperty.call(message, 'id') 407 | ); 408 | } 409 | 410 | private isJsonRpcResponse(message: JSONRPCMessage): message is Extract { 411 | const hasId = Object.prototype.hasOwnProperty.call(message, 'id'); 412 | const hasResult = Object.prototype.hasOwnProperty.call(message, 'result'); 413 | const hasError = Object.prototype.hasOwnProperty.call(message, 'error'); 414 | return hasId && (hasResult || hasError) && !Object.prototype.hasOwnProperty.call(message, 'method'); 415 | } 416 | 417 | private async handleStreamablePost(req: Request, res: Response): Promise { 418 | this.logger.info(`Streamable request received: ${req.method} ${req.originalUrl}`); 419 | this.logger.info('Streamable request headers:', this.maskHeadersForLogging(req.headers as any)); 420 | const serverId = req.params.serverId; 421 | const serverConfig = this.getStreamableServer(serverId); 422 | if (!serverConfig) { 423 | this.logger.warn(`Streamable request received for unknown serverId: ${serverId}`); 424 | res.status(404).json({ error: `Unknown MCP server: ${serverId}` }); 425 | return; 426 | } 427 | if (!this.acceptHeaderSupportsStreaming(req.headers.accept)) { 428 | res.status(406).json({ error: 'Accept header must include application/json and text/event-stream' }); 429 | return; 430 | } 431 | 432 | const payload = req.body; 433 | const messages = Array.isArray(payload) ? 
payload : [payload]; 434 | if (!messages || messages.length === 0) { 435 | res.status(400).json({ error: 'Request body must include at least one JSON-RPC message' }); 436 | return; 437 | } 438 | 439 | if (messages.some((message) => typeof message !== 'object' || message === null)) { 440 | res.status(400).json({ error: 'Each JSON-RPC message must be an object' }); 441 | return; 442 | } 443 | const normalizedMessages = messages as JSONRPCMessage[]; 444 | const hasRequests = normalizedMessages.some((message) => this.isJsonRpcRequest(message)); 445 | const sessionHeader = req.header('mcp-session-id'); 446 | 447 | let session: StreamSession | undefined; 448 | let sessionId: string; 449 | const headerEnvOverrides = this.extractStreamEnvFromHeaders(req); 450 | if (Object.keys(headerEnvOverrides).length > 0) { 451 | this.logger.info('Streamable env overrides from headers:', Object.keys(headerEnvOverrides)); 452 | } 453 | const effectiveServerConfig: StreamableServerConfig = { 454 | ...serverConfig, 455 | env: { 456 | ...(serverConfig.env ?? {}), 457 | ...headerEnvOverrides, 458 | }, 459 | }; 460 | 461 | try { 462 | this.logger.info(`Session header: ${sessionHeader}, hasRequests: ${hasRequests}`); 463 | if (!sessionHeader) { 464 | if (!hasRequests) { 465 | res.status(400).json({ error: 'mcp-session-id header required when request body has no requests' }); 466 | return; 467 | } 468 | 469 | session = await this.streamSessionManager.createSession(serverId, effectiveServerConfig); 470 | sessionId = session.id; 471 | } else { 472 | session = this.streamSessionManager.getSession(sessionHeader, serverId); 473 | if (!session) { 474 | res.status(404).json({ error: 'Session not found' }); 475 | return; 476 | } 477 | await session.ensureStarted(); 478 | sessionId = sessionHeader; 479 | } 480 | } catch (error) { 481 | res.status(500).json({ error: 'Failed to establish session with MCP server' }); 482 | return; 483 | } 484 | res.setHeader('mcp-session-id', sessionId); 485 | 486 | const forwardMessages = async () => { 487 | for (const message of normalizedMessages) { 488 | await session!.send(message); 489 | } 490 | }; 491 | 492 | if (!hasRequests) { 493 | try { 494 | await forwardMessages(); 495 | res.status(202).end(); 496 | } catch (error) { 497 | res.status(500).json({ error: 'Failed to forward messages to MCP server' }); 498 | } 499 | return; 500 | } 501 | 502 | res.status(200); 503 | res.setHeader('Content-Type', 'text/event-stream'); 504 | res.setHeader('Cache-Control', 'no-cache'); 505 | res.setHeader('Connection', 'keep-alive'); 506 | if (typeof (res as any).flushHeaders === 'function') { 507 | (res as any).flushHeaders(); 508 | } else { 509 | res.write('\n'); 510 | } 511 | 512 | const pendingIds = new Set(); 513 | for (const message of normalizedMessages) { 514 | if (this.isJsonRpcRequest(message)) { 515 | pendingIds.add(String(message.id)); 516 | } 517 | } 518 | 519 | let streamClosed = false; 520 | let eventCounter = 0; 521 | 522 | const writeEvent = (payload: unknown, eventType = 'message') => { 523 | if (streamClosed || res.writableEnded) { 524 | return; 525 | } 526 | try { 527 | res.write(`event: ${eventType}\n`); 528 | res.write(`id: ${++eventCounter}\n`); 529 | res.write(`data: ${JSON.stringify(payload)}\n\n`); 530 | } catch (error) { 531 | this.logger.error('Error writing SSE frame:', error); 532 | } 533 | }; 534 | 535 | const cleanup = () => { 536 | if (streamClosed) { 537 | return; 538 | } 539 | streamClosed = true; 540 | session?.off('message', onSessionMessage); 541 | session?.off('error', 
onSessionError); 542 | session?.off('close', onSessionClose); 543 | req.off('close', onClientClose); 544 | if (!res.writableEnded) { 545 | res.end(); 546 | } 547 | }; 548 | 549 | const deliverMessage = (message: JSONRPCMessage) => { 550 | let shouldCloseAfterWrite = false; 551 | if (this.isJsonRpcResponse(message)) { 552 | const key = String(message.id); 553 | if (!pendingIds.has(key)) { 554 | return; 555 | } 556 | pendingIds.delete(key); 557 | shouldCloseAfterWrite = pendingIds.size === 0; 558 | } 559 | 560 | writeEvent(message); 561 | 562 | if (shouldCloseAfterWrite) { 563 | cleanup(); 564 | } 565 | }; 566 | 567 | const onSessionMessage = (payload: JSONRPCMessage | JSONRPCMessage[]) => { 568 | if (Array.isArray(payload)) { 569 | payload.forEach((message) => deliverMessage(message)); 570 | } else { 571 | deliverMessage(payload); 572 | } 573 | }; 574 | 575 | const onSessionError = (error: Error) => { 576 | // Emit JSON-RPC error responses for all pending request IDs 577 | const errMsg = error?.message ?? 'Stream session error'; 578 | const errorCode = 32603; // JSON-RPC internal error 579 | if (pendingIds.size === 0) { 580 | // If we don't have any pending IDs, emit a synthetic message to help clients notice the failure 581 | // Note: JSON-RPC responses should carry an id; when none is available, clients may simply close. 582 | // We prefer not to send a nonstandard SSE event type. 583 | writeEvent({ jsonrpc: '2.0', error: { code: errorCode, message: errMsg } }); 584 | } else { 585 | for (const id of Array.from(pendingIds)) { 586 | writeEvent({ jsonrpc: '2.0', id, error: { code: errorCode, message: errMsg } }); 587 | } 588 | pendingIds.clear(); 589 | } 590 | cleanup(); 591 | }; 592 | 593 | const onSessionClose = () => { 594 | cleanup(); 595 | }; 596 | 597 | const onClientClose = () => { 598 | cleanup(); 599 | }; 600 | 601 | session.on('message', onSessionMessage); 602 | session.on('error', onSessionError); 603 | session.on('close', onSessionClose); 604 | req.on('close', onClientClose); 605 | 606 | try { 607 | await forwardMessages(); 608 | } catch (error) { 609 | this.logger.error('Failed to forward JSON-RPC request batch:', error); 610 | const errMsg = error instanceof Error ? 
error.message : 'Failed to forward request to MCP server'; 611 | const errorCode = 32603; // JSON-RPC internal error 612 | if (pendingIds.size === 0) { 613 | writeEvent({ jsonrpc: '2.0', error: { code: errorCode, message: errMsg } }); 614 | } else { 615 | for (const id of Array.from(pendingIds)) { 616 | writeEvent({ jsonrpc: '2.0', id, error: { code: errorCode, message: errMsg } }); 617 | } 618 | pendingIds.clear(); 619 | } 620 | cleanup(); 621 | } 622 | } 623 | 624 | private async cleanupClientCache(): Promise { 625 | const now = Date.now(); 626 | const expiredClients: Array<{ key: string; clientId: string; idleTime: number }> = []; 627 | 628 | // First pass: identify expired clients 629 | for (const [key, value] of this.clientCache.entries()) { 630 | const idleTime = now - value.lastUsed; 631 | if (idleTime > this.CLIENT_CACHE_TTL) { 632 | expiredClients.push({ key, clientId: value.id, idleTime }); 633 | } 634 | } 635 | 636 | if (expiredClients.length === 0) { 637 | return; 638 | } 639 | 640 | this.logger.info(`Cleaning up ${expiredClients.length} expired bridge client(s)`); 641 | 642 | // Second pass: close expired clients 643 | for (const { key, clientId, idleTime } of expiredClients) { 644 | try { 645 | this.logger.debug(`Closing bridge client ${clientId} (idle for ${Math.floor(idleTime / 1000)}s)`); 646 | await this.mcpClient.closeClient(clientId).catch(err => { 647 | this.logger.error(`Error closing client ${clientId}:`, err); 648 | }); 649 | this.clientCache.delete(key); 650 | } catch (error) { 651 | this.logger.error(`Error during cleanup for client ${clientId}:`, error); 652 | this.clientCache.delete(key); 653 | } 654 | } 655 | } 656 | } 657 | -------------------------------------------------------------------------------- /deploy/e2b/sandbox_deploy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | E2B Sandbox Manager for MCP-enabled Web Sandbox 4 | Creates and manages e2b sandboxes with pre-configured MCP servers 5 | """ 6 | 7 | import os 8 | import sys 9 | import asyncio 10 | import argparse 11 | from typing import Optional, Dict, Any, Tuple, Callable, Type, Union 12 | from dataclasses import dataclass 13 | from urllib.parse import quote as _url_quote 14 | ############################# 15 | # Sandbox client import logic 16 | ############################# 17 | # Prefer modern sync Sandbox API first, then fall back to async and legacy 18 | SandboxType = Any # alias for type hints after dynamic import 19 | _sandbox_create_fn: Optional[Callable[..., Any]] = None 20 | _sandbox_kind: str = "unknown" 21 | 22 | try: # Preferred: modern sync Sandbox interface from e2b 23 | from e2b import Sandbox as _SyncSandbox # type: ignore 24 | if hasattr(_SyncSandbox, "create"): 25 | _sandbox_create_fn = _SyncSandbox.create # type: ignore 26 | SandboxType = _SyncSandbox # type: ignore 27 | _sandbox_kind = "sync" 28 | except Exception: # pragma: no cover 29 | _sandbox_create_fn = None 30 | 31 | if _sandbox_create_fn is None: 32 | try: # Fallback: async sandbox interface 33 | from e2b import AsyncSandbox as _AsyncSandbox # type: ignore 34 | # Async interface typically exposes an async classmethod .create 35 | if hasattr(_AsyncSandbox, "create"): 36 | _sandbox_create_fn = _AsyncSandbox.create # type: ignore 37 | SandboxType = _AsyncSandbox # type: ignore 38 | _sandbox_kind = "async" 39 | except Exception: # pragma: no cover 40 | _sandbox_create_fn = None 41 | 42 | if _sandbox_create_fn is None: 43 | try: # Legacy: 
e2b_code_interpreter Sandbox (sync create) 44 | from e2b_code_interpreter import Sandbox as _LegacySandbox # type: ignore 45 | if hasattr(_LegacySandbox, "create"): 46 | # Legacy create is synchronous; we will offload with to_thread 47 | _sandbox_create_fn = _LegacySandbox.create # type: ignore 48 | SandboxType = _LegacySandbox # type: ignore 49 | _sandbox_kind = "legacy" 50 | except Exception: # pragma: no cover 51 | _sandbox_create_fn = None 52 | 53 | if _sandbox_create_fn is None: # Final guard: give a clear error early 54 | raise RuntimeError( 55 | "Unable to import E2B Sandbox client. Install or upgrade 'e2b' (preferred) or 'e2b-code-interpreter'." 56 | ) 57 | try: 58 | from e2b.sandbox.commands.command_handle import CommandExitException # type: ignore 59 | except Exception: # pragma: no cover 60 | class CommandExitException(Exception): 61 | pass 62 | import logging 63 | from datetime import datetime 64 | from importlib import resources as importlib_resources 65 | 66 | # Configure logging 67 | logging.basicConfig( 68 | level=logging.INFO, 69 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 70 | ) 71 | logger = logging.getLogger(__name__) 72 | 73 | @dataclass 74 | class SandboxConfig: 75 | """Configuration for E2B Sandbox 76 | 77 | template_id is now mandatory (no default). Provide via constructor, CLI --template-id, 78 | or environment variable E2B_TEMPLATE_ID. 79 | """ 80 | template_id: str = "" 81 | timeout: int = 3600 # 1 hour default timeout in seconds 82 | api_key: Optional[str] = None 83 | metadata: Optional[Dict[str, Any]] = None 84 | auth_token: str = "your-auth-token" 85 | port: int = 3000 86 | host: str = "127.0.0.1" 87 | secure: bool = True 88 | # Optional: heartbeat to keep sandbox/public URL active (seconds, 0 to disable) 89 | keepalive_interval: int = 60 90 | platform_keepalive_interval: int = 120 91 | # Health probing: by default only probe HTTPS (443); disable HTTP (80) 92 | probe_http: bool = False 93 | display: str = ":99" 94 | xvfb_resolution: str = "1920x1080x24" 95 | vnc_port: int = 5900 96 | novnc_port: int = 6080 97 | novnc_path: str = "/novnc/" 98 | novnc_webroot: str = "/usr/share/novnc" 99 | vnc_password: Optional[str] = "" 100 | # Headless/lightweight mode: skip X/Chrome/VNC/noVNC bootstrap 101 | headless: bool = False 102 | # Remote asset fetch settings (instead of env vars) 103 | # When enabled, manager fetches latest startup.sh, chrome-devtools-wrapper.sh, 104 | # and servers.json from the configured remote_base inside the sandbox. 
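    # Illustrative construction of the remote-fetch knobs defined below
    # (the template id here is hypothetical; remote_base shows the default):
    #   SandboxConfig(template_id="my-template", fetch_remote=True,
    #                 remote_base="https://raw.githubusercontent.com/EvalsOne/MCP-bridge/main/deploy/e2b")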
105 | fetch_remote: bool = False 106 | remote_base: str = ( 107 | "https://raw.githubusercontent.com/EvalsOne/MCP-bridge/main/deploy/e2b" 108 | ) 109 | 110 | class E2BSandboxManager: 111 | """Manager for E2B Sandboxes with MCP support""" 112 | 113 | def __init__(self, config: Optional[SandboxConfig] = None): 114 | """ 115 | Initialize the sandbox manager 116 | 117 | Args: 118 | config: Optional sandbox configuration 119 | """ 120 | self.config = config or SandboxConfig() 121 | if not self.config.template_id: 122 | # Allow fallback to environment variable 123 | env_tid = os.getenv("E2B_TEMPLATE_ID", "").strip() 124 | if env_tid: 125 | self.config.template_id = env_tid 126 | if not self.config.template_id: 127 | raise ValueError("template_id is required (use --template-id or set E2B_TEMPLATE_ID)") 128 | 129 | if not self.config.novnc_path.endswith("/"): 130 | self.config.novnc_path = f"{self.config.novnc_path}/" 131 | 132 | # Allow overriding resolution via env for convenience 133 | env_res = os.getenv("XVFB_RESOLUTION") or os.getenv("E2B_XVFB_RESOLUTION") 134 | if env_res and isinstance(env_res, str) and env_res.strip(): 135 | self.config.xvfb_resolution = env_res.strip() 136 | 137 | # Allow overriding timeout via environment variable (seconds) 138 | env_timeout = os.getenv("E2B_SANDBOX_TIMEOUT") 139 | if env_timeout: 140 | try: 141 | self.config.timeout = int(env_timeout) 142 | except ValueError: 143 | logger.warning("Invalid E2B_SANDBOX_TIMEOUT; using default %s", self.config.timeout) 144 | 145 | self.active_sandboxes: Dict[str, Dict[str, Any]] = {} 146 | 147 | async def create_sandbox( 148 | self, 149 | sandbox_id: Optional[str] = None, 150 | enable_internet: bool = True, 151 | wait_for_ready: bool = True 152 | ) -> Dict[str, Any]: 153 | """ 154 | Create a new E2B sandbox with MCP servers pre-configured 155 | 156 | Args: 157 | sandbox_id: Optional custom ID for the sandbox 158 | enable_internet: Whether to enable internet access 159 | wait_for_ready: Whether to wait for services to be ready 160 | 161 | Returns: 162 | Dictionary containing sandbox information 163 | """ 164 | try: 165 | logger.info(f"Creating sandbox with template: {self.config.template_id}") 166 | 167 | # Create sandbox with custom template using whichever client was imported 168 | if _sandbox_kind == 'async': 169 | # Async API: call directly 170 | sandbox = await _sandbox_create_fn( # type: ignore 171 | template=self.config.template_id, 172 | timeout=self.config.timeout, 173 | metadata=self.config.metadata, 174 | secure=self.config.secure, 175 | allow_internet_access=enable_internet, 176 | ) 177 | else: 178 | # Legacy sync API: run in thread 179 | sandbox = await asyncio.to_thread( 180 | _sandbox_create_fn, # type: ignore 181 | template=self.config.template_id, 182 | timeout=self.config.timeout, 183 | metadata=self.config.metadata, 184 | secure=self.config.secure, 185 | allow_internet_access=enable_internet, 186 | ) 187 | 188 | logger.info("Sandbox launched; startup handled by image entrypoint") 189 | 190 | # Generate sandbox ID if not provided 191 | if not sandbox_id: 192 | sandbox_id = f"sandbox_{datetime.now().strftime('%Y%m%d_%H%M%S')}" 193 | # Bootstrap services inside sandbox and persist handles 194 | bootstrap_info = await self._bootstrap_services(sandbox) 195 | 196 | handles = bootstrap_info["handles"] 197 | 198 | self.active_sandboxes[sandbox_id] = { 199 | "sandbox": sandbox, 200 | "process_handles": handles, 201 | "envs": bootstrap_info["envs"], 202 | } 203 | 204 | logger.info(f"Sandbox created with ID: 
{sandbox.sandbox_id}") 205 | 206 | # Enable internet access if requested 207 | if enable_internet: 208 | logger.info("Enabling internet access for sandbox...") 209 | # E2B sandboxes have internet access by default in newer versions 210 | # If using older version, you might need to configure this differently 211 | 212 | # Prepare candidate public URLs (both http/https) 213 | https_url = self._get_public_url(sandbox, secure=True) 214 | http_url = self._get_public_url(sandbox, secure=False) 215 | 216 | # Wait for services to be ready if requested, and discover which URL is healthy 217 | healthy_url = None 218 | probe_result = {"https_ok": False, "http_ok": False} 219 | if wait_for_ready: 220 | logger.info("Waiting for services to be ready (probing http and https /health)...") 221 | ready_info = await self._wait_for_services(sandbox, https_url, http_url) 222 | healthy_url = ready_info.get("healthy_url") 223 | probe_result = {k: ready_info.get(k, False) for k in ("https_ok", "http_ok")} 224 | 225 | # Choose public_url based on health probe or user preference as fallback 226 | if healthy_url: 227 | public_url = healthy_url 228 | fallback_url = http_url if healthy_url.startswith("https") else https_url 229 | else: 230 | # If no healthy result yet, pick according to self.config.secure but keep fallback 231 | if self.config.secure: 232 | public_url = https_url 233 | fallback_url = http_url 234 | else: 235 | public_url = http_url 236 | fallback_url = https_url 237 | 238 | # Prepare response 239 | sandbox_headers = sandbox.connection_config.sandbox_headers or {} 240 | 241 | # Resolve effective VNC password: use custom if provided, otherwise no password 242 | vnc_password_resolved = ( 243 | str(self.config.vnc_password).strip() 244 | if (self.config.vnc_password and str(self.config.vnc_password).strip()) 245 | else "" 246 | ) 247 | # Derive displayed port for nginx based on the chosen URL 248 | nginx_port = 443 if public_url.startswith("https") else 80 249 | # Decide headless-like behavior based on CLI flag or template naming 250 | template_headless_like = self._template_indicates_headless(self.config.template_id) 251 | gui_disabled = self.config.headless or template_headless_like 252 | novnc_url = None 253 | websocket_url = None 254 | if not gui_disabled: 255 | # Build noVNC URL with autoconnect, explicit websocket path, and auto password (URL-encoded) 256 | # Note: Including password in URL trades convenience for security; use only in trusted contexts. 
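                # Illustrative shape of the resulting viewer URL (hypothetical host):
                #   https://<sandbox-host>/novnc/vnc.html?autoconnect=1&path=/websockify&resize=scale&password=<url-encoded>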
257 | encoded_pw = _url_quote(vnc_password_resolved, safe="") if vnc_password_resolved else "" 258 | # Scale the remote desktop to the browser instead of cropping 259 | common_qs = "autoconnect=1&path=/websockify&resize=scale" 260 | novnc_url = ( 261 | f"{public_url.rstrip('/')}{self.config.novnc_path}vnc.html?{common_qs}" 262 | + (f"&password={encoded_pw}" if encoded_pw else "") 263 | ) 264 | # Derive websocket (noVNC/websockify) URL (public_url already includes scheme) 265 | ws_scheme = "wss" if public_url.startswith("https") else "ws" 266 | # public_url like https://host; we append /websockify 267 | websocket_url = f"{ws_scheme}://{public_url.split('://', 1)[1].rstrip('/')}/websockify" 268 | 269 | result = { 270 | "success": True, 271 | "sandbox_id": sandbox_id, 272 | "e2b_sandbox_id": sandbox.sandbox_id, 273 | "public_url": public_url, 274 | "vnc_password_resolved": vnc_password_resolved, 275 | "services": { 276 | "nginx": { 277 | "url": public_url, 278 | "port": nginx_port, 279 | "status": "running", 280 | "pid": handles["nginx"].pid if handles.get("nginx") else None, 281 | }, 282 | "mcp_connect": { 283 | "url": f"{public_url}/bridge", 284 | "port": self.config.port, 285 | "status": "running", 286 | "pid": handles["mcp_connect"].pid if handles.get("mcp_connect") else None, 287 | }, 288 | "chrome_devtools": { 289 | "debug_port": 9222, 290 | "display": self.config.display, 291 | "status": ("disabled" if gui_disabled else "running"), 292 | "pid": handles["chrome"].pid if handles.get("chrome") else None, 293 | }, 294 | "virtual_display": { 295 | "display": self.config.display, 296 | "resolution": self.config.xvfb_resolution, 297 | "status": ("disabled" if gui_disabled else "running"), 298 | }, 299 | "vnc": { 300 | "port": self.config.vnc_port, 301 | "status": ("disabled" if gui_disabled else "running"), 302 | "password_hint": ( 303 | "custom" if (self.config.vnc_password and str(self.config.vnc_password).strip()) else "none" 304 | ), 305 | "resolved_password": vnc_password_resolved, 306 | }, 307 | "novnc": { 308 | "url": novnc_url, 309 | "websocket_url": websocket_url, 310 | "port": self.config.novnc_port, 311 | "path": self.config.novnc_path, 312 | "status": ("disabled" if gui_disabled else "running"), 313 | "requires_password": bool(vnc_password_resolved), 314 | "password_hint": ( 315 | "custom" if (self.config.vnc_password and str(self.config.vnc_password).strip()) else "none" 316 | ), 317 | "resolved_password": vnc_password_resolved, 318 | }, 319 | }, 320 | "security": { 321 | "secure": self.config.secure, 322 | "access_token": sandbox_headers.get("X-Access-Token"), 323 | }, 324 | "created_at": datetime.now().isoformat(), 325 | "timeout_seconds": self.config.timeout, 326 | "internet_access": bool(enable_internet), 327 | "probes": probe_result 328 | } 329 | 330 | # Conditionally include optional URLs based on availability 331 | if novnc_url: 332 | result["novnc_url"] = novnc_url 333 | if websocket_url: 334 | result["websocket_url"] = websocket_url 335 | 336 | if fallback_url and fallback_url != public_url: 337 | key = "public_url_http" if self.config.secure else "public_url_https" 338 | result[key] = fallback_url 339 | 340 | logger.info(f"Sandbox ready: {public_url}") 341 | 342 | # Start keepalive loops if configured 343 | if self.config.keepalive_interval and self.config.keepalive_interval > 0: 344 | try: 345 | task = asyncio.create_task(self._keepalive_loop(sandbox_id)) 346 | self.active_sandboxes[sandbox_id]["keepalive_task"] = task 347 | except Exception as e: 348 | 
logger.warning("Failed to start keepalive loop: %s", str(e)) 349 | if self.config.platform_keepalive_interval and self.config.platform_keepalive_interval > 0: 350 | try: 351 | ptask = asyncio.create_task(self._platform_keepalive_loop(sandbox_id)) 352 | self.active_sandboxes[sandbox_id]["platform_keepalive_task"] = ptask 353 | except Exception as e: 354 | logger.warning("Failed to start platform keepalive loop: %s", str(e)) 355 | return result 356 | 357 | except Exception as e: 358 | logger.error(f"Failed to create sandbox: {str(e)}") 359 | if "sandbox" in locals(): 360 | try: 361 | await self._kill(sandbox) 362 | except Exception as cleanup_error: 363 | logger.warning( 364 | f"Unable to clean up sandbox after failure: {cleanup_error}" 365 | ) 366 | return { 367 | "success": False, 368 | "error": str(e), 369 | "sandbox_id": sandbox_id 370 | } 371 | 372 | async def _bootstrap_services(self, sandbox) -> Dict[str, Any]: 373 | """Mirror startup.sh to initialise required services inside the sandbox.""" 374 | 375 | display = self.config.display 376 | xvfb_resolution = self.config.xvfb_resolution 377 | xvfb_width, xvfb_height = self._parse_resolution(xvfb_resolution) 378 | vnc_port = str(self.config.vnc_port) 379 | novnc_port = str(self.config.novnc_port) 380 | # Treat empty string or whitespace as unset; when unset, do not configure a VNC password 381 | if self.config.vnc_password and str(self.config.vnc_password).strip(): 382 | vnc_password = str(self.config.vnc_password).strip() 383 | else: 384 | vnc_password = "" 385 | novnc_webroot = self.config.novnc_webroot 386 | 387 | envs = { 388 | "AUTH_TOKEN": self.config.auth_token, 389 | "PORT": str(self.config.port), 390 | "HOST": self.config.host, 391 | # Allow forcing dependency reinstall similar to startup.sh 392 | "NPM_CI_ALWAYS": os.getenv("NPM_CI_ALWAYS", "0"), 393 | } 394 | 395 | # Base env for services 396 | service_envs = { 397 | **envs, 398 | "DISPLAY": display, 399 | "XVFB_DISPLAY": display, 400 | "XVFB_RESOLUTION": xvfb_resolution, 401 | "XVFB_WIDTH": xvfb_width, 402 | "XVFB_HEIGHT": xvfb_height, 403 | "VNC_PORT": vnc_port, 404 | "NOVNC_PORT": novnc_port, 405 | "NOVNC_WEBROOT": novnc_webroot, 406 | "VNC_PASSWORD": vnc_password, 407 | } 408 | # Pass HEADLESS flag so startup.sh decides orchestration flow (single orchestrator) 409 | service_envs["HEADLESS"] = ( 410 | "1" if (self.config.headless or self._template_indicates_headless(self.config.template_id)) else "0" 411 | ) 412 | # Only pass optional x11vnc tuning envs if the outer environment explicitly set them. 
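        # Example (illustrative values): exporting X11VNC_WAIT=10 X11VNC_DEFER=10 before
        # running the manager forwards exactly those keys into the sandbox; unset keys are
        # omitted so startup.sh falls back to its own defaults.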
413 | for key in ("X11VNC_WAIT", "X11VNC_DEFER", "X11VNC_COMPRESSION", "X11VNC_QUALITY", "X11VNC_EXTRA"): 414 | if key in os.environ and str(os.environ.get(key, "")).strip() != "": 415 | service_envs[key] = os.environ[key] 416 | 417 | logger.info("Preparing mcp-connect directory inside sandbox (startup.sh will manage .env)") 418 | await self._run(sandbox, "mkdir -p /home/user/mcp-connect", background=False, envs=envs, cwd="/home/user") 419 | 420 | # If a local nginx.conf exists in repo, push it into the sandbox and apply 421 | try: 422 | repo_dir = os.path.dirname(os.path.abspath(__file__)) 423 | local_nginx_conf = os.path.join(repo_dir, "nginx.conf") 424 | if os.path.isfile(local_nginx_conf): 425 | logger.info("Found local nginx.conf; pushing to sandbox and applying...") 426 | with open(local_nginx_conf, "r", encoding="utf-8") as f: 427 | nginx_conf_content = f.read() 428 | 429 | # Write to a temp location and move with sudo 430 | await self._write(sandbox, "/home/user/nginx.conf.tmp", nginx_conf_content) 431 | 432 | # Test and apply the configuration if nginx is already present 433 | apply_cmd = ( 434 | "bash -lc " 435 | "'set -e; " 436 | "sudo cp /home/user/nginx.conf.tmp /etc/nginx/sites-available/default; " 437 | "if pgrep -x nginx >/dev/null; then " 438 | " sudo nginx -t && sudo nginx -s reload; " 439 | "else echo nginx not running yet; fi'" 440 | ) 441 | await self._run(sandbox, apply_cmd, background=False, cwd="/home/user") 442 | else: 443 | logger.debug("No local nginx.conf found; keeping image default") 444 | except Exception as e: 445 | logger.warning("Failed to push/apply nginx.conf: %s", str(e)) 446 | 447 | # Ensure startup.sh exists and use it to bootstrap services (single orchestrator inside sandbox) 448 | 449 | logger.info("Ensuring startup.sh is available inside sandbox") 450 | startup_exists = False 451 | try: 452 | startup_check = await self._run(sandbox, "bash -lc 'if [ -f /home/user/startup.sh ]; then echo FOUND; else echo MISSING; fi'", background=False, cwd="/home/user") 453 | startup_exists = 'FOUND' in (startup_check.stdout or '') 454 | except CommandExitException: 455 | startup_exists = False 456 | 457 | repo_dir = os.path.dirname(os.path.abspath(__file__)) 458 | local_startup = os.path.join(repo_dir, "startup.sh") 459 | local_wrapper = os.path.join(repo_dir, "chrome-devtools-wrapper.sh") 460 | local_servers = os.path.join(repo_dir, "servers.json") 461 | 462 | # Optional: prefer fetching assets from a remote repo (always-latest) inside the sandbox 463 | # Now controlled via SandboxConfig instead of environment variables. 
464 | fetch_remote = bool(self.config.fetch_remote) 465 | remote_base = str(self.config.remote_base).strip() 466 | 467 | # Try to load resources from packaged e2b_mcp_sandbox if not present next to this file 468 | def _resource_text(pkg: str, name: str) -> Optional[str]: 469 | try: 470 | pkg_mod = __import__(pkg, fromlist=['dummy']) 471 | ref = importlib_resources.files(pkg_mod) / name # type: ignore 472 | if ref.is_file(): 473 | return ref.read_text(encoding='utf-8') 474 | except Exception: 475 | return None 476 | return None 477 | 478 | startup_contents: Optional[str] = None 479 | remote_ok = False 480 | if fetch_remote: 481 | try: 482 | logger.info("Fetching startup.sh from remote: %s", remote_base) 483 | cmd = ( 484 | f"bash -lc 'curl -fsSL {remote_base}/startup.sh -o /home/user/startup.sh && chmod +x /home/user/startup.sh && echo OK'" 485 | ) 486 | out = await self._run(sandbox, cmd, background=False, cwd="/home/user") 487 | if out and "OK" in (out.stdout or ""): 488 | startup_exists = True 489 | remote_ok = True 490 | except Exception as e: 491 | logger.warning("Remote fetch of startup.sh failed; falling back to packaged/local. %s", str(e)) 492 | 493 | if not remote_ok: 494 | if os.path.isfile(local_startup): 495 | with open(local_startup, "r", encoding="utf-8") as handle: 496 | startup_contents = handle.read() 497 | if startup_contents is None: 498 | # try packaged fallback from this package path 499 | startup_contents = _resource_text('deploy.e2b', 'startup.sh') 500 | if startup_contents: 501 | logger.info("Using packaged startup.sh from deploy.e2b") 502 | if startup_contents is None: 503 | logger.warning("Local startup.sh not found at %s and no packaged resource; cannot upload", local_startup) 504 | else: 505 | logger.info("Uploading startup.sh to sandbox (overwriting existing copy)") 506 | await self._write(sandbox, "/home/user/startup.sh", startup_contents) 507 | startup_exists = True 508 | 509 | wrapper_contents: Optional[str] = None 510 | wrapper_remote_ok = False 511 | if fetch_remote: 512 | try: 513 | logger.info("Fetching chrome-devtools-wrapper.sh from remote: %s", remote_base) 514 | cmd = ( 515 | f"bash -lc 'curl -fsSL {remote_base}/chrome-devtools-wrapper.sh -o /home/user/chrome-devtools-wrapper.sh && chmod +x /home/user/chrome-devtools-wrapper.sh && echo OK'" 516 | ) 517 | out = await self._run(sandbox, cmd, background=False, cwd="/home/user") 518 | if out and "OK" in (out.stdout or ""): 519 | wrapper_remote_ok = True 520 | except Exception: 521 | pass 522 | if not wrapper_remote_ok: 523 | if os.path.isfile(local_wrapper): 524 | with open(local_wrapper, "r", encoding="utf-8") as handle: 525 | wrapper_contents = handle.read() 526 | if wrapper_contents is None: 527 | wrapper_contents = _resource_text('deploy.e2b', 'chrome-devtools-wrapper.sh') 528 | if wrapper_contents: 529 | logger.info("Using packaged chrome-devtools-wrapper.sh from deploy.e2b") 530 | if wrapper_contents is None: 531 | logger.warning("chrome-devtools wrapper script missing at %s and no packaged resource", local_wrapper) 532 | else: 533 | logger.info("Uploading chrome-devtools wrapper script to sandbox") 534 | await self._write(sandbox, "/home/user/chrome-devtools-wrapper.sh", wrapper_contents) 535 | try: 536 | await self._run(sandbox, "bash -lc 'chmod +x /home/user/chrome-devtools-wrapper.sh'", background=False, cwd="/home/user") 537 | except CommandExitException as e: 538 | logger.warning("Failed to chmod chrome-devtools-wrapper.sh: %s", getattr(e, 'stderr', '') or str(e)) 539 | 540 | # MCP servers 
configuration: optionally override repo mcp-servers.json inside mcp-connect
541 |         try:
542 |             servers_contents: Optional[str] = None
543 |             # 1) Prefer the servers.json in the same directory as sandbox_deploy.py (development scenario)
544 |             if os.path.isfile(local_servers):
545 |                 with open(local_servers, "r", encoding="utf-8") as handle:
546 |                     servers_contents = handle.read()
547 |                 logger.info("Using local deploy/e2b/servers.json to override /home/user/mcp-connect/mcp-servers.json")
548 |             # 2) If it is not there, fall back to the servers.json packaged in the deploy.e2b package (installed/production scenario)
549 |             if servers_contents is None:
550 |                 pkg_servers = _resource_text('deploy.e2b', 'servers.json')
551 |                 if pkg_servers:
552 |                     servers_contents = pkg_servers
553 |                     logger.info("Using packaged deploy.e2b/servers.json to override /home/user/mcp-connect/mcp-servers.json")
554 | 
555 |             if servers_contents is not None:
556 |                 await self._write(sandbox, "/home/user/mcp-connect/mcp-servers.json", servers_contents)
557 |             else:
558 |                 logger.info("No deploy/e2b/servers.json (local or packaged) found; keeping mcp-connect repo mcp-servers.json")
559 |         except Exception as e:
560 |             logger.warning("Failed to update /home/user/mcp-connect/mcp-servers.json: %s", str(e))
561 | 
562 |         if startup_exists:
563 |             try:
564 |                 await self._run(sandbox, "bash -lc 'chmod +x /home/user/startup.sh'", background=False, cwd="/home/user")
565 |             except CommandExitException as e:
566 |                 logger.warning("Failed to chmod startup.sh: %s", getattr(e, 'stderr', '') or str(e))
567 | 
568 |             logger.info("Launching startup.sh to initialise GUI and proxy services")
569 |             try:
570 |                 start_cmd = (
571 |                     "bash -lc '"
572 |                     "if [ -f /home/user/startup_sh.pid ] && kill -0 $(cat /home/user/startup_sh.pid) 2>/dev/null; then "
573 |                     " echo startup.sh already running; "
574 |                     "else "
575 |                     " nohup /home/user/startup.sh > /home/user/startup.log 2>&1 & "
576 |                     " echo $! > /home/user/startup_sh.pid; "
577 |                     "fi'"
578 |                 )
579 |                 await self._run(sandbox, start_cmd, background=False, envs=service_envs, cwd="/home/user")
580 |             except CommandExitException as e:
581 |                 logger.error("Failed to launch startup.sh: %s", getattr(e, 'stderr', '') or str(e))
582 |                 raise
583 | 
584 |             logger.info("startup.sh launched; delegating service orchestration to it")
585 |         else:
586 |             logger.warning("startup.sh is not present inside sandbox; GUI services may be unavailable")
587 | 
588 |         # Handles retained for compatibility with existing return structure
589 |         chrome_handle = None
590 |         nginx_handle = None
591 | 
592 |         # All service management is delegated to startup.sh; avoid duplicate MCP/nginx handling
593 |         mcp_handle = None
594 | 
595 |         return {
596 |             "handles": {
597 |                 "chrome": chrome_handle,
598 |                 "nginx": nginx_handle,
599 |                 "mcp_connect": mcp_handle,
600 |                 "xvfb": None,
601 |                 "fluxbox": None,
602 |                 "x11vnc": None,
603 |                 "novnc": None,
604 |             },
605 |             "envs": {**envs, "LOG_LEVEL": "info"},
606 |         }
607 | 
608 |     @staticmethod
609 |     def _parse_resolution(resolution: str) -> Tuple[str, str]:
610 |         """Extract width and height from an Xvfb resolution string (e.g.
1920x1080x24).""" 611 | try: 612 | parts = resolution.lower().split("x") 613 | width = parts[0] if parts else "1920" 614 | height = parts[1] if len(parts) > 1 else "1080" 615 | except Exception: 616 | return "1920", "1080" 617 | 618 | width_digits = ''.join(ch for ch in width if ch.isdigit()) or "1920" 619 | height_digits = ''.join(ch for ch in height if ch.isdigit()) or "1080" 620 | return width_digits, height_digits 621 | 622 | @staticmethod 623 | def _template_indicates_headless(template_id: str) -> bool: 624 | """Infer headless-like behavior from template naming conventions. 625 | 626 | Treat aliases or IDs containing 'minimal' or 'simple' as headless (no GUI/noVNC). 627 | This heuristic avoids exposing noVNC URLs for minimal/simple templates. 628 | """ 629 | tid = (template_id or "").lower() 630 | patterns = ("minimal", "simple", "mcp-dev-minimal", "mcp-dev-simple") 631 | return any(p in tid for p in patterns) 632 | 633 | def _get_public_url(self, sandbox, secure: Optional[bool] = None) -> str: 634 | """ 635 | Get the public URL for the sandbox 636 | 637 | Args: 638 | sandbox: The E2B sandbox instance 639 | 640 | Returns: 641 | The public URL for accessing the sandbox 642 | """ 643 | target_secure = self.config.secure if secure is None else secure 644 | 645 | port = 443 if target_secure else 80 646 | scheme = "https" if target_secure else "http" 647 | 648 | try: 649 | hostname = sandbox.get_host(port) 650 | return f"{scheme}://{hostname}" 651 | except Exception: 652 | # attempt alternate port as last resort 653 | alt_port = 80 if target_secure else 443 654 | alt_scheme = "https" if alt_port == 443 else "http" 655 | hostname = sandbox.get_host(alt_port) 656 | return f"{alt_scheme}://{hostname}" 657 | 658 | async def _wait_for_services( 659 | self, 660 | sandbox, 661 | https_url: str, 662 | http_url: str, 663 | max_attempts: int = 30, 664 | delay: float = 2.0 665 | ) -> Dict[str, Any]: 666 | """ 667 | Probe both HTTPS and HTTP health endpoints to determine readiness. 
668 | 669 | Args: 670 | sandbox: The sandbox instance 671 | https_url: Candidate HTTPS public URL 672 | http_url: Candidate HTTP public URL 673 | max_attempts: Maximum number of attempts 674 | delay: Delay between attempts in seconds 675 | 676 | Returns: 677 | Dict with keys: https_ok (bool), http_ok (bool), healthy_url (str|None) 678 | """ 679 | try: 680 | import httpx # local import to allow absence handling 681 | except Exception: 682 | logger.warning("httpx not installed; skipping readiness probes and returning insecure URL preference.") 683 | return {"https_ok": False, "http_ok": False, "healthy_url": None} 684 | 685 | https_ok = False 686 | http_ok = False 687 | healthy_url = None 688 | 689 | for attempt in range(max_attempts): 690 | try: 691 | # Try HTTPS first (ignore TLS validation issues because of self-signed cert) 692 | async with httpx.AsyncClient(verify=False, timeout=5) as client: 693 | # HTTPS 694 | try: 695 | resp = await client.get(f"{https_url}/health") 696 | if resp.status_code == 200: 697 | https_ok = True 698 | healthy_url = https_url 699 | except Exception: 700 | pass 701 | # Optional HTTP probe 702 | if self.config.probe_http: 703 | try: 704 | resp = await client.get(f"{http_url}/health") 705 | if resp.status_code == 200: 706 | http_ok = True 707 | if healthy_url is None: 708 | healthy_url = http_url 709 | except Exception: 710 | pass 711 | 712 | except Exception: 713 | # ignore session-level errors; we'll retry 714 | pass 715 | 716 | if https_ok or http_ok: 717 | logger.info( 718 | f"Services are ready (https_ok={https_ok}, http_ok={http_ok}, healthy={healthy_url})" 719 | ) 720 | return {"https_ok": https_ok, "http_ok": http_ok, "healthy_url": healthy_url} 721 | 722 | if self.config.probe_http: 723 | logger.debug( 724 | f"Attempt {attempt + 1}/{max_attempts}: Services not ready yet (https={https_url}, http={http_url})" 725 | ) 726 | else: 727 | logger.debug( 728 | f"Attempt {attempt + 1}/{max_attempts}: Services not ready yet (https={https_url})" 729 | ) 730 | await asyncio.sleep(delay) 731 | 732 | logger.warning( 733 | f"Services did not become ready after {max_attempts} attempts. Will continue without readiness guarantee." 
734 | ) 735 | return {"https_ok": https_ok, "http_ok": http_ok, "healthy_url": healthy_url} 736 | 737 | async def list_sandboxes(self) -> Dict[str, Any]: 738 | """ 739 | List all active sandboxes 740 | 741 | Returns: 742 | Dictionary containing list of active sandboxes 743 | """ 744 | sandboxes_info = [] 745 | 746 | for sandbox_id, entry in self.active_sandboxes.items(): 747 | sandbox = entry["sandbox"] 748 | sandboxes_info.append({ 749 | "sandbox_id": sandbox_id, 750 | "e2b_sandbox_id": sandbox.sandbox_id, 751 | "public_url": self._get_public_url(sandbox), 752 | "status": "active" 753 | }) 754 | 755 | return { 756 | "success": True, 757 | "count": len(sandboxes_info), 758 | "sandboxes": sandboxes_info 759 | } 760 | 761 | async def stop_sandbox(self, sandbox_id: str) -> Dict[str, Any]: 762 | """ 763 | Stop and remove a sandbox 764 | 765 | Args: 766 | sandbox_id: The ID of the sandbox to stop 767 | 768 | Returns: 769 | Dictionary containing operation status 770 | """ 771 | try: 772 | if sandbox_id not in self.active_sandboxes: 773 | return { 774 | "success": False, 775 | "error": f"Sandbox {sandbox_id} not found" 776 | } 777 | 778 | sandbox_entry = self.active_sandboxes[sandbox_id] 779 | sandbox = sandbox_entry["sandbox"] 780 | 781 | logger.info(f"Stopping sandbox: {sandbox_id}") 782 | 783 | # Cancel keepalive task if running 784 | try: 785 | task = sandbox_entry.get("keepalive_task") 786 | if task: 787 | task.cancel() 788 | except Exception: 789 | pass 790 | try: 791 | ptask = sandbox_entry.get("platform_keepalive_task") 792 | if ptask: 793 | ptask.cancel() 794 | except Exception: 795 | pass 796 | 797 | # Attempt to terminate services gracefully even if detached 798 | stop_cmds = [ 799 | "bash -lc 'if [ -f /home/user/mcp-connect/mcp.pid ]; then kill $(cat /home/user/mcp-connect/mcp.pid) 2>/dev/null || true; rm -f /home/user/mcp-connect/mcp.pid; else pkill -f \"npm run start\" 2>/dev/null || true; fi'", 800 | "sudo nginx -s quit", 801 | "bash -lc 'if [ -f /home/user/chrome.pid ]; then kill $(cat /home/user/chrome.pid) 2>/dev/null || true; rm -f /home/user/chrome.pid; else pkill -f -- \"--remote-debugging-port=9222\" 2>/dev/null || true; fi'", 802 | "bash -lc 'if [ -f /home/user/novnc.pid ]; then kill $(cat /home/user/novnc.pid) 2>/dev/null || true; rm -f /home/user/novnc.pid; else pkill -f websockify 2>/dev/null || true; fi'", 803 | "bash -lc 'if [ -f /home/user/x11vnc.pid ]; then kill $(cat /home/user/x11vnc.pid) 2>/dev/null || true; rm -f /home/user/x11vnc.pid; else pkill -f x11vnc 2>/dev/null || true; fi'", 804 | "bash -lc 'if [ -f /home/user/fluxbox.pid ]; then kill $(cat /home/user/fluxbox.pid) 2>/dev/null || true; rm -f /home/user/fluxbox.pid; else pkill -x fluxbox 2>/dev/null || true; fi'", 805 | f"bash -lc 'if [ -f /home/user/xvfb.pid ]; then kill $(cat /home/user/xvfb.pid) 2>/dev/null || true; rm -f /home/user/xvfb.pid; else pkill -f \"Xvfb {self.config.display}\" 2>/dev/null || true; fi'", 806 | ] 807 | for cmd in stop_cmds: 808 | try: 809 | await self._run(sandbox, cmd, background=False, cwd="/home/user") 810 | except Exception: 811 | pass 812 | # Stop the sandbox itself 813 | await self._kill(sandbox) 814 | 815 | # Remove from active sandboxes 816 | del self.active_sandboxes[sandbox_id] 817 | 818 | return { 819 | "success": True, 820 | "message": f"Sandbox {sandbox_id} stopped successfully" 821 | } 822 | 823 | except Exception as e: 824 | logger.error(f"Failed to stop sandbox: {str(e)}") 825 | return { 826 | "success": False, 827 | "error": str(e) 828 | } 829 | 830 | async def 
stop_all_sandboxes(self) -> Dict[str, Any]: 831 | """ 832 | Stop all active sandboxes 833 | 834 | Returns: 835 | Dictionary containing operation status 836 | """ 837 | results = [] 838 | 839 | for sandbox_id in list(self.active_sandboxes.keys()): 840 | result = await self.stop_sandbox(sandbox_id) 841 | results.append({ 842 | "sandbox_id": sandbox_id, 843 | "stopped": result["success"] 844 | }) 845 | 846 | return { 847 | "success": True, 848 | "stopped_count": len(results), 849 | "results": results 850 | } 851 | 852 | async def _keepalive_loop(self, sandbox_id: str) -> None: 853 | """Periodically ping the public URL to avoid idle eviction and refresh mapping. 854 | 855 | This loop will: 856 | - Probe both HTTPS and HTTP /health endpoints (TLS verification disabled for self-signed) 857 | - Prefer HTTPS when both are healthy 858 | - Log status each cycle 859 | """ 860 | try: 861 | import httpx 862 | except Exception: 863 | logger.warning("httpx not installed; disabling keepalive probes.") 864 | return 865 | interval = max(5, int(self.config.keepalive_interval)) # floor to >=5s 866 | while True: 867 | try: 868 | entry = self.active_sandboxes.get(sandbox_id) 869 | if not entry: 870 | return 871 | sandbox = entry["sandbox"] 872 | https_url = self._get_public_url(sandbox, secure=True) 873 | http_url = self._get_public_url(sandbox, secure=False) if self.config.probe_http else None 874 | 875 | https_ok = False 876 | http_ok = False 877 | async with httpx.AsyncClient(verify=False, timeout=5) as client: 878 | try: 879 | r1 = await client.get(f"{https_url}/health") 880 | https_ok = (r1.status_code == 200) 881 | except Exception: 882 | https_ok = False 883 | if http_url: 884 | try: 885 | r2 = await client.get(f"{http_url}/health") 886 | http_ok = (r2.status_code == 200) 887 | except Exception: 888 | http_ok = False 889 | 890 | if https_ok or http_ok: 891 | chosen = https_url if https_ok else (http_url or "") 892 | if self.config.probe_http: 893 | logger.debug("Keepalive OK for %s (https=%s, http=%s)", sandbox_id, https_ok, http_ok) 894 | else: 895 | logger.debug("Keepalive OK for %s (https=%s)", sandbox_id, https_ok) 896 | else: 897 | if self.config.probe_http: 898 | logger.warning("Keepalive probe failed for %s (https=%s, http=%s)", sandbox_id, https_ok, http_ok) 899 | else: 900 | logger.warning("Keepalive probe failed for %s (https=%s)", sandbox_id, https_ok) 901 | await asyncio.sleep(interval) 902 | except asyncio.CancelledError: 903 | return 904 | except Exception as e: 905 | logger.debug("Keepalive loop error: %s", str(e)) 906 | await asyncio.sleep(interval) 907 | 908 | async def _platform_keepalive_loop(self, sandbox_id: str) -> None: 909 | """Ping the sandbox with a no-op command periodically to keep session active.""" 910 | interval = max(10, int(self.config.platform_keepalive_interval)) 911 | while True: 912 | try: 913 | entry = self.active_sandboxes.get(sandbox_id) 914 | if not entry: 915 | return 916 | sandbox = entry["sandbox"] 917 | # Execute a very cheap no-op 918 | await self._run(sandbox, "bash -lc 'true'", False) 919 | await asyncio.sleep(interval) 920 | except asyncio.CancelledError: 921 | return 922 | except Exception as e: 923 | logger.debug("Platform keepalive error: %s", str(e)) 924 | await asyncio.sleep(interval) 925 | 926 | # ---------------- Helper abstraction layer for async vs legacy sandbox APIs ---------------- 927 | @staticmethod 928 | def _is_coro(obj: Any) -> bool: 929 | return asyncio.iscoroutine(obj) or asyncio.isfuture(obj) 930 | 931 | async def _run(self, sandbox: 
Any, *args, **kwargs) -> Any: 932 | """Invoke sandbox.commands.run handling async or sync implementations.""" 933 | runner = getattr(sandbox, 'commands', None) 934 | if runner is None: 935 | raise RuntimeError("Sandbox object missing 'commands' interface") 936 | fn = getattr(runner, 'run', None) 937 | if fn is None: 938 | raise RuntimeError("Sandbox.commands missing 'run' method") 939 | try: 940 | result = fn(*args, **kwargs) 941 | if self._is_coro(result): # Async API 942 | return await result 943 | # Legacy returns an object directly 944 | return result 945 | except Exception: 946 | # Some legacy variants may require threading if blocking; fallback 947 | return await asyncio.to_thread(fn, *args, **kwargs) 948 | 949 | async def _write(self, sandbox: Any, path: str, content: str) -> None: 950 | files = getattr(sandbox, 'files', None) 951 | if files is None: 952 | raise RuntimeError("Sandbox object missing 'files' interface") 953 | fn = getattr(files, 'write', None) 954 | if fn is None: 955 | raise RuntimeError("Sandbox.files missing 'write' method") 956 | result = fn(path, content) 957 | if self._is_coro(result): 958 | await result 959 | 960 | async def _kill(self, sandbox: Any) -> None: 961 | fn = getattr(sandbox, 'kill', None) 962 | if fn is None: 963 | return 964 | result = fn() 965 | if self._is_coro(result): 966 | await result 967 | 968 | async def main(): 969 | """CLI entrypoint: allow specifying --template-id and --sandbox-id.""" 970 | parser = argparse.ArgumentParser(description="Create an E2B sandbox running MCP Connect") 971 | parser.add_argument("--template-id", required=False, help="Template ID or alias to use (or set E2B_TEMPLATE_ID)") 972 | parser.add_argument("--sandbox-id", default="mcp_test_sandbox", help="Logical sandbox identifier label") 973 | parser.add_argument("--no-internet", action="store_true", help="Disable internet access inside sandbox") 974 | parser.add_argument("--no-wait", action="store_true", help="Do not wait for service readiness") 975 | parser.add_argument("--timeout", type=int, default=3600, help="Sandbox timeout seconds (default 3600)") 976 | parser.add_argument("--xvfb-resolution", dest="xvfb_resolution", default=os.getenv("XVFB_RESOLUTION", ""), help="Set Xvfb resolution, e.g. 1280x800x24 (env: XVFB_RESOLUTION)") 977 | parser.add_argument("--headless", action="store_true", help="Launch in lightweight headless mode (no X/noVNC/VNC/Chrome)") 978 | parser.add_argument("--auth-token", dest="auth_token", default=None, help="Bearer token for bridge API auth (maps to AUTH_TOKEN)") 979 | parser.add_argument("--no-remote-fetch", action="store_true", help="Disable fetching startup.sh and configs from remote base") 980 | parser.add_argument("--remote-base", default=None, help="Remote base URL to fetch assets (e.g. https://raw.githubusercontent.com////deploy/e2b)") 981 | parser.add_argument("--probe-http", action="store_true", help="Also probe HTTP (port 80) /health alongside HTTPS during readiness and keepalive") 982 | args = parser.parse_args() 983 | 984 | template_id = (args.template_id or os.getenv("E2B_TEMPLATE_ID", "")).strip() 985 | if not template_id: 986 | print("❌ Error: Missing template ID. 
Provide --template-id or set E2B_TEMPLATE_ID.") 987 | sys.exit(2) 988 | config = SandboxConfig( 989 | template_id=template_id, 990 | timeout=args.timeout, 991 | metadata={"purpose": ("mcp-dev-headless" if args.headless else "mcp-dev-gui")}, 992 | headless=bool(args.headless), 993 | ) 994 | # Prefer CLI --auth-token; otherwise fall back to environment variables 995 | if args.auth_token: 996 | config.auth_token = args.auth_token 997 | else: 998 | env_token = os.getenv("E2B_MCP_AUTH_TOKEN") or os.getenv("AUTH_TOKEN") or "" 999 | if env_token: 1000 | config.auth_token = env_token 1001 | if args.no_remote_fetch: 1002 | config.fetch_remote = False 1003 | if args.remote_base: 1004 | config.remote_base = args.remote_base 1005 | if args.xvfb_resolution: 1006 | config.xvfb_resolution = args.xvfb_resolution 1007 | if args.probe_http: 1008 | config.probe_http = True 1009 | manager = E2BSandboxManager(config) 1010 | logger.info("Creating E2B sandbox (template=%s sandbox_id=%s)...", template_id, args.sandbox_id) 1011 | result = await manager.create_sandbox( 1012 | sandbox_id=args.sandbox_id, 1013 | enable_internet=not args.no_internet, 1014 | wait_for_ready=not args.no_wait, 1015 | ) 1016 | if not result.get("success"): 1017 | print(f"❌ Failed to create sandbox: {result.get('error')}") 1018 | sys.exit(1) 1019 | 1020 | print("\n" + "="*60) 1021 | print("✅ SANDBOX CREATED SUCCESSFULLY!") 1022 | print("="*60) 1023 | print(f"\n📦 Sandbox ID: {result['sandbox_id']}") 1024 | print(f"🌐 Public URL: {result['public_url']}") 1025 | if not args.headless: 1026 | if 'novnc_url' in result and result['novnc_url']: 1027 | print(f"🖥️ noVNC URL: {result['novnc_url']}") 1028 | if 'websocket_url' in result and result['websocket_url']: 1029 | print(f"🔌 WebSocket URL: {result['websocket_url']}") 1030 | print(f"\n🔧 Services:") 1031 | for service_name, service_info in result['services'].items(): 1032 | print(f" - {service_name}:") 1033 | print(f" URL: {service_info.get('url', 'N/A')}") 1034 | print(f" Status: {service_info['status']}") 1035 | if 'auth_token' in service_info: 1036 | print(f" Auth Token: {service_info['auth_token']}") 1037 | print(f"\n⏱️ Timeout: {result['timeout_seconds']} seconds") 1038 | print(f"🕐 Created: {result['created_at']}") 1039 | print("\n" + "="*60) 1040 | 1041 | # Keep sandbox running for demonstration 1042 | print("\n⌨️ Press Ctrl+C to stop the sandbox...") 1043 | try: 1044 | await asyncio.sleep(3600) # Keep running for 1 hour max 1045 | except (KeyboardInterrupt, asyncio.CancelledError): 1046 | # Gracefully handle both direct KeyboardInterrupt and asyncio task cancellation 1047 | print("\n🛑 Stopping sandbox...") 1048 | # Only stop the current sandbox instead of all 1049 | await manager.stop_sandbox(args.sandbox_id) 1050 | print("✅ Sandbox stopped") 1051 | 1052 | if __name__ == "__main__": 1053 | # Check for E2B API key 1054 | if not os.getenv('E2B_API_KEY'): 1055 | print("❌ Error: E2B_API_KEY environment variable not set") 1056 | print("Please set your E2B API key:") 1057 | print(" export E2B_API_KEY='your-api-key-here'") 1058 | sys.exit(1) 1059 | 1060 | # Run the main function; swallow top-level KeyboardInterrupt to avoid noisy trace 1061 | try: 1062 | asyncio.run(main()) 1063 | except KeyboardInterrupt: 1064 | # Already handled inside main; exit quietly 1065 | pass 1066 | --------------------------------------------------------------------------------
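Editor's addendum: a minimal, hedged sketch of driving the manager from another script, mirroring the CLI flow in main() above. The import path, the "demo-sandbox" label, and the timeout value are assumptions for illustration; E2B_API_KEY and E2B_TEMPLATE_ID must be set in the environment, as the module itself requires.

import asyncio
import os

from sandbox_deploy import E2BSandboxManager, SandboxConfig  # import path assumed; adjust to your install

async def demo() -> None:
    # Template alias/ID comes from the environment, as in main() above.
    config = SandboxConfig(
        template_id=os.environ["E2B_TEMPLATE_ID"],
        timeout=1800,
        headless=True,  # skip the X/noVNC/Chrome bootstrap for a lightweight sandbox
    )
    config.auth_token = os.getenv("E2B_MCP_AUTH_TOKEN", "")  # optional bridge auth token

    manager = E2BSandboxManager(config)
    result = await manager.create_sandbox(sandbox_id="demo-sandbox", wait_for_ready=True)
    if not result.get("success"):
        raise RuntimeError(result.get("error"))
    try:
        print("Public URL:", result["public_url"])  # sandbox endpoint returned by create_sandbox
    finally:
        await manager.stop_sandbox("demo-sandbox")  # cancels keepalive tasks and kills the sandbox

if __name__ == "__main__":
    asyncio.run(demo())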
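Also illustrative: the readiness and keepalive checks in _wait_for_services and _keepalive_loop reduce to a GET on /health against the sandbox's public URL with TLS verification disabled (the HTTPS endpoint may use a self-signed certificate). A standalone sketch for checking a sandbox by hand, where the URL is whatever create_sandbox returned:

import asyncio

import httpx  # optional dependency, as in sandbox_deploy.py

async def is_ready(public_url: str) -> bool:
    # Mirrors the manager's probe: a 200 response from /health counts as ready.
    async with httpx.AsyncClient(verify=False, timeout=5) as client:
        try:
            resp = await client.get(f"{public_url}/health")
            return resp.status_code == 200
        except Exception:
            return False

# Example: asyncio.run(is_ready(result["public_url"]))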