├── src ├── services │ ├── fallback-capture.service.js │ └── capture.service.js ├── input.css ├── core │ ├── config.js │ └── logger.js ├── ui │ ├── settings-window.js │ ├── chat-window.js │ └── llm-response-window.js ├── styles │ └── common.css └── managers │ └── session.manager.js ├── .gitignore ├── assests ├── activity.png ├── settings.png ├── terminal.png └── icons │ ├── activity.png │ ├── settings.png │ └── terminal.png ├── speech-recognition.js ├── tailwind.config.js ├── prompts ├── dsa.md └── programming.md ├── env.example ├── package.json ├── preload.js ├── setup.sh ├── LICENSE ├── README.md ├── index.html ├── settings.html └── prompt-loader.js /src/services/fallback-capture.service.js: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | .env 3 | eng.traineddata 4 | dist/ 5 | .DS_Store -------------------------------------------------------------------------------- /src/input.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; -------------------------------------------------------------------------------- /assests/activity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TechyCSR/OpenCluely/HEAD/assests/activity.png -------------------------------------------------------------------------------- /assests/settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TechyCSR/OpenCluely/HEAD/assests/settings.png -------------------------------------------------------------------------------- /assests/terminal.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/TechyCSR/OpenCluely/HEAD/assests/terminal.png -------------------------------------------------------------------------------- /assests/icons/activity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TechyCSR/OpenCluely/HEAD/assests/icons/activity.png -------------------------------------------------------------------------------- /assests/icons/settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TechyCSR/OpenCluely/HEAD/assests/icons/settings.png -------------------------------------------------------------------------------- /assests/icons/terminal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TechyCSR/OpenCluely/HEAD/assests/icons/terminal.png -------------------------------------------------------------------------------- /speech-recognition.js: -------------------------------------------------------------------------------- 1 | // Speech Recognition wrapper for testing 2 | const speechService = require('./src/services/speech.service'); 3 | 4 | module.exports = speechService; -------------------------------------------------------------------------------- /tailwind.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | module.exports = { 3 | content: ["./**/*.{html,js}"], 4 | theme: { 5 | extend: {}, 6 | }, 7 | plugins: [], 8 | } -------------------------------------------------------------------------------- /prompts/dsa.md: -------------------------------------------------------------------------------- 1 | # DSA Interview Helper Agent (Focused & Optimal) 2 | 3 | You are a competitive programming expert that outputs the most optimal solution with minimal time and space complexity. 
4 | 5 | STRICT RULES 6 | - Output code ONLY in the user-selected language. No alternatives unless asked. 7 | - Use triple backticks with the correct language tag. 8 | - Prefer O(n) or O(n log n) where feasible; call out if optimal lower bound is higher. 9 | - If the question includes pre-code or a starter template, strictly use that template in your answer. 10 | - Avoid extra commentary; be concise and implementation-focused. 11 | - Your code must not contain any comments. 12 | 13 | Workflow 14 | 1) Identify the problem pattern quickly (Array, Hashing, Two Pointers, Sliding Window, Binary Search, Stack/Queue, Linked List, Tree/Graph, Heap, Greedy, DP). 15 | 2) State naive idea in 1–2 lines with complexity. 16 | 3) Give optimal approach with 3–5 bullet steps. 17 | 4) Provide clean, production-ready, comment-free implementation in the selected language. 18 | 5) State time and space complexity precisely. 19 | 6) Optional: 1 short dry-run example if non-obvious. 20 | 21 | Implementation Template 22 | ```lang 23 | ``` 24 | 25 | Notes 26 | - Prefer iterative over recursive when it reduces stack usage or improves clarity. 27 | - Use built-in data structures and libraries idiomatically for the selected language. 28 | - For DP, specify state, transition, and memory optimization opportunities. 
-------------------------------------------------------------------------------- /env.example: -------------------------------------------------------------------------------- 1 | # Google Gemini API Configuration 2 | # Get your API key from: https://makersuite.google.com/app/apikey 3 | GEMINI_API_KEY=your_gemini_api_key_here 4 | 5 | 6 | # Azure Speech Services Configuration 7 | # Copy this file to .env and fill in your actual credentials 8 | 9 | # Your Azure Speech Service key (get this from Azure Portal) 10 | AZURE_SPEECH_KEY=your_azure_speech_key_here 11 | 12 | # Your Azure Speech Service region (e.g., eastus, westeurope, japaneast) 13 | AZURE_SPEECH_REGION=your_azure_region_here 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 1. Two Sum 56 | Solved 57 | Easy 58 | Topics 59 | premium lock icon 60 | Companies 61 | Hint 62 | Given an array of integers nums and an integer target, return indices of the two numbers such that they add up to target. 63 | 64 | You may assume that each input would have exactly one solution, and you may not use the same element twice. 65 | 66 | You can return the answer in any order. 67 | 68 | 69 | 70 | Example 1: 71 | 72 | Input: nums = [2,7,11,15], target = 9 73 | Output: [0,1] 74 | Explanation: Because nums[0] + nums[1] == 9, we return [0, 1]. 75 | Example 2: 76 | 77 | Input: nums = [3,2,4], target = 6 78 | Output: [1,2] 79 | Example 3: 80 | 81 | Input: nums = [3,3], target = 6 82 | Output: [0,1] 83 | 84 | 85 | Constraints: 86 | 87 | 2 <= nums.length <= 104 88 | -109 <= nums[i] <= 109 89 | -109 <= target <= 109 90 | Only one valid answer exists. 
91 | 92 | 93 | 94 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /prompts/programming.md: -------------------------------------------------------------------------------- 1 | # Programming Interview Helper Agent 2 | 3 | You are a concise programming interview assistant. Provide quick, actionable guidance without revealing you're an AI helper. 4 | 5 | ## Response Structure 6 | 7 | ### 1. Naive Approach (30 seconds) 8 | - State the simplest solution first 9 | - Mention time/space complexity 10 | - One-line reasoning why it works 11 | 12 | ### 2. Optimized Solution (2 minutes) 13 | - Best approach with clear explanation 14 | - Step-by-step algorithm breakdown 15 | - Time/space complexity analysis 16 | 17 | ### 3. Dry Run (1 minute) 18 | - Walk through with a concrete example 19 | - Show key variable states at each step 20 | - Highlight the core insight 21 | 22 | ### 4. Production Code 23 | ```language 24 | // Clean, interview-ready implementation 25 | // Include edge case handling 26 | // Add meaningful comments 27 | ``` 28 | 29 | ### 5. Quick Validation 30 | - 2-3 test cases (edge cases included) 31 | - Alternative approaches if time permits 32 | 33 | ## Communication Style 34 | - Start with "Let me think through this step by step" 35 | - Use "First, the straightforward approach would be..." 36 | - Transition with "But we can optimize this by..." 
37 | - Be conversational, not robotic 38 | - Show your thought process naturally 39 | 40 | ## Key Technologies to Reference 41 | **Data Structures**: Arrays, HashMaps, Trees, Graphs, Heaps, Stacks, Queues 42 | **Algorithms**: Two Pointers, Sliding Window, DFS/BFS, Dynamic Programming, Binary Search 43 | **Patterns**: Divide & Conquer, Greedy, Backtracking, Memoization 44 | 45 | ## Common Optimizations 46 | - HashMap for O(1) lookups instead of nested loops 47 | - Two pointers for array problems 48 | - Binary search for sorted data 49 | - DP for overlapping subproblems 50 | - BFS/DFS for tree/graph traversal 51 | 52 | Give direct, implementable solutions with clear reasoning. Focus on demonstrating problem-solving skills naturally. -------------------------------------------------------------------------------- /src/core/config.js: -------------------------------------------------------------------------------- 1 | const path = require('path'); 2 | const os = require('os'); 3 | 4 | class ConfigManager { 5 | constructor() { 6 | this.env = process.env.NODE_ENV || 'development'; 7 | this.appDataDir = path.join(os.homedir(), '.OpenCluely'); 8 | this.loadConfiguration(); 9 | } 10 | 11 | loadConfiguration() { 12 | this.config = { 13 | app: { 14 | name: 'OpenCluely', 15 | version: '1.0.0', 16 | processTitle: 'OpenCluely', 17 | dataDir: this.appDataDir, 18 | isDevelopment: this.env === 'development', 19 | isProduction: this.env === 'production' 20 | }, 21 | 22 | window: { 23 | defaultWidth: 400, 24 | defaultHeight: 600, 25 | minWidth: 300, 26 | minHeight: 400, 27 | webPreferences: { 28 | nodeIntegration: false, 29 | contextIsolation: true, 30 | enableRemoteModule: false, 31 | preload: path.join(__dirname, '../../preload.js') 32 | } 33 | }, 34 | 35 | ocr: { 36 | language: 'eng', 37 | tempDir: os.tmpdir(), 38 | cleanupDelay: 5000 39 | }, 40 | 41 | llm: { 42 | gemini: { 43 | model: 'gemini-2.5-flash', 44 | maxRetries: 3, 45 | timeout: 60000, 46 | fallbackEnabled: true, 47 | 
enableFallbackMethod: true, 48 | generation: { 49 | temperature: 0.7, 50 | topK: 32, 51 | topP: 0.9, 52 | maxOutputTokens: 4096 53 | } 54 | } 55 | }, 56 | 57 | speech: { 58 | azure: { 59 | language: 'en-US', 60 | enableDictation: true, 61 | enableAudioLogging: false, 62 | outputFormat: 'detailed' 63 | } 64 | }, 65 | 66 | session: { 67 | maxMemorySize: 1000, 68 | compressionThreshold: 500, 69 | clearOnRestart: false 70 | }, 71 | 72 | stealth: { 73 | hideFromDock: true, 74 | noAttachConsole: true, 75 | disguiseProcess: true 76 | } 77 | }; 78 | } 79 | 80 | get(keyPath) { 81 | return keyPath.split('.').reduce((obj, key) => obj?.[key], this.config); 82 | } 83 | 84 | set(keyPath, value) { 85 | const keys = keyPath.split('.'); 86 | const lastKey = keys.pop(); 87 | const target = keys.reduce((obj, key) => obj[key] = obj[key] || {}, this.config); 88 | target[lastKey] = value; 89 | } 90 | 91 | getApiKey(service) { 92 | const envKey = `${service.toUpperCase()}_API_KEY`; 93 | return process.env[envKey]; 94 | } 95 | 96 | isFeatureEnabled(feature) { 97 | return this.get(`features.${feature}`) !== false; 98 | } 99 | } 100 | 101 | module.exports = new ConfigManager(); -------------------------------------------------------------------------------- /src/core/logger.js: -------------------------------------------------------------------------------- 1 | const winston = require('winston'); 2 | const DailyRotateFile = require('winston-daily-rotate-file'); 3 | const path = require('path'); 4 | const os = require('os'); 5 | 6 | class Logger { 7 | constructor() { 8 | this.logDir = path.join(os.homedir(), '.OpenCluely', 'logs'); 9 | this.setupLogger(); 10 | } 11 | 12 | setupLogger() { 13 | const logFormat = winston.format.combine( 14 | winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss.SSS' }), 15 | winston.format.errors({ stack: true }), 16 | winston.format.printf(({ timestamp, level, message, stack, service, ...meta }) => { 17 | const metaStr = Object.keys(meta).length ? 
JSON.stringify(meta, null, 2) : ''; 18 | const serviceStr = service ? `[${service}]` : ''; 19 | const stackStr = stack ? `\n${stack}` : ''; 20 | return `${timestamp} ${level.toUpperCase()} ${serviceStr} ${message}${stackStr}${metaStr ? `\n${metaStr}` : ''}`; 21 | }) 22 | ); 23 | 24 | this.logger = winston.createLogger({ 25 | level: process.env.LOG_LEVEL || 'info', 26 | format: logFormat, 27 | defaultMeta: { pid: process.pid }, 28 | transports: [ 29 | new winston.transports.Console({ 30 | format: winston.format.combine( 31 | winston.format.colorize(), 32 | logFormat 33 | ), 34 | stderrLevels: ['error', 'warn'] 35 | }), 36 | new DailyRotateFile({ 37 | filename: path.join(this.logDir, 'application-%DATE%.log'), 38 | datePattern: 'YYYY-MM-DD', 39 | maxSize: '20m', 40 | maxFiles: '14d', 41 | level: 'info' 42 | }), 43 | new DailyRotateFile({ 44 | filename: path.join(this.logDir, 'error-%DATE%.log'), 45 | datePattern: 'YYYY-MM-DD', 46 | maxSize: '20m', 47 | maxFiles: '30d', 48 | level: 'error' 49 | }) 50 | ], 51 | exceptionHandlers: [ 52 | new winston.transports.File({ 53 | filename: path.join(this.logDir, 'exceptions.log') 54 | }) 55 | ], 56 | rejectionHandlers: [ 57 | new winston.transports.File({ 58 | filename: path.join(this.logDir, 'rejections.log') 59 | }) 60 | ] 61 | }); 62 | } 63 | 64 | createServiceLogger(serviceName) { 65 | return { 66 | debug: (message, meta = {}) => this.logger.debug(message, { service: serviceName, ...meta }), 67 | info: (message, meta = {}) => this.logger.info(message, { service: serviceName, ...meta }), 68 | warn: (message, meta = {}) => this.logger.warn(message, { service: serviceName, ...meta }), 69 | error: (message, meta = {}) => this.logger.error(message, { service: serviceName, ...meta }), 70 | logPerformance: (operation, startTime, metadata = {}) => this.logPerformance(operation, startTime, { service: serviceName, ...metadata }) 71 | }; 72 | } 73 | 74 | getSystemMetrics() { 75 | return { 76 | memory: process.memoryUsage(), 77 | 
uptime: process.uptime(), 78 | platform: process.platform, 79 | nodeVersion: process.version 80 | }; 81 | } 82 | 83 | logPerformance(operation, startTime, metadata = {}) { 84 | const duration = Date.now() - startTime; 85 | this.logger.info(`Performance: ${operation} completed`, { 86 | service: 'PERFORMANCE', 87 | duration: `${duration}ms`, 88 | ...metadata 89 | }); 90 | return duration; 91 | } 92 | } 93 | 94 | module.exports = new Logger(); -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "opencluely", 3 | "version": "1.0.0", 4 | "description": "AI Problem Solving Assistant", 5 | "main": "main.js", 6 | "scripts": { 7 | "start": "electron .", 8 | "dev": "electron . --no-sandbox --disable-gpu", 9 | "test-speech": "node test-azure-speech.js", 10 | "build": "electron-builder", 11 | "build:mac": "electron-builder --mac", 12 | "build:win": "electron-builder --win", 13 | "build:linux": "electron-builder --linux", 14 | "build:all": "electron-builder --mac --win --linux", 15 | "dist": "npm run build", 16 | "pack": "electron-builder --dir", 17 | "clean": "rm -rf dist/", 18 | "rebuild": "npm run clean && npm run build", 19 | "release": "npm run clean && npm run build:all", 20 | "postinstall": "electron-builder install-app-deps" 21 | }, 22 | "author": { 23 | "name": "TechyCSR", 24 | "email": "info@techycsr.dev", 25 | "url": "https://techycsr.dev" 26 | }, 27 | "repository": { 28 | "type": "git", 29 | "url": "https://github.com/TechyCSR/OpenCluely" 30 | }, 31 | "license": "ISC", 32 | "dependencies": { 33 | "@google/generative-ai": "^0.24.1", 34 | "dotenv": "^16.3.1", 35 | "markdown": "^0.5.0", 36 | "marked": "^15.0.12", 37 | "microsoft-cognitiveservices-speech-sdk": "^1.40.0", 38 | "node-record-lpcm16": "^1.0.1", 39 | "winston": "^3.17.0", 40 | "winston-daily-rotate-file": "^4.7.1" 41 | }, 42 | "devDependencies": { 43 | "electron": 
"^29.1.0", 44 | "electron-builder": "^24.13.3" 45 | }, 46 | "build": { 47 | "appId": "com.opencluely.app", 48 | "productName": "OpenCluely", 49 | "directories": { 50 | "output": "dist" 51 | }, 52 | "files": [ 53 | "**/*", 54 | "!dist/**/*", 55 | "!*.md", 56 | "!.git/**/*", 57 | "!.env*" 58 | ], 59 | "extraFiles": [ 60 | { 61 | "from": "prompts", 62 | "to": "prompts" 63 | } 64 | ], 65 | "asarUnpack": [ 66 | "node_modules/**/*" 67 | ], 68 | "mac": { 69 | "category": "public.app-category.utilities", 70 | "target": [ 71 | { 72 | "target": "dmg", 73 | "arch": [ 74 | "x64", 75 | "arm64" 76 | ] 77 | }, 78 | { 79 | "target": "zip", 80 | "arch": [ 81 | "x64", 82 | "arm64" 83 | ] 84 | } 85 | ], 86 | "icon": "assets/icons/app-icon.icns", 87 | "darkModeSupport": true, 88 | "hardenedRuntime": true, 89 | "gatekeeperAssess": false, 90 | "entitlements": "build/entitlements.mac.plist", 91 | "entitlementsInherit": "build/entitlements.mac.plist" 92 | }, 93 | "win": { 94 | "target": [ 95 | { 96 | "target": "nsis", 97 | "arch": [ 98 | "x64", 99 | "ia32" 100 | ] 101 | }, 102 | { 103 | "target": "portable", 104 | "arch": [ 105 | "x64", 106 | "ia32" 107 | ] 108 | } 109 | ], 110 | "icon": "assets/icons/app-icon.ico" 111 | }, 112 | "linux": { 113 | "target": [ 114 | { 115 | "target": "AppImage", 116 | "arch": [ 117 | "x64" 118 | ] 119 | }, 120 | { 121 | "target": "deb", 122 | "arch": [ 123 | "x64" 124 | ] 125 | } 126 | ], 127 | "icon": "assets/icons/app-icon.png", 128 | "category": "Utility" 129 | }, 130 | "nsis": { 131 | "oneClick": false, 132 | "allowToChangeInstallationDirectory": true, 133 | "createDesktopShortcut": true, 134 | "createStartMenuShortcut": true 135 | }, 136 | "dmg": { 137 | "title": "OpenCluely Interview Assistant", 138 | "backgroundColor": "#000000", 139 | "window": { 140 | "width": 600, 141 | "height": 400 142 | } 143 | } 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/services/capture.service.js: 
-------------------------------------------------------------------------------- 1 | const { desktopCapturer, screen } = require('electron'); 2 | const logger = require('../core/logger').createServiceLogger('CAPTURE'); 3 | 4 | class CaptureService { 5 | constructor() { 6 | this.isProcessing = false; 7 | } 8 | 9 | listDisplays() { 10 | try { 11 | const displays = screen.getAllDisplays().map(d => ({ 12 | id: d.id, 13 | bounds: d.bounds, 14 | size: d.size, 15 | scaleFactor: d.scaleFactor, 16 | rotation: d.rotation, 17 | touchSupport: d.touchSupport || 'unknown' 18 | })); 19 | return { success: true, displays }; 20 | } catch (error) { 21 | logger.error('Failed to list displays', { error: error.message }); 22 | return { success: false, error: error.message }; 23 | } 24 | } 25 | 26 | /** 27 | * Capture screenshot and return an image buffer. 28 | * options: { displayId?: number, area?: { x, y, width, height } } 29 | */ 30 | async captureAndProcess(options = {}) { 31 | if (this.isProcessing) throw new Error('Capture already in progress'); 32 | this.isProcessing = true; 33 | const startTime = Date.now(); 34 | try { 35 | const { image, metadata } = await this.captureScreenshot(options); 36 | 37 | // Crop if area specified 38 | let finalImage = image; 39 | if (options.area && this._isValidArea(options.area)) { 40 | try { 41 | finalImage = image.crop(options.area); 42 | } catch (e) { 43 | logger.warn('Crop failed, returning full image', { error: e.message, area: options.area }); 44 | } 45 | } 46 | 47 | const buffer = finalImage.toPNG(); 48 | logger.logPerformance('Screenshot capture', startTime, { 49 | bytes: buffer.length, 50 | dimensions: finalImage.getSize() 51 | }); 52 | 53 | return { 54 | imageBuffer: buffer, 55 | mimeType: 'image/png', 56 | metadata: { 57 | timestamp: new Date().toISOString(), 58 | source: metadata, 59 | processingTime: Date.now() - startTime 60 | } 61 | }; 62 | } finally { 63 | this.isProcessing = false; 64 | } 65 | } 66 | 67 | async 
captureScreenshot(options = {}) { 68 | const targetDisplay = this._getTargetDisplay(options.displayId); 69 | const { width, height } = targetDisplay.size || { width: 1920, height: 1080 }; 70 | 71 | const sources = await desktopCapturer.getSources({ 72 | types: ['screen'], 73 | thumbnailSize: { width, height } 74 | }); 75 | 76 | if (sources.length === 0) { 77 | throw new Error('No screen sources available for capture'); 78 | } 79 | 80 | // Find source matching the target display by comparing sizes as heuristic 81 | let source = sources[0]; 82 | const match = sources.find(s => { 83 | const size = s.thumbnail.getSize(); 84 | return size.width === width && size.height === height; 85 | }); 86 | if (match) source = match; 87 | 88 | const image = source.thumbnail; 89 | if (!image) throw new Error('Failed to capture screen thumbnail'); 90 | 91 | logger.debug('Screenshot captured successfully', { 92 | sourceName: source.name, 93 | imageSize: image.getSize() 94 | }); 95 | 96 | return { 97 | image, 98 | metadata: { 99 | displayId: targetDisplay.id, 100 | sourceName: source.name, 101 | dimensions: image.getSize(), 102 | captureTime: new Date().toISOString() 103 | } 104 | }; 105 | } 106 | 107 | _getTargetDisplay(displayId) { 108 | const all = screen.getAllDisplays(); 109 | if (!all || all.length === 0) return screen.getPrimaryDisplay(); 110 | if (displayId == null) return screen.getPrimaryDisplay(); 111 | const found = all.find(d => d.id === displayId); 112 | return found || screen.getPrimaryDisplay(); 113 | } 114 | 115 | _isValidArea(area) { 116 | return area && Number.isFinite(area.x) && Number.isFinite(area.y) && 117 | Number.isFinite(area.width) && Number.isFinite(area.height) && 118 | area.width > 0 && area.height > 0; 119 | } 120 | } 121 | 122 | module.exports = new CaptureService(); 123 | -------------------------------------------------------------------------------- /preload.js: -------------------------------------------------------------------------------- 1 | const 
{ contextBridge, ipcRenderer } = require('electron') 2 | 3 | // Expose protected methods that allow the renderer process to use 4 | // the ipcRenderer without exposing the entire object 5 | contextBridge.exposeInMainWorld('electronAPI', { 6 | // Screenshot and OCR 7 | takeScreenshot: () => ipcRenderer.invoke('take-screenshot'), 8 | 9 | // Speech recognition 10 | startSpeechRecognition: () => ipcRenderer.invoke('start-speech-recognition'), 11 | stopSpeechRecognition: () => ipcRenderer.invoke('stop-speech-recognition'), 12 | getSpeechAvailability: () => ipcRenderer.invoke('get-speech-availability'), 13 | 14 | // Window management 15 | showAllWindows: () => ipcRenderer.invoke('show-all-windows'), 16 | hideAllWindows: () => ipcRenderer.invoke('hide-all-windows'), 17 | enableWindowInteraction: () => ipcRenderer.invoke('enable-window-interaction'), 18 | disableWindowInteraction: () => ipcRenderer.invoke('disable-window-interaction'), 19 | switchToChat: () => ipcRenderer.invoke('switch-to-chat'), 20 | switchToSkills: () => ipcRenderer.invoke('switch-to-skills'), 21 | resizeWindow: (width, height) => ipcRenderer.invoke('resize-window', { width, height }), 22 | moveWindow: (deltaX, deltaY) => ipcRenderer.invoke('move-window', { deltaX, deltaY }), 23 | getWindowStats: () => ipcRenderer.invoke('get-window-stats'), 24 | 25 | // Session memory 26 | getSessionHistory: () => ipcRenderer.invoke('get-session-history'), 27 | getLLMSessionHistory: () => ipcRenderer.invoke('get-llm-session-history'), 28 | clearSessionMemory: () => ipcRenderer.invoke('clear-session-memory'), 29 | formatSessionHistory: () => ipcRenderer.invoke('format-session-history'), 30 | sendChatMessage: (text) => ipcRenderer.invoke('send-chat-message', text), 31 | getSkillPrompt: (skillName) => ipcRenderer.invoke('get-skill-prompt', skillName), 32 | 33 | // Gemini LLM configuration 34 | setGeminiApiKey: (apiKey) => ipcRenderer.invoke('set-gemini-api-key', apiKey), 35 | getGeminiStatus: () => 
ipcRenderer.invoke('get-gemini-status'), 36 | testGeminiConnection: () => ipcRenderer.invoke('test-gemini-connection'), 37 | 38 | // Settings 39 | showSettings: () => ipcRenderer.invoke('show-settings'), 40 | hideSettings: () => ipcRenderer.invoke('hide-settings'), 41 | getSettings: () => ipcRenderer.invoke('get-settings'), 42 | saveSettings: (settings) => ipcRenderer.invoke('save-settings', settings), 43 | updateAppIcon: (iconKey) => ipcRenderer.invoke('update-app-icon', iconKey), 44 | updateActiveSkill: (skill) => ipcRenderer.invoke('update-active-skill', skill), 45 | restartAppForStealth: () => ipcRenderer.invoke('restart-app-for-stealth'), 46 | closeWindow: () => ipcRenderer.invoke('close-window'), 47 | quit: () => { 48 | try { 49 | ipcRenderer.send('quit-app'); 50 | // Also try the app quit method 51 | setTimeout(() => { 52 | require('electron').app.quit(); 53 | }, 100); 54 | } catch (error) { 55 | console.error('Error in quit:', error); 56 | } 57 | }, 58 | 59 | // LLM window specific methods 60 | expandLlmWindow: (contentMetrics) => ipcRenderer.invoke('expand-llm-window', contentMetrics), 61 | resizeLlmWindowForContent: (contentMetrics) => ipcRenderer.invoke('resize-llm-window-for-content', contentMetrics), 62 | 63 | // Clipboard helper for reliable copy actions 64 | copyToClipboard: (text) => { 65 | try { 66 | return ipcRenderer.invoke('copy-to-clipboard', String(text ?? 
'')); 67 | } catch (e) { 68 | console.error('copyToClipboard failed:', e); 69 | return false; 70 | } 71 | }, 72 | 73 | // Display management 74 | listDisplays: () => ipcRenderer.invoke('list-displays'), 75 | captureArea: (options) => ipcRenderer.invoke('capture-area', options), 76 | 77 | // Event listeners 78 | onTranscriptionReceived: (callback) => ipcRenderer.on('transcription-received', callback), 79 | onInterimTranscription: (callback) => ipcRenderer.on('interim-transcription', callback), 80 | onSpeechStatus: (callback) => ipcRenderer.on('speech-status', callback), 81 | onSpeechError: (callback) => ipcRenderer.on('speech-error', callback), 82 | onSpeechAvailability: (callback) => ipcRenderer.on('speech-availability', callback), 83 | onSessionEvent: (callback) => ipcRenderer.on('session-event', callback), 84 | onSessionCleared: (callback) => ipcRenderer.on('session-cleared', callback), 85 | onOcrCompleted: (callback) => ipcRenderer.on('ocr-completed', callback), 86 | onOcrError: (callback) => ipcRenderer.on('ocr-error', callback), 87 | onLlmResponse: (callback) => ipcRenderer.on('llm-response', callback), 88 | onLlmError: (callback) => ipcRenderer.on('llm-error', callback), 89 | onTranscriptionLlmResponse: (callback) => ipcRenderer.on('transcription-llm-response', callback), 90 | onOpenGeminiConfig: (callback) => ipcRenderer.on('open-gemini-config', callback), 91 | onDisplayLlmResponse: (callback) => ipcRenderer.on('display-llm-response', callback), 92 | onShowLoading: (callback) => ipcRenderer.on('show-loading', callback), 93 | onSkillChanged: (callback) => ipcRenderer.on('skill-changed', callback), 94 | onInteractionModeChanged: (callback) => ipcRenderer.on('interaction-mode-changed', callback), 95 | onRecordingStarted: (callback) => ipcRenderer.on('recording-started', callback), 96 | onRecordingStopped: (callback) => ipcRenderer.on('recording-stopped', callback), 97 | onCodingLanguageChanged: (callback) => ipcRenderer.on('coding-language-changed', callback), 
98 | 99 | // Generic receive method 100 | receive: (channel, callback) => ipcRenderer.on(channel, callback), 101 | 102 | // Remove listeners 103 | removeAllListeners: (channel) => ipcRenderer.removeAllListeners(channel) 104 | }) 105 | 106 | contextBridge.exposeInMainWorld('api', { 107 | send: (channel, data) => { 108 | let validChannels = [ 109 | 'close-settings', 110 | 'quit-app', 111 | 'save-settings', 112 | 'toggle-recording', 113 | 'toggle-interaction-mode', 114 | 'update-skill', 115 | 'window-loaded' 116 | ]; 117 | if (validChannels.includes(channel)) { 118 | ipcRenderer.send(channel, data); 119 | } else { 120 | console.warn('Invalid IPC channel:', channel); 121 | } 122 | }, 123 | receive: (channel, func) => { 124 | let validChannels = [ 125 | 'load-settings', 126 | 'recording-state-changed', 127 | 'interaction-mode-changed', 128 | 'skill-updated', 129 | 'update-skill', 130 | 'recording-started', 131 | 'recording-stopped' 132 | ]; 133 | if (validChannels.includes(channel)) { 134 | ipcRenderer.on(channel, (event, ...args) => func(...args)); 135 | } 136 | } 137 | }); -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # OpenCluely one-shot setup: install deps, (optionally) build, and run 5 | # Works on macOS, Linux, and Windows (Git Bash / MSYS2 / Cygwin) 6 | 7 | # Defaults 8 | DO_BUILD=0 9 | DO_RUN=1 10 | USE_CI=0 11 | INSTALL_SYSTEM_DEPS=0 12 | OS_NAME="unknown" 13 | PLATFORM_BUILD_SCRIPT="build" 14 | 15 | print_header() { 16 | echo "========================================" 17 | echo " OpenCluely Setup" 18 | echo "========================================" 19 | } 20 | 21 | usage() { 22 | cat </dev/null 2>&1; then 84 | echo "Error: Node.js is not installed or not in PATH. Please install Node 18+ and retry." 85 | exit 1 86 | fi 87 | if ! 
command -v npm >/dev/null 2>&1; then 88 | echo "Error: npm is not installed or not in PATH." 89 | exit 1 90 | fi 91 | 92 | echo "Node: $(node -v)" 93 | echo "npm: $(npm -v)" 94 | 95 | # Install system dependencies (optional best-effort) 96 | if [[ "$INSTALL_SYSTEM_DEPS" -eq 1 ]]; then 97 | echo "Attempting to install system dependencies (best effort)" 98 | if ! command -v sox >/dev/null 2>&1; then 99 | case "$OS_NAME" in 100 | macos) 101 | if command -v brew >/dev/null 2>&1; then 102 | echo "Installing sox via Homebrew..." 103 | brew install sox || echo "Could not install sox via brew. You can install it manually: brew install sox" 104 | else 105 | echo "Homebrew not found. Install sox manually: https://formulae.brew.sh/formula/sox" 106 | fi 107 | ;; 108 | linux) 109 | if command -v apt-get >/dev/null 2>&1; then 110 | echo "Installing sox via apt-get (sudo may prompt)..." 111 | sudo apt-get update -y && sudo apt-get install -y sox || echo "Could not install sox via apt-get." 112 | elif command -v dnf >/dev/null 2>&1; then 113 | echo "Installing sox via dnf (sudo may prompt)..." 114 | sudo dnf install -y sox || echo "Could not install sox via dnf." 115 | elif command -v pacman >/dev/null 2>&1; then 116 | echo "Installing sox via pacman (sudo may prompt)..." 117 | sudo pacman -S --noconfirm sox || echo "Could not install sox via pacman." 118 | else 119 | echo "Unknown package manager. Please install 'sox' manually." 120 | fi 121 | ;; 122 | windows) 123 | echo "On Windows, install sox via Chocolatey (Admin PowerShell): choco install sox" 124 | ;; 125 | *) 126 | echo "Unknown OS; please install 'sox' manually if you need microphone capture." 127 | ;; 128 | esac 129 | else 130 | echo "sox already installed." 131 | fi 132 | fi 133 | 134 | # Project root 135 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 136 | cd "$SCRIPT_DIR" 137 | 138 | # Ensure .env exists and has API key 139 | ENV_NEEDS_CONFIG=0 140 | if [[ ! 
-f .env ]]; then 141 | if [[ -f env.example ]]; then 142 | echo "Creating .env from env.example" 143 | cp env.example .env 144 | ENV_NEEDS_CONFIG=1 145 | else 146 | echo "Creating new .env file" 147 | cat > .env << 'EOF' 148 | # Google Gemini API Configuration 149 | # Get your API key from: https://aistudio.google.com/ 150 | GEMINI_API_KEY=your_gemini_api_key_here 151 | 152 | # Optional: Azure Speech Services Configuration 153 | # AZURE_SPEECH_KEY=your_azure_speech_key_here 154 | # AZURE_SPEECH_REGION=your_azure_region_here 155 | EOF 156 | ENV_NEEDS_CONFIG=1 157 | fi 158 | fi 159 | 160 | # If GEMINI_API_KEY is provided via env and .env lacks it, append it 161 | if [[ -n "${GEMINI_API_KEY:-}" ]]; then 162 | if ! grep -q '^GEMINI_API_KEY=' .env 2>/dev/null; then 163 | echo "GEMINI_API_KEY is set in the environment; writing to .env" 164 | printf "GEMINI_API_KEY=%s\n" "$GEMINI_API_KEY" >> .env 165 | ENV_NEEDS_CONFIG=0 166 | fi 167 | fi 168 | 169 | # Check if API key is configured 170 | if [[ "$ENV_NEEDS_CONFIG" -eq 1 ]] || grep -q "your_gemini_api_key_here" .env 2>/dev/null; then 171 | echo "" 172 | echo "==========================================" 173 | echo " ⚠️ API KEY REQUIRED" 174 | echo "==========================================" 175 | echo "" 176 | echo "OpenCluely needs a Google Gemini API key to work." 177 | echo "" 178 | echo "Steps to get your API key:" 179 | echo "1. Visit: https://aistudio.google.com/" 180 | echo "2. Click 'Create API Key'" 181 | echo "3. Copy the generated key" 182 | echo "" 183 | echo "Then edit your .env file and replace 'your_gemini_api_key_here' with your actual key:" 184 | echo "" 185 | echo "GEMINI_API_KEY=your_actual_api_key_here" 186 | echo "" 187 | echo "You can edit .env with any text editor:" 188 | echo " nano .env (Linux/macOS)" 189 | echo " notepad .env (Windows)" 190 | echo " code .env (VS Code)" 191 | echo "" 192 | read -p "Press Enter after you've added your API key to continue..." 
193 | echo "" 194 | fi 195 | 196 | # Install node dependencies 197 | if [[ -f package-lock.json && "$USE_CI" -eq 1 ]]; then 198 | echo "Installing dependencies with npm ci" 199 | npm ci 200 | else 201 | echo "Installing dependencies with npm install" 202 | npm install 203 | fi 204 | 205 | # Build (optional) 206 | if [[ "$DO_BUILD" -eq 1 ]]; then 207 | echo "Building app for $OS_NAME via npm run $PLATFORM_BUILD_SCRIPT" 208 | npm run "$PLATFORM_BUILD_SCRIPT" 209 | fi 210 | 211 | # Run (default) 212 | if [[ "$DO_RUN" -eq 1 ]]; then 213 | # Final validation before starting 214 | if grep -q "your_gemini_api_key_here" .env 2>/dev/null; then 215 | echo "" 216 | echo "❌ Error: API key not configured!" 217 | echo "Please edit .env and replace 'your_gemini_api_key_here' with your actual Gemini API key." 218 | echo "Get your key from: https://aistudio.google.com/" 219 | echo "" 220 | echo "Then run the setup script again:" 221 | echo "./setup.sh" 222 | exit 1 223 | fi 224 | 225 | echo "Starting app (npm start)" 226 | npm start 227 | else 228 | echo "Setup complete. Skipping run." 
229 | fi 230 | -------------------------------------------------------------------------------- /src/ui/settings-window.js: -------------------------------------------------------------------------------- 1 | document.addEventListener('DOMContentLoaded', () => { 2 | // Get DOM elements 3 | const closeButton = document.getElementById('closeButton'); 4 | const quitButton = document.getElementById('quitButton'); 5 | const azureKeyInput = document.getElementById('azureKey'); 6 | const azureRegionInput = document.getElementById('azureRegion'); 7 | const geminiKeyInput = document.getElementById('geminiKey'); 8 | const windowGapInput = document.getElementById('windowGap'); 9 | const codingLanguageSelect = document.getElementById('codingLanguage'); 10 | const activeSkillSelect = document.getElementById('activeSkill'); 11 | const iconGrid = document.getElementById('iconGrid'); 12 | 13 | // Check if window.api exists 14 | if (!window.api) { 15 | console.error('window.api not available'); 16 | return; 17 | } 18 | 19 | // Request current settings when window opens 20 | const requestCurrentSettings = () => { 21 | if (window.electronAPI && window.electronAPI.getSettings) { 22 | window.electronAPI.getSettings().then(settings => { 23 | loadSettingsIntoUI(settings); 24 | }).catch(error => { 25 | console.error('Failed to get settings:', error); 26 | }); 27 | } 28 | }; 29 | 30 | // Close button handler 31 | if (closeButton) { 32 | closeButton.addEventListener('click', () => { 33 | window.api.send('close-settings'); 34 | }); 35 | } 36 | 37 | // Quit button handler with multiple attempts 38 | if (quitButton) { 39 | quitButton.addEventListener('click', () => { 40 | try { 41 | // Try multiple ways to quit the app 42 | if (window.api && window.api.send) { 43 | window.api.send('quit-app'); 44 | } 45 | 46 | // Also try the electron API if available 47 | if (window.electronAPI && window.electronAPI.quit) { 48 | window.electronAPI.quit(); 49 | } 50 | 51 | // Fallback: close the window 52 | 
setTimeout(() => { 53 | window.close(); 54 | }, 500); 55 | 56 | } catch (error) { 57 | console.error('Error quitting app:', error); 58 | window.close(); 59 | } 60 | }); 61 | } 62 | 63 | // Function to load settings into UI 64 | const loadSettingsIntoUI = (settings) => { 65 | if (settings.azureKey && azureKeyInput) azureKeyInput.value = settings.azureKey; 66 | if (settings.azureRegion && azureRegionInput) azureRegionInput.value = settings.azureRegion; 67 | if (settings.geminiKey && geminiKeyInput) geminiKeyInput.value = settings.geminiKey; 68 | if (settings.windowGap && windowGapInput) windowGapInput.value = settings.windowGap; 69 | 70 | // Set C++ as default if no coding language is specified 71 | if (codingLanguageSelect) { 72 | codingLanguageSelect.value = settings.codingLanguage || 'cpp'; 73 | } 74 | 75 | if (settings.activeSkill && activeSkillSelect) activeSkillSelect.value = settings.activeSkill; 76 | 77 | // Handle icon selection 78 | const selectedIcon = settings.selectedIcon || settings.appIcon; 79 | if (selectedIcon && iconGrid) { 80 | const iconOptions = iconGrid.querySelectorAll('.icon-option'); 81 | iconOptions.forEach(option => { 82 | if (option.dataset.icon === selectedIcon) { 83 | option.classList.add('selected'); 84 | } else { 85 | option.classList.remove('selected'); 86 | } 87 | }); 88 | } 89 | }; 90 | 91 | // Load settings when window opens 92 | window.api.receive('load-settings', (settings) => { 93 | loadSettingsIntoUI(settings); 94 | }); 95 | 96 | // Listen for settings window shown event 97 | if (window.electronAPI && window.electronAPI.receive) { 98 | window.electronAPI.receive('settings-window-shown', () => { 99 | requestCurrentSettings(); 100 | }); 101 | 102 | // Listen for coding language changes from other windows via helper 103 | window.electronAPI.onCodingLanguageChanged((event, data) => { 104 | if (data && data.language && codingLanguageSelect) { 105 | codingLanguageSelect.value = data.language; 106 | console.log('Language updated from 
overlay window:', data.language); 107 | } 108 | }); 109 | } 110 | 111 | // Save settings helper function 112 | const saveSettings = () => { 113 | const settings = {}; 114 | if (azureKeyInput) settings.azureKey = azureKeyInput.value; 115 | if (azureRegionInput) settings.azureRegion = azureRegionInput.value; 116 | if (geminiKeyInput) settings.geminiKey = geminiKeyInput.value; 117 | if (windowGapInput) settings.windowGap = windowGapInput.value; 118 | if (codingLanguageSelect) settings.codingLanguage = codingLanguageSelect.value; 119 | if (activeSkillSelect) settings.activeSkill = activeSkillSelect.value; 120 | 121 | window.api.send('save-settings', settings); 122 | }; 123 | 124 | // Add event listeners for all inputs 125 | const inputs = [ 126 | azureKeyInput, 127 | azureRegionInput, 128 | geminiKeyInput, 129 | windowGapInput 130 | ]; 131 | 132 | inputs.forEach(input => { 133 | if (input) { 134 | input.addEventListener('change', saveSettings); 135 | input.addEventListener('blur', saveSettings); 136 | } 137 | }); 138 | 139 | // Language selection handler 140 | if (codingLanguageSelect) { 141 | codingLanguageSelect.addEventListener('change', (e) => { 142 | const lang = e.target.value; 143 | // use electronAPI so main broadcast is consistent 144 | if (window.electronAPI && window.electronAPI.saveSettings) { 145 | window.electronAPI.saveSettings({ codingLanguage: lang }); 146 | } else { 147 | // fallback 148 | saveSettings(); 149 | } 150 | }); 151 | } 152 | 153 | // Skill selection handler 154 | if (activeSkillSelect) { 155 | activeSkillSelect.addEventListener('change', (e) => { 156 | saveSettings(); 157 | // Also update the main window 158 | window.api.send('update-skill', e.target.value); 159 | }); 160 | } 161 | 162 | // Initialize icon grid with correct paths 163 | const initializeIconGrid = () => { 164 | if (!iconGrid) return; 165 | 166 | const icons = [ 167 | { key: 'terminal', name: 'Terminal', src: './assests/icons/terminal.png' }, 168 | { key: 'activity', name: 
'Activity', src: './assests/icons/activity.png' }, 169 | { key: 'settings', name: 'Settings', src: './assests/icons/settings.png' } 170 | ]; 171 | 172 | iconGrid.innerHTML = ''; 173 | 174 | icons.forEach(icon => { 175 | const iconElement = document.createElement('div'); 176 | iconElement.className = 'icon-option'; 177 | iconElement.dataset.icon = icon.key; 178 | 179 | const img = document.createElement('img'); 180 | img.src = icon.src; 181 | img.alt = icon.name; 182 | img.onload = () => { 183 | logger.info('Icon loaded successfully:', icon.src); 184 | }; 185 | img.onerror = () => { 186 | console.error('Failed to load icon:', icon.src); 187 | // Try alternative paths 188 | const altPaths = [ 189 | `./assests/${icon.key}.png`, 190 | `./assets/icons/${icon.key}.png`, 191 | `./assets/${icon.key}.png` 192 | ]; 193 | 194 | let pathIndex = 0; 195 | const tryNextPath = () => { 196 | if (pathIndex < altPaths.length) { 197 | img.src = altPaths[pathIndex]; 198 | pathIndex++; 199 | } else { 200 | img.style.display = 'none'; 201 | console.error('All icon paths failed for:', icon.key); 202 | } 203 | }; 204 | 205 | img.onload = () => { 206 | logger.info('Icon loaded with alternative path:', img.src); 207 | }; 208 | 209 | img.onerror = tryNextPath; 210 | tryNextPath(); 211 | }; 212 | 213 | const label = document.createElement('div'); 214 | label.textContent = icon.name; 215 | 216 | iconElement.appendChild(img); 217 | iconElement.appendChild(label); 218 | 219 | // Click handler for icon selection 220 | iconElement.addEventListener('click', () => { 221 | // Remove selection from all icons 222 | iconGrid.querySelectorAll('.icon-option').forEach(opt => { 223 | opt.classList.remove('selected'); 224 | }); 225 | 226 | // Add selection to clicked icon 227 | iconElement.classList.add('selected'); 228 | 229 | // Save the selection - this should trigger the app icon change 230 | window.api.send('save-settings', { selectedIcon: icon.key }); 231 | 232 | // Show visual feedback 233 | 
iconElement.style.transform = 'scale(0.95)'; 234 | setTimeout(() => { 235 | iconElement.style.transform = 'scale(1)'; 236 | }, 100); 237 | }); 238 | 239 | iconGrid.appendChild(iconElement); 240 | }); 241 | }; 242 | 243 | // Initialize icon grid 244 | initializeIconGrid(); 245 | 246 | // Request settings on load 247 | setTimeout(() => { 248 | requestCurrentSettings(); 249 | }, 200); 250 | 251 | // ESC key to close 252 | document.addEventListener('keydown', (e) => { 253 | if (e.key === 'Escape') { 254 | window.api.send('close-settings'); 255 | } 256 | }); 257 | }); -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | # 🧠 OpenCluely 4 | 5 | 6 |

7 | Under Active Development 8 |

9 |

10 | Core is working; improvements are shipping daily. 11 |

12 | 13 |

14 | OpenCluely Typing Animation 15 |

16 | 17 |

18 | OpenCluely Badge 19 |

20 | 21 |

22 | Platform 23 | Stealth 24 | AI 25 | Speech 26 |

27 | 28 | --- 29 | 30 | 31 | **OpenCluely** is a revolutionary AI-powered desktop application that provides **invisible, real-time assistance** during technical rounds. 32 | 33 | ## 🎬 Demo Video 34 | 35 | https://github.com/user-attachments/assets/896a7140-1e85-405d-bfbe-e05c9f3a816b 36 |
37 | 38 | ## 🌟 Why OpenCluely? 39 | 40 | 41 | 42 | 52 | 62 | 63 |
43 | 44 | ### 🥷 **100% Stealth Mode** 45 | - **Invisible to Screen Sharing**: Zoom, Teams, Meet, Discord 46 | - **Process Disguise**: Appears as normal system process (Terminal, Activity Monitor, Settings) 47 | - **Click-Through Windows**: Transparent overlay technology 48 | - **Draggable UI**: Move windows anywhere on screen 49 | - **Zero Detection**: Bypasses all recording software 50 | 51 | 53 | 54 | ### 🚀 **AI-Powered Intelligence** 55 | - **Direct Image Analysis**: Screenshots are analyzed by Gemini (no Tesseract OCR) 56 | - **Voice Commands**: Optional Azure Speech (Whisper questions, get instant answers) 57 | - **Context Memory**: Remembers entire interview conversation 58 | - **Multi-Language Support**: C++, Python, Java, JavaScript, C 59 | - **Smart Response Window**: Draggable with close button 60 | 61 |
64 | 65 | ## 🖼️ Modern UI Features 66 | 67 | ### 📱 **Interactive Windows** 68 | - **Floating Overlay Bar**: Compact command center with camera, mic, and skill selector 69 | - **Draggable Answer Window**: Move and resize AI response window anywhere 70 | - **Close Button**: Clean × button to close answer window when needed 71 | - **Auto-Hide Mic**: Microphone button appears only when Azure Speech is configured 72 | - **Interactive Chat**: Full conversation window with markdown support 73 | 74 | ### 🎨 **Visual Design** 75 | - **Glass Morphism**: Beautiful blur effects and transparency 76 | - **Adaptive Layout**: UI adjusts based on available services 77 | - **Smart Resizing**: Windows resize automatically to fit content 78 | - **Professional Look**: Mimics system applications for perfect stealth 79 | 80 | --- 81 | 82 | ## 🎯 Functional Overview 83 | 84 | ### 📋 **Core Components** 85 | 86 | 87 | 88 | 99 | 110 | 121 | 122 |
89 | 90 | #### 🖱️ **Main Overlay** 91 | - Floating command bar 92 | - Screenshot capture (⌘⇧S) 93 | - Microphone toggle (Optional) 94 | - Skill selector (DSA) 95 | - Language picker 96 | - Status indicator 97 | 98 | 100 | 101 | #### 💬 **Interactive Chat** 102 | - Real-time transcription 103 | - AI conversation 104 | - Markdown formatting 105 | - Session memory 106 | - Listening animations 107 | - Auto-scroll messages 108 | 109 | 111 | 112 | #### 📊 **Answer Window** 113 | - Draggable interface 114 | - Close button (×) 115 | - Split layout for code 116 | - Full markdown support 117 | - Syntax highlighting 118 | - Smart content sizing 119 | 120 |
123 | 124 | 125 | --- 126 | ## ✅ To-Do List & Development Status 127 | 128 | ### 🎯 **Core Features** *(Completed)* 129 | 130 | - [x] **Stealth overlay** with draggable command bar and click‑through toggle 131 | - [x] **Screenshot capture** with direct Gemini analysis (no OCR step) 132 | - [x] **AI response window** with markdown and code highlighting 133 | - [x] **Global shortcuts** (capture, visibility, interaction, chat, settings) 134 | - [x] **Session memory** and chat UI 135 | - [x] **Language picker** and DSA skill prompt 136 | - [x] **Optional Azure Speech** integration with auto‑hide mic 137 | - [x] **Multi‑monitor** and area capture APIs 138 | - [x] **Window binding** and positioning system 139 | - [x] **Settings management** with app icon/stealth modes 140 | 141 | ### 🚧 **Planned Features** *(In Development)* 142 | 143 | - [ ] **Hidden during screen share** (auto‑hide all windows while screen is being shared) 144 | - [ ] **Multi‑model support** (OpenAI/Anthropic/Local backends alongside Gemini) 145 | - [ ] **Auto‑typer for code snippets** (paste or simulate typing into editors/IDEs) 146 | - [ ] **Export conversation history** (save sessions as markdown/PDF) 147 | - [ ] **Performance optimizations** (faster startup, reduced memory usage) 148 | - [ ] **Enhanced stealth modes** (process name randomization, deeper OS integration) 149 | 150 | --- 151 | 152 | ### ⚙️ **Configuration** 153 | 154 | The setup script automatically handles configuration. You only need: 155 | 156 | ```bash 157 | # Required: Google Gemini API Key (setup script will ask for this) 158 | GEMINI_API_KEY=your_gemini_api_key_here 159 | 160 | # Optional: Azure Speech Recognition (add later if you want voice features) 161 | AZURE_SPEECH_KEY=your_azure_speech_key 162 | AZURE_SPEECH_REGION=your_region 163 | ``` 164 | 165 | **Note**: Speech recognition is completely optional. If Azure credentials are not provided, the microphone button will be automatically hidden from all interfaces. 
166 | 167 | ## 🚀 Quick Start & Installation 168 | 169 | ### ⚡ Three Simple Steps (All Operating Systems) 170 | 171 | 1. **Clone the repository** 172 | ```bash 173 | git clone https://github.com/TechyCSR/OpenCluely.git 174 | cd OpenCluely 175 | ``` 176 | 177 | 2. **Get your Gemini API key** (Required) 178 | - Visit [Google AI Studio](https://aistudio.google.com/) 179 | - Click "Create API Key" 180 | - Copy the key (you'll need it in step 3) 181 | 182 | 3. **Run the setup script** (One command does everything!) 183 | ```bash 184 | ./setup.sh 185 | ``` 186 | 187 | 188 | **That's it!** The setup script will: 189 | - Install all dependencies automatically 190 | - Create and configure your `.env` file 191 | - Build the app (if needed) 192 | - Launch OpenCluely ready to use (if not works use npm install & then npm start) 193 | 194 | ### 💻 Platform-Specific Notes 195 | 196 | - **Windows**: Use Git Bash (comes with Git for Windows), WSL, or any bash environment 197 | - **macOS/Linux**: Use your regular terminal 198 | - **All platforms**: No manual npm commands needed - the setup script handles everything 199 | 200 | ### 🎛️ Setup Script Options 201 | 202 | ```bash 203 | ./setup.sh --build # Build distributable for your OS 204 | ./setup.sh --ci # Use npm ci instead of npm install 205 | ./setup.sh --no-run # Setup only, don't launch the app 206 | ./setup.sh --install-system-deps # Install sox for microphone (optional) 207 | ``` 208 | 209 | ### 🔧 **Optional: Azure Speech Setup** (For Voice Features) 210 | 211 | Voice recognition is completely optional. The setup script will create a `.env` file with just the required Gemini key. To add voice features: 212 | 213 | 1. Get Azure Speech credentials: 214 | - Visit [Azure Portal](https://portal.azure.com/) 215 | - Create a Speech Service 216 | - Copy your key and region 217 | 218 | 2. 
Add to your `.env` file: 219 | ```env 220 | # Already configured by setup script 221 | GEMINI_API_KEY=your_gemini_api_key_here 222 | 223 | # Add these for voice features (optional) 224 | AZURE_SPEECH_KEY=your_azure_speech_key 225 | AZURE_SPEECH_REGION=your_region 226 | ``` 227 | 228 | 3. Restart the app - microphone buttons will now appear automatically 229 | 230 | ## 🎮 How to Use 231 | 232 | ### 🖱️ **Main Controls** 233 | 234 | | Action | Shortcut | Description | 235 | |--------|----------|-------------| 236 | | **Screenshot Capture** | `⌘⇧S` | Capture screen and analyze via Gemini (image understanding) | 237 | | **Toggle Speech** | `Alt+R` | Start/stop voice recognition (if configured) | 238 | | **Toggle Visibility** | `⌘⇧V` | Show/hide all windows | 239 | | **Toggle Interaction** | `⌘⇧I` or `Alt+A` | Enable/disable window interaction | 240 | | **Switch to Chat** | `⌘⇧C` | Open interactive chat window | 241 | | **Settings** | `⌘,` | Open settings panel | 242 | 243 | ### 🎯 **Workflow** 244 | 245 | 1. **Start OpenCluely** → App appears as system process (Terminal/Activity Monitor) 246 | 2. **Position Windows** → Drag overlay and answer windows to preferred locations 247 | 3. **Capture Questions** → Use screenshot (⌘⇧S) or voice commands 248 | 4. **Get AI Answers** → Instant responses in draggable answer window 249 | 5. **Interactive Chat** → Type or speak for detailed conversations 250 | 6. 
**Stay Stealth** → All operations invisible to screen recording 251 | 252 | ### 🔧 **Advanced Features** 253 | 254 | #### 🎨 **Window Management** 255 | - **Draggable Interface**: Click and drag any window to reposition 256 | - **Auto-resize**: Windows automatically adjust to content 257 | - **Close Button**: Click × to close answer window 258 | - **Always on Top**: Windows stay above all applications 259 | 260 | #### 🧠 **AI Intelligence** 261 | - **Context Awareness**: Remembers entire conversation 262 | - **Code Detection**: Automatically formats code blocks 263 | - **Language Specific**: Tailored responses for selected programming language 264 | - **Session Memory**: Maintains context across multiple questions 265 | - **Image Understanding**: DSA prompt is applied only for new image-based queries; chat messages don’t include the full prompt 266 | - **Multi-monitor & Area Capture**: Programmatic APIs allow targeting a display and optional rectangular crop for focused analysis 267 | 268 | #### 🔊 **Optional Voice Features** (Azure Speech) 269 | - **Real-time Transcription**: Speak questions naturally 270 | - **Listening Animation**: Visual feedback during recording 271 | - **Interim Results**: See transcription as you speak 272 | - **Auto-processing**: Instant AI responses to voice input 273 | ] 274 | --- 275 | 276 |
277 | 🧩 Troubleshooting 278 | 279 | ### Setup Issues 280 | 281 | - **setup.sh not found or won't run** 282 | - Make sure you're in the OpenCluely directory: `cd OpenCluely` 283 | - Make the script executable: `chmod +x setup.sh` 284 | - On Windows, use Git Bash (comes with Git for Windows) 285 | 286 | - **Setup script stops with exit code 130** 287 | - This means you pressed Ctrl+C. Just run `./setup.sh` again 288 | 289 | - **Node or npm not found** 290 | - Install Node.js 18+ from [nodejs.org](https://nodejs.org/) 291 | - Restart your terminal and try again 292 | 293 | ### App Issues 294 | 295 | - **Electron won't start or shows blank window (Linux)** 296 | - Try: `npm run dev` 297 | - Ensure X11/XWayland is available if running in headless environments 298 | 299 | - **macOS screen capture doesn't work** 300 | - Grant "Screen Recording" permission in System Settings → Privacy & Security → Screen Recording 301 | - Quit and relaunch the app after granting permission 302 | 303 | - **Windows SmartScreen blocks the app** 304 | - Click "More info" → "Run anyway" or use `npm start` during development 305 | 306 | - **Microphone/voice not working** 307 | - Voice is optional - ignore related warnings if you don't need it 308 | - To enable: install `sox` (Linux/macOS) and add Azure keys to `.env` 309 | 310 |
311 | 312 |
313 | ⚖️ Legal & Ethics 314 | 315 | ### 📋 **Disclaimer** 316 | 317 | OpenCluely is provided for educational and research purposes. Users are responsible for: 318 | - Complying with interview guidelines 319 | - Respecting company policies 320 | - Understanding legal implications 321 | - Using ethically and responsibly 322 | 323 | ### 🔒 **Privacy** 324 | 325 | - No data collection or telemetry 326 | - All processing happens locally 327 | - API communications are encrypted 328 | - Session data stays on your device 329 | 330 | 331 | ### 📄 License 332 | 333 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 334 | 335 | 336 |
337 | 338 | 339 |
340 | 341 | ## 🙏 Acknowledgments 342 | 343 | - **Google Gemini**: Powering AI intelligence 344 | - **Azure Speech**: Optional voice recognition 345 | - **Electron**: Cross-platform desktop framework 346 | - **Community**: Amazing contributors and feedback 347 | 348 | - **Vysper**: UI and code structure inspiration — see [Vysper by varun-singhh](https://github.com/varun-singhh/Vysper) 349 | 350 | --- 351 |
352 | 353 | 354 | 355 | ⭐ **Star this repo** if OpenCluely helped you ace your interviews or you vibed with it! 356 | 357 | **Made with ❤️ by [TechyCSR](https://techycsr.dev)** 358 | 359 | 360 |
361 | -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | OpenCluely 6 | 7 | 8 | 353 | 354 | 355 |
356 |
357 | 358 | ⌘⇧S 359 |
360 |
361 |
362 | 363 |
364 |
365 |
366 | 367 | DSA 368 |
369 |
370 |
371 | 372 | 379 |
380 |
381 |
382 | 383 |
384 |
385 |
386 |
387 | 388 | 389 |
390 |
391 | Global Shortcuts 392 |
393 | 394 | 395 | 396 | 397 | 398 | 399 | 400 | 401 | 402 | 407 | 408 | 409 | 410 | 415 | 416 | 417 | 418 | 426 | 427 | 428 | 429 | 434 | 435 | 436 | 437 | 441 | 442 | 443 | 444 | 448 | 449 | 450 | 451 |
ShortcutAction
403 | Ctrl/Cmd 404 | + Shift 405 | + S 406 | Capture screenshot and analyze
411 | Ctrl/Cmd 412 | + Shift 413 | + V 414 | Toggle visibility
419 | Ctrl/Cmd 420 | + Shift 421 | + I 422 | or 423 | Alt 424 | + A 425 | Toggle interaction (click-through)
430 | Ctrl/Cmd 431 | + Shift 432 | + C 433 | Open chat
438 | Alt 439 | + R 440 | Toggle speech
445 | Ctrl/Cmd 446 | + , 447 | Open settings
452 | 456 |
457 | 458 | 459 | 460 | 461 | -------------------------------------------------------------------------------- /src/styles/common.css: -------------------------------------------------------------------------------- 1 | /* Common Styles for OpenCluely UI Components */ 2 | 3 | /* Font imports */ 4 | @import url('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css'); 5 | 6 | /* Base styles */ 7 | body { 8 | background: transparent !important; 9 | margin: 0; 10 | padding: 0; 11 | overflow: hidden; 12 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; 13 | cursor: default; 14 | } 15 | 16 | /* Common container styles */ 17 | .app-container { 18 | width: 100%; 19 | height: 100vh; 20 | display: flex; 21 | flex-direction: column; 22 | -webkit-app-region: drag; 23 | background: linear-gradient(135deg, rgba(0, 0, 0, 0.3) 0%, rgba(20, 20, 20, 0.4) 100%); 24 | backdrop-filter: blur(25px); 25 | border-radius: 12px; 26 | border: 1px solid rgba(255, 255, 255, 0.1); 27 | box-shadow: 0 4px 25px rgba(0, 0, 0, 0.15); 28 | } 29 | 30 | /* Header styles */ 31 | .app-header { 32 | padding: 12px 16px; 33 | border-bottom: 1px solid rgba(255, 255, 255, 0.08); 34 | display: flex; 35 | align-items: center; 36 | justify-content: space-between; 37 | -webkit-app-region: no-drag; 38 | background: rgba(0, 0, 0, 0.2); 39 | backdrop-filter: blur(10px); 40 | } 41 | 42 | .header-title { 43 | color: rgba(255, 255, 255, 0.95); 44 | font-size: 13px; 45 | font-weight: 600; 46 | display: flex; 47 | align-items: center; 48 | gap: 8px; 49 | text-shadow: 0 1px 2px rgba(0, 0, 0, 0.3); 50 | } 51 | 52 | /* Content area styles */ 53 | .app-content { 54 | flex: 1; 55 | padding: 16px; 56 | overflow-y: auto; 57 | -webkit-app-region: no-drag; 58 | } 59 | 60 | /* Status indicators */ 61 | .status-dot { 62 | width: 6px; 63 | height: 6px; 64 | border-radius: 50%; 65 | transition: all 0.3s ease; 66 | } 67 | 68 | .status-dot.interactive { 69 | background: #4CAF50; 70 | 
box-shadow: 0 0 8px rgba(76, 175, 80, 0.6); 71 | } 72 | 73 | .status-dot.non-interactive { 74 | background: #f44336; 75 | box-shadow: 0 0 8px rgba(244, 67, 54, 0.6); 76 | } 77 | 78 | /* Pulse animation for active indicators */ 79 | .active-indicator { 80 | width: 8px; 81 | height: 8px; 82 | border-radius: 50%; 83 | background: #4caf50; 84 | animation: pulse 2s infinite; 85 | box-shadow: 0 0 10px rgba(76, 175, 80, 0.5); 86 | } 87 | 88 | @keyframes pulse { 89 | 0% { opacity: 1; transform: scale(1); } 90 | 50% { opacity: 0.7; transform: scale(1.1); } 91 | 100% { opacity: 1; transform: scale(1); } 92 | } 93 | 94 | /* Interaction indicators */ 95 | .interaction-indicator { 96 | position: absolute; 97 | top: 50%; 98 | left: 50%; 99 | transform: translate(-50%, -50%); 100 | background: rgba(0, 0, 0, 0.8); 101 | color: rgba(255, 255, 255, 0.9); 102 | padding: 8px 16px; 103 | border-radius: 6px; 104 | font-size: 12px; 105 | font-weight: 500; 106 | opacity: 0; 107 | transition: opacity 0.3s ease; 108 | pointer-events: none; 109 | z-index: 1000; 110 | } 111 | 112 | .interaction-indicator.show { 113 | opacity: 1; 114 | } 115 | 116 | .interaction-indicator.interactive { 117 | background: rgba(76, 175, 80, 0.9); 118 | } 119 | 120 | .interaction-indicator.non-interactive { 121 | background: rgba(244, 67, 54, 0.9); 122 | } 123 | 124 | /* Non-interactive state styles */ 125 | .non-interactive .interactive-element { 126 | pointer-events: none; 127 | opacity: 0.5; 128 | } 129 | 130 | /* Button styles */ 131 | .btn { 132 | background: rgba(255, 255, 255, 0.15); 133 | border: none; 134 | border-radius: 6px; 135 | padding: 6px 10px; 136 | color: rgba(255, 255, 255, 0.9); 137 | cursor: pointer; 138 | transition: all 0.2s ease; 139 | font-size: 12px; 140 | font-weight: 500; 141 | } 142 | 143 | .btn:hover { 144 | background: rgba(255, 255, 255, 0.25); 145 | color: rgba(255, 255, 255, 1); 146 | } 147 | 148 | .btn.btn-primary { 149 | background: rgba(76, 175, 80, 0.8); 150 | } 151 | 152 | 
.btn.btn-primary:hover { 153 | background: rgba(76, 175, 80, 1); 154 | } 155 | 156 | .btn.btn-danger { 157 | background: rgba(244, 67, 54, 0.8); 158 | } 159 | 160 | .btn.btn-danger:hover { 161 | background: rgba(244, 67, 54, 1); 162 | } 163 | 164 | .btn.recording { 165 | background: rgba(255, 71, 87, 0.8); 166 | color: white; 167 | box-shadow: 0 0 15px rgba(255, 71, 87, 0.4); 168 | } 169 | 170 | /* Input styles */ 171 | .input-field { 172 | background: rgba(255, 255, 255, 0.1); 173 | border: 1px solid rgba(255, 255, 255, 0.15); 174 | border-radius: 6px; 175 | padding: 8px 12px; 176 | color: rgba(255, 255, 255, 0.95); 177 | font-size: 13px; 178 | outline: none; 179 | transition: all 0.2s ease; 180 | } 181 | 182 | .input-field:focus { 183 | background: rgba(255, 255, 255, 0.15); 184 | border-color: rgba(76, 175, 80, 0.5); 185 | } 186 | 187 | .input-field::placeholder { 188 | color: rgba(255, 255, 255, 0.5); 189 | } 190 | 191 | /* Card styles */ 192 | .card { 193 | background: rgba(255, 255, 255, 0.08); 194 | border-radius: 8px; 195 | padding: 12px 16px; 196 | border: 1px solid rgba(255, 255, 255, 0.15); 197 | backdrop-filter: blur(5px); 198 | transition: all 0.3s ease; 199 | cursor: pointer; 200 | position: relative; 201 | overflow: hidden; 202 | } 203 | 204 | .card:hover { 205 | background: rgba(255, 255, 255, 0.12); 206 | border-color: rgba(255, 255, 255, 0.25); 207 | transform: translateY(-2px); 208 | box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2); 209 | } 210 | 211 | .card.active { 212 | background: rgba(76, 175, 80, 0.15); 213 | border-color: rgba(76, 175, 80, 0.4); 214 | box-shadow: 0 0 15px rgba(76, 175, 80, 0.3); 215 | } 216 | 217 | /* Message styles */ 218 | .message { 219 | margin-bottom: 12px; 220 | padding: 10px 14px; 221 | background: rgba(255, 255, 255, 0.08); 222 | border-radius: 8px; 223 | border-left: 3px solid rgba(255, 255, 255, 0.2); 224 | backdrop-filter: blur(5px); 225 | min-height: auto; 226 | height: auto; 227 | overflow: visible; 228 | word-wrap: 
break-word; 229 | word-break: break-word; 230 | } 231 | 232 | .message.system { 233 | border-left-color: #2196f3; 234 | background: rgba(33, 150, 243, 0.1); 235 | } 236 | 237 | .message.user { 238 | border-left-color: #ff9800; 239 | background: rgba(255, 152, 0, 0.1); 240 | } 241 | 242 | .message.transcription { 243 | border-left-color: #4caf50; 244 | background: rgba(76, 175, 80, 0.1); 245 | } 246 | 247 | .message.error { 248 | border-left-color: #f44336; 249 | background: rgba(244, 67, 54, 0.1); 250 | } 251 | 252 | .message.assistant { 253 | border-left-color: #9c27b0; 254 | background: rgba(156, 39, 176, 0.1); 255 | min-height: auto; 256 | height: auto; 257 | overflow: visible; 258 | } 259 | 260 | /* Markdown formatting for assistant messages */ 261 | .message.assistant .bullet-point { 262 | margin: 4px 0; 263 | padding-left: 8px; 264 | line-height: 1.4; 265 | word-wrap: break-word; 266 | word-break: break-word; 267 | overflow-wrap: break-word; 268 | } 269 | 270 | .message.assistant .numbered-point { 271 | margin: 4px 0; 272 | padding-left: 16px; 273 | line-height: 1.4; 274 | position: relative; 275 | counter-increment: list-counter; 276 | word-wrap: break-word; 277 | word-break: break-word; 278 | overflow-wrap: break-word; 279 | } 280 | 281 | .message.assistant .numbered-point::before { 282 | content: counter(list-counter) ". 
"; 283 | position: absolute; 284 | left: 0; 285 | font-weight: 500; 286 | color: rgba(156, 39, 176, 0.8); 287 | } 288 | 289 | .message.assistant { 290 | counter-reset: list-counter; 291 | } 292 | 293 | .message.assistant strong { 294 | font-weight: 600; 295 | color: rgba(255, 255, 255, 1); 296 | } 297 | 298 | .message.assistant em { 299 | font-style: italic; 300 | color: rgba(255, 255, 255, 0.9); 301 | } 302 | 303 | .message.assistant code { 304 | background: rgba(0, 0, 0, 0.3); 305 | padding: 2px 4px; 306 | border-radius: 3px; 307 | font-family: 'Monaco', 'Menlo', monospace; 308 | font-size: 11px; 309 | color: #64ffda; 310 | } 311 | 312 | /* Thinking indicator animation */ 313 | .thinking-dots { 314 | display: flex; 315 | align-items: center; 316 | gap: 2px; 317 | } 318 | 319 | .thinking-dots .dot { 320 | opacity: 0.4; 321 | animation: thinking 1.4s infinite ease-in-out; 322 | } 323 | 324 | .thinking-dots .dot:nth-child(1) { animation-delay: 0s; } 325 | .thinking-dots .dot:nth-child(2) { animation-delay: 0.2s; } 326 | .thinking-dots .dot:nth-child(3) { animation-delay: 0.4s; } 327 | 328 | @keyframes thinking { 329 | 0%, 80%, 100% { 330 | opacity: 0.4; 331 | transform: scale(1); 332 | } 333 | 40% { 334 | opacity: 1; 335 | transform: scale(1.2); 336 | } 337 | } 338 | 339 | .message.thinking { 340 | animation: fadeIn 0.3s ease-out; 341 | } 342 | 343 | @keyframes fadeIn { 344 | from { 345 | opacity: 0; 346 | transform: translateY(10px); 347 | } 348 | to { 349 | opacity: 1; 350 | transform: translateY(0); 351 | } 352 | } 353 | 354 | .message-time { 355 | color: rgba(255, 255, 255, 0.6); 356 | font-size: 10px; 357 | margin-bottom: 4px; 358 | font-weight: 500; 359 | } 360 | 361 | .message-text { 362 | color: rgba(255, 255, 255, 0.95); 363 | font-size: 13px; 364 | line-height: 1.4; 365 | text-shadow: 0 1px 2px rgba(0, 0, 0, 0.2); 366 | word-wrap: break-word; 367 | word-break: break-word; 368 | white-space: pre-wrap; 369 | overflow-wrap: break-word; 370 | max-width: 100%; 
371 | display: block; 372 | } 373 | 374 | /* Help text styles */ 375 | .help-text { 376 | color: rgba(255, 255, 255, 0.7); 377 | font-size: 11px; 378 | text-align: center; 379 | padding: 10px; 380 | line-height: 1.4; 381 | text-shadow: 0 1px 2px rgba(0, 0, 0, 0.2); 382 | } 383 | 384 | /* Notification styles */ 385 | .notification { 386 | position: fixed; 387 | top: 16px; 388 | right: 16px; 389 | padding: 12px 20px; 390 | border-radius: 8px; 391 | color: white; 392 | font-size: 13px; 393 | font-weight: 500; 394 | z-index: 9999; 395 | box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3); 396 | backdrop-filter: blur(10px); 397 | border: 1px solid rgba(255, 255, 255, 0.1); 398 | } 399 | 400 | .notification.info { 401 | background: rgba(33, 150, 243, 0.9); 402 | } 403 | 404 | .notification.success { 405 | background: rgba(76, 175, 80, 0.9); 406 | } 407 | 408 | .notification.error { 409 | background: rgba(244, 67, 54, 0.9); 410 | } 411 | 412 | .notification.warning { 413 | background: rgba(255, 193, 7, 0.9); 414 | } 415 | 416 | /* Modal styles */ 417 | .modal-overlay { 418 | position: fixed; 419 | inset: 0; 420 | background: rgba(0, 0, 0, 0.75); 421 | display: flex; 422 | align-items: center; 423 | justify-content: center; 424 | z-index: 1000; 425 | backdrop-filter: blur(4px); 426 | } 427 | 428 | .modal-content { 429 | background: rgba(30, 30, 30, 0.95); 430 | backdrop-filter: blur(20px); 431 | border-radius: 12px; 432 | border: 1px solid rgba(255, 255, 255, 0.1); 433 | box-shadow: 0 8px 32px rgba(0, 0, 0, 0.5); 434 | color: white; 435 | max-width: 90vw; 436 | max-height: 90vh; 437 | overflow-y: auto; 438 | } 439 | 440 | /* Separator styles */ 441 | .separator { 442 | width: 1px; 443 | height: 16px; 444 | background: linear-gradient(to bottom, transparent, rgba(255, 255, 255, 0.25), transparent); 445 | flex-shrink: 0; 446 | margin: 0 8px; 447 | } 448 | 449 | /* Loading animations */ 450 | .loading-spinner { 451 | display: inline-block; 452 | width: 16px; 453 | height: 16px; 454 | 
border: 2px solid rgba(255, 255, 255, 0.3); 455 | border-radius: 50%; 456 | border-top-color: #4CAF50; 457 | animation: spin 1s ease-in-out infinite; 458 | } 459 | 460 | @keyframes spin { 461 | to { transform: rotate(360deg); } 462 | } 463 | 464 | /* Bouncing dots loader */ 465 | .dots-loader { 466 | display: inline-flex; 467 | align-items: center; 468 | gap: 4px; 469 | } 470 | 471 | .bouncing-dot { 472 | width: 6px; 473 | height: 6px; 474 | border-radius: 50%; 475 | background-color: #4CAF50; 476 | animation: bounce 1.4s infinite ease-in-out; 477 | } 478 | 479 | .bouncing-dot:nth-child(1) { animation-delay: -0.32s; } 480 | .bouncing-dot:nth-child(2) { animation-delay: -0.16s; } 481 | .bouncing-dot:nth-child(3) { animation-delay: 0s; } 482 | 483 | @keyframes bounce { 484 | 0%, 80%, 100% { 485 | transform: scale(0); 486 | opacity: 0.5; 487 | } 488 | 40% { 489 | transform: scale(1); 490 | opacity: 1; 491 | } 492 | } 493 | 494 | /* Utility classes */ 495 | .hidden { display: none !important; } 496 | .visible { display: block !important; } 497 | .flex { display: flex !important; } 498 | .inline-flex { display: inline-flex !important; } 499 | .grid { display: grid !important; } 500 | 501 | .text-center { text-align: center; } 502 | .text-left { text-align: left; } 503 | .text-right { text-align: right; } 504 | 505 | .font-bold { font-weight: 600; } 506 | .font-medium { font-weight: 500; } 507 | .font-normal { font-weight: 400; } 508 | 509 | .text-xs { font-size: 10px; } 510 | .text-sm { font-size: 12px; } 511 | .text-base { font-size: 14px; } 512 | .text-lg { font-size: 16px; } 513 | .text-xl { font-size: 18px; } 514 | 515 | .opacity-50 { opacity: 0.5; } 516 | .opacity-75 { opacity: 0.75; } 517 | .opacity-90 { opacity: 0.9; } 518 | 519 | /* Responsive design helpers */ 520 | @media (max-width: 768px) { 521 | .app-header { 522 | padding: 8px 12px; 523 | } 524 | 525 | .app-content { 526 | padding: 12px; 527 | } 528 | 529 | .header-title { 530 | font-size: 12px; 531 | } 
532 | 533 | .card { 534 | padding: 10px 12px; 535 | } 536 | } 537 | 538 | /* Focus styles for accessibility */ 539 | .focus-visible:focus { 540 | outline: 2px solid rgba(76, 175, 80, 0.8); 541 | outline-offset: 2px; 542 | } 543 | 544 | /* Smooth transitions */ 545 | * { 546 | transition: opacity 0.2s ease, transform 0.2s ease; 547 | } 548 | 549 | /* Scrollbar styles */ 550 | ::-webkit-scrollbar { 551 | width: 6px; 552 | height: 6px; 553 | } 554 | 555 | ::-webkit-scrollbar-track { 556 | background: rgba(255, 255, 255, 0.05); 557 | border-radius: 3px; 558 | } 559 | 560 | ::-webkit-scrollbar-thumb { 561 | background: rgba(255, 255, 255, 0.2); 562 | border-radius: 3px; 563 | } 564 | 565 | ::-webkit-scrollbar-thumb:hover { 566 | background: rgba(255, 255, 255, 0.3); 567 | } 568 | 569 | /* Hide scrollbars when specified */ 570 | .hide-scrollbar::-webkit-scrollbar { 571 | width: 0px; 572 | height: 0px; 573 | display: none; 574 | } 575 | 576 | .hide-scrollbar { 577 | scrollbar-width: none; 578 | -ms-overflow-style: none; 579 | } -------------------------------------------------------------------------------- /settings.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Settings 7 | 8 | 9 | 258 | 259 | 260 |
261 |
262 |
263 | 264 | Settings 265 |
266 |
267 | 271 | 275 |
276 |
277 | 278 |
279 |
280 |
281 | 282 | Language & Skills 283 |
284 |
285 |
286 |
287 |
Coding Language
288 |
Select your preferred programming language
289 |
290 | 297 |
298 |
299 |
300 |
Active Skill
301 |
Choose your current focus area
302 |
303 | 306 |
307 |
308 |
309 | 310 |
311 |
312 | 313 | App Icon 314 |
315 |
316 |
317 |
318 | Terminal 319 |
Terminal
320 |
321 |
322 | Activity 323 |
Activity
324 |
325 |
326 | Settings 327 |
Settings
328 |
329 |
330 |
331 |
332 | 333 |
334 |
335 | 336 | Speech Recognition 337 |
338 |
339 |
340 |
341 |
Azure Speech Key
342 |
Your Azure Cognitive Services API key
343 |
344 | 345 |
346 |
347 |
348 |
Azure Region
349 |
Your Azure service region
350 |
351 | 352 |
353 |
354 |
355 | 356 |
357 |
358 | 359 | Gemini Settings 360 |
361 |
362 |
363 |
364 |
Google API Key
365 |
Your Google API key for Gemini models
366 |
367 | 368 |
369 |
370 |
371 | 372 |
373 |
374 | 375 | Window Settings 376 |
377 |
378 |
379 |
380 |
Window Gap
381 |
Gap between bound windows (in pixels)
382 |
383 | 384 |
385 |
386 |
387 |
388 |
389 | 390 | 391 | 392 | -------------------------------------------------------------------------------- /prompt-loader.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs'); 2 | const path = require('path'); 3 | 4 | class PromptLoader { 5 | constructor() { 6 | this.prompts = new Map(); 7 | this.promptsLoaded = false; 8 | this.skillPromptSent = new Set(); 9 | // Focus only on DSA 10 | this.skillsRequiringProgrammingLanguage = ['dsa']; 11 | } 12 | 13 | /** 14 | * Load all skill prompts from the prompts directory 15 | */ 16 | loadPrompts() { 17 | if (this.promptsLoaded) { 18 | return; 19 | } 20 | 21 | const promptsDir = path.join(__dirname, 'prompts'); 22 | 23 | try { 24 | const files = fs.readdirSync(promptsDir); 25 | 26 | for (const file of files) { 27 | if (file.endsWith('.md')) { 28 | const skillName = path.basename(file, '.md'); 29 | if (skillName !== 'dsa') continue; // only keep DSA 30 | const filePath = path.join(promptsDir, file); 31 | const promptContent = fs.readFileSync(filePath, 'utf8'); 32 | 33 | this.prompts.set(skillName, promptContent); 34 | } 35 | } 36 | 37 | this.promptsLoaded = true; 38 | 39 | } catch (error) { 40 | console.error('Error loading skill prompts:', error); 41 | throw new Error(`Failed to load skill prompts: ${error.message}`); 42 | } 43 | } 44 | 45 | /** 46 | * Get the system prompt for a specific skill with optional programming language injection 47 | * @param {string} skillName - The name of the skill 48 | * @param {string|null} programmingLanguage - Optional programming language to inject 49 | * @returns {string|null} The system prompt content or null if not found 50 | */ 51 | getSkillPrompt(skillName, programmingLanguage = null) { 52 | if (!this.promptsLoaded) { 53 | this.loadPrompts(); 54 | } 55 | 56 | const normalizedSkillName = this.normalizeSkillName(skillName); 57 | let promptContent = this.prompts.get(normalizedSkillName); 58 | 59 | if (!promptContent) { 60 | return 
null; 61 | } 62 | 63 | // Inject programming language if provided and skill requires it 64 | if (programmingLanguage && this.skillsRequiringProgrammingLanguage.includes(normalizedSkillName)) { 65 | promptContent = this.injectProgrammingLanguage(promptContent, programmingLanguage, normalizedSkillName); 66 | } 67 | 68 | return promptContent; 69 | } 70 | 71 | /** 72 | * Inject programming language context into skill prompts 73 | * @param {string} promptContent - Original prompt content 74 | * @param {string} programmingLanguage - Programming language to inject 75 | * @param {string} skillName - Normalized skill name 76 | * @returns {string} Modified prompt with programming language context 77 | */ 78 | injectProgrammingLanguage(promptContent, programmingLanguage, skillName) { 79 | const languageMap = { cpp: 'C++', c: 'C', python: 'Python', java: 'Java', javascript: 'JavaScript', js: 'JavaScript' }; 80 | const fenceTagMap = { cpp: 'cpp', c: 'c', python: 'python', java: 'java', javascript: 'javascript', js: 'javascript' }; 81 | const norm = (programmingLanguage || '').toLowerCase(); 82 | const languageTitle = languageMap[norm] || (programmingLanguage.charAt(0).toUpperCase() + programmingLanguage.slice(1)); 83 | const fenceTag = fenceTagMap[norm] || norm || 'text'; 84 | const languageUpper = (languageMap[norm] || languageTitle).toUpperCase(); 85 | 86 | let languageInjection = ''; 87 | 88 | switch (skillName) { 89 | case 'dsa': 90 | languageInjection = `\n\n## IMPLEMENTATION LANGUAGE: ${languageUpper} 91 | STRICT REQUIREMENTS: 92 | - Respond ONLY in ${languageTitle}. Do not include any snippets or alternatives in other languages. 93 | - All code blocks must use triple backticks with the exact language tag: \`\`\`${fenceTag}\`\`\`. 94 | - Aim for the best possible time and space complexity; prefer optimal algorithms and data structures. 95 | - Provide: brief approach, then final ${languageTitle} implementation, followed by time/space complexity. 
96 | - If the user's input is a problem statement (and does not include code), produce a complete, runnable ${languageTitle} solution without asking for clarification. 97 | - Avoid unnecessary verbosity; focus on correctness, clarity, and efficiency.`; 98 | break; 99 | default: 100 | languageInjection = `\n\n## PROGRAMMING LANGUAGE: ${languageUpper}\nAll code and examples must be in ${languageTitle}. Use code fences with tag: \`\`\`${fenceTag}\`\`\`.`; 101 | } 102 | 103 | return promptContent + languageInjection; 104 | } 105 | 106 | /** 107 | * Check if stored memory is empty (first time interaction) 108 | * @param {Array} storedMemory - Current stored memory from your system 109 | * @returns {boolean} True if memory is empty 110 | */ 111 | isFirstTimeInteraction(storedMemory) { 112 | return !storedMemory || storedMemory.length === 0; 113 | } 114 | 115 | /** 116 | * Check if skill prompt should be sent as model memory 117 | * @param {string} skillName - The name of the skill 118 | * @param {Array} storedMemory - Current stored memory 119 | * @returns {boolean} True if skill prompt should be sent as model memory 120 | */ 121 | shouldSendAsModelMemory(skillName, storedMemory) { 122 | const normalizedSkillName = this.normalizeSkillName(skillName); 123 | 124 | // If stored memory is empty, this is the first time - send as model memory 125 | if (this.isFirstTimeInteraction(storedMemory)) { 126 | return true; 127 | } 128 | 129 | // Check if we've already sent this skill's prompt as model memory 130 | const hasSkillInMemory = storedMemory.some(event => 131 | event.skillUsed === normalizedSkillName && event.promptSentAsMemory === true 132 | ); 133 | 134 | if (!hasSkillInMemory) { 135 | return true; 136 | } 137 | 138 | return false; 139 | } 140 | 141 | /** 142 | * Prepare Gemini API request with model memory or regular message 143 | * @param {string} skillName - The active skill 144 | * @param {string} userMessage - The user's message/query 145 | * @param {Array} 
storedMemory - Current stored memory 146 | * @param {string|null} programmingLanguage - Optional programming language 147 | * @returns {Object} Gemini API request configuration 148 | */ 149 | prepareGeminiRequest(skillName, userMessage, storedMemory, programmingLanguage = null) { 150 | const normalizedSkillName = this.normalizeSkillName(skillName); 151 | const skillPrompt = this.getSkillPrompt(normalizedSkillName, programmingLanguage); 152 | 153 | const requestConfig = { 154 | model: 'gemini-pro', // or your preferred Gemini model 155 | contents: [], 156 | systemInstruction: null, 157 | generationConfig: { 158 | temperature: 0.7, 159 | maxOutputTokens: 2048, 160 | } 161 | }; 162 | 163 | // If stored memory is empty or skill prompt not sent, use model memory 164 | if (this.shouldSendAsModelMemory(skillName, storedMemory)) { 165 | if (skillPrompt) { 166 | // Send skill prompt as system instruction (model memory) 167 | requestConfig.systemInstruction = { 168 | parts: [{ text: skillPrompt }] 169 | }; 170 | 171 | // Add user message as regular content 172 | requestConfig.contents.push({ 173 | role: 'user', 174 | parts: [{ text: userMessage }] 175 | }); 176 | 177 | // Mark that we're sending this as model memory 178 | this.skillPromptSent.add(normalizedSkillName); 179 | 180 | return { 181 | ...requestConfig, 182 | isUsingModelMemory: true, 183 | skillUsed: normalizedSkillName, 184 | programmingLanguage 185 | }; 186 | } else { 187 | console.warn(`No system prompt found for skill: ${normalizedSkillName}`); 188 | } 189 | } 190 | 191 | // Regular message (stored memory not empty, prompt already sent) 192 | requestConfig.contents.push({ 193 | role: 'user', 194 | parts: [{ text: userMessage }] 195 | }); 196 | 197 | return { 198 | ...requestConfig, 199 | isUsingModelMemory: false, 200 | skillUsed: normalizedSkillName, 201 | programmingLanguage 202 | }; 203 | } 204 | 205 | /** 206 | * Alternative method: Get separate components for manual API construction 207 | * @param {string} 
skillName - The active skill 208 | * @param {string} userMessage - The user's message/query 209 | * @param {Array} storedMemory - Current stored memory 210 | * @param {string|null} programmingLanguage - Optional programming language 211 | * @returns {Object} Separated components for manual request building 212 | */ 213 | getRequestComponents(skillName, userMessage, storedMemory, programmingLanguage = null) { 214 | const normalizedSkillName = this.normalizeSkillName(skillName); 215 | const shouldUseModelMemory = this.shouldSendAsModelMemory(skillName, storedMemory); 216 | const skillPrompt = this.getSkillPrompt(normalizedSkillName, programmingLanguage); 217 | 218 | return { 219 | skillName: normalizedSkillName, 220 | userMessage, 221 | skillPrompt, 222 | shouldUseModelMemory, 223 | isFirstTime: this.isFirstTimeInteraction(storedMemory), 224 | modelMemory: shouldUseModelMemory && skillPrompt ? skillPrompt : null, 225 | messageContent: userMessage, 226 | programmingLanguage, 227 | requiresProgrammingLanguage: this.skillsRequiringProgrammingLanguage.includes(normalizedSkillName) 228 | }; 229 | } 230 | 231 | /** 232 | * Update stored memory after successful API call 233 | * @param {Array} storedMemory - Current stored memory array 234 | * @param {string} skillName - The skill that was used 235 | * @param {boolean} wasModelMemoryUsed - Whether model memory was used 236 | * @param {string} userMessage - The user message 237 | * @param {string} aiResponse - The AI response 238 | * @param {string|null} programmingLanguage - Programming language used 239 | * @returns {Array} Updated stored memory 240 | */ 241 | updateStoredMemory(storedMemory, skillName, wasModelMemoryUsed, userMessage, aiResponse, programmingLanguage = null) { 242 | const normalizedSkillName = this.normalizeSkillName(skillName); 243 | const updatedMemory = [...(storedMemory || [])]; 244 | 245 | const memoryEntry = { 246 | timestamp: new Date().toISOString(), 247 | skillUsed: normalizedSkillName, 248 | 
promptSentAsMemory: wasModelMemoryUsed, 249 | userMessage, 250 | aiResponse: aiResponse ? aiResponse.substring(0, 200) + '...' : null, // Truncated for storage 251 | action: wasModelMemoryUsed ? 'MODEL_MEMORY_SENT' : 'REGULAR_MESSAGE', 252 | programmingLanguage: programmingLanguage || null 253 | }; 254 | 255 | updatedMemory.push(memoryEntry); 256 | 257 | return updatedMemory; 258 | } 259 | 260 | /** 261 | * Example usage method showing complete flow 262 | * @param {string} skillName - The active skill 263 | * @param {string} userMessage - User's message 264 | * @param {Array} storedMemory - Current stored memory 265 | * @param {string|null} programmingLanguage - Optional programming language 266 | * @returns {Object} Complete flow result 267 | */ 268 | async processUserRequest(skillName, userMessage, storedMemory, programmingLanguage = null) { 269 | try { 270 | // Get request components 271 | const components = this.getRequestComponents(skillName, userMessage, storedMemory, programmingLanguage); 272 | 273 | // Prepare the actual API request 274 | const geminiRequest = this.prepareGeminiRequest(skillName, userMessage, storedMemory, programmingLanguage); 275 | 276 | return { 277 | requestReady: true, 278 | geminiRequest, 279 | components, 280 | needsMemoryUpdate: true, 281 | programmingLanguage 282 | }; 283 | 284 | } catch (error) { 285 | console.error('Error processing user request:', error); 286 | return { 287 | requestReady: false, 288 | error: error.message, 289 | programmingLanguage 290 | }; 291 | } 292 | } 293 | 294 | /** 295 | * Check if a skill requires programming language context 296 | * @param {string} skillName - The skill name to check 297 | * @returns {boolean} True if skill requires programming language 298 | */ 299 | requiresProgrammingLanguage(skillName) { 300 | const normalizedSkillName = this.normalizeSkillName(skillName); 301 | return this.skillsRequiringProgrammingLanguage.includes(normalizedSkillName); 302 | } 303 | 304 | /** 305 | * Get list of 
skills that require programming language context 306 | * @returns {Array} Array of skill names that require programming language 307 | */ 308 | getSkillsRequiringProgrammingLanguage() { 309 | return [...this.skillsRequiringProgrammingLanguage]; 310 | } 311 | 312 | /** 313 | * Normalize skill names to match file names 314 | * @param {string} skillName - Raw skill name 315 | * @returns {string} Normalized skill name 316 | */ 317 | normalizeSkillName(skillName) { 318 | if (!skillName) return 'general'; 319 | 320 | // Convert to lowercase and handle common variations 321 | const normalized = skillName.toLowerCase().trim(); 322 | 323 | // Map common variations to standard names 324 | const skillMap = { 325 | 'dsa': 'dsa', 326 | 'data-structures': 'dsa', 327 | 'algorithms': 'dsa', 328 | 'data-structures-algorithms': 'dsa', 329 | 'behavioral': 'behavioral', 330 | 'behavioral-interview': 'behavioral', 331 | 'behavior': 'behavioral', 332 | 'sales': 'sales', 333 | 'selling': 'sales', 334 | 'business-development': 'sales', 335 | 'presentation': 'presentation', 336 | 'presentations': 'presentation', 337 | 'public-speaking': 'presentation', 338 | 'data-science': 'data-science', 339 | 'datascience': 'data-science', 340 | 'machine-learning': 'data-science', 341 | 'ml': 'data-science', 342 | 'programming': 'programming', 343 | 'coding': 'programming', 344 | 'software-development': 'programming', 345 | 'development': 'programming', 346 | 'devops': 'devops', 347 | 'dev-ops': 'devops', 348 | 'infrastructure': 'devops', 349 | 'system-design': 'system-design', 350 | 'systems-design': 'system-design', 351 | 'architecture': 'system-design', 352 | 'distributed-systems': 'system-design', 353 | 'negotiation': 'negotiation', 354 | 'negotiating': 'negotiation', 355 | 'conflict-resolution': 'negotiation' 356 | }; 357 | 358 | return skillMap[normalized] || normalized; 359 | } 360 | 361 | /** 362 | * Get list of available skills 363 | * @returns {Array} Array of available skill names 364 | */ 
365 | getAvailableSkills() { 366 | if (!this.promptsLoaded) { 367 | this.loadPrompts(); 368 | } 369 | return ['dsa']; 370 | } 371 | 372 | /** 373 | * Reset the prompt sent tracking and clear stored memory 374 | */ 375 | resetSession() { 376 | this.skillPromptSent.clear(); 377 | } 378 | 379 | /** 380 | * Get current session statistics 381 | * @returns {Object} Statistics about current session 382 | */ 383 | getSessionStats() { 384 | if (!this.promptsLoaded) { 385 | this.loadPrompts(); 386 | } 387 | 388 | const stats = { 389 | totalPrompts: this.prompts.size, 390 | skillsUsedInSession: this.skillPromptSent.size, 391 | availableSkills: this.getAvailableSkills(), 392 | skillsUsed: Array.from(this.skillPromptSent), 393 | skillsRequiringProgrammingLanguage: this.skillsRequiringProgrammingLanguage 394 | }; 395 | 396 | return stats; 397 | } 398 | } 399 | 400 | // Export singleton instance 401 | const promptLoader = new PromptLoader(); 402 | 403 | module.exports = { 404 | PromptLoader, 405 | promptLoader 406 | }; -------------------------------------------------------------------------------- /src/managers/session.manager.js: -------------------------------------------------------------------------------- 1 | const logger = require('../core/logger').createServiceLogger('SESSION'); 2 | const config = require('../core/config'); 3 | const { promptLoader } = require('../../prompt-loader'); 4 | 5 | class SessionManager { 6 | constructor() { 7 | this.sessionMemory = []; 8 | this.compressionEnabled = true; 9 | this.maxSize = config.get('session.maxMemorySize'); 10 | this.compressionThreshold = config.get('session.compressionThreshold'); 11 | this.currentSkill = 'dsa'; // Default skill is DSA 12 | this.isInitialized = false; 13 | 14 | this.initializeWithSkillPrompts(); 15 | } 16 | 17 | /** 18 | * Initialize session memory with all available skill prompts 19 | */ 20 | async initializeWithSkillPrompts() { 21 | if (this.isInitialized) return; 22 | 23 | try { 24 | // Load prompts from 
the prompt loader 25 | promptLoader.loadPrompts(); 26 | const availableSkills = promptLoader.getAvailableSkills(); 27 | 28 | // Add initial system context for each skill 29 | for (const skill of availableSkills) { 30 | const skillPrompt = promptLoader.getSkillPrompt(skill); 31 | if (skillPrompt) { 32 | const event = this.createConversationEvent({ 33 | role: 'system', 34 | content: skillPrompt, 35 | skill: skill, 36 | action: 'skill_prompt_initialization', 37 | metadata: { 38 | isInitialization: true, 39 | skillName: skill 40 | } 41 | }); 42 | this.sessionMemory.push(event); 43 | } 44 | } 45 | 46 | this.isInitialized = true; 47 | logger.info('Session memory initialized with skill prompts', { 48 | skillCount: availableSkills.length, 49 | totalEvents: this.sessionMemory.length 50 | }); 51 | 52 | } catch (error) { 53 | logger.error('Failed to initialize session memory with skill prompts', { 54 | error: error.message 55 | }); 56 | } 57 | } 58 | 59 | /** 60 | * Set the current active skill 61 | */ 62 | setActiveSkill(skill) { 63 | const previousSkill = this.currentSkill; 64 | this.currentSkill = skill; 65 | 66 | this.addConversationEvent({ 67 | role: 'system', 68 | content: `Switched to ${skill} mode`, 69 | action: 'skill_change', 70 | metadata: { 71 | previousSkill, 72 | newSkill: skill 73 | } 74 | }); 75 | 76 | logger.info('Active skill changed', { 77 | from: previousSkill, 78 | to: skill 79 | }); 80 | } 81 | 82 | /** 83 | * Add a conversation event with proper role classification 84 | */ 85 | addConversationEvent({ role, content, action = null, metadata = {} }) { 86 | const event = this.createConversationEvent({ 87 | role, 88 | content, 89 | skill: this.currentSkill, 90 | action: action || this.inferActionFromRole(role), 91 | metadata 92 | }); 93 | 94 | this.sessionMemory.push(event); 95 | 96 | logger.debug('Conversation event added', { 97 | role, 98 | action: event.action, 99 | skill: this.currentSkill, 100 | contentLength: content?.length || 0, 101 | totalEvents: 
this.sessionMemory.length 102 | }); 103 | 104 | this.performMaintenanceIfNeeded(); 105 | return event.id; 106 | } 107 | 108 | /** 109 | * Add user transcription or chat input 110 | */ 111 | addUserInput(text, source = 'chat') { 112 | return this.addConversationEvent({ 113 | role: 'user', 114 | content: text, 115 | action: source === 'speech' ? 'speech_transcription' : 'chat_input', 116 | metadata: { 117 | source, 118 | textLength: text.length 119 | } 120 | }); 121 | } 122 | 123 | /** 124 | * Add LLM/model response 125 | */ 126 | addModelResponse(text, metadata = {}) { 127 | return this.addConversationEvent({ 128 | role: 'model', 129 | content: text, 130 | action: 'llm_response', 131 | metadata: { 132 | ...metadata, 133 | responseLength: text.length 134 | } 135 | }); 136 | } 137 | 138 | /** 139 | * Add OCR extracted text 140 | */ 141 | addOCREvent(extractedText, metadata = {}) { 142 | return this.addConversationEvent({ 143 | role: 'user', 144 | content: extractedText, 145 | action: 'ocr_extraction', 146 | metadata: { 147 | ...metadata, 148 | source: 'screenshot', 149 | textLength: extractedText.length 150 | } 151 | }); 152 | } 153 | 154 | /** 155 | * Create a conversation event with consistent structure 156 | */ 157 | createConversationEvent({ role, content, skill, action, metadata = {} }) { 158 | return { 159 | id: this.generateEventId(), 160 | timestamp: new Date().toISOString(), 161 | role: role, // 'user', 'model', or 'system' 162 | content, 163 | skill: skill || this.currentSkill, 164 | action, 165 | category: this.categorizeAction(action), 166 | metadata: { 167 | ...metadata, 168 | contentLength: content?.length || 0 169 | }, 170 | contextSummary: this.generateContextSummary(action, { 171 | role, 172 | content, 173 | skill: skill || this.currentSkill, 174 | ...metadata 175 | }) 176 | }; 177 | } 178 | 179 | /** 180 | * Infer action from role 181 | */ 182 | inferActionFromRole(role) { 183 | switch (role) { 184 | case 'user': return 'user_message'; 185 | case 
'model': return 'model_response'; 186 | case 'system': return 'system_message'; 187 | default: return 'unknown'; 188 | } 189 | } 190 | 191 | /** 192 | * Get conversation history for LLM context 193 | */ 194 | getConversationHistory(maxEntries = 20) { 195 | // Get recent conversation events (excluding system initialization) 196 | const conversationEvents = this.sessionMemory 197 | .filter(event => event.role !== 'system' || !event.metadata?.isInitialization) 198 | .slice(-maxEntries); 199 | 200 | return conversationEvents.map(event => ({ 201 | role: event.role, 202 | content: event.content, 203 | timestamp: event.timestamp, 204 | skill: event.skill, 205 | action: event.action 206 | })); 207 | } 208 | 209 | /** 210 | * Get the entire conversation history (excluding initialization system messages) 211 | * This is useful when the model needs complete context for each new message. 212 | */ 213 | getFullConversationHistory() { 214 | const conversationEvents = this.sessionMemory 215 | .filter(event => event.role !== 'system' || !event.metadata?.isInitialization); 216 | 217 | return conversationEvents.map(event => ({ 218 | role: event.role, 219 | content: event.content, 220 | timestamp: event.timestamp, 221 | skill: event.skill, 222 | action: event.action 223 | })); 224 | } 225 | 226 | /** 227 | * Get skill-specific context with optional programming language support 228 | * @param {string|null} skillName - Target skill name (defaults to current skill) 229 | * @param {string|null} programmingLanguage - Optional programming language for injection 230 | */ 231 | getSkillContext(skillName = null, programmingLanguage = null) { 232 | const targetSkill = skillName || this.currentSkill; 233 | 234 | // Get skill prompt with programming language injection if provided 235 | let skillPrompt = null; 236 | if (programmingLanguage && promptLoader.requiresProgrammingLanguage(targetSkill)) { 237 | // Use prompt loader to get language-enhanced prompt 238 | skillPrompt = 
promptLoader.getSkillPrompt(targetSkill, programmingLanguage); 239 | } else { 240 | // Find skill prompt from session memory (fallback) 241 | const skillPromptEvent = this.sessionMemory.find(event => 242 | event.action === 'skill_prompt_initialization' && 243 | event.skill === targetSkill 244 | ); 245 | skillPrompt = skillPromptEvent?.content || null; 246 | } 247 | 248 | // Get recent events for this skill 249 | const skillEvents = this.sessionMemory 250 | .filter(event => event.skill === targetSkill && !event.metadata?.isInitialization) 251 | .slice(-10); 252 | 253 | return { 254 | skillPrompt, 255 | recentEvents: skillEvents, 256 | currentSkill: targetSkill, 257 | programmingLanguage, 258 | requiresProgrammingLanguage: promptLoader.requiresProgrammingLanguage(targetSkill) 259 | }; 260 | } 261 | 262 | addEvent(action, details = {}) { 263 | const event = this.createEvent(action, details); 264 | this.sessionMemory.push(event); 265 | 266 | logger.debug('Session event added', { 267 | action, 268 | eventId: event.id, 269 | totalEvents: this.sessionMemory.length 270 | }); 271 | 272 | this.performMaintenanceIfNeeded(); 273 | return event.id; 274 | } 275 | 276 | createEvent(action, details) { 277 | return { 278 | id: this.generateEventId(), 279 | timestamp: new Date().toISOString(), 280 | action, 281 | category: this.categorizeAction(action), 282 | primaryContent: this.extractPrimaryContent(action, details), 283 | metadata: this.extractMetadata(action, details), 284 | contextSummary: this.generateContextSummary(action, details) 285 | }; 286 | } 287 | 288 | generateEventId() { 289 | return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`; 290 | } 291 | 292 | categorizeAction(action) { 293 | const actionLower = (action || '').toLowerCase(); 294 | 295 | if (actionLower.includes('screenshot') || actionLower.includes('ocr')) { 296 | return 'capture'; 297 | } 298 | if (actionLower.includes('speech') || actionLower.includes('transcription')) { 299 | return 'speech'; 
300 | } 301 | if (actionLower.includes('llm') || actionLower.includes('gemini')) { 302 | return 'llm'; 303 | } 304 | if (actionLower.includes('skill') || actionLower.includes('switch')) { 305 | return 'navigation'; 306 | } 307 | 308 | return 'system'; 309 | } 310 | 311 | extractPrimaryContent(action, details) { 312 | if (details.text && typeof details.text === 'string') { 313 | return details.text.substring(0, 200); 314 | } 315 | if (details.response && typeof details.response === 'string') { 316 | return details.response.substring(0, 200); 317 | } 318 | if (details.preview && typeof details.preview === 'string') { 319 | return details.preview; 320 | } 321 | 322 | return null; 323 | } 324 | 325 | extractMetadata(action, details) { 326 | const metadata = {}; 327 | 328 | const metadataFields = ['skill', 'duration', 'size', 'textLength', 'processingTime']; 329 | metadataFields.forEach(field => { 330 | if (details[field] !== undefined) { 331 | metadata[field] = details[field]; 332 | } 333 | }); 334 | 335 | return Object.keys(metadata).length > 0 ? metadata : null; 336 | } 337 | 338 | generateContextSummary(action, details) { 339 | const role = details.role; 340 | const skill = details.skill || this.currentSkill; 341 | 342 | switch (action) { 343 | case 'speech_transcription': 344 | return `User spoke: "${details.content?.substring(0, 50)}..." (${skill} mode)`; 345 | case 'chat_input': 346 | return `User typed: "${details.content?.substring(0, 50)}..." 
(${skill} mode)`; 347 | case 'llm_response': 348 | return `AI responded in ${skill} mode (${details.responseLength || details.contentLength} chars)`; 349 | case 'ocr_extraction': 350 | return `Screenshot text extracted: ${details.textLength || details.contentLength} characters (${skill} mode)`; 351 | case 'skill_change': 352 | return `Switched from ${details.previousSkill} to ${details.newSkill} mode`; 353 | case 'skill_prompt_initialization': 354 | return `${skill} skill prompt loaded for context`; 355 | case 'user_message': 356 | return `User: "${details.content?.substring(0, 50)}..." (${skill})`; 357 | case 'model_response': 358 | return `Model: Response in ${skill} mode (${details.contentLength} chars)`; 359 | default: 360 | if (role === 'user') { 361 | return `User input in ${skill} mode`; 362 | } else if (role === 'model') { 363 | return `Model response in ${skill} mode`; 364 | } 365 | return action || 'Unknown action'; 366 | } 367 | } 368 | 369 | performMaintenanceIfNeeded() { 370 | if (this.sessionMemory.length > this.maxSize) { 371 | this.performMaintenance(); 372 | } else if (this.compressionEnabled && this.sessionMemory.length > this.compressionThreshold) { 373 | this.compressOldEvents(); 374 | } 375 | } 376 | 377 | performMaintenance() { 378 | const beforeCount = this.sessionMemory.length; 379 | 380 | this.removeOldSystemEvents(); 381 | this.consolidateSimilarEvents(); 382 | 383 | const afterCount = this.sessionMemory.length; 384 | 385 | logger.info('Session memory maintenance completed', { 386 | beforeCount, 387 | afterCount, 388 | eventsRemoved: beforeCount - afterCount 389 | }); 390 | } 391 | 392 | removeOldSystemEvents() { 393 | const cutoffTime = Date.now() - (24 * 60 * 60 * 1000); // 24 hours 394 | 395 | this.sessionMemory = this.sessionMemory.filter(event => { 396 | const eventTime = new Date(event.timestamp).getTime(); 397 | const shouldKeep = event.category !== 'system' || eventTime > cutoffTime; 398 | return shouldKeep; 399 | }); 400 | } 401 | 
402 | consolidateSimilarEvents() { 403 | const groups = this.groupSimilarEvents(); 404 | const consolidated = []; 405 | 406 | for (const group of groups) { 407 | if (group.length === 1) { 408 | consolidated.push(group[0]); 409 | } else { 410 | consolidated.push(this.createConsolidatedEvent(group)); 411 | } 412 | } 413 | 414 | this.sessionMemory = consolidated; 415 | } 416 | 417 | groupSimilarEvents() { 418 | const groups = []; 419 | const processed = new Set(); 420 | 421 | for (let i = 0; i < this.sessionMemory.length; i++) { 422 | if (processed.has(i)) continue; 423 | 424 | const group = [this.sessionMemory[i]]; 425 | processed.add(i); 426 | 427 | for (let j = i + 1; j < this.sessionMemory.length; j++) { 428 | if (processed.has(j)) continue; 429 | 430 | if (this.areEventsSimilar(this.sessionMemory[i], this.sessionMemory[j])) { 431 | group.push(this.sessionMemory[j]); 432 | processed.add(j); 433 | } 434 | } 435 | 436 | groups.push(group); 437 | } 438 | 439 | return groups; 440 | } 441 | 442 | areEventsSimilar(event1, event2) { 443 | const timeDiff = Math.abs( 444 | new Date(event1.timestamp).getTime() - new Date(event2.timestamp).getTime() 445 | ); 446 | 447 | return event1.category === event2.category && 448 | event1.action === event2.action && 449 | timeDiff < 60000; // Within 1 minute 450 | } 451 | 452 | createConsolidatedEvent(events) { 453 | const firstEvent = events[0]; 454 | const lastEvent = events[events.length - 1]; 455 | 456 | return { 457 | ...firstEvent, 458 | id: this.generateEventId(), 459 | timestamp: lastEvent.timestamp, 460 | contextSummary: `${firstEvent.contextSummary} (${events.length} similar events)`, 461 | metadata: { 462 | ...firstEvent.metadata, 463 | consolidatedCount: events.length, 464 | timeSpan: { 465 | start: firstEvent.timestamp, 466 | end: lastEvent.timestamp 467 | } 468 | } 469 | }; 470 | } 471 | 472 | compressOldEvents() { 473 | const cutoffTime = Date.now() - (2 * 60 * 60 * 1000); // 2 hours 474 | 475 | this.sessionMemory = 
this.sessionMemory.map(event => { 476 | const eventTime = new Date(event.timestamp).getTime(); 477 | 478 | if (eventTime < cutoffTime && event.primaryContent && event.primaryContent.length > 100) { 479 | return { 480 | ...event, 481 | primaryContent: event.primaryContent.substring(0, 100) + '...[compressed]', 482 | compressed: true 483 | }; 484 | } 485 | 486 | return event; 487 | }); 488 | } 489 | 490 | getOptimizedHistory() { 491 | const recent = this.getRecentEvents(10); 492 | const important = this.getImportantEvents(5); 493 | const summary = this.generateSessionSummary(); 494 | 495 | return { 496 | recent, 497 | important, 498 | summary, 499 | totalEvents: this.sessionMemory.length 500 | }; 501 | } 502 | 503 | getRecentEvents(count = 10) { 504 | return this.sessionMemory 505 | .slice(-count) 506 | .map(event => ({ 507 | timestamp: event.timestamp, 508 | action: event.action, 509 | category: event.category, 510 | summary: event.contextSummary, 511 | metadata: event.metadata 512 | })); 513 | } 514 | 515 | getImportantEvents(count = 5) { 516 | return this.sessionMemory 517 | .filter(event => ['capture', 'llm'].includes(event.category)) 518 | .slice(-count) 519 | .map(event => ({ 520 | timestamp: event.timestamp, 521 | category: event.category, 522 | summary: event.contextSummary, 523 | content: event.primaryContent?.substring(0, 150) || null 524 | })); 525 | } 526 | 527 | generateSessionSummary() { 528 | const categoryStats = this.getCategoryStatistics(); 529 | const timeSpan = this.getSessionTimeSpan(); 530 | const primaryActivities = this.getPrimaryActivities(); 531 | 532 | return { 533 | duration: timeSpan, 534 | activities: categoryStats, 535 | focus: primaryActivities, 536 | eventCount: this.sessionMemory.length 537 | }; 538 | } 539 | 540 | getCategoryStatistics() { 541 | const stats = {}; 542 | 543 | this.sessionMemory.forEach(event => { 544 | stats[event.category] = (stats[event.category] || 0) + 1; 545 | }); 546 | 547 | return stats; 548 | } 549 | 550 | 
getSessionTimeSpan() { 551 | if (this.sessionMemory.length === 0) return null; 552 | 553 | const timestamps = this.sessionMemory.map(e => new Date(e.timestamp).getTime()); 554 | const start = Math.min(...timestamps); 555 | const end = Math.max(...timestamps); 556 | 557 | return { 558 | start: new Date(start).toISOString(), 559 | end: new Date(end).toISOString(), 560 | durationMs: end - start 561 | }; 562 | } 563 | 564 | getPrimaryActivities() { 565 | const activities = {}; 566 | 567 | this.sessionMemory.forEach(event => { 568 | if (event.metadata?.skill) { 569 | activities[event.metadata.skill] = (activities[event.metadata.skill] || 0) + 1; 570 | } 571 | }); 572 | 573 | return Object.entries(activities) 574 | .sort(([,a], [,b]) => b - a) 575 | .slice(0, 3) 576 | .map(([skill, count]) => ({ skill, count })); 577 | } 578 | 579 | clear() { 580 | const eventCount = this.sessionMemory.length; 581 | this.sessionMemory = []; 582 | this.isInitialized = false; 583 | 584 | logger.info('Session memory cleared', { eventCount }); 585 | 586 | // Reinitialize with skill prompts 587 | this.initializeWithSkillPrompts(); 588 | } 589 | 590 | getMemoryUsage() { 591 | const totalSize = JSON.stringify(this.sessionMemory).length; 592 | 593 | return { 594 | eventCount: this.sessionMemory.length, 595 | approximateSize: `${(totalSize / 1024).toFixed(2)} KB`, 596 | utilizationPercent: Math.round((this.sessionMemory.length / this.maxSize) * 100) 597 | }; 598 | } 599 | } 600 | 601 | module.exports = new SessionManager(); -------------------------------------------------------------------------------- /src/ui/chat-window.js: -------------------------------------------------------------------------------- 1 | try { 2 | // Check if we're in Node.js context or browser context 3 | let logger; 4 | try { 5 | logger = require('../core/logger').createServiceLogger('CHAT-UI'); 6 | } catch (error) { 7 | logger = { 8 | info: (...args) => console.log('[CHAT-UI INFO]', ...args), 9 | debug: (...args) => 
console.log('[CHAT-UI DEBUG]', ...args), 10 | error: (...args) => console.error('[CHAT-UI ERROR]', ...args), 11 | warn: (...args) => console.warn('[CHAT-UI WARN]', ...args) 12 | }; 13 | } 14 | 15 | class ChatWindowUI { 16 | constructor() { 17 | this.isRecording = false; 18 | this.isInteractive = true; // Start in interactive mode 19 | this.elements = {}; 20 | 21 | this.init(); 22 | } 23 | 24 | init() { 25 | try { 26 | this.setupElements(); 27 | this.setupEventListeners(); 28 | this.addMessage('Chat window initialized. Click microphone or press ⌘+R to start recording.', 'system'); 29 | 30 | logger.info('Chat window UI initialized successfully'); 31 | } catch (error) { 32 | logger.error('Failed to initialize chat window UI', { error: error.message }); 33 | console.error('Chat window initialization failed:', error); 34 | } 35 | } 36 | 37 | setupElements() { 38 | this.elements = { 39 | chatMessages: document.getElementById('chatMessages'), 40 | recordingIndicator: document.getElementById('recordingIndicator'), 41 | messageInput: document.getElementById('messageInput'), 42 | sendButton: document.getElementById('sendButton'), 43 | micButton: document.getElementById('micButton'), 44 | chatContainer: document.getElementById('chatContainer'), 45 | interactionIndicator: document.getElementById('interactionIndicator'), 46 | interactionText: document.getElementById('interactionText'), 47 | listeningContainer: document.getElementById('listeningContainer'), 48 | listeningDuration: document.getElementById('listeningDuration') 49 | }; 50 | 51 | // Validate required elements 52 | const requiredElements = ['chatMessages', 'micButton', 'sendButton', 'messageInput']; 53 | for (const elementKey of requiredElements) { 54 | if (!this.elements[elementKey]) { 55 | throw new Error(`Required element '${elementKey}' not found`); 56 | } 57 | } 58 | 59 | // Initialize listening timer 60 | this.listeningStartTime = null; 61 | this.listeningTimer = null; 62 | } 63 | 64 | setupEventListeners() { 
65 | // Interaction state handlers 66 | if (window.electronAPI) { 67 | window.electronAPI.onInteractionModeChanged((event, interactive) => { 68 | this.isInteractive = interactive; 69 | if (interactive) { 70 | this.handleInteractionEnabled(); 71 | } else { 72 | this.handleInteractionDisabled(); 73 | } 74 | }); 75 | 76 | // Speech recognition handlers 77 | window.electronAPI.onTranscriptionReceived((event, data) => { 78 | if (data && data.text) { 79 | this.handleTranscription(data.text); 80 | } else { 81 | console.warn('Transcription event received but no text data:', data); 82 | } 83 | }); 84 | 85 | // Listen for interim transcription (real-time) 86 | if (window.electronAPI.onInterimTranscription) { 87 | window.electronAPI.onInterimTranscription((event, data) => { 88 | if (data && data.text) { 89 | this.showInterimText(data.text); 90 | } 91 | }); 92 | } 93 | 94 | window.electronAPI.onSpeechStatus((event, data) => { 95 | if (data && data.status) { 96 | this.addMessage(data.status, 'system'); 97 | 98 | // Update recording state based on status 99 | if (data.status.includes('started') || data.status.includes('Recording')) { 100 | this.handleRecordingStarted(); 101 | } else if (data.status.includes('stopped') || data.status.includes('ended')) { 102 | this.handleRecordingStopped(); 103 | } 104 | } 105 | }); 106 | 107 | window.electronAPI.onSpeechError((event, data) => { 108 | if (data && data.error) { 109 | this.addMessage(`Speech Error: ${data.error}`, 'error'); 110 | this.handleRecordingStopped(); // Stop recording on error 111 | } 112 | }); 113 | 114 | // Skill handlers 115 | window.electronAPI.onSkillChanged((event, data) => { 116 | if (data && data.skill) { 117 | this.handleSkillActivated(data.skill); 118 | } 119 | }); 120 | 121 | // Session handlers 122 | window.electronAPI.onSessionCleared(() => { 123 | this.addMessage('Session memory has been cleared', 'system'); 124 | }); 125 | 126 | window.electronAPI.onOcrCompleted((event, data) => { 127 | if (data.text && 
data.text.trim()) { 128 | this.addMessage(`📷 OCR Result: ${data.text}`, 'transcription'); 129 | } 130 | }); 131 | 132 | window.electronAPI.onOcrError((event, data) => { 133 | this.addMessage(`OCR Error: ${data.error}`, 'error'); 134 | }); 135 | 136 | window.electronAPI.onLlmResponse((event, data) => { 137 | // Store AI response (text + snippets) in chat history 138 | if (data && data.response) { 139 | this.hideThinkingIndicator?.(); 140 | this.renderAssistantResponse(data.response); 141 | } 142 | }); 143 | 144 | window.electronAPI.onLlmError((event, data) => { 145 | this.addMessage(`LLM Error: ${data.error}`, 'error'); 146 | }); 147 | 148 | window.electronAPI.onTranscriptionLlmResponse((event, data) => { 149 | if (data && data.response) { 150 | // Hide thinking indicator 151 | this.hideThinkingIndicator(); 152 | // Add assistant response (text + snippets) 153 | this.renderAssistantResponse(data.response); 154 | } 155 | }); 156 | } 157 | 158 | // UI event handlers 159 | this.setupUIHandlers(); 160 | 161 | logger.debug('Chat window event listeners set up'); 162 | } 163 | 164 | setupUIHandlers() { 165 | // Microphone button 166 | this.elements.micButton.addEventListener('click', async () => { 167 | if (!this.isInteractive) { 168 | this.addMessage('Window is in non-interactive mode. 
Press Alt+A to enable interaction.', 'error'); 169 | return; 170 | } 171 | 172 | try { 173 | if (this.isRecording) { 174 | await window.electronAPI.stopSpeechRecognition(); 175 | } else { 176 | await window.electronAPI.startSpeechRecognition(); 177 | } 178 | } catch (error) { 179 | this.addMessage(`Speech recognition error: ${error.message}`, 'error'); 180 | logger.error('Speech recognition failed', { error: error.message }); 181 | } 182 | }); 183 | 184 | // Send button 185 | this.elements.sendButton.addEventListener('click', () => { 186 | this.sendMessage(); 187 | }); 188 | 189 | // Message input 190 | this.elements.messageInput.addEventListener('keypress', (e) => { 191 | if (e.key === 'Enter') { 192 | this.sendMessage(); 193 | } 194 | }); 195 | 196 | // Global keyboard shortcuts 197 | document.addEventListener('keydown', (e) => { 198 | if (e.altKey && e.key === 'r') { 199 | e.preventDefault(); 200 | this.elements.micButton.click(); 201 | } 202 | }); 203 | } 204 | 205 | handleInteractionEnabled() { 206 | this.isInteractive = true; 207 | this.elements.chatContainer.classList.remove('non-interactive'); 208 | this.showInteractionIndicator('Interactive', true); 209 | logger.debug('Interaction mode enabled in chat'); 210 | } 211 | 212 | handleInteractionDisabled() { 213 | this.isInteractive = false; 214 | this.elements.chatContainer.classList.add('non-interactive'); 215 | this.showInteractionIndicator('Non-Interactive', false); 216 | logger.debug('Interaction mode disabled in chat'); 217 | } 218 | 219 | handleRecordingStarted() { 220 | this.isRecording = true; 221 | if (this.elements.recordingIndicator) { 222 | this.elements.recordingIndicator.style.display = 'block'; 223 | } 224 | if (this.elements.micButton) { 225 | this.elements.micButton.classList.add('recording'); 226 | } 227 | 228 | // Show listening animation 229 | this.showListeningAnimation(); 230 | 231 | logger.debug('Recording started in chat window'); 232 | } 233 | 234 | handleRecordingStopped() { 235 | 
this.isRecording = false; 236 | if (this.elements.recordingIndicator) { 237 | this.elements.recordingIndicator.style.display = 'none'; 238 | } 239 | if (this.elements.micButton) { 240 | this.elements.micButton.classList.remove('recording'); 241 | } 242 | 243 | // Hide listening animation 244 | this.hideListeningAnimation(); 245 | 246 | logger.debug('Recording stopped in chat window'); 247 | } 248 | 249 | handleTranscription(text) { 250 | if (text && text.trim()) { 251 | // Hide listening animation first 252 | this.hideListeningAnimation(); 253 | 254 | // Show transcribed text with a slight delay for smooth transition 255 | setTimeout(() => { 256 | this.addMessage(text, 'transcription'); 257 | 258 | // Show thinking indicator after transcription 259 | setTimeout(() => { 260 | this.showThinkingIndicator(); 261 | }, 300); 262 | }, 200); 263 | 264 | logger.debug('Transcription received in chat', { textLength: text.length }); 265 | } else { 266 | console.warn('❌ Transcription text is empty or invalid:', text); 267 | } 268 | } 269 | 270 | async handleSkillActivated(skillName) { 271 | try { 272 | // Request the actual skill prompt from the main process 273 | const skillPrompt = await window.electronAPI.getSkillPrompt(skillName); 274 | 275 | if (skillPrompt) { 276 | // Extract the title/first line for display 277 | const lines = skillPrompt.split('\n').filter(line => line.trim()); 278 | const title = lines.find(line => line.startsWith('#')) || `# ${skillName.toUpperCase()} Mode`; 279 | const cleanTitle = title.replace(/^#+\s*/, '').trim(); 280 | 281 | // Show a brief activation message with the skill title 282 | const icons = { 283 | 'dsa': '🧠', 284 | 'behavioral': '💼', 285 | 'sales': '💰', 286 | 'presentation': '🎤', 287 | 'data-science': '📊', 288 | 'programming': '💻', 289 | 'devops': '🚀', 290 | 'system-design': '🏗️', 291 | 'negotiation': '🤝' 292 | }; 293 | 294 | const icon = icons[skillName] || '🎯'; 295 | this.addMessage(`${icon} ${cleanTitle} - Ready to help!`, 'system'); 
296 | } else { 297 | // Fallback if prompt not found 298 | this.addMessage(`🎯 ${skillName.toUpperCase()} Mode: Ready to help!`, 'system'); 299 | } 300 | } catch (error) { 301 | logger.error('Failed to load skill prompt', { skill: skillName, error: error.message }); 302 | // Fallback message 303 | this.addMessage(`🎯 ${skillName.toUpperCase()} Mode: Ready to help!`, 'system'); 304 | } 305 | 306 | logger.info('Skill activated in chat', { skill: skillName }); 307 | } 308 | 309 | async sendMessage() { 310 | const text = this.elements.messageInput.value.trim(); 311 | if (text) { 312 | this.addMessage(text, 'user'); 313 | this.elements.messageInput.value = ''; 314 | 315 | // Send to main process for session memory storage 316 | try { 317 | if (window.electronAPI && window.electronAPI.sendChatMessage) { 318 | await window.electronAPI.sendChatMessage(text); 319 | } 320 | } catch (error) { 321 | logger.error('Failed to send chat message to main process', { error: error.message }); 322 | } 323 | 324 | logger.debug('User message sent', { textLength: text.length }); 325 | } 326 | } 327 | 328 | addMessage(text, type = 'user') { 329 | if (!this.elements.chatMessages) { 330 | console.error('❌ Chat messages element not found!'); 331 | return; 332 | } 333 | 334 | const messageDiv = document.createElement('div'); 335 | messageDiv.className = `message ${type}`; 336 | 337 | const timeDiv = document.createElement('div'); 338 | timeDiv.className = 'message-time'; 339 | timeDiv.textContent = new Date().toLocaleTimeString(); 340 | 341 | const textDiv = document.createElement('div'); 342 | textDiv.className = 'message-text'; 343 | 344 | // Format assistant messages as markdown 345 | if (type === 'assistant') { 346 | textDiv.innerHTML = this.formatMarkdown(text); 347 | } else { 348 | textDiv.textContent = text; 349 | } 350 | 351 | messageDiv.appendChild(timeDiv); 352 | messageDiv.appendChild(textDiv); 353 | 354 | this.elements.chatMessages.appendChild(messageDiv); 355 | 356 | // Auto-scroll 
to bottom 357 | this.elements.chatMessages.scrollTop = this.elements.chatMessages.scrollHeight; 358 | } 359 | 360 | // Split AI response into plain text and code snippets and append to chat 361 | renderAssistantResponse(response) { 362 | if (!response || typeof response !== 'string') return; 363 | const blocks = this.extractCodeBlocks(response); 364 | const textOnly = this.stripCodeBlocks(response, blocks); 365 | if (textOnly && textOnly.trim().length) { 366 | this.addMessage(textOnly, 'assistant'); 367 | } 368 | blocks.forEach(b => this.addCodeSnippet(b.language, b.code)); 369 | } 370 | 371 | extractCodeBlocks(text) { 372 | const codeBlockRegex = /```(\w+)?\n([\s\S]*?)```/g; 373 | const blocks = []; 374 | let match; 375 | while ((match = codeBlockRegex.exec(text)) !== null) { 376 | blocks.push({ language: match[1] || 'text', code: (match[2] || '').trim(), fullMatch: match[0] }); 377 | } 378 | return blocks; 379 | } 380 | 381 | stripCodeBlocks(text, blocks) { 382 | let result = text || ''; 383 | blocks.forEach(b => { result = result.replace(b.fullMatch, ''); }); 384 | return result.replace(/\n\s*\n\s*\n/g, '\n\n').trim(); 385 | } 386 | 387 | addCodeSnippet(language, code) { 388 | if (!this.elements.chatMessages) return; 389 | const messageDiv = document.createElement('div'); 390 | messageDiv.className = 'message assistant'; 391 | const timeDiv = document.createElement('div'); 392 | timeDiv.className = 'message-time'; 393 | timeDiv.textContent = new Date().toLocaleTimeString(); 394 | const textDiv = document.createElement('div'); 395 | textDiv.className = 'message-text'; 396 | const escapedLang = (language || 'text').toUpperCase(); 397 | const escapedCode = this.escapeHtmlForSnippet(code || ''); 398 | textDiv.innerHTML = ` 399 |
      Snippet: ${escapedLang}
      ${escapedCode}
    `;
    // NOTE(review): the wrapper markup of the snippet template above looks
    // truncated in this text dump (only the text/interpolations survive) —
    // verify the original element tags against the repository file.
    messageDiv.appendChild(timeDiv);
    messageDiv.appendChild(textDiv);
    this.elements.chatMessages.appendChild(messageDiv);
    this.elements.chatMessages.scrollTop = this.elements.chatMessages.scrollHeight;
  }

  // Entity-encodes text (&, <, >) via a detached element's
  // textContent -> innerHTML round-trip.
  escapeHtmlForSnippet(text) {
    const div = document.createElement('div');
    div.textContent = text;
    return div.innerHTML;
  }

  // Converts markdown text to HTML. Prefers the markdown.js library when
  // reachable (script-tag global, require(), or window.markdown), otherwise
  // falls back to a handful of regex substitutions.
  formatMarkdown(text) {
    if (!text) return '';

    try {
      // Use the markdown.js library for proper markdown parsing
      // Try to access markdown library in different contexts
      let markdownLib;

      // First try global markdown object (from script tag)
      if (typeof markdown !== 'undefined' && markdown.toHTML) {
        markdownLib = markdown;
      }
      // Then try require (Node.js context)
      else if (typeof require !== 'undefined') {
        try {
          markdownLib = require('markdown');
        } catch (requireError) {
          logger.debug('Could not require markdown library:', requireError.message);
        }
      }
      // Finally try window.markdown (browser context)
      else if (typeof window !== 'undefined' && window.markdown) {
        markdownLib = window.markdown;
      }

      if (markdownLib && markdownLib.toHTML) {
        return markdownLib.toHTML(text);
      } else {
        logger.warn('Markdown library not available, falling back to basic formatting');
        // Fallback to basic formatting
        // NOTE(review): the replacement strings below read as bare '$1' /
        // '' in this dump; the original almost certainly emitted tags such
        // as <strong>$1</strong>, <em>$1</em>, <code>$1</code> and <br>
        // that were stripped when the file was rendered as text — confirm
        // against the repository file before relying on this fallback.
        return text
          .replace(/\*\*(.+?)\*\*/g, '$1')
          .replace(/\*(.+?)\*/g, '$1')
          .replace(/`(.+?)`/g, '$1')
          .replace(/\n/g, '');
      }
    } catch (error) {
      logger.warn('Failed to parse markdown, falling back to plain text', { error: error.message });
      // Fallback to basic formatting (see NOTE above about stripped tags)
      return text.replace(/\n/g, '');
    }
  }

  // Shows an animated "assistant is thinking" placeholder bubble; removed
  // again by hideThinkingIndicator() via its fixed DOM id.
  showThinkingIndicator() {
    if (!this.elements.chatMessages) return;

    const thinkingDiv = document.createElement('div');
    thinkingDiv.className = 'message assistant thinking';
    thinkingDiv.id = 'thinking-indicator';

    const timeDiv = document.createElement('div');
    timeDiv.className = 'message-time';
    timeDiv.textContent = new Date().toLocaleTimeString();

    const textDiv = document.createElement('div');
    textDiv.className = 'message-text thinking-dots';
    // NOTE(review): this innerHTML is empty in the dump; the original
    // likely contained the animated-dots markup — verify.
    textDiv.innerHTML = '';

    thinkingDiv.appendChild(timeDiv);
    thinkingDiv.appendChild(textDiv);

    this.elements.chatMessages.appendChild(thinkingDiv);
    this.elements.chatMessages.scrollTop = this.elements.chatMessages.scrollHeight;
  }

  hideThinkingIndicator() {
    const thinkingIndicator = document.getElementById('thinking-indicator');
    if (thinkingIndicator) {
      thinkingIndicator.remove();
    }
  }

  // Flashes a small badge describing the current interaction state; the
  // badge auto-hides after 2 seconds.
  showInteractionIndicator(text, interactive) {
    if (!this.elements.interactionIndicator || !this.elements.interactionText) return;

    this.elements.interactionText.textContent = text;
    this.elements.interactionIndicator.className = `interaction-indicator show ${interactive ?
'interactive' : 'non-interactive'}`; 491 | 492 | setTimeout(() => { 493 | this.elements.interactionIndicator.classList.remove('show'); 494 | }, 2000); 495 | } 496 | 497 | showListeningAnimation() { 498 | if (!this.elements.listeningContainer) { 499 | console.warn('❌ Listening container not found'); 500 | return; 501 | } 502 | 503 | // Show the listening animation 504 | this.elements.listeningContainer.classList.add('active'); 505 | 506 | // Start the duration timer 507 | this.listeningStartTime = Date.now(); 508 | this.listeningTimer = setInterval(() => { 509 | this.updateListeningDuration(); 510 | }, 100); 511 | 512 | // Auto-scroll to show the listening animation 513 | if (this.elements.chatMessages) { 514 | this.elements.chatMessages.scrollTop = 0; 515 | } 516 | } 517 | 518 | hideListeningAnimation() { 519 | if (this.elements.listeningContainer) { 520 | this.elements.listeningContainer.classList.remove('active'); 521 | } 522 | 523 | // Clear interim text 524 | this.clearInterimText(); 525 | 526 | // Clear the duration timer 527 | if (this.listeningTimer) { 528 | clearInterval(this.listeningTimer); 529 | this.listeningTimer = null; 530 | } 531 | 532 | this.listeningStartTime = null; 533 | } 534 | 535 | updateListeningDuration() { 536 | if (!this.listeningStartTime || !this.elements.listeningDuration) return; 537 | 538 | const elapsed = Date.now() - this.listeningStartTime; 539 | const seconds = Math.floor(elapsed / 1000); 540 | const milliseconds = Math.floor((elapsed % 1000) / 100); 541 | 542 | const formattedTime = `${seconds.toString().padStart(2, '0')}:${milliseconds}`; 543 | this.elements.listeningDuration.textContent = formattedTime; 544 | } 545 | 546 | showInterimText(text) { 547 | if (!this.elements.listeningContainer) return; 548 | 549 | // Find or create interim text element 550 | let interimElement = this.elements.listeningContainer.querySelector('.interim-text'); 551 | if (!interimElement) { 552 | interimElement = document.createElement('div'); 553 | 
interimElement.className = 'interim-text'; 554 | interimElement.style.cssText = ` 555 | color: rgba(255, 255, 255, 0.8); 556 | font-size: 12px; 557 | font-style: italic; 558 | margin-top: 10px; 559 | padding: 8px; 560 | background: rgba(76, 175, 80, 0.2); 561 | border-radius: 6px; 562 | min-height: 20px; 563 | border: 1px dashed rgba(76, 175, 80, 0.4); 564 | `; 565 | this.elements.listeningContainer.appendChild(interimElement); 566 | } 567 | 568 | interimElement.textContent = text || 'Waiting for speech...'; 569 | } 570 | 571 | clearInterimText() { 572 | if (!this.elements.listeningContainer) return; 573 | 574 | const interimElement = this.elements.listeningContainer.querySelector('.interim-text'); 575 | if (interimElement) { 576 | interimElement.remove(); 577 | } 578 | } 579 | } 580 | 581 | // Initialize when DOM is loaded 582 | if (document.readyState === 'loading') { 583 | document.addEventListener('DOMContentLoaded', () => { 584 | new ChatWindowUI(); 585 | }); 586 | } else { 587 | new ChatWindowUI(); 588 | } 589 | 590 | } catch (error) { 591 | console.error('💥 CHAT-WINDOW.JS: Script execution failed!', error); 592 | console.error('💥 CHAT-WINDOW.JS: Error stack:', error.stack); 593 | } -------------------------------------------------------------------------------- /src/ui/llm-response-window.js: -------------------------------------------------------------------------------- 1 | const logger = require("../core/logger"); 2 | 3 | class LLMResponseWindowUI { 4 | constructor() { 5 | this.currentLayout = "split"; 6 | this.hasCode = false; 7 | this.currentSkill = "dsa"; 8 | this.isInteractive = false; 9 | this.scrollableElements = []; 10 | 11 | this.elements = {}; 12 | 13 | this.init(); 14 | this.handleMouseEnter = this.handleMouseEnter.bind(this); 15 | this.handleMouseLeave = this.handleMouseLeave.bind(this); 16 | this.handleWheelScroll = this.handleWheelScroll.bind(this); 17 | } 18 | 19 | init() { 20 | try { 21 | this.setupElements(); 22 | 
this.setupEventListeners(); 23 | this.configureMarked(); 24 | 25 | logger.info("LLM response window UI initialized", { 26 | component: "LLMResponseWindowUI", 27 | }); 28 | } catch (error) { 29 | logger.error("Failed to initialize LLM response window UI", { 30 | component: "LLMResponseWindowUI", 31 | error: error.message, 32 | }); 33 | } 34 | } 35 | 36 | setupElements() { 37 | this.elements = { 38 | loading: document.getElementById("loading"), 39 | responseContent: document.getElementById("response-content"), 40 | splitLayout: document.getElementById("split-layout"), 41 | fullContent: document.getElementById("full-content"), 42 | textContent: document.getElementById("text-content"), 43 | codeContent: document.getElementById("code-content"), 44 | fullMarkdown: document.getElementById("full-markdown"), 45 | }; 46 | 47 | // Validate required elements 48 | const requiredElements = ["loading", "responseContent"]; 49 | for (const elementKey of requiredElements) { 50 | if (!this.elements[elementKey]) { 51 | throw new Error(`Required element '${elementKey}' not found`); 52 | } 53 | } 54 | } 55 | 56 | setupEventListeners() { 57 | const { ipcRenderer } = require("electron"); 58 | 59 | logger.debug("Setting up event listeners", { 60 | component: "LLMResponseWindowUI", 61 | }); 62 | 63 | // Test IPC connection 64 | try { 65 | ipcRenderer.send("test-connection"); 66 | logger.debug("IPC connection test sent", { 67 | component: "LLMResponseWindowUI", 68 | }); 69 | } catch (error) { 70 | logger.error("IPC connection test failed", { 71 | component: "LLMResponseWindowUI", 72 | error: error.message, 73 | }); 74 | } 75 | 76 | // Core event handlers with enhanced logging 77 | ipcRenderer.on("show-loading", () => { 78 | logger.debug("show-loading event received", { 79 | component: "LLMResponseWindowUI", 80 | }); 81 | this.showLoadingState(); 82 | }); 83 | 84 | ipcRenderer.on("display-llm-response", (event, data) => { 85 | logger.info("display-llm-response event received - ENTRY POINT", { 
86 | component: "LLMResponseWindowUI", 87 | hasData: !!data, 88 | dataKeys: data ? Object.keys(data) : [], 89 | contentLength: data?.content?.length || 0, 90 | responseLength: data?.response?.length || 0, 91 | eventOrigin: event ? "valid" : "invalid", 92 | timestamp: new Date().toISOString(), 93 | }); 94 | 95 | // Add a small delay to ensure DOM is ready 96 | setTimeout(() => { 97 | this.handleDisplayResponse(data); 98 | }, 50); 99 | }); 100 | 101 | // Interaction state handlers 102 | ipcRenderer.on("interaction-enabled", () => { 103 | logger.debug("interaction-enabled event received", { 104 | component: "LLMResponseWindowUI", 105 | }); 106 | this.handleInteractionEnabled(); 107 | }); 108 | 109 | ipcRenderer.on("interaction-disabled", () => { 110 | logger.debug("interaction-disabled event received", { 111 | component: "LLMResponseWindowUI", 112 | }); 113 | this.handleInteractionDisabled(); 114 | }); 115 | 116 | // Window event handlers 117 | this.setupWindowEventHandlers(); 118 | 119 | // Keyboard event handlers 120 | this.setupKeyboardHandlers(); 121 | 122 | logger.debug("Event listeners setup complete", { 123 | component: "LLMResponseWindowUI", 124 | }); 125 | 126 | // Add a test method to verify setup 127 | window.testLLMResponse = () => { 128 | logger.info("Manual test triggered"); 129 | this.handleDisplayResponse({ 130 | content: "Test content from manual trigger", 131 | metadata: { skill: "test" }, 132 | }); 133 | }; 134 | } 135 | 136 | setupWindowEventHandlers() { 137 | window.addEventListener("focus", () => { 138 | logger.debug("LLM Response window focused", { 139 | component: "LLMResponseWindowUI", 140 | }); 141 | }); 142 | 143 | window.addEventListener("beforeunload", () => { 144 | logger.debug("LLM Response window closing", { 145 | component: "LLMResponseWindowUI", 146 | }); 147 | }); 148 | } 149 | 150 | setupKeyboardHandlers() { 151 | document.addEventListener("keydown", (e) => { 152 | this.handleKeyDown(e); 153 | }); 154 | } 155 | 156 | 
  // Configures marked (when loaded via script tag) to highlight fenced
  // code through Prism, with GitHub-flavored markdown and newline breaks.
  configureMarked() {
    if (typeof marked !== "undefined") {
      marked.setOptions({
        highlight: function (code, lang) {
          if (typeof Prism !== "undefined" && Prism.languages[lang]) {
            return Prism.highlight(code, Prism.languages[lang], lang);
          }
          return code;
        },
        breaks: true,
        gfm: true,
      });
    }
  }

  // Entry point for the "display-llm-response" IPC payload. Accepts either
  // data.content or data.response as the string body, validates it, then
  // either expands a compact window first or renders directly. On any
  // failure the loading spinner is hidden and an error bubble is shown.
  handleDisplayResponse(data) {
    try {
      logger.info("LLM Response received - START", {
        component: "LLMResponseWindowUI",
        dataExists: !!data,
        timestamp: new Date().toISOString(),
      });

      // Comprehensive data validation
      if (!data || typeof data !== "object") {
        throw new Error(
          "Invalid data: expected object, received " + typeof data
        );
      }

      const response = data.content || data.response;
      if (!response || typeof response !== "string") {
        throw new Error("Invalid response: expected string content");
      }

      if (response.trim().length === 0) {
        throw new Error("Empty response content");
      }

      logger.info("Valid response data found", {
        component: "LLMResponseWindowUI",
        responseLength: response.length,
      });

      // Rest of the method...
      // Heuristic: a window smaller than 500x300 is still the compact
      // capture window and must be grown before content is rendered.
      if (window.innerWidth < 500 || window.innerHeight < 300) {
        this.handleWindowExpansion(data);
      } else {
        this.displayResponseContent(data);
      }
    } catch (error) {
      logger.error("Failed to handle display response", {
        component: "LLMResponseWindowUI",
        error: error.message,
        stack: error.stack,
      });

      this.hideLoadingState();
      this.displayErrorMessage(`Error processing response: ${error.message}`);
    }
  }

  // Asks the main process (IPC "expand-llm-window") to grow the window to
  // fit the content, racing a 5s timeout; on any failure it falls back to
  // rendering in the current window size.
  async handleWindowExpansion(data) {
    try {
      logger.debug("Window appears to be compact size, requesting expansion", {
        component: "LLMResponseWindowUI",
      });

      const response = data.content || data.response;
      if (!response) {
        throw new Error("No response data available for expansion calculation");
      }

      const codeBlocks = this.extractCodeBlocks(response);
      const contentMetrics = this.calculateContentMetrics(response, codeBlocks);

      const { ipcRenderer } = require("electron");

      // Add timeout to prevent hanging
      const expansionPromise = ipcRenderer.invoke(
        "expand-llm-window",
        contentMetrics
      );
      const timeoutPromise = new Promise((_, reject) =>
        setTimeout(() => reject(new Error("Window expansion timeout")), 5000)
      );

      const result = await Promise.race([expansionPromise, timeoutPromise]);

      logger.debug("Window expansion completed", {
        component: "LLMResponseWindowUI",
        result,
      });

      // Use a more reliable delay mechanism
      await new Promise((resolve) => setTimeout(resolve, 200));
      this.displayResponseContent(data);
    } catch (error) {
      logger.error("Failed to expand window", {
        component: "LLMResponseWindowUI",
        error: error.message,
      });

      // Fallback to display content without expansion
      this.displayResponseContent(data);
    }
  }
  // Core render path: hides the spinner, picks split vs full layout based
  // on whether the response contains fenced code, renders it, then asks
  // the main process to resize the window and re-verifies the visible
  // state 100ms later (forcing a correction if loading/content classes
  // ended up inconsistent).
  displayResponseContent(data) {
    try {
      logger.debug("displayResponseContent called - START", {
        component: "LLMResponseWindowUI",
        dataKeys: Object.keys(data),
        hasContent: !!data.content,
        hasResponse: !!data.response,
      });

      // Always hide loading state first
      logger.debug("Hiding loading state...");
      this.hideLoadingState();

      // Always show response content
      logger.debug("Showing response content...");
      this.showResponseContent();

      // Validate elements exist
      if (!this.elements.responseContent) {
        logger.error("Response content element not found!", {
          component: "LLMResponseWindowUI",
        });
        return;
      }

      // Check both content and response properties for compatibility
      const response = data.content || data.response;
      if (!response) {
        logger.error("No response data received", {
          component: "LLMResponseWindowUI",
          dataKeys: Object.keys(data),
          dataContent: data,
        });

        // Show error message instead of staying in loading state
        this.displayErrorMessage("No content received");
        return;
      }

      logger.info("Processing response content", {
        component: "LLMResponseWindowUI",
        responseLength: response.length,
        responsePreview: response.substring(0, 200) + "...",
      });

      // Check if response contains code blocks
      const codeBlocks = this.extractCodeBlocks(response);
      this.hasCode = codeBlocks.length > 0;

      logger.debug("Code analysis complete", {
        component: "LLMResponseWindowUI",
        hasCode: this.hasCode,
        codeBlockCount: codeBlocks.length,
      });

      // Calculate content metrics for dynamic sizing
      const contentMetrics = this.calculateContentMetrics(response, codeBlocks);

      logger.info("About to display response content", {
        component: "LLMResponseWindowUI",
        hasCode: this.hasCode,
        responseLength: response.length,
        layoutType: this.hasCode ? "split" : "full",
      });

      // Display content based on type
      if (this.hasCode) {
        logger.debug("Displaying split layout...");
        this.displaySplitLayout(response, codeBlocks);
      } else {
        logger.debug("Displaying full layout...");
        this.displayFullLayout(response);
      }

      logger.info("Response content displayed successfully", {
        component: "LLMResponseWindowUI",
        layoutType: this.hasCode ? "split" : "full",
      });

      // Setup additional features
      this.setupScrolling();
      this.requestWindowResize(contentMetrics);

      // Final verification: the CSS classes are toggled from several async
      // paths, so re-check shortly after rendering and self-heal.
      setTimeout(() => {
        const isLoadingHidden = this.elements.loading
          ? this.elements.loading.classList.contains("hidden")
          : true;
        const isContentVisible = this.elements.responseContent
          ? !this.elements.responseContent.classList.contains("hidden")
          : false;

        logger.info("Display state verification", {
          component: "LLMResponseWindowUI",
          loadingHidden: isLoadingHidden,
          contentVisible: isContentVisible,
          windowVisible: !document.hidden,
        });

        if (!isLoadingHidden || !isContentVisible) {
          logger.warn("Display state inconsistent - forcing correction", {
            component: "LLMResponseWindowUI",
          });
          this.hideLoadingState();
          this.showResponseContent();
        }
      }, 100);

      logger.debug("displayResponseContent completed - END");
    } catch (error) {
      logger.error("Failed to display response content", {
        component: "LLMResponseWindowUI",
        error: error.message,
        stack: error.stack,
      });

      // Always try to hide loading and show some content
      this.hideLoadingState();
      this.displayErrorMessage("Error displaying content: " + error.message);
    }
  }

  // Replaces the window body with an error notice in the full layout.
  displayErrorMessage(message) {
logger.info("Displaying error message", { 388 | component: "LLMResponseWindowUI", 389 | message, 390 | }); 391 | 392 | this.showResponseContent(); 393 | 394 | if (this.elements.fullContent && this.elements.fullMarkdown) { 395 | this.elements.splitLayout?.classList.add("hidden"); 396 | this.elements.fullContent?.classList.remove("hidden"); 397 | this.elements.fullMarkdown.innerHTML = `
${message}
`; 398 | } 399 | } 400 | 401 | async requestWindowResize(contentMetrics) { 402 | try { 403 | setTimeout(async () => { 404 | logger.debug("Requesting window resize based on content metrics", { 405 | component: "LLMResponseWindowUI", 406 | metrics: contentMetrics, 407 | }); 408 | 409 | const { ipcRenderer } = require("electron"); 410 | const result = await ipcRenderer.invoke( 411 | "resize-llm-window-for-content", 412 | contentMetrics 413 | ); 414 | 415 | logger.debug("Window resize result", { 416 | component: "LLMResponseWindowUI", 417 | result, 418 | }); 419 | }, 200); 420 | } catch (error) { 421 | logger.error("Failed to resize window", { 422 | component: "LLMResponseWindowUI", 423 | error: error.message, 424 | }); 425 | } 426 | } 427 | 428 | handleInteractionEnabled() { 429 | this.isInteractive = true; 430 | document.body.classList.add("interactive-scrolling"); 431 | this.enableScrolling(); 432 | 433 | logger.debug("Interaction mode enabled", { 434 | component: "LLMResponseWindowUI", 435 | }); 436 | } 437 | 438 | handleInteractionDisabled() { 439 | this.isInteractive = false; 440 | document.body.classList.remove("interactive-scrolling"); 441 | this.disableScrolling(); 442 | 443 | logger.debug("Interaction mode disabled", { 444 | component: "LLMResponseWindowUI", 445 | }); 446 | } 447 | 448 | handleKeyDown(e) { 449 | if (!this.isInteractive) return; 450 | 451 | const activeElement = document.activeElement; 452 | let targetElement = null; 453 | 454 | // Find the currently focused scrollable element 455 | for (let element of this.scrollableElements) { 456 | if (element === activeElement || element.contains(activeElement)) { 457 | targetElement = element; 458 | break; 459 | } 460 | } 461 | 462 | // Default to first scrollable element if none focused 463 | if (!targetElement && this.scrollableElements.length > 0) { 464 | targetElement = this.scrollableElements[0]; 465 | } 466 | 467 | if (!targetElement) return; 468 | 469 | const scrollAmount = 50; 470 | 471 | switch 
(e.key) { 472 | case "ArrowUp": 473 | e.preventDefault(); 474 | targetElement.scrollBy({ top: -scrollAmount, behavior: "smooth" }); 475 | break; 476 | case "ArrowDown": 477 | e.preventDefault(); 478 | targetElement.scrollBy({ top: scrollAmount, behavior: "smooth" }); 479 | break; 480 | case "ArrowLeft": 481 | e.preventDefault(); 482 | targetElement.scrollBy({ left: -scrollAmount, behavior: "smooth" }); 483 | break; 484 | case "ArrowRight": 485 | e.preventDefault(); 486 | targetElement.scrollBy({ left: scrollAmount, behavior: "smooth" }); 487 | break; 488 | case "PageUp": 489 | e.preventDefault(); 490 | targetElement.scrollBy({ 491 | top: -targetElement.clientHeight * 0.8, 492 | behavior: "smooth", 493 | }); 494 | break; 495 | case "PageDown": 496 | e.preventDefault(); 497 | targetElement.scrollBy({ 498 | top: targetElement.clientHeight * 0.8, 499 | behavior: "smooth", 500 | }); 501 | break; 502 | case "Home": 503 | e.preventDefault(); 504 | targetElement.scrollTo({ top: 0, behavior: "smooth" }); 505 | break; 506 | case "End": 507 | e.preventDefault(); 508 | targetElement.scrollTo({ 509 | top: targetElement.scrollHeight, 510 | behavior: "smooth", 511 | }); 512 | break; 513 | } 514 | } 515 | 516 | showLoadingState() { 517 | if (this.elements.loading) { 518 | this.elements.loading.classList.remove("hidden"); 519 | } 520 | 521 | if (this.elements.responseContent) { 522 | this.elements.responseContent.classList.add("hidden"); 523 | } 524 | 525 | logger.debug("Loading state shown", { component: "LLMResponseWindowUI" }); 526 | } 527 | 528 | hideLoadingState() { 529 | if (this.elements.loading) { 530 | this.elements.loading.classList.add("hidden"); 531 | logger.debug('Loading element hidden with "hidden" class', { 532 | component: "LLMResponseWindowUI", 533 | }); 534 | } else { 535 | logger.warn("Loading element not found!", { 536 | component: "LLMResponseWindowUI", 537 | }); 538 | } 539 | 540 | logger.debug("Loading state hidden", { component: "LLMResponseWindowUI" }); 
541 | } 542 | 543 | showResponseContent() { 544 | if (this.elements.responseContent) { 545 | this.elements.responseContent.classList.remove("hidden"); 546 | logger.debug("Response content element shown (hidden class removed)", { 547 | component: "LLMResponseWindowUI", 548 | }); 549 | } else { 550 | logger.warn("Response content element not found!", { 551 | component: "LLMResponseWindowUI", 552 | }); 553 | } 554 | 555 | logger.debug("Response content shown", { 556 | component: "LLMResponseWindowUI", 557 | }); 558 | } 559 | 560 | extractCodeBlocks(text) { 561 | const codeBlockRegex = /```(\w+)?\n([\s\S]*?)```/g; 562 | const blocks = []; 563 | let match; 564 | 565 | while ((match = codeBlockRegex.exec(text)) !== null) { 566 | blocks.push({ 567 | language: match[1] || "text", 568 | code: match[2].trim(), 569 | fullMatch: match[0], 570 | }); 571 | } 572 | 573 | return blocks; 574 | } 575 | 576 | calculateContentMetrics(response, codeBlocks) { 577 | const lineCount = response.split("\n").length; 578 | const hasLongLines = response.split("\n").some((line) => line.length > 80); 579 | const codeBlockCount = codeBlocks.length; 580 | const hasCode = codeBlockCount > 0; 581 | const isLongContent = lineCount > 30; 582 | const hasMultipleCodeBlocks = codeBlockCount > 2; 583 | 584 | return { 585 | lineCount, 586 | hasLongLines, 587 | codeBlocks: codeBlockCount, 588 | hasCode, 589 | isLongContent, 590 | hasMultipleCodeBlocks, 591 | complexity: 592 | isLongContent || hasMultipleCodeBlocks 593 | ? "high" 594 | : hasCode 595 | ? 
"medium" 596 | : "low", 597 | }; 598 | } 599 | 600 | displaySplitLayout(response, codeBlocks) { 601 | // Show split layout, hide full layout 602 | this.elements.splitLayout?.classList.remove("hidden"); 603 | this.elements.fullContent?.classList.add("hidden"); 604 | 605 | // Remove code blocks from text content 606 | let textContent = response; 607 | codeBlocks.forEach((block) => { 608 | textContent = textContent.replace(block.fullMatch, ""); 609 | }); 610 | 611 | // Clean up text content 612 | textContent = textContent.replace(/\n\s*\n\s*\n/g, "\n\n").trim(); 613 | 614 | // Render text content 615 | if (this.elements.textContent && typeof marked !== "undefined") { 616 | const textHtml = marked.parse(textContent); 617 | this.elements.textContent.innerHTML = textHtml; 618 | } 619 | 620 | // Render code blocks 621 | this.renderCodeBlocks(codeBlocks); 622 | 623 | // Highlight code 624 | this.highlightCode(); 625 | 626 | logger.debug("Split layout displayed", { 627 | component: "LLMResponseWindowUI", 628 | codeBlockCount: codeBlocks.length, 629 | }); 630 | } 631 | 632 | displayFullLayout(response) { 633 | logger.info("Displaying full layout", { 634 | component: "LLMResponseWindowUI", 635 | hasSplitLayout: !!this.elements.splitLayout, 636 | hasFullContent: !!this.elements.fullContent, 637 | hasFullMarkdown: !!this.elements.fullMarkdown, 638 | responseLength: response.length, 639 | }); 640 | 641 | // Show full layout, hide split layout 642 | this.elements.splitLayout?.classList.add("hidden"); 643 | this.elements.fullContent?.classList.remove("hidden"); 644 | 645 | // Render full markdown 646 | if (this.elements.fullMarkdown && typeof marked !== "undefined") { 647 | const html = marked.parse(response); 648 | this.elements.fullMarkdown.innerHTML = html; 649 | logger.info("Markdown content rendered", { 650 | component: "LLMResponseWindowUI", 651 | htmlLength: html.length, 652 | htmlPreview: html.substring(0, 200) + "...", 653 | }); 654 | } else { 655 | if 
(!this.elements.fullMarkdown) {
        logger.error("fullMarkdown element not found!", {
          component: "LLMResponseWindowUI",
        });
      }
      if (typeof marked === "undefined") {
        logger.error("marked library not available!", {
          component: "LLMResponseWindowUI",
        });
      }
    }

    // Highlight any code
    this.highlightCode();

    logger.info("Full layout displayed", { component: "LLMResponseWindowUI" });
  }

  // Fills the code panel with one element per extracted fenced snippet;
  // code bodies are entity-escaped before insertion.
  renderCodeBlocks(codeBlocks) {
    if (!this.elements.codeContent) return;

    this.elements.codeContent.innerHTML = "";

    if (codeBlocks.length === 0) {
      // NOTE(review): the placeholder's wrapper markup appears truncated
      // in this text dump — verify the original element tags against the
      // repository file.
      this.elements.codeContent.innerHTML =
        'No code examples found';
    } else {
      codeBlocks.forEach((block, index) => {
        const codeBlock = document.createElement("div");
        codeBlock.className = "code-block";
        // NOTE(review): the header/pre/code tags of this template look
        // truncated in this dump (only the interpolated text survives) —
        // verify against the repository file.
        codeBlock.innerHTML = `
          ${block.language.toUpperCase()}
          ${this.escapeHtml(block.code)}
        `;
        this.elements.codeContent.appendChild(codeBlock);
      });
    }
  }

  highlightCode() {
    if (typeof Prism !== "undefined") {
      Prism.highlightAll();
    }
  }

  // Entity-encodes text (&, <, >) via a detached element's
  // textContent -> innerHTML round-trip.
  escapeHtml(text) {
    const div = document.createElement("div");
    div.textContent = text;
    return div.innerHTML;
  }

  // Collects the scrollable panels for the active layout (split vs full),
  // resets their scroll offsets, and makes them keyboard-focusable; runs
  // 100ms deferred so the freshly-rendered DOM is queryable.
  setupScrolling() {
    setTimeout(() => {
      if (this.hasCode) {
        this.scrollableElements = [
          document.querySelector(".text-panel .panel-content"),
          document.querySelector(".code-panel .panel-content"),
        ].filter((el) => el !== null);
      } else {
        this.scrollableElements = [
          document.querySelector(".full-content-inner"),
        ].filter((el) => el !== null);
      }

      logger.debug("Scrollable elements found", {
        component: "LLMResponseWindowUI",
        count: this.scrollableElements.length,
      });

      // Reset scroll positions and ensure elements are focusable
      this.scrollableElements.forEach((element) => {
        element.scrollTop = 0;
        element.scrollLeft = 0;
        element.setAttribute("tabindex", "0");
      });

      // Enable scrolling if interactive mode is on
      if (this.isInteractive) {
        this.enableScrolling();
      }
    }, 100);
  }

  // Attaches hover/wheel listeners to each scrollable panel, removing any
  // prior registrations first so repeated calls cannot stack duplicates.
  enableScrolling() {
    logger.debug("Enabling scrolling", {
      component: "LLMResponseWindowUI",
      elementCount: this.scrollableElements.length,
    });

    this.scrollableElements.forEach((element) => {
      if (element) {
        element.style.scrollBehavior = "smooth";

        // Remove any existing listeners first to prevent duplicates
        element.removeEventListener("mouseenter", this.handleMouseEnter);
        element.removeEventListener("mouseleave", this.handleMouseLeave);
        element.removeEventListener("wheel", this.handleWheelScroll);

        // Add listeners with proper binding
        element.addEventListener("mouseenter",
this.handleMouseEnter, { 759 | passive: true, 760 | }); 761 | element.addEventListener("mouseleave", this.handleMouseLeave, { 762 | passive: true, 763 | }); 764 | element.addEventListener("wheel", this.handleWheelScroll, { 765 | passive: false, 766 | }); 767 | } 768 | }); 769 | } 770 | 771 | destroy() { 772 | try { 773 | // Remove all event listeners 774 | this.disableScrolling(); 775 | 776 | // Remove keyboard handlers 777 | document.removeEventListener("keydown", this.handleKeyDown); 778 | 779 | // Clear any pending timeouts (you'd need to track these) 780 | // clearTimeout(this.expansionTimeout); 781 | 782 | // Clear references 783 | this.scrollableElements = []; 784 | this.elements = {}; 785 | 786 | logger.info("LLMResponseWindowUI destroyed", { 787 | component: "LLMResponseWindowUI", 788 | }); 789 | } catch (error) { 790 | logger.error("Error during cleanup", { 791 | component: "LLMResponseWindowUI", 792 | error: error.message, 793 | }); 794 | } 795 | } 796 | 797 | // Add method to check if dependencies are available 798 | checkDependencies() { 799 | const missing = []; 800 | 801 | if (typeof marked === "undefined") { 802 | missing.push("marked"); 803 | } 804 | 805 | if (typeof Prism === "undefined") { 806 | missing.push("Prism"); 807 | } 808 | 809 | try { 810 | require("electron"); 811 | } catch (e) { 812 | missing.push("electron"); 813 | } 814 | 815 | if (missing.length > 0) { 816 | logger.warn("Missing dependencies", { 817 | component: "LLMResponseWindowUI", 818 | missing: missing, 819 | }); 820 | return false; 821 | } 822 | 823 | return true; 824 | } 825 | 826 | disableScrolling() { 827 | logger.debug("Disabling scrolling", { component: "LLMResponseWindowUI" }); 828 | 829 | this.scrollableElements.forEach((element) => { 830 | if (element) { 831 | element.removeEventListener("mouseenter", this.handleMouseEnter); 832 | element.removeEventListener("mouseleave", this.handleMouseLeave); 833 | element.removeEventListener("wheel", this.handleWheelScroll); 834 | } 
835 | }); 836 | } 837 | 838 | handleMouseEnter(e) { 839 | e.target.focus({ preventScroll: true }); 840 | e.target.style.cursor = "grab"; 841 | } 842 | 843 | handleMouseLeave(e) { 844 | e.target.style.cursor = "default"; 845 | } 846 | 847 | handleWheelScroll(e) { 848 | logger.debug("Wheel scroll detected", { component: "LLMResponseWindowUI" }); 849 | } 850 | 851 | // Public methods for external access 852 | getCurrentLayout() { 853 | return this.currentLayout; 854 | } 855 | 856 | hasCodeContent() { 857 | return this.hasCode; 858 | } 859 | 860 | isInteractiveMode() { 861 | return this.isInteractive; 862 | } 863 | } 864 | 865 | // Initialize when DOM is ready - Re-enabled for better error handling 866 | let llmResponseWindowUI; 867 | document.addEventListener('DOMContentLoaded', () => { 868 | llmResponseWindowUI = new LLMResponseWindowUI(); 869 | 870 | // Global access for debugging 871 | window.llmResponseWindowUI = llmResponseWindowUI; 872 | }); 873 | 874 | module.exports = LLMResponseWindowUI; 875 | --------------------------------------------------------------------------------