├── demo.gif ├── bin ├── requirements.txt ├── index.js ├── cloi-setup.cjs ├── ollama-setup.cjs └── ollama_setup.py ├── src ├── utils │ ├── __pycache__ │ │ ├── ollama_call.cpython-312.pyc │ │ └── optimization.cpython-312.pyc │ ├── file.js │ ├── history.js │ ├── tempscript.js │ ├── optimization.py │ └── ollama_call.py ├── core │ ├── command.js │ ├── traceback.js │ ├── patch.js │ └── llm.js ├── ui │ ├── boxen.js │ └── prompt.js └── cli │ └── index.js ├── .npmignore ├── .gitignore ├── LICENSE ├── package.json ├── README.md └── CONTRIBUTING.md /demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuilenren/cloi/main/demo.gif -------------------------------------------------------------------------------- /bin/requirements.txt: -------------------------------------------------------------------------------- 1 | orjson==3.10.16 2 | requests==2.32.3 3 | typing_extensions==4.13.2 -------------------------------------------------------------------------------- /src/utils/__pycache__/ollama_call.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuilenren/cloi/main/src/utils/__pycache__/ollama_call.cpython-312.pyc -------------------------------------------------------------------------------- /src/utils/__pycache__/optimization.cpython-312.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuilenren/cloi/main/src/utils/__pycache__/optimization.cpython-312.pyc -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | # Development files 2 | .git 3 | .github 4 | .gitignore 5 | .vscode 6 | .idea 7 | 8 | # Debug logs 9 | npm-debug.log* 10 | yarn-debug.log* 11 | yarn-error.log* 12 | 13 | # Node modules (should already be excluded by npm) 14 | node_modules 15 | 16 | # Debug history 17 | src/cli/debug_history 18 | 19 | # Test files 20 | __tests__ 21 | *.test.js 22 | *.spec.js 23 | 24 | # Build files 25 | dist 26 | 27 | # Miscellaneous 28 | .DS_Store 29 | *.tmp 30 | *.bak -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | node_modules/ 3 | npm-debug.log* 4 | yarn-debug.log* 5 | yarn-error.log* 6 | 7 | # Runtime data 8 | pids 9 | *.pid 10 | *.seed 11 | *.pid.lock 12 | 13 | # Debug history 14 | src/cli/debug_history/ 15 | 16 | # Environment variables 17 | .env 18 | .env.local 19 | .env.development.local 20 | .env.test.local 21 | .env.production.local 22 | 23 | # OS specific files 24 | .DS_Store 25 | Thumbs.db 26 | 27 | # Editor directories and files 28 | .idea/ 29 | .vscode/ 30 | *.swp 31 | *.swo 32 | *~ -------------------------------------------------------------------------------- /bin/index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | /* ---------------------------------------------------------------------------- 3 | * CLOI — Secure Agentic Debugger v1.3.0 4 | * ---------------------------------------------------------------------------- 5 | * PURPOSE ▸ Local helper to re‑run shell commands, capture & pretty‑print 6 | * their output, and (on errors) feed that output to a local 7 | * Llama‑3 model for analysis. 
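 * NOTE    ▸ The bundled default model is Phi-4 (served via Ollama); any
 *            locally installed Ollama model can be selected instead.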
8 | * 9 | * ----------------------------------------------------------------------------*/ 10 | 11 | // This file is just a wrapper that loads the modular implementation 12 | import '../src/cli/index.js'; 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0) 2 | 3 | Copyright (c) 2025 Gabriel Cha, Min Kim 4 | 5 | This work is licensed under the Creative Commons Attribution-NonCommercial 4.0 International License. 6 | 7 | To view a copy of this license, visit: 8 | http://creativecommons.org/licenses/by-nc/4.0/ 9 | 10 | or send a letter to: 11 | Creative Commons 12 | PO Box 1866 13 | Mountain View, CA 94042 14 | USA 15 | 16 | Under this license: 17 | - Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. 18 | - NonCommercial — You may not use the material for commercial purposes. 19 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@cloi-ai/cloi", 3 | "version": "1.0.5", 4 | "description": "Security-first agentic debugging tool for the terminal", 5 | "main": "bin/index.js", 6 | "type": "module", 7 | "bin": { 8 | "cloi": "bin/index.js", 9 | "cloi-setup": "bin/cloi-setup.cjs", 10 | "cloi-ollama-setup": "bin/ollama-setup.cjs" 11 | }, 12 | "scripts": { 13 | "postinstall": "node bin/cloi-setup.cjs --auto && (pip install -r bin/requirements.txt || pip3 install -r bin/requirements.txt || echo 'Python requirements installation failed - continuing...') && node bin/ollama-setup.cjs" 14 | }, 15 | "author": "Gabriel Cha, Min Kim", 16 | "license": "CC BY-NC 4.0", 17 | "dependencies": { 18 | "boxen": "^8.0.1", 19 | "chalk": "^5.4.1", 20 | "ollama": "0.5.15", 21 | "yargs": "^17.7.2" 22 | }, 23 | "engines": { 24 | "node": ">=14.0.0" 25 | }, 26 | "repository": { 27 | "type": "git", 28 | "url": "https://github.com/cloi-ai/cloi" 29 | }, 30 | "homepage": "https://github.com/cloi-ai/cloi#readme", 31 | "keywords": [ 32 | "cli", 33 | "terminal", 34 | "debug", 35 | "llm", 36 | "ollama", 37 | "ai", 38 | "debugging", 39 | "shell", 40 | "command-line", 41 | "developer-tools" 42 | ], 43 | "files": [ 44 | "bin/", 45 | "src/", 46 | "README.md", 47 | "LICENSE" 48 | ] 49 | } 50 | -------------------------------------------------------------------------------- /src/utils/file.js: -------------------------------------------------------------------------------- 1 | /** 2 | * File System Utilities Module 3 | * 4 | * Provides basic file system utilities for the application. 5 | * Handles creating directories and writing debug logs. 6 | * These functions support error logging and debugging 7 | * by ensuring necessary directories exist and recording 8 | * detailed information about debugging sessions. 9 | */ 10 | 11 | import { promises as fs } from 'fs'; 12 | import { join, dirname } from 'path'; 13 | 14 | /* ──────────────── Check Debug Directory Existence ─────────────────────── */ 15 | /** 16 | * Ensures that a directory exists, creating it if necessary. 17 | * @param {string} dir - The directory path to ensure. 
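 * Existing directories are treated as success (EEXIST errors are swallowed).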
18 | * @returns {Promise} 19 | */ 20 | export async function ensureDir(dir) { 21 | try { 22 | await fs.mkdir(dir, { recursive: true }); 23 | } catch (err) { 24 | // Ignore error if directory already exists 25 | if (err.code !== 'EEXIST') throw err; 26 | } 27 | } 28 | 29 | /* ───────────────────────── Write Debug Log ────────────────────────────── */ 30 | /** 31 | * Writes a debug log file with the history of iterations in a debug session. 32 | * @param {Array} historyArr - Array of objects with error, patch, and analysis data. 33 | * @param {string} logPath - The path to write the log file. 34 | * @returns {Promise} 35 | */ 36 | export async function writeDebugLog(historyArr, logPath) { 37 | // Ensure the parent directory exists 38 | await ensureDir(dirname(logPath)); 39 | 40 | const content = historyArr.map((iteration, i) => { 41 | return `=== ITERATION ${i + 1} ===\n\n` + 42 | `ERROR:\n${iteration.error}\n\n` + 43 | `ANALYSIS:\n${iteration.analysis}\n\n` + 44 | `PATCH:\n${iteration.patch}\n\n` + 45 | '='.repeat(50) + '\n\n'; 46 | }).join(''); 47 | 48 | await fs.writeFile(logPath, content, 'utf8'); 49 | } -------------------------------------------------------------------------------- /bin/cloi-setup.cjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | /* CLOI shell-RC bootstrap — idempotent & non-interactive capable */ 3 | const fs = require('fs'); 4 | const os = require('os'); 5 | const path = require('path'); 6 | const readline = require('readline'); 7 | const { execSync } = require('child_process'); 8 | 9 | /* ---------- config ------------------------------------------------------ */ 10 | const SENTINEL = '# >>> CLOI_HISTORY_SETTINGS >>>'; 11 | const SNIPPET = ` 12 | ${SENTINEL} 13 | setopt INC_APPEND_HISTORY 14 | setopt SHARE_HISTORY 15 | # <<< CLOI_HISTORY_SETTINGS <<< 16 | `; 17 | /* ----------------------------------------------------------------------- */ 18 | 19 | const argvHas = flag => process.argv.slice(2).includes(flag); 20 | const interactive = !argvHas('--auto'); 21 | 22 | if (process.env.CLOI_SKIP_ZSHRC === '1') { 23 | console.log('ℹ︎ Skipping ~/.zshrc modification – CLOI_SKIP_ZSHRC=1'); 24 | process.exit(0); 25 | } 26 | 27 | /* resolve correct home dir even when run under sudo */ 28 | const sudoUser = process.env.SUDO_USER; 29 | let homeDir; 30 | try { 31 | homeDir = sudoUser 32 | ? execSync(`eval echo "~${sudoUser}"`, { encoding: 'utf8' }).trim() 33 | : os.homedir(); 34 | } catch { 35 | homeDir = os.homedir(); 36 | } 37 | const ZSHRC = path.join(homeDir, '.zshrc'); 38 | 39 | /* read existing file (if any) */ 40 | let content = fs.existsSync(ZSHRC) ? fs.readFileSync(ZSHRC, 'utf8') : ''; 41 | 42 | if (content.includes(SENTINEL)) { 43 | console.log('✅ CLOI history settings already present – nothing to do.'); 44 | process.exit(0); 45 | } 46 | 47 | async function confirm(q) { 48 | return new Promise(res => { 49 | const rl = readline.createInterface({ input: process.stdin, 50 | output: process.stdout }); 51 | rl.question(q + ' (Y/n) ', a => { 52 | rl.close(); res(/^y(es)?$/i.test(a.trim() || 'y')); 53 | }); 54 | }); 55 | } 56 | 57 | (async () => { 58 | if (interactive) { 59 | const ok = await confirm( 60 | 'CLOI will add history settings to your ~/.zshrc. Proceed?' 
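      // Note: confirm() treats an empty answer (just pressing Enter) as "yes".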
61 | ); 62 | if (!ok) { console.log('Aborted.'); process.exit(0); } 63 | } else { 64 | console.log('ℹ︎ --auto mode: patching ~/.zshrc without prompt.'); 65 | } 66 | 67 | /* ensure file exists, then append */ 68 | if (!fs.existsSync(ZSHRC)) fs.writeFileSync(ZSHRC, '', 'utf8'); 69 | fs.appendFileSync(ZSHRC, SNIPPET, 'utf8'); 70 | console.log('✅ Added CLOI history settings to ~/.zshrc'); 71 | })().catch(err => { 72 | console.error('❌ Error updating ~/.zshrc:', err); 73 | process.exit(1); 74 | }); 75 | -------------------------------------------------------------------------------- /src/core/command.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Command Execution Module 3 | * 4 | * Provides utilities for executing shell commands safely with timeouts, 5 | * both synchronously and asynchronously. Also includes network connectivity 6 | * checking functionality. This module serves as the foundation for all 7 | * terminal command interactions throughout the application. 8 | */ 9 | 10 | import { execSync } from 'child_process'; 11 | import { exec } from 'child_process'; 12 | import { promisify } from 'util'; 13 | 14 | // Promisify exec to use with async/await 15 | const execAsync = promisify(exec); 16 | 17 | // CHECK 18 | 19 | /* ───────────────────────── Synchronous Command Execution ────────────────────────────── */ 20 | /** 21 | * Synchronously executes a shell command, capturing stdout and stderr. 22 | * Includes a timeout to prevent hanging processes. 23 | * @param {string} cmd - The command to execute. 24 | * @param {number} [timeout=5000] - Timeout in milliseconds. 25 | * @returns {{ok: boolean, output: string}} - An object indicating success and the combined output. 26 | */ 27 | export function runCommand(cmd, timeout = 10000) { 28 | try { 29 | const out = execSync(`${cmd} 2>&1`, { encoding: 'utf8', timeout }); 30 | return { ok: true, output: out }; 31 | } catch (e) { 32 | return { ok: false, output: e.stdout?.toString() || e.message }; 33 | } 34 | } 35 | 36 | /* ───────────────────────── Asynchronous Command Execution ────────────────────────────── */ 37 | /** 38 | * Asynchronously executes a shell command, capturing stdout and stderr. 39 | * This version doesn't block the event loop, allowing UI updates during execution. 40 | * @param {string} cmd - The command to execute. 41 | * @param {number} [timeout=10000] - Timeout in milliseconds. 42 | * @returns {Promise<{ok: boolean, output: string}>} - A promise resolving to an object with success flag and output. 43 | */ 44 | export async function runCommandAsync(cmd, timeout = 10000) { 45 | try { 46 | const { stdout, stderr } = await execAsync(`${cmd} 2>&1`, { encoding: 'utf8', timeout }); 47 | return { ok: true, output: stdout || '' }; 48 | } catch (e) { 49 | return { ok: false, output: e.stdout?.toString() || e.message }; 50 | } 51 | } 52 | 53 | /* ───────────────────────── Network Connectivity Check ────────────────────────────── */ 54 | /** 55 | * Checks for basic network connectivity by pinging a reliable host. 56 | * @returns {boolean} - True if the network seems reachable. 
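 * Note: the BSD/macOS `ping` flags are used here (`-t 1` = 1-second timeout);
 * on Linux, `ping -t` sets the TTL instead (its timeout flag is `-W`), so this
 * check may report no network on Linux hosts even when one is available.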
57 | */ 58 | export function checkNetwork() { 59 | try { 60 | // Try to connect to a reliable host 61 | execSync('ping -c 1 -t 1 8.8.8.8', { stdio: 'ignore' }); 62 | return true; 63 | } catch { 64 | return false; 65 | } 66 | } -------------------------------------------------------------------------------- /src/ui/boxen.js: -------------------------------------------------------------------------------- 1 | /** 2 | * UI Box Display Module 3 | * 4 | * Provides utilities for creating consistent, styled terminal boxes for various UI elements. 5 | * This module contains predefined box styles for different content types (outputs, errors, 6 | * prompts, etc.) and helper functions to create and display boxed content in the terminal. 7 | * It enhances the terminal UI by providing visually distinct areas for different types of information. 8 | */ 9 | 10 | import boxen from 'boxen'; 11 | import chalk from 'chalk'; 12 | 13 | /* ───────────────────────────── Boxen Presets ──────────────────────────── */ 14 | export const BOX = { 15 | WELCOME: { padding: 0.5, margin: 0.5, borderStyle: 'round', width: 75 }, 16 | PROMPT: { padding: 0.2, margin: 0.5, borderStyle: 'round', width: 75 }, 17 | OUTPUT: { padding: 0.5, margin: 0.5, borderStyle: 'round', width: 75, title: 'Output' }, 18 | ERROR: { padding: 0.5, margin: 0.5, borderStyle: 'round', width: 75, title: 'Error' }, 19 | ANALYSIS: { padding: 0.5, margin: 0.5, borderStyle: 'round', width: 75, borderColor: '#00AAFF', title: 'AI Error Analysis' }, 20 | CONFIRM: { padding: 0.5, margin: 0.5, borderStyle: 'round', width: 75, borderColor: 'yellow', title: 'Confirm' }, 21 | PICKER: { padding: 0.2, margin: 0.5, borderStyle: 'round', width: 75 } // generic picker box 22 | }; 23 | 24 | /** 25 | * Prints a shell command styled within a box for visual clarity. 26 | * @param {string} cmd - The command string to display. 27 | */ 28 | export function echoCommand(cmd) { 29 | console.log(''); 30 | console.log(` ${chalk.blueBright.bold('$')} ${chalk.blueBright.bold(cmd)}`); 31 | console.log(''); 32 | } 33 | 34 | /** 35 | * Creates a string for displaying a command with a $ prefix. 36 | * @param {string} cmd - The command to display. 37 | * @param {object} [options] - Additional options (kept for backward compatibility). 38 | * @returns {string} - A formatted string containing the command. 39 | */ 40 | export function createCommandBox(cmd, options = {}) { 41 | // Return just styled text, no boxen 42 | return ` ${chalk.blueBright.bold('$')} ${chalk.blueBright.bold(cmd)}`; 43 | } 44 | 45 | /** 46 | * Truncates a multi-line string to a maximum number of lines, 47 | * showing the last few lines prefixed with an ellipsis if truncated. 48 | * @param {string} output - The string to truncate. 49 | * @param {number} [maxLines=2] - The maximum number of lines to keep. 50 | * @returns {string} - The potentially truncated string. 51 | */ 52 | export function truncateOutput(output, maxLines = 2) { 53 | const lines = output.trimEnd().split(/\r?\n/); 54 | if (lines.length <= maxLines) return output; 55 | return lines.slice(-maxLines).join('\n'); 56 | } -------------------------------------------------------------------------------- /src/utils/history.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Shell History Management Module 3 | * 4 | * Provides utilities for reading and interacting with the user's shell command history. 
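 * Supports the fish, zsh, and bash history file formats.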
5 | * Includes functions to fetch recent commands, find the last relevant command, 6 | * and present an interactive selection interface. This module enables the application 7 | * to reference and reuse previous terminal commands. 8 | */ 9 | 10 | import { promises as fs } from 'fs'; 11 | import { homedir } from 'os'; 12 | import { join } from 'path'; 13 | import { runCommand } from '../core/command.js'; 14 | import { makePicker } from '../ui/prompt.js'; 15 | 16 | // CHECK 17 | 18 | /* ───────────────────────── Shell History Reader ────────────────────────────── */ 19 | /** 20 | * Reads the shell history based on the detected shell type. 21 | * Supports fish, zsh, and bash shells. 22 | * @returns {Promise} - Array of command strings. 23 | */ 24 | export async function readHistory() { 25 | const shell = process.env.SHELL || ''; 26 | const hist = shell.includes('fish') 27 | ? join(homedir(), '.local/share/fish/fish_history') 28 | : shell.includes('zsh') 29 | ? join(homedir(), '.zsh_history') 30 | : join(homedir(), '.bash_history'); 31 | try { 32 | const raw = await fs.readFile(hist, 'utf8'); 33 | if (hist.endsWith('fish_history')) { 34 | return raw.split('\n- cmd:').slice(1).map(l => l.split('\n')[0].trim()); 35 | } 36 | if (hist.endsWith('zsh_history')) { 37 | return raw 38 | .split('\n') 39 | .filter(Boolean) 40 | .map(l => l.replace(/^.*?;/, '').trim()); 41 | } 42 | return raw.split('\n').filter(Boolean); 43 | } catch { 44 | return []; 45 | } 46 | } 47 | 48 | /* ───────────────────────── Last Command Retrieval ────────────────────────────── */ 49 | /** 50 | * Finds the most recent command in history that isn't a call to figai itself. 51 | * @returns {Promise} - The last relevant user command, or null if none found. 52 | */ 53 | export async function lastRealCommand() { 54 | const h = await readHistory(); 55 | return [...h] 56 | .reverse() 57 | .find(c => !/cloi|node .*cloi/i.test(c) && c.trim()) 58 | || null; 59 | } 60 | 61 | /* ───────────────────────── Interactive History Selector ────────────────────────────── */ 62 | /** 63 | * Displays the command history using the interactive picker UI and returns the selected command. 64 | * @param {number} [limit=15] - Number of recent history items to display. 65 | * @returns {Promise} - The selected command string, or null if cancelled. 66 | */ 67 | export async function selectHistoryItem(limit = 15) { 68 | const history = (await readHistory()).slice(-limit); 69 | const items = history.map((cmd,i) => `${history.length - limit + i + 1}: ${cmd}`); 70 | const choice = await makePicker(items, 'Command History')(); 71 | if (!choice) return null; 72 | // Extract command after the first colon+space 73 | return choice.replace(/^\d+:\s*/, ''); 74 | } -------------------------------------------------------------------------------- /bin/ollama-setup.cjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Ollama Setup Wrapper 5 | * 6 | * This script calls the Python setup script for Ollama and handles any errors. 7 | * It's designed to be called during the npm installation process. 8 | */ 9 | 10 | const { spawn, spawnSync } = require('child_process'); 11 | const path = require('path'); 12 | const fs = require('fs'); 13 | 14 | // Get the path to the Python script 15 | const scriptPath = path.join(__dirname, 'ollama_setup.py'); 16 | 17 | // Check if Python is available 18 | function checkPythonAndDependencies() { 19 | // Check Python 20 | const pythonCommand = process.platform === 'win32' ? 
'python' : 'python3'; 21 | const pythonCheck = spawnSync(pythonCommand, ['--version'], { encoding: 'utf8' }); 22 | 23 | if (pythonCheck.error) { 24 | console.warn(`Python (${pythonCommand}) not found. Ollama auto-setup will be skipped.`); 25 | console.warn('You may need to install Ollama manually: https://ollama.com/download'); 26 | return false; 27 | } 28 | 29 | // Check pip installation 30 | const pipCommand = process.platform === 'win32' ? 'pip' : 'pip3'; 31 | const pipCheck = spawnSync(pipCommand, ['--version'], { encoding: 'utf8' }); 32 | 33 | if (pipCheck.error) { 34 | console.warn(`pip (${pipCommand}) not found. Ollama auto-setup will be skipped.`); 35 | console.warn('You may need to install Ollama manually: https://ollama.com/download'); 36 | return false; 37 | } 38 | 39 | // Check if Python requests module is installed 40 | const requestsCheck = spawnSync( 41 | pythonCommand, 42 | ['-c', 'import requests; print("OK")'], 43 | { encoding: 'utf8' } 44 | ); 45 | 46 | if (requestsCheck.status !== 0) { 47 | console.log('Requests module not found. Attempting to install...'); 48 | 49 | const pipInstall = spawnSync( 50 | pipCommand, 51 | ['install', 'requests'], 52 | { encoding: 'utf8', stdio: 'inherit' } 53 | ); 54 | 55 | if (pipInstall.error || pipInstall.status !== 0) { 56 | console.warn('Failed to install the Python requests module. Ollama auto-setup will be skipped.'); 57 | console.warn('You may need to install Ollama manually: https://ollama.com/download'); 58 | return false; 59 | } 60 | } 61 | 62 | return true; 63 | } 64 | 65 | // Check if the Python script exists 66 | if (!fs.existsSync(scriptPath)) { 67 | console.error(`Error: Could not find the Ollama setup script at ${scriptPath}`); 68 | process.exit(1); 69 | } 70 | 71 | console.log('Checking Ollama installation...'); 72 | 73 | // Only proceed if Python and requirements are available 74 | if (checkPythonAndDependencies()) { 75 | // Determine the Python command to use (try python3 first, then python) 76 | const pythonCommand = process.platform === 'win32' ? 'python' : 'python3'; 77 | 78 | // Run the Python script 79 | const setupProcess = spawn(pythonCommand, [scriptPath], { 80 | stdio: 'inherit' // Show output from the Python script 81 | }); 82 | 83 | setupProcess.on('close', (code) => { 84 | if (code !== 0) { 85 | console.log('\n'); 86 | console.warn('──────────────────────────────────────────────────────────────────'); 87 | console.warn('Warning: Ollama setup was not fully completed.'); 88 | console.warn('CLOI will still work, but you may need to install Ollama manually:'); 89 | console.warn('https://ollama.com/download'); 90 | console.warn('──────────────────────────────────────────────────────────────────'); 91 | console.log('\n'); 92 | // We don't exit with an error code to allow npm install to continue 93 | } 94 | }); 95 | } else { 96 | console.log('\n'); 97 | console.warn('──────────────────────────────────────────────────────────────────'); 98 | console.warn('Skipping automatic Ollama setup.'); 99 | console.warn('CLOI will still work, but you will need to install Ollama manually:'); 100 | console.warn('https://ollama.com/download'); 101 | console.warn('Then run: ollama pull phi4'); 102 | console.warn('──────────────────────────────────────────────────────────────────'); 103 | console.log('\n'); 104 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | #
Cloi CLI (beta preview)
 2 | 
 3 | Local debugging agent that runs in your terminal
 4 | 
 5 | 
 6 | [badge: version]
 7 | [badge: license]
 8 | 
 9 | 
10 | [demo animation: Cloi CLI Demo (demo.gif)]
11 | 
12 | ## Overview
13 | 
14 | Cloi is a local, context-aware agent designed to streamline your debugging process. Operating entirely on your machine, it ensures that your code and data remain private and secure. With your permission, Cloi can analyze errors and apply fixes directly to your codebase.
15 | 
16 | **Disclaimer:** Cloi is an experimental project under beta development. It may contain bugs, and we recommend reviewing all changes before accepting agentic suggestions. That said, help us improve Cloi by filing issues or submitting PRs; see the Contributing section below for more info.
17 | 
18 | ## Installation
19 | 
20 | Install globally:
21 | 
22 | ```bash
23 | npm install -g @cloi-ai/cloi
24 | ```
25 | 
26 | **No API key needed; runs completely locally.**
27 | 
28 | Navigate to your project directory and call Cloi when you run into an error:
29 | 
30 | ```bash
31 | cloi
32 | ```
33 | 
34 | ### Interactive Mode Commands
35 | ```
36 | /debug   - Auto-patch errors iteratively using LLM
37 | /model   - Pick Ollama model
38 | /history - Pick from recent shell commands
39 | /help    - Show this help
40 | /exit    - Quit
41 | ```
42 | 
43 | ### Why use Cloi?
44 | 
45 | Cloi is built for developers who live in the terminal and value privacy:
46 | 
47 | - **100% Local** – Your code never leaves your machine. No API key needed.
48 | - **Automated Fixes (Beta)** – Analyze errors and apply patches with a single command.
49 | - **Safe Changes** – Review all diffs before applying. Full control to accept or reject.
50 | - **Customizable** – Ships with the Phi-4 model. Swap models as needed via Ollama.
51 | - **Free to Use** – Extensible architecture. Fork, contribute, and customize to your needs.
52 | 
53 | ### System Requirements
54 | 
**🖥️ Hardware**
59 | • Memory: 8GB RAM minimum (16GB+ recommended)
60 | • Storage: 10GB+ free space (Phi-4 model: ~9.1GB)
61 | • Processor: Tested on M2 and M3
62 | 
**💻 Software**
67 | • OS: macOS (Big Sur 11.0+), Linux and Windows (limited testing)
68 | • Runtime: Node.js 14+ and Python 3.6+
69 | • Shell: Zsh, Fish, Bash (limited testing)
70 | • Dependencies: Ollama (automatically installed if needed)
71 | 
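You can sanity-check the prerequisites from your terminal before installing (a quick check assuming a macOS/Linux shell):

```bash
node --version      # expect v14.0.0 or newer
python3 --version   # expect Python 3.6 or newer
ollama --version    # optional; CLOI installs Ollama automatically if missing
```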
74 | 75 | ### Beta Features 76 | 77 | > **Beta Feature Notice:** The Automate Fix feature is currently in beta. While it can automatically apply fixes to your code, we strongly recommend: 78 | > - Reviewing all suggested changes before accepting them 79 | > - Testing the changes in a development environment first 80 | > - Keeping backups of your code before using automated fixes 81 | 82 | ### Contributing 83 | 84 | We welcome contributions from the community! By contributing to this project, you agree to the following guidelines: 85 | 86 | - **Scope:** Contributions should align with the project's goals of providing a secure, local AI debugging assistant 87 | - **Non-Commercial Use:** All contributions must adhere to the CC BY-NC 4.0 license 88 | - **Attribution:** Please ensure proper attribution for any third-party work 89 | - **Code of Conduct:** Be respectful and considerate in all interactions 90 | 91 | For more detailed information on contributing, please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file. 92 | 93 | --- 94 | 95 | ### Patches 96 | 97 | #### [1.0.5] - May 4th, 2025 @ 9:10pm PST 98 | 99 | - **Bug Fix**: Resolved dependency issue in package.json 100 | - Updated Ollama dependency from deprecated version 0.1.1 to 0.5.15 which resolved ollama.chat API endpoints 101 | 102 | #### [1.0.2] - May 4th, 2025 @ 12:20pm PST 103 | 104 | - **Feature:** Integrated [structured outputs](https://ollama.com/blog/structured-outputs) from Ollama's latest API 105 | - Creates more robust patches with JSON-based formatting 106 | - Falls back to traditional LLM calls if the structured API isn't available 107 | - **Feature:** Implemented CLI model selection via `--model` flag 108 | - Specify your preferred Ollama model right from the command line 109 | - Credit to [@canadaduane](https://github.com/canadaduane) for the insightful suggestion! 110 | - **UI Enhancement:** The `/model` command now displays your locally installed Ollama models 111 | - **Refactor:** Internal architecture adjustments to maintain conceptual integrity 112 | - Migrated `history.js` to the utils directory where it semantically belongs 113 | - Repositioned `traceback.js` to core since it's foundational to the debugging pipeline 114 | - **Improvements:** Purged lingering references to our project's original name "FigAI" and cleaned the CLI `--help` interface 115 | -------------------------------------------------------------------------------- /src/ui/prompt.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Interactive Terminal Prompt Module 3 | * 4 | * Provides utilities for creating interactive terminal UI components for user input. 5 | * This module handles readline management, yes/no confirmations, and an interactive 6 | * item picker with keyboard navigation. These components enhance the terminal user 7 | * experience by providing intuitive ways to interact with the application. 8 | */ 9 | 10 | import readline from 'readline'; 11 | import chalk from 'chalk'; 12 | import boxen from 'boxen'; 13 | import { BOX } from './boxen.js'; 14 | 15 | /* Global readline instance */ 16 | let rl = /** @type {readline.Interface|null} */ (null); 17 | 18 | /* ───────────────────────── Readline Management ─────────────────────────── */ 19 | /** 20 | * Lazily creates and returns a singleton readline interface instance. 21 | * Ensures that only one interface is active at a time. 
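 * @returns {readline.Interface} The shared readline instance.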
22 | */ 23 | export function getReadline() { 24 | if (rl) return rl; 25 | rl = readline.createInterface({ input: process.stdin, output: process.stdout }); 26 | rl.on('close', () => { 27 | rl = null; 28 | process.stdin.removeAllListeners('keypress'); 29 | process.stdin.setRawMode(false); 30 | process.stdin.pause(); 31 | }); 32 | return rl; 33 | } 34 | 35 | /** 36 | * Closes the active readline interface and performs necessary cleanup. 37 | */ 38 | export function closeReadline() { 39 | if (rl) { 40 | rl.close(); 41 | rl = null; 42 | process.stdin.removeAllListeners('keypress'); 43 | process.stdin.setRawMode(false); 44 | process.stdin.pause(); 45 | } 46 | } 47 | 48 | /* ───────────────────────── Yes/No Prompt ─────────────────────────── */ 49 | /** 50 | * Prompts the user for a yes/no confirmation in the terminal. 51 | * Uses raw mode to capture single key presses (y/n). 52 | * @param {string} [question=''] - The question to display before the prompt. 53 | * @param {boolean} [silent=false] - If true, don't print the question text. 54 | * @returns {Promise} - Resolves true for 'y'/'Y', false for 'n'/'N'. 55 | */ 56 | export async function askYesNo(question = '', silent = false) { 57 | if (!silent) process.stdout.write(`${question} (y/N): `); 58 | return new Promise(res => { 59 | const cleanup = () => { 60 | process.stdin.setRawMode(false); 61 | process.stdin.pause(); 62 | process.stdin.removeAllListeners('keypress'); 63 | while (process.stdin.read() !== null) { /* flush */ } 64 | }; 65 | 66 | const onKeypress = (str) => { 67 | if (/^[yYnN]$/.test(str)) { 68 | cleanup(); 69 | const response = str.toUpperCase() === 'Y' ? 'y' : 'N'; 70 | // process.stdout.write(`${response}\n`); 71 | res(/^[yY]$/.test(str)); 72 | } 73 | }; 74 | 75 | process.stdin.setRawMode(true); 76 | process.stdin.resume(); 77 | readline.emitKeypressEvents(process.stdin); 78 | process.stdin.on('keypress', onKeypress); 79 | }); 80 | } 81 | 82 | /* ───────────────────────── Generic Picker UI ─────────────────────────── */ 83 | 84 | /** 85 | * Factory function to create an interactive terminal picker UI. 86 | * Allows selecting an item from a list using arrow keys/vim keys. 87 | * @param {string[]} items - The list of strings to choose from. 88 | * @param {string} [title='Picker'] - The title displayed on the picker box. 89 | * @returns {function(): Promise} - An async function that, when called, 90 | * displays the picker and returns the selected item or null if cancelled. 
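 * @example
 * // hypothetical usage: prompt the user to pick a model
 * const pickModel = makePicker(['phi4:latest', 'llama3:8b'], 'Select Model');
 * const model = await pickModel(); // a string, or null when cancelled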
91 | */ 92 | export function makePicker(items, title = 'Picker') { 93 | return async function picker() { 94 | closeReadline(); 95 | if (!items.length) return null; 96 | 97 | let idx = items.length - 1; 98 | const render = () => { 99 | const lines = items.map((it,i) => `${i===idx?chalk.cyan('➤'): ' '} ${it}`); 100 | const help = chalk.gray('\nUse ↑/↓ or k/j, Enter to choose, Esc/q to cancel'); 101 | const boxed = boxen([...lines, help].join('\n'), { ...BOX.PICKER, title }); 102 | 103 | if (render.prevLines) { 104 | process.stdout.write(`\x1B[${render.prevLines}F`); // cursor up 105 | process.stdout.write('\x1B[J'); // clear to end 106 | } 107 | process.stdout.write(boxed + '\n'); 108 | render.prevLines = boxed.split('\n').length; 109 | }; 110 | render.prevLines = 0; 111 | render(); 112 | 113 | return new Promise(resolve => { 114 | const cleanup = () => { 115 | process.stdin.setRawMode(false); 116 | process.stdin.pause(); 117 | process.stdin.removeAllListeners('keypress'); 118 | process.stdout.write('\x1B[J'); 119 | }; 120 | 121 | const onKey = (str, key) => { 122 | if (key.name === 'up' || str === 'k') { idx = Math.max(0, idx-1); render(); } 123 | if (key.name === 'down' || str === 'j') { idx = Math.min(items.length-1, idx+1); render(); } 124 | if (key.name === 'return') { cleanup(); resolve(items[idx]); } 125 | if (key.name === 'escape' || str === 'q') { cleanup(); resolve(null); } 126 | }; 127 | 128 | process.stdin.setRawMode(true); 129 | process.stdin.resume(); 130 | readline.emitKeypressEvents(process.stdin); 131 | process.stdin.on('keypress', onKey); 132 | }); 133 | }; 134 | } -------------------------------------------------------------------------------- /src/utils/tempscript.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Temporary Python Script Module 3 | * 4 | * Facilitates communication with Ollama language models via Python. 5 | * This module creates a temporary Python script with optimized settings 6 | * for efficient LLM interactions. The approach allows for better performance 7 | * configuration and processing than direct JS-to-Ollama communication, 8 | * particularly for complex prompts and responses. 
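 * At runtime the script is materialised as src/utils/temp_analyze.py, executed
 * with python3, and deleted again once its output has been captured.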
9 | */ 10 | 11 | import { promises as fs } from 'fs'; 12 | import { join } from 'path'; 13 | import { dirname } from 'path'; 14 | import { fileURLToPath } from 'url'; 15 | import { spawn } from 'child_process'; 16 | 17 | // Get directory references 18 | const __filename = fileURLToPath(import.meta.url); 19 | const __dirname = dirname(__filename); 20 | 21 | // Template for the Python script with optimized settings 22 | export const PYTHON_SCRIPT_TEMPLATE = ` 23 | import sys 24 | import os 25 | 26 | # Add parent directory to Python path 27 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 28 | 29 | from ollama_call import OllamaSetup 30 | from optimization import LLMOptimizer 31 | 32 | def analyze_error(prompt, model_name, optimization_set="error_analysis"): 33 | # Initialize Ollama 34 | ollama = OllamaSetup(model_name=model_name) 35 | if not ollama.ensure_setup(): 36 | return "Error: Failed to setup Ollama" 37 | 38 | # Get optimized options for maximum performance 39 | options = LLMOptimizer.get_optimized_options( 40 | input_length=len(prompt), 41 | deterministic=False, 42 | use_quantization=True 43 | ) 44 | 45 | # Add performance-focused optimizations based on the optimization set 46 | optimization_sets = { 47 | "error_analysis": { 48 | "temperature": 0.3, 49 | "num_predict": 512, 50 | "num_thread": min(8, os.cpu_count() or 2), 51 | "num_batch": 32, 52 | "mmap": True, 53 | "int8": True, 54 | "f16": False, 55 | "repeat_penalty": 1.0, 56 | "top_k": 10, 57 | "top_p": 0.9, 58 | "cache_mode": "all", 59 | "use_mmap": True, 60 | "use_mlock": True, 61 | "block_size": 32, 62 | "per_block_scales": [1.0], 63 | "zero_points": [0] 64 | }, 65 | "error_determination": { 66 | "temperature": 0.1, 67 | "num_predict": 32, 68 | "num_thread": min(8, os.cpu_count() or 2), 69 | "num_batch": 32, 70 | "mmap": True, 71 | "int8": True, 72 | "f16": False, 73 | "repeat_penalty": 1.0, 74 | "top_k": 10, 75 | "top_p": 0.9, 76 | "cache_mode": "all", 77 | "use_mmap": True, 78 | "use_mlock": True 79 | }, 80 | "command_generation": { 81 | "temperature": 0.1, 82 | "num_predict": 256, 83 | "num_thread": min(8, os.cpu_count() or 2), 84 | "num_batch": 32, 85 | "mmap": True, 86 | "int8": True, 87 | "f16": False, 88 | "repeat_penalty": 1.0, 89 | "top_k": 40, 90 | "top_p": 0.95, 91 | "cache_mode": "all", 92 | "use_mmap": True, 93 | "use_mlock": True 94 | }, 95 | "patch_generation": { 96 | "temperature": 0.1, 97 | "num_predict": 768, 98 | "num_thread": min(8, os.cpu_count() or 2), 99 | "num_batch": 32, 100 | "mmap": True, 101 | "int8": True, 102 | "f16": False, 103 | "repeat_penalty": 1.0, 104 | "top_k": 40, 105 | "top_p": 0.95, 106 | "cache_mode": "all", 107 | "use_mmap": True, 108 | "use_mlock": True 109 | } 110 | } 111 | 112 | # Get the appropriate optimization set 113 | opt_set = optimization_sets.get(optimization_set, optimization_sets["error_analysis"]) 114 | options.update(opt_set) 115 | 116 | # Query model with optimized settings 117 | response = ollama.query_model(prompt, options) 118 | return response.get("response", "No response from model") 119 | 120 | if __name__ == "__main__": 121 | prompt = sys.argv[1] 122 | model = sys.argv[2] 123 | optimization_set = sys.argv[3] if len(sys.argv) > 3 else "error_analysis" 124 | result = analyze_error(prompt, model, optimization_set) 125 | print("-" * 80) 126 | print(result) 127 | print("-" * 80) 128 | `; 129 | 130 | /* ───────────────────────── LLM Query Runner ────────────────────────────── */ 131 | /** 132 | * Runs an LLM query using a 
Python script with optimized settings. 133 | * Creates a temporary Python script, executes it, and captures the output. 134 | * 135 | * @param {string} prompt - The prompt to send to the LLM 136 | * @param {string} model - The model name to use (e.g., 'phi4:latest') 137 | * @param {string} [optimization_set="error_analysis"] - The optimization set to use 138 | * @returns {Promise} - The LLM response 139 | */ 140 | export async function runLLMWithTempScript(prompt, model, optimization_set = "error_analysis") { 141 | const tempScriptPath = join(__dirname, 'temp_analyze.py'); 142 | 143 | try { 144 | // Write the temporary Python script 145 | await fs.writeFile(tempScriptPath, PYTHON_SCRIPT_TEMPLATE); 146 | 147 | // Run the script with the prompt, model, and optimization set as arguments 148 | return await new Promise((resolve) => { 149 | const child = spawn('python3', [tempScriptPath, prompt, model, optimization_set]); 150 | let output = ''; 151 | 152 | child.stdout.on('data', (data) => { 153 | output += data.toString(); 154 | }); 155 | 156 | child.stderr.on('data', (data) => { 157 | output += data.toString(); 158 | }); 159 | 160 | child.on('close', async () => { 161 | try { 162 | // Clean up the temporary file 163 | await fs.unlink(tempScriptPath); 164 | } catch (error) { 165 | console.error('Error cleaning up temp file:', error); 166 | } 167 | 168 | // Extract the result from between the marker lines 169 | const match = output.match(/^-{80}\n([\s\S]*?)\n-{80}$/m); 170 | resolve(match ? match[1].trim() : output.trim()); 171 | }); 172 | }); 173 | } catch (error) { 174 | // Clean up in case of error 175 | try { 176 | await fs.unlink(tempScriptPath); 177 | } catch { 178 | // Ignore errors during cleanup 179 | } 180 | throw error; 181 | } 182 | } -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Cloi 2 | 3 | Thank you for your interest in contributing to Cloi! This document provides guidelines and instructions for contributing to the project. Please read it carefully before submitting any contributions. 4 | 5 | ## Code of Conduct 6 | 7 | By participating in this project, you agree to abide by our Code of Conduct. Please be respectful and considerate of others in all interactions related to this project. 8 | 9 | ### Our Pledge 10 | We are committed to making participation in this project a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ### Our Standards 13 | Examples of behavior that contributes to a positive environment include: 14 | - Using welcoming and inclusive language 15 | - Being respectful of differing viewpoints and experiences 16 | - Gracefully accepting constructive criticism 17 | - Focusing on what is best for the community 18 | - Showing empathy towards other community members 19 | 20 | Examples of unacceptable behavior include: 21 | - The use of sexualized language or imagery and unwelcome sexual attention or advances 22 | - Trolling, insulting/derogatory comments, and personal or political attacks 23 | - Public or private harassment 24 | - Publishing others' private information without explicit permission 25 | - Other conduct which could reasonably be considered inappropriate in a professional setting 26 | 27 | ### Enforcement 28 | - Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, and other contributions that are not aligned with this Code of Conduct 29 | - Project maintainers who do not follow the Code of Conduct may be removed from the project team 30 | 31 | ### Reporting 32 | If you experience or witness unacceptable behavior, or have any other concerns, please report it by contacting the project maintainers at [contact email/issue tracker]. 33 | 34 | ## Getting Started 35 | 36 | ### Prerequisites 37 | - Python 3.8 or higher 38 | - Git 39 | - A GitHub account 40 | - Basic understanding of command-line tools 41 | 42 | ### Development Setup 43 | 1. Fork the repository 44 | 2. Clone your fork: 45 | ```bash 46 | git clone https://github.com/YOUR-USERNAME/cloi.git 47 | cd cloi 48 | ``` 49 | 50 | 3. Set up the development environment: 51 | ```bash 52 | python -m venv venv 53 | source venv/bin/activate # On Windows: venv\Scripts\activate 54 | pip install -e ".[dev]" 55 | ``` 56 | 57 | 4. Install pre-commit hooks: 58 | ```bash 59 | pre-commit install 60 | ``` 61 | 62 | ## Development Workflow 63 | 64 | ### Branching Strategy 65 | - `main`: Production-ready code 66 | - `develop`: Development branch 67 | - Feature branches: `feature/your-feature-name` 68 | - Bug fix branches: `fix/issue-description` 69 | - Documentation branches: `docs/description` 70 | 71 | ### Making Changes 72 | 1. Create a new branch for your changes: 73 | ```bash 74 | git checkout -b feature/your-feature-name 75 | ``` 76 | 77 | 2. Make your changes following our coding standards 78 | 3. Write or update tests as needed 79 | 4. Run tests locally: 80 | ```bash 81 | pytest 82 | ``` 83 | 84 | 5. Commit your changes with clear, descriptive commit messages: 85 | ```bash 86 | git commit -m "feat: add new feature X" 87 | ``` 88 | 89 | ### Commit Message Format 90 | We follow the [Conventional Commits](https://www.conventionalcommits.org/) specification: 91 | - `feat`: New feature 92 | - `fix`: Bug fix 93 | - `docs`: Documentation changes 94 | - `style`: Code style changes (formatting, etc.) 95 | - `refactor`: Code changes that neither fix bugs nor add features 96 | - `test`: Adding or modifying tests 97 | - `chore`: Changes to build process or auxiliary tools 98 | 99 | ### Pull Request Process 100 | 1. Update the README.md with details of changes if needed 101 | 2. Update the documentation if you're changing functionality 102 | 3. The PR will be merged once you have the sign-off of at least one maintainer 103 | 4. 
Ensure all CI checks pass 104 | 105 | ## Coding Standards 106 | 107 | ### Python Code Style 108 | - Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) guidelines 109 | - Use type hints for all function parameters and return values 110 | - Maximum line length: 88 characters (Black formatter default) 111 | - Use Black for code formatting 112 | - Use isort for import sorting 113 | - Use flake8 for linting 114 | 115 | ### Documentation 116 | - Use Google-style docstrings 117 | - Include examples in docstrings where appropriate 118 | - Keep README.md and other documentation up to date 119 | - Document all public APIs 120 | 121 | ### Testing 122 | - Write unit tests for all new features 123 | - Maintain or improve test coverage 124 | - Use pytest for testing 125 | - Include both positive and negative test cases 126 | 127 | ## Project Structure 128 | ``` 129 | bin/ 130 | ├── index.js # Main CLI entry point 131 | └── cloi-setup.cjs # Installation setup script 132 | src/ 133 | ├── ui/ # User interface components 134 | │ ├── boxen.js # Boxed UI components 135 | │ └── prompt.js # User prompts and input handling 136 | ├── core/ # Core functionality 137 | │ ├── command.js # Command execution 138 | │ ├── history.js # Shell history management 139 | │ ├── llm.js # LLM interaction and analysis 140 | │ └── patch.js # Code patching utilities 141 | ├── utils/ # Utility functions 142 | │ ├── file.js # File operations 143 | │ └── traceback.js # Error traceback parsing 144 | └── cli/ # CLI implementation 145 | └── index.js # Main CLI logic 146 | ``` 147 | 148 | ## License Considerations 149 | 150 | ### CC BY-NC 4.0 License 151 | By contributing to this project, you agree that your contributions will be licensed under the [Creative Commons Attribution-NonCommercial 4.0 International License](https://creativecommons.org/licenses/by-nc/4.0/). 152 | 153 | Key points: 154 | - Your contributions must be your own work 155 | - You retain copyright to your contributions 156 | - You grant the project a non-exclusive license to use your contributions 157 | - The project cannot be used for commercial purposes 158 | - All users must provide attribution 159 | 160 | ### Third-Party Code 161 | - Do not include code from other projects without proper attribution 162 | - Ensure any third-party code is compatible with CC BY-NC 4.0 163 | - Document all third-party dependencies 164 | 165 | ## Getting Help 166 | 167 | - Open an issue for bug reports or feature requests 168 | - Join our community chat for discussions 169 | - Check existing issues and pull requests before creating new ones 170 | 171 | ## Recognition 172 | 173 | Contributors will be recognized in the following ways: 174 | - Listed in the project's README.md 175 | - Mentioned in release notes 176 | - Given credit in documentation where appropriate 177 | 178 | ## Release Process 179 | 180 | 1. Version bump (following semantic versioning) 181 | 2. Update CHANGELOG.md 182 | 3. Create release notes 183 | 4. Tag the release 184 | 5. Update documentation 185 | 186 | ## Additional Resources 187 | 188 | - [Python Documentation](https://docs.python.org/) 189 | - [GitHub Flow](https://guides.github.com/introduction/flow/) 190 | - [Conventional Commits](https://www.conventionalcommits.org/) 191 | - [Black Code Style](https://black.readthedocs.io/) 192 | - [Pytest Documentation](https://docs.pytest.org/) 193 | 194 | Thank you for contributing to Cloi! 
--------------------------------------------------------------------------------

/src/core/traceback.js:
--------------------------------------------------------------------------------

 1 | /**
 2 |  * Error Traceback Analysis Module
 3 |  *
 4 |  * Provides utilities for analyzing error logs, extracting file paths and line numbers,
 5 |  * and displaying relevant code snippets. This module is crucial for error diagnosis,
 6 |  * as it locates and highlights the code sections where errors originate, providing
 7 |  * essential context for both users and the LLM analysis functions.
 8 |  */
 9 | 
10 | import { existsSync } from 'fs';
11 | import { runCommand } from './command.js';
12 | import boxen from 'boxen';
13 | import { BOX } from '../ui/boxen.js';
14 | import { basename } from 'path';
15 | import { echoCommand, truncateOutput } from '../ui/boxen.js';
16 | import chalk from 'chalk';
17 | 
18 | /* ───────────────────────────── User File Check ────────────────────────── */
19 | /**
20 |  * Checks if a given file path likely belongs to user code rather than system/library code.
21 |  * Used to filter traceback entries to focus on relevant files.
22 |  * @param {string} p - The file path to check.
23 |  * @returns {boolean} - True if the path seems to be user code.
24 |  */
25 | export function isUserFile(p) {
26 |   const skip = [
27 |     'site-packages','dist-packages','node_modules',
28 |     'lib/python','/usr/lib/','/system/','<stdin>',
29 |     '__pycache__','<__array_function__>'
30 |   ];
31 |   const low = p.toLowerCase();
32 |   return !skip.some(m => low.includes(m)) && existsSync(p);
33 | }
34 | 
35 | /* ───────────────────────────── Extract Traceback Files ────────────────────────── */
36 | /**
37 |  * Parses a log string (typically stderr output) to extract file paths and line numbers
38 |  * from Python-style traceback lines ("File \"path\", line num"). Filters for user files.
39 |  * @param {string} log - The log output containing tracebacks.
40 |  * @returns {Map<string, number>} - A map of user file paths to the most relevant line number found.
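 * @example
 * // hypothetical traceback line; only files that exist locally are kept
 * extractFilesFromTraceback('File "/app/main.py", line 12, in <module>');
 * // => Map { '/app/main.py' => 12 }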
41 | */ 42 | export function extractFilesFromTraceback(log) { 43 | const re = /File \"([^\"]+)\", line (\d+)/g; 44 | const stackFrames = []; 45 | let m; 46 | 47 | // Extract all stack frames with file paths and line numbers 48 | while ((m = re.exec(log)) !== null) { 49 | const file = m[1], line = parseInt(m[2], 10); 50 | stackFrames.push({ file, line, position: m.index }); 51 | } 52 | 53 | // Sort by position in the trace (deeper frames appear first in the trace) 54 | stackFrames.sort((a, b) => a.position - b.position); 55 | 56 | // Filter to user files only 57 | const userFrames = stackFrames.filter(frame => isUserFile(frame.file)); 58 | 59 | const result = new Map(); 60 | 61 | // We want to find the deepest frame for each file, which is typically 62 | // the most relevant line where the error actually occurs 63 | if (userFrames.length > 0) { 64 | // Group by file path 65 | const fileGroups = {}; 66 | for (const frame of userFrames) { 67 | if (!fileGroups[frame.file]) { 68 | fileGroups[frame.file] = []; 69 | } 70 | fileGroups[frame.file].push(frame); 71 | } 72 | 73 | // For each file, get the deepest frame (last in the stack trace) 74 | for (const file in fileGroups) { 75 | const frames = fileGroups[file]; 76 | // The last frame in the group is typically the actual error line 77 | const deepestFrame = frames[frames.length - 1]; 78 | result.set(file, deepestFrame.line); 79 | } 80 | } 81 | 82 | return result; 83 | } 84 | 85 | /* ───────────────────────────── Read File Context ────────────────────────── */ 86 | /** 87 | * Reads and formats a section of a file around a specific line number. 88 | * Used to provide context around errors identified in tracebacks. 89 | * @param {string} file - The path to the file. 90 | * @param {number} line - The central line number. 91 | * @param {number} [ctx=30] - The number of lines to include before and after the target line. 92 | * @returns {string} - Raw code snippet from the file. 93 | */ 94 | export function readFileContext(file, line, ctx = 30) { 95 | const start = Math.max(1, line - ctx); 96 | const end = line + ctx; 97 | const cmd = `sed -n '${start},${end}p' ${file}`; // sed is faster than cat 98 | 99 | const { ok, output } = runCommand(cmd, 5_000); 100 | if (!ok) return `Error reading ${file}: ${output.trim()}`; 101 | 102 | return output; 103 | } 104 | 105 | /* ───────────────────────────── Build Error Context ────────────────────────── */ 106 | /** 107 | * Builds a consolidated code context string based on files extracted from a traceback log. 108 | * @param {string} log - The error log containing tracebacks. 109 | * @param {number} [contextSize=30] - The number of lines to include before and after each error line. 110 | * @param {boolean} [includeHeaders=true] - Whether to include file path and line number headers. 111 | * @returns {string} - A string containing formatted code snippets from relevant files, 112 | * or an empty string if no user files are found in the traceback. 
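 * @example
 * // hypothetical: collect ±30 lines around each user-code frame for the LLM
 * const context = buildErrorContext(stderrLog, 30, true);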
113 | */ 114 | export function buildErrorContext(log, contextSize = 30, includeHeaders = true) { 115 | const files = extractFilesFromTraceback(log); 116 | if (!files.size) return ''; 117 | 118 | const ctx = []; 119 | for (const [file, line] of files) { 120 | if (includeHeaders) { 121 | ctx.push(`\n--- ${file} (line ${line}) ---`); 122 | } 123 | ctx.push(readFileContext(file, line, contextSize)); 124 | } 125 | 126 | return ctx.join('\n'); 127 | } 128 | 129 | /* ───────────────────────────── Show Code Snippet ────────────────────────── */ 130 | /** 131 | * Displays a code snippet from a file around a specific line, fetched using `sed`. 132 | * Shows only the error line plus one line before and after. 133 | * @param {string} file - Path to the file. 134 | * @param {number} line - Target line number. 135 | * @param {number} [ctx=1] - Lines of context before and after (default is 1). 136 | */ 137 | export function showSnippet(file, line, ctx = 30) { 138 | const start = Math.max(1, line - ctx), end = line + ctx; 139 | const cmd = `sed -n '${start},${end}p' ${basename(file)}`; 140 | console.log(chalk.gray(` Retrieving file context ${basename(file)}...`)); 141 | echoCommand(cmd); 142 | const { ok, output } = runCommand(cmd, 5000); 143 | } 144 | 145 | /* ───────────────────────────── Display Error Snippets ────────────────────────── */ 146 | /** 147 | * Iterates through files identified in an error log's traceback and displays 148 | * relevant code snippets using `showSnippet`. 149 | * @param {string} log - The error log content. 150 | */ 151 | export function displaySnippetsFromError(log) { 152 | for (const [file, line] of extractFilesFromTraceback(log)) { 153 | showSnippet(file, line); 154 | } 155 | } 156 | 157 | /** 158 | * Extracts the exact line of code where the error occurs. 159 | * @param {string} file - Path to the file containing the error. 160 | * @param {number} line - Line number where the error occurs. 161 | * @returns {string} - The exact line of code that has the error. 162 | */ 163 | export function extractErrorLine(file, line) { 164 | const cmd = `sed -n '${line}p' ${file}`; 165 | const { ok, output } = runCommand(cmd, 1000); 166 | if (!ok || !output.trim()) { 167 | return `Unable to read line ${line} from ${file}`; 168 | } 169 | return output.trim(); 170 | } 171 | 172 | /** 173 | * Gets all error lines from files mentioned in a traceback. 174 | * @param {string} log - The error log containing tracebacks. 175 | * @returns {string} - A string with all error lines, one per line. 176 | */ 177 | export function getErrorLines(log) { 178 | const files = extractFilesFromTraceback(log); 179 | if (!files.size) return ''; 180 | 181 | const errorLines = []; 182 | for (const [file, line] of files) { 183 | errorLines.push(extractErrorLine(file, line)); 184 | } 185 | 186 | return errorLines.join('\n'); 187 | } -------------------------------------------------------------------------------- /bin/ollama_setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Automatic Ollama installation script for CLOI 4 | 5 | This script checks if Ollama is installed and installs it if necessary. 6 | It also verifies the service is running and starts it if needed. 
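Normally invoked by bin/ollama-setup.cjs during `npm install`; it can also be
run directly with: python3 bin/ollama_setup.py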
7 | """ 8 | 9 | import os 10 | import platform 11 | import shutil 12 | import subprocess 13 | import sys 14 | import time 15 | import requests 16 | 17 | 18 | class OllamaSetup: 19 | """Handles Ollama installation, service management, and model management""" 20 | 21 | def __init__(self, model_name="phi4"): 22 | """ 23 | Initialize the Ollama setup handler 24 | 25 | Args: 26 | model_name: Name of the model to use (default: phi4) 27 | """ 28 | self.model_name = model_name 29 | self.system = platform.system().lower() 30 | self.ollama_url = "http://localhost:11434" 31 | 32 | def check_installation(self): 33 | """Check if Ollama is installed on the system""" 34 | return shutil.which('ollama') is not None 35 | 36 | def install_ollama(self): 37 | """Install Ollama based on the operating system""" 38 | print("Ollama not found. Installing Ollama...") 39 | 40 | if self.system == "linux": 41 | try: 42 | # Check if curl is installed 43 | if not shutil.which('curl'): 44 | print("curl is required but not installed. Installing curl...") 45 | subprocess.run("sudo apt-get update && sudo apt-get install -y curl", shell=True, check=True) 46 | 47 | print("Installing Ollama on Linux...") 48 | subprocess.run( 49 | "curl -fsSL https://ollama.com/install.sh | sh", 50 | shell=True, check=True 51 | ) 52 | print("Ollama installed successfully on Linux") 53 | return True 54 | except subprocess.CalledProcessError as e: 55 | print(f"Failed to install Ollama on Linux: {e}") 56 | print("Please try manual installation:") 57 | print("1. curl -fsSL https://ollama.com/install.sh | sh") 58 | print("2. Or visit https://ollama.com for alternative installation methods") 59 | return False 60 | 61 | elif self.system == "darwin": # macOS 62 | try: 63 | print("Installing Ollama on macOS...") 64 | 65 | # Check if Homebrew is installed 66 | if shutil.which('brew'): 67 | print("Using Homebrew to install Ollama...") 68 | subprocess.run("brew install ollama", shell=True, check=True) 69 | else: 70 | # Using the official install script for macOS 71 | subprocess.run( 72 | "curl -fsSL https://ollama.com/install.sh | sh", 73 | shell=True, check=True 74 | ) 75 | 76 | print("Ollama installed successfully on macOS") 77 | return True 78 | except subprocess.CalledProcessError as e: 79 | print(f"Failed to install Ollama on macOS: {e}") 80 | print("Please try manual installation:") 81 | print("1. If you have Homebrew: brew install ollama") 82 | print("2. 
Download from https://ollama.com") 83 | return False 84 | else: 85 | print(f"Unsupported operating system: {self.system}") 86 | print("Please install Ollama manually from https://ollama.com") 87 | return False 88 | 89 | def is_service_running(self): 90 | """Check if the Ollama service is running""" 91 | try: 92 | response = requests.get(f"{self.ollama_url}/api/version", timeout=2) 93 | return response.status_code == 200 94 | except requests.RequestException: 95 | return False 96 | 97 | def start_service(self): 98 | """Start the Ollama service if it's not running""" 99 | if self.is_service_running(): 100 | print("Ollama service is already running.") 101 | return True 102 | 103 | print("Starting Ollama service...") 104 | 105 | try: 106 | if self.system == "darwin" or self.system == "linux": 107 | # Start the service in the background 108 | subprocess.Popen( 109 | ["ollama", "serve"], 110 | stdout=subprocess.DEVNULL, 111 | stderr=subprocess.DEVNULL, 112 | start_new_session=True 113 | ) 114 | 115 | # Wait for the service to start 116 | for _ in range(10): # Wait up to 10 seconds 117 | time.sleep(1) 118 | if self.is_service_running(): 119 | print("Ollama service started successfully.") 120 | return True 121 | 122 | print("Warning: Ollama service started but not responding within the timeout.") 123 | return False 124 | else: 125 | print(f"Service auto-start not supported on {self.system}.") 126 | print("Please start Ollama manually before using CLOI.") 127 | return False 128 | except Exception as e: 129 | print(f"Failed to start Ollama service: {e}") 130 | print("Please start Ollama manually using 'ollama serve' before using CLOI.") 131 | return False 132 | 133 | def ensure_model_available(self): 134 | """Make sure the default model is available""" 135 | if not self.is_service_running(): 136 | print("Ollama service is not running. 
Cannot check/install models.") 137 | return False 138 | 139 | try: 140 | # Check if model is already pulled 141 | result = subprocess.run( 142 | ["ollama", "list"], 143 | capture_output=True, 144 | text=True, 145 | check=True 146 | ) 147 | 148 | if self.model_name in result.stdout: 149 | print(f"Model {self.model_name} is already available.") 150 | return True 151 | 152 | print(f"Downloading model {self.model_name}...") 153 | # Pull the model 154 | subprocess.run( 155 | ["ollama", "pull", self.model_name], 156 | check=True 157 | ) 158 | print(f"Model {self.model_name} downloaded successfully.") 159 | return True 160 | except subprocess.CalledProcessError as e: 161 | print(f"Failed to check/download model: {e}") 162 | return False 163 | 164 | 165 | def main(): 166 | """Main function to ensure Ollama is installed and running""" 167 | setup = OllamaSetup() 168 | 169 | # Check if Ollama is installed 170 | if not setup.check_installation(): 171 | # Install Ollama 172 | if not setup.install_ollama(): 173 | print("Failed to install Ollama automatically.") 174 | print("Please install Ollama manually from https://ollama.com") 175 | sys.exit(1) 176 | 177 | # Start the service 178 | if not setup.start_service(): 179 | print("Warning: Could not start Ollama service.") 180 | print("Please start Ollama manually using 'ollama serve' before using CLOI.") 181 | 182 | # Ensure default model is available 183 | if not setup.ensure_model_available(): 184 | print("Warning: Could not ensure the default model is available.") 185 | print("Please run 'ollama pull phi4' manually before using CLOI.") 186 | 187 | print("Ollama setup completed successfully!") 188 | 189 | 190 | if __name__ == "__main__": 191 | main() -------------------------------------------------------------------------------- /src/utils/optimization.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import re 4 | import logging 5 | from typing import Dict, Any, Optional, List 6 | from functools import lru_cache 7 | import orjson # Replace standard json with orjson for performance 8 | 9 | # Configure logging 10 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 11 | logger = logging.getLogger("optimization") 12 | 13 | # Precompile regex patterns 14 | TIMESTAMP_PATTERN = re.compile(r'\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}(.\d+)?\]') 15 | 16 | class Int8Quantizer: 17 | """Lightweight 8-bit quantizer optimized for speed""" 18 | 19 | def __init__(self, cache_dir=None): 20 | self.quant_cache = {} 21 | self.is_initialized = False 22 | self.cache_dir = cache_dir or os.path.join(os.path.expanduser("~"), ".llm_quantization") 23 | os.makedirs(self.cache_dir, exist_ok=True) 24 | 25 | def initialize(self, ollama_instance, model_name, advanced_mode=False): 26 | """Simple initialization with minimal overhead""" 27 | cache_path = os.path.join(self.cache_dir, f"{model_name}_int8_calibration.json") 28 | 29 | # Try loading from cache first 30 | if os.path.exists(cache_path): 31 | if self.load_calibration_data(model_name, cache_path): 32 | return True 33 | 34 | # Use fixed optimal parameters 35 | thread_count = max(2, os.cpu_count() or 4) 36 | 37 | # Save parameters with optimized values 38 | self.quant_cache[model_name] = { 39 | "num_thread": thread_count, 40 | "num_batch": 32, # Fixed optimal batch size 41 | "cache_mode": "all", 42 | "use_mmap": True, 43 | "use_mlock": True, 44 | "int8": True, 45 | "f16": False 46 | } 47 | 48 | self.is_initialized 
= True 49 | self.save_calibration_data(model_name, cache_path) 50 | return True 51 | 52 | def optimize_options(self, options: Optional[Dict[str, Any]], model_name: str) -> Dict[str, Any]: 53 | """Apply minimal optimizations to model options""" 54 | if options is None: 55 | options = {} 56 | 57 | # Apply model-specific optimizations 58 | options_copy = options.copy() 59 | 60 | # Enable essential optimizations 61 | options_copy.update({ 62 | "mmap": True, 63 | "int8": True, 64 | "f16": False, 65 | "cache_mode": "all" 66 | }) 67 | 68 | # Add optimized parameters if available 69 | if model_name in self.quant_cache: 70 | options_copy.update({ 71 | "num_batch": self.quant_cache[model_name]["num_batch"], 72 | "num_thread": self.quant_cache[model_name]["num_thread"] 73 | }) 74 | 75 | return options_copy 76 | 77 | def save_calibration_data(self, model_name: str, filepath: Optional[str] = None) -> str: 78 | """Save calibration data to file for future use""" 79 | if model_name not in self.quant_cache: 80 | raise ValueError(f"Model {model_name} not calibrated") 81 | 82 | if filepath is None: 83 | filepath = f"{model_name}_int8_calibration.json" 84 | 85 | data = { 86 | "model_name": model_name, 87 | "quant_cache": self.quant_cache[model_name], 88 | "timestamp": time.time(), 89 | "version": "1.0" 90 | } 91 | 92 | with open(filepath, 'wb') as f: 93 | f.write(orjson.dumps(data, option=orjson.OPT_INDENT_2)) 94 | 95 | return filepath 96 | 97 | def load_calibration_data(self, model_name: str, filepath: str) -> bool: 98 | """Load calibration data from a file""" 99 | try: 100 | with open(filepath, 'rb') as f: 101 | data = orjson.loads(f.read()) 102 | 103 | if data.get("model_name") == model_name: 104 | self.quant_cache[model_name] = data["quant_cache"] 105 | self.is_initialized = True 106 | logger.info(f"Loaded calibration data for {model_name}") 107 | return True 108 | else: 109 | logger.warning(f"Calibration data mismatch: expected {model_name}, got {data.get('model_name')}") 110 | return False 111 | except Exception as e: 112 | logger.error(f"Error loading calibration data: {e}") 113 | return False 114 | 115 | class QuantizedOllamaWrapper: 116 | """Lightweight wrapper for Ollama calls with essential optimizations""" 117 | 118 | def __init__(self, ollama_instance, model_name): 119 | self.ollama = ollama_instance 120 | self.model_name = model_name 121 | self.quantizer = Int8Quantizer() 122 | self.is_quantized = False 123 | self.request_cache = lru_cache(maxsize=100)(self._query_model_uncached) 124 | 125 | def enable_quantization(self, advanced_mode=False): 126 | """Enable quantization with minimal overhead""" 127 | if not self.is_quantized: 128 | success = self.quantizer.initialize(self.ollama, self.model_name, advanced_mode) 129 | self.is_quantized = success 130 | return self.is_quantized 131 | 132 | def _query_model_uncached(self, prompt, options_tuple): 133 | """Uncached query implementation with minimal overhead""" 134 | # Convert options tuple back to dict 135 | options = dict(options_tuple) if options_tuple else {} 136 | 137 | # Apply essential optimizations 138 | opt_options = options.copy() 139 | opt_options.update({ 140 | "mmap": True, 141 | "int8": True, 142 | "f16": False, 143 | "num_thread": min(4, os.cpu_count() or 2), 144 | "num_batch": 32, # Fixed optimal batch size 145 | "cache_mode": "all" 146 | }) 147 | 148 | # Make the optimized API call 149 | return self.ollama.query_model(prompt, opt_options) 150 | 151 | def query_model(self, prompt: str, options: Optional[Dict[str, Any]] = None) -> 
Dict[str, Any]: 152 | """Query the model with minimal overhead""" 153 | if not self.is_quantized: 154 | self.enable_quantization() 155 | 156 | # Convert options dict to tuple of items for hashing 157 | options_tuple = tuple(sorted((k, v) for k, v in (options or {}).items() 158 | if k in ['temperature', 'top_p', 'top_k'])) 159 | 160 | # Use the cached function 161 | return self.request_cache(prompt, options_tuple) 162 | 163 | class LLMOptimizer: 164 | """Centralized optimization module for LLM calls""" 165 | 166 | _last_warmup_time = 0 167 | _warmup_interval = 300 # 5 minutes 168 | _quant_cache = {} 169 | 170 | @staticmethod 171 | def initialize_quantization(ollama_instance, model_name: str) -> bool: 172 | """Initialize quantization with block-wise approach 173 | 174 | Args: 175 | ollama_instance: The Ollama instance to initialize 176 | model_name: Name of the model to initialize 177 | 178 | Returns: 179 | True if initialization was successful 180 | """ 181 | try: 182 | # Use fixed optimal parameters 183 | thread_count = max(2, os.cpu_count() or 4) 184 | 185 | # Save parameters with optimized values 186 | LLMOptimizer._quant_cache[model_name] = { 187 | "num_thread": thread_count, 188 | "num_batch": 32, # Fixed optimal batch size 189 | "cache_mode": "all", 190 | "use_mmap": True, 191 | "use_mlock": True, 192 | "int8": True, 193 | "f16": False, 194 | "block_size": 32, # Block size for block-wise quantization 195 | "per_block_scales": [1.0], # Initial scale 196 | "zero_points": [0] # Initial zero point 197 | } 198 | 199 | return True 200 | except Exception as e: 201 | print(f"Warning: Quantization initialization failed: {e}") 202 | return False 203 | 204 | @staticmethod 205 | def warmup_model(ollama_instance, model_name: str) -> bool: 206 | """Warm up the model to reduce cold start latency 207 | 208 | Args: 209 | ollama_instance: The Ollama instance to warm up 210 | model_name: Name of the model to warm up 211 | 212 | Returns: 213 | True if warmup was successful 214 | """ 215 | current_time = time.time() 216 | 217 | # Only warm up if enough time has passed since last warmup 218 | if current_time - LLMOptimizer._last_warmup_time < LLMOptimizer._warmup_interval: 219 | return True 220 | 221 | try: 222 | # Initialize quantization if not already done 223 | if model_name not in LLMOptimizer._quant_cache: 224 | LLMOptimizer.initialize_quantization(ollama_instance, model_name) 225 | 226 | # Use a minimal prompt for warmup 227 | warmup_prompt = "Warming up model" 228 | 229 | # Get optimized options for warmup 230 | options = LLMOptimizer.get_optimized_options( 231 | input_length=len(warmup_prompt), 232 | deterministic=True 233 | ) 234 | 235 | # Make a minimal inference 236 | response = ollama_instance.query_model(warmup_prompt, options) 237 | 238 | # Update last warmup time 239 | LLMOptimizer._last_warmup_time = current_time 240 | 241 | return True 242 | except Exception as e: 243 | print(f"Warning: Model warmup failed: {e}") 244 | return False 245 | 246 | @staticmethod 247 | def get_optimized_options(options: Dict[str, Any] = None, 248 | input_length: Optional[int] = None, 249 | deterministic: bool = False, 250 | use_quantization: bool = True) -> Dict[str, Any]: 251 | """Get optimized options for LLM calls 252 | 253 | Args: 254 | options: Optional base options to merge with optimized settings 255 | input_length: Length of input text for dynamic batch sizing 256 | deterministic: Whether to use more deterministic settings 257 | use_quantization: Whether to use quantization settings 258 | 259 | Returns: 
260 | Dictionary of optimized options 261 | """ 262 | if options is None: 263 | options = {} 264 | 265 | # Base optimized options 266 | optimized = { 267 | "temperature": 0.1 if deterministic else 0.2, 268 | "num_predict": min(256, input_length + 100) if input_length else 256, 269 | "num_thread": min(8, os.cpu_count() or 2), 270 | "num_batch": 64, # Increased batch size for faster processing 271 | "cache_mode": "all", 272 | "mmap": True, 273 | "int8": True, 274 | "f16": False, 275 | "repeat_penalty": 1.0, # Disable repeat penalty for speed 276 | "top_k": 1, # Only consider top token for speed 277 | "top_p": 0.1 # Lower top_p for faster sampling 278 | } 279 | 280 | # Add quantization settings if enabled 281 | if use_quantization: 282 | optimized.update({ 283 | "use_mmap": True, 284 | "use_mlock": True, 285 | "block_size": 32, # Block size for block-wise quantization 286 | "per_block_scales": [1.0], # Initial scale 287 | "zero_points": [0] # Initial zero point 288 | }) 289 | 290 | # Merge with provided options 291 | optimized.update(options) 292 | return optimized 293 | 294 | @staticmethod 295 | def get_optimized_prompt(prompt: str, max_length: int = 1000) -> str: 296 | """Optimize prompt for better performance 297 | 298 | Args: 299 | prompt: Original prompt 300 | max_length: Maximum prompt length 301 | 302 | Returns: 303 | Optimized prompt 304 | """ 305 | # Truncate if too long 306 | if len(prompt) > max_length: 307 | prompt = prompt[:max_length] + "..." 308 | 309 | return prompt.strip() -------------------------------------------------------------------------------- /src/utils/ollama_call.py: -------------------------------------------------------------------------------- 1 | """ 2 | Ollama Setup Script for Phi Model 3 | --------------------------------- 4 | This script handles the installation, setup, and management of Ollama with the Phi model 5 | for completely local LLM inference. 6 | """ 7 | import os 8 | import sys 9 | import platform 10 | import subprocess 11 | import requests 12 | import time 13 | import json 14 | import shutil 15 | import argparse 16 | from typing import Optional, Dict, Any, List, Tuple, Union 17 | 18 | class OllamaSetup: 19 | """Handles Ollama installation, service management, and model management""" 20 | 21 | def __init__(self, model_name: str = "phi4"): 22 | """ 23 | Initialize the Ollama setup handler 24 | 25 | Args: 26 | model_name: Name of the model to use (default: phi4) 27 | """ 28 | self.model_name = model_name 29 | self.system = platform.system().lower() 30 | self.ollama_url = "http://localhost:11434" 31 | 32 | def check_installation(self) -> bool: 33 | """Check if Ollama is installed on the system""" 34 | return shutil.which('ollama') is not None 35 | 36 | def install_ollama(self) -> bool: 37 | """Install Ollama based on the operating system""" 38 | print("Ollama not found. Installing Ollama...") 39 | 40 | if self.system == "linux": 41 | try: 42 | # Check if curl is installed 43 | if not shutil.which('curl'): 44 | print("curl is required but not installed. 
Installing curl...") 45 | subprocess.run("sudo apt-get update && sudo apt-get install -y curl", shell=True, check=True) 46 | 47 | print("Installing Ollama on Linux...") 48 | subprocess.run( 49 | "curl -fsSL https://ollama.com/install.sh | sh", 50 | shell=True, check=True 51 | ) 52 | print("Ollama installed successfully on Linux") 53 | return True 54 | except subprocess.CalledProcessError as e: 55 | print(f"Failed to install Ollama on Linux: {e}") 56 | print("Please try manual installation:") 57 | print("1. curl -fsSL https://ollama.com/install.sh | sh") 58 | print("2. Or visit https://ollama.com for alternative installation methods") 59 | return False 60 | 61 | elif self.system == "darwin": # macOS 62 | try: 63 | print("Installing Ollama on macOS...") 64 | 65 | # Check if Homebrew is installed 66 | if shutil.which('brew'): 67 | print("Using Homebrew to install Ollama...") 68 | subprocess.run("brew install ollama", shell=True, check=True) 69 | else: 70 | # Using the official install script for macOS 71 | subprocess.run( 72 | "curl -fsSL https://ollama.com/install.sh | sh", 73 | shell=True, check=True 74 | ) 75 | 76 | print("Ollama installed successfully on macOS") 77 | return True 78 | except subprocess.CalledProcessError as e: 79 | print(f"Failed to install Ollama on macOS: {e}") 80 | print("Please try manual installation:") 81 | print("1. If you have Homebrew: brew install ollama") 82 | print("2. Download from https://ollama.com") 83 | return False 84 | else: 85 | print(f"Unsupported operating system: {self.system}") 86 | print("Please install Ollama manually from https://ollama.com") 87 | return False 88 | 89 | def check_service_running(self) -> bool: 90 | """Check if Ollama service is running""" 91 | try: 92 | response = requests.get(f"{self.ollama_url}/api/tags", timeout=2) 93 | return response.status_code == 200 94 | except requests.exceptions.RequestException: 95 | return False 96 | 97 | def start_service(self) -> bool: 98 | """Start the Ollama service""" 99 | print("Starting Ollama service...") 100 | 101 | # Check if service is already running 102 | if self.check_service_running(): 103 | print("Ollama service is already running") 104 | return True 105 | 106 | # Start Ollama service 107 | if self.system in ["linux", "darwin"]: 108 | with open("ollama_service.log", "w") as log_file: 109 | process = subprocess.Popen( 110 | ["ollama", "serve"], 111 | stdout=log_file, 112 | stderr=log_file 113 | ) 114 | 115 | # Wait for the service to start 116 | print("Waiting for Ollama service to start...") 117 | for i in range(15): # Try 15 times with 2-second delays 118 | if i > 0 and i % 5 == 0: 119 | print(f"Still waiting... ({i*2} seconds)") 120 | if self.check_service_running(): 121 | print("Ollama service is running") 122 | return True 123 | time.sleep(2) 124 | 125 | print("Failed to start Ollama service. 
Check the logs in ollama_service.log") 126 | print("You can try starting it manually with 'ollama serve' in a separate terminal") 127 | return False 128 | 129 | def list_models(self) -> List[str]: 130 | """List available models in Ollama""" 131 | try: 132 | result = subprocess.run( 133 | ["ollama", "list"], 134 | stdout=subprocess.PIPE, 135 | stderr=subprocess.PIPE, 136 | text=True, 137 | check=True 138 | ) 139 | 140 | # Parse the models from output 141 | models = [] 142 | for line in result.stdout.splitlines(): 143 | if line and not line.startswith("NAME"): # Skip header 144 | parts = line.split() 145 | if parts: 146 | models.append(parts[0]) 147 | return models 148 | except subprocess.SubprocessError: 149 | return [] 150 | 151 | def pull_model(self) -> bool: 152 | """Pull the specified model from Ollama""" 153 | print(f"Checking for {self.model_name} model...") 154 | 155 | # Check if model already exists 156 | if self.model_name in self.list_models(): 157 | print(f"{self.model_name} model is already downloaded") 158 | return True 159 | 160 | print(f"Pulling the {self.model_name} model... This may take several minutes.") 161 | print("(Most Phi models are 4-5GB in size)") 162 | 163 | try: 164 | # Pull the model with progress output 165 | process = subprocess.Popen( 166 | ["ollama", "pull", self.model_name], 167 | stdout=subprocess.PIPE, 168 | stderr=subprocess.STDOUT, 169 | text=True, 170 | bufsize=1, 171 | universal_newlines=True 172 | ) 173 | 174 | # Print progress 175 | for line in iter(process.stdout.readline, ''): 176 | print(line.strip()) 177 | if not line: 178 | break 179 | 180 | process.stdout.close() 181 | return_code = process.wait() 182 | 183 | if return_code == 0: 184 | print(f"{self.model_name} model pulled successfully") 185 | return True 186 | else: 187 | print(f"Failed to pull {self.model_name} model. Return code: {return_code}") 188 | return False 189 | except subprocess.SubprocessError as e: 190 | print(f"Error pulling {self.model_name} model: {e}") 191 | return False 192 | 193 | def delete_model(self, model_name: str) -> bool: 194 | """Delete a model from Ollama""" 195 | print(f"Deleting {model_name} model...") 196 | 197 | try: 198 | result = subprocess.run( 199 | ["ollama", "rm", model_name], 200 | stdout=subprocess.PIPE, 201 | stderr=subprocess.PIPE, 202 | text=True, 203 | check=True 204 | ) 205 | print(f"{model_name} model deleted successfully") 206 | return True 207 | except subprocess.SubprocessError as e: 208 | print(f"Error deleting {model_name} model: {e}") 209 | return False 210 | 211 | def query_model(self, prompt: str, options: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 212 | """ 213 | Send a prompt to the local Ollama API and get a response. 
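
Example (a sketch; assumes the Ollama service is running locally):

    setup = OllamaSetup(model_name="phi4")
    result = setup.query_model("Summarize this error", {"temperature": 0.2})
    print(result.get("response") or result.get("error"))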
214 | 215 | Args: 216 | prompt: The prompt to send to the model 217 | options: Optional parameters for the model (temperature, etc) 218 | 219 | Returns: 220 | Dict containing the model's response and metadata 221 | """ 222 | url = f"{self.ollama_url}/api/generate" 223 | 224 | payload = { 225 | "model": self.model_name, 226 | "prompt": prompt, 227 | "stream": True # Enable streaming 228 | } 229 | 230 | # Add options if provided 231 | if options: 232 | payload["options"] = options 233 | 234 | max_retries = 3 235 | retry_delay = 2 236 | 237 | for attempt in range(max_retries): 238 | try: 239 | response = requests.post(url, json=payload, timeout=120, stream=True) 240 | response.raise_for_status() 241 | 242 | # Process streaming response 243 | full_response = "" 244 | for line in response.iter_lines(): 245 | if line: 246 | try: 247 | json_response = json.loads(line) 248 | if "response" in json_response: 249 | full_response += json_response["response"] 250 | if json_response.get("done", False): 251 | break 252 | except json.JSONDecodeError: 253 | continue 254 | 255 | return {"response": full_response} 256 | 257 | except requests.exceptions.RequestException as e: 258 | if attempt < max_retries - 1: 259 | print(f"Connection attempt {attempt + 1} failed: {e}") 260 | print(f"Retrying in {retry_delay} seconds...") 261 | time.sleep(retry_delay) 262 | retry_delay *= 2 # Exponential backoff 263 | else: 264 | return {"error": f"Error communicating with Ollama API: {str(e)}"} 265 | 266 | def ensure_setup(self) -> bool: 267 | """ 268 | Ensure Ollama is installed, running, and the model is available. 269 | Returns True if setup is successful, False otherwise. 270 | """ 271 | # Check operating system compatibility 272 | if self.system not in ["linux", "darwin"]: 273 | print(f"This script supports Linux and macOS only. Detected system: {platform.system()}") 274 | print("Please install Ollama manually from https://ollama.com") 275 | return False 276 | 277 | print(f"Running on {platform.system()} {platform.release()}") 278 | 279 | # Check if Ollama is installed 280 | if not self.check_installation(): 281 | # Install Ollama if not installed 282 | if not self.install_ollama(): 283 | print("Failed to install Ollama. Please install manually and try again.") 284 | return False 285 | else: 286 | print("Ollama is already installed") 287 | 288 | # Start Ollama service 289 | if not self.start_service(): 290 | print("Failed to start Ollama service. Please start it manually and try again.") 291 | return False 292 | 293 | # Pull model if needed 294 | if not self.pull_model(): 295 | print(f"Failed to pull the {self.model_name} model. 
Please try again later.")
296 | return False
297 | 
298 | return True
299 | 
300 | if __name__ == "__main__":
301 | parser = argparse.ArgumentParser(description="Ollama Setup and Model Management")
302 | parser.add_argument("--model", type=str, default="phi4", help="Model name to use")
303 | parser.add_argument("--pull", action="store_true", help="Pull the specified model")
304 | parser.add_argument("--delete", action="store_true", help="Delete the specified model")
305 | args = parser.parse_args()
306 | 
307 | ollama = OllamaSetup(model_name=args.model)
308 | 
309 | if args.pull:
310 | if not ollama.ensure_setup():
311 | sys.exit(1)
312 | if not ollama.pull_model():
313 | sys.exit(1)
314 | elif args.delete:
315 | if not ollama.delete_model(args.model):
316 | sys.exit(1)
317 | else:
318 | if not ollama.ensure_setup():
319 | sys.exit(1)
320 | 
--------------------------------------------------------------------------------
/src/cli/index.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | /**
3 |  * Main CLI Application Entry Point
4 |  *
5 |  * This is the core entry point for the CLOI application, providing an interactive
6 |  * command-line interface for error analysis and automatic debugging.
7 |  *
8 |  * The module integrates all other components (LLM, UI, patch application, etc.)
9 |  * to provide a seamless experience for users to analyze and fix errors in their
10 |  * terminal commands and code files. It handles command-line arguments, manages the
11 |  * interactive loop, and coordinates the debugging workflow.
12 |  */
13 | 
14 | /* ----------------------------------------------------------------------------
15 |  * CLOI — Secure Agentic Debugger
16 |  * ----------------------------------------------------------------------------
17 |  */
18 | 
19 | import chalk from 'chalk';
20 | import boxen from 'boxen';
21 | import yargs from 'yargs';
22 | import { hideBin } from 'yargs/helpers';
23 | import { join } from 'path';
24 | import { fileURLToPath } from 'url';
25 | import { dirname } from 'path';
26 | import fs from 'fs';
27 | 
28 | // Import from our modules
29 | import { BOX, echoCommand, truncateOutput, createCommandBox } from '../ui/boxen.js';
30 | import { startThinking } from '../core/llm.js';
31 | import { askYesNo, getReadline, closeReadline} from '../ui/prompt.js';
32 | import { runCommand } from '../core/command.js';
33 | import { readHistory, lastRealCommand, selectHistoryItem } from '../utils/history.js';
34 | import { analyzeWithLLM, determineErrorType, generateTerminalCommandFix, generatePatch, selectModelItem, installModel, summarizeCodeWithLLM, readModels, getAvailableModels } from '../core/llm.js';
35 | import { extractDiff, confirmAndApply } from '../core/patch.js';
36 | import { ensureDir, writeDebugLog } from '../utils/file.js';
37 | import { displaySnippetsFromError, readFileContext, extractFilesFromTraceback } from '../core/traceback.js';
38 | 
39 | // Get directory references
40 | const __filename = fileURLToPath(import.meta.url);
41 | const __dirname = dirname(__filename);
42 | 
43 | /* ───────────────────────── Interactive Loop ────────────────────────────── */
44 | /**
45 |  * Runs the main interactive loop of the Cloi CLI.
46 |  * Presents a prompt allowing the user to execute commands like /debug, /model, /history, /help.
47 |  * Manages the state (last command, current model) between interactions.
48 |  * @param {string|null} initialCmd - The initial command to have ready for analysis/debugging.
49 | * @param {number} limit - The history limit to use for /history selection. 50 | * @param {string} [initialModel='phi4:latest'] - The initial model to use. 51 | */ 52 | async function interactiveLoop(initialCmd, limit, initialModel = 'phi4:latest') { 53 | let lastCmd = initialCmd; 54 | let currentModel = initialModel; 55 | 56 | while (true) { 57 | console.log(boxen( 58 | `${chalk.gray('Type a command')} (${chalk.blue('/debug')}, ${chalk.blue('/model')}, ${chalk.blue('/history')}, ${chalk.blue('/help')}, ${chalk.blue('/exit')})`, 59 | BOX.PROMPT 60 | )); 61 | 62 | const input = await new Promise(r => 63 | getReadline().question('> ', t => r(t.trim().toLowerCase())) 64 | ); 65 | 66 | switch (input) { 67 | 68 | case '/debug': { 69 | // Skip command confirmation prompt and directly run debug loop 70 | process.stdout.write('\n'); 71 | await debugLoop(lastCmd, limit, currentModel); 72 | // Reset terminal state and readline 73 | process.stdout.write('\n'); 74 | closeReadline(); 75 | getReadline(); 76 | break; 77 | } 78 | 79 | case '/history': { 80 | const sel = await selectHistoryItem(limit); 81 | if (sel) { 82 | lastCmd = sel; 83 | console.log(boxen(`Selected command: ${lastCmd}`, { ...BOX.OUTPUT, title: 'History Selection' })); 84 | // Skip running the command and just continue the loop with updated lastCmd 85 | } 86 | // Reset terminal state and readline 87 | process.stdout.write('\n'); 88 | closeReadline(); 89 | getReadline(); 90 | break; 91 | } 92 | 93 | case '/model': { 94 | const newModel = await selectModelItem(); 95 | if (newModel) { 96 | currentModel = newModel; 97 | process.stdout.write('\n'); 98 | console.log(boxen(`Using model: ${currentModel}`, BOX.PROMPT)); 99 | } 100 | // Reset terminal state and readline 101 | // process.stdout.write('\n'); 102 | closeReadline(); 103 | getReadline(); 104 | break; 105 | } 106 | 107 | case '/help': 108 | console.log(boxen( 109 | [ 110 | '/debug – auto-patch errors using chosen LLM', 111 | '/model – pick from installed Ollama models', 112 | '/history – pick from recent shell commands', 113 | '/help – show this help', 114 | '/exit – quit' 115 | ].join('\n'), 116 | BOX.PROMPT 117 | )); 118 | break; 119 | 120 | case '/exit': 121 | closeReadline(); 122 | console.log(chalk.blue('bye, for now...')); 123 | return; 124 | 125 | case '': 126 | break; 127 | 128 | default: 129 | console.log(chalk.red('Unknown command. Type'), chalk.bold('/help')); 130 | } 131 | } 132 | } 133 | 134 | /* ─────────────── Debug loop ─────────────── */ 135 | /** 136 | * Main debugging loop that analyzes errors and fixes them. 137 | * 1. Runs the current command (`cmd`). 138 | * 2. If successful, breaks the loop. 139 | * 3. If error, analyzes the error (`analyzeWithLLM`). 140 | * 4. Determines error type (`determineErrorType`). 141 | * 5. If Terminal Issue: generates a new command (`generateTerminalCommandFix`), confirms with user, updates `cmd`. 142 | * 6. If Code Issue: generates a patch (`generatePatch`), confirms and applies (`confirmAndApply`). 143 | * 7. Logs the iteration details (`writeDebugLog`). 144 | * Continues until the command succeeds or the user cancels. 145 | * @param {string} initialCmd - The command to start debugging. 146 | * @param {number} limit - History limit (passed down from interactive loop/args). 147 | * @param {string} [currentModel='phi4:latest'] - The Ollama model to use. 
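 * Usage (a sketch; the command string is illustrative):
 *   await debugLoop('python app.py', 15, 'phi4:latest');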
148 | */ 149 | async function debugLoop(initialCmd, limit, currentModel = 'phi4:latest') { 150 | const iterations = []; 151 | const ts = new Date().toISOString().replace(/[-:.TZ]/g, '').slice(0, 15); 152 | const logDir = join(__dirname, 'debug_history'); 153 | await ensureDir(logDir); 154 | const logPath = join(logDir, `${ts}.txt`); 155 | 156 | // Get current working directory for context 157 | console.log(chalk.gray(' Locating current working directory...')); 158 | echoCommand('pwd'); 159 | const { output: currentDir } = runCommand('pwd'); 160 | 161 | // Initialize file content and summary variables outside try-catch scope 162 | let fileContentRaw = ''; 163 | let fileContentWithLineNumbers = ''; 164 | let codeSummary = ''; 165 | let filePath = ''; 166 | 167 | // First, run the command to see the error 168 | let cmd = initialCmd; 169 | console.log(chalk.gray(' Reading errors...')); 170 | echoCommand(cmd); 171 | const { ok, output } = runCommand(cmd); 172 | 173 | if (ok && !/error/i.test(output)) { 174 | console.log(boxen(chalk.green('No errors detected.'), { ...BOX.OUTPUT, title: 'Success' })); 175 | return; 176 | } 177 | 178 | // Extract possible file paths from the command 179 | try { 180 | // Extract possible filename from commands like "python file.py", "node script.js", etc. 181 | let possibleFile = initialCmd; 182 | 183 | // Common command prefixes to check for 184 | const commandPrefixes = ['python', 'python3', 'node', 'ruby', 'perl', 'php', 'java', 'javac', 'bash', 'sh']; 185 | 186 | // Check if the command starts with any of the common prefixes 187 | for (const prefix of commandPrefixes) { 188 | if (initialCmd.startsWith(prefix + ' ')) { 189 | // Extract everything after the prefix and a space 190 | possibleFile = initialCmd.substring(prefix.length + 1).trim(); 191 | break; 192 | } 193 | } 194 | 195 | // Further extract arguments if present (get first word that doesn't start with -) 196 | possibleFile = possibleFile.split(' ').find(part => part && !part.startsWith('-')) || ''; 197 | 198 | // First check relative path 199 | filePath = possibleFile; 200 | let isFile = filePath && fs.existsSync(filePath) && fs.statSync(filePath).isFile(); 201 | 202 | // If not a file, try as absolute path 203 | if (!isFile && filePath && !filePath.startsWith('/')) { 204 | filePath = join(currentDir.trim(), filePath); 205 | isFile = fs.existsSync(filePath) && fs.statSync(filePath).isFile(); 206 | } 207 | 208 | // Check if we need additional context from the file 209 | // We'll read file content only if: 210 | // 1. It's a valid file AND 211 | // 2. 
There are NO clear error lines in the traceback 212 | const filesWithErrors = extractFilesFromTraceback(output); 213 | const hasErrorLineInfo = filesWithErrors.size > 0; 214 | 215 | if (isFile && !hasErrorLineInfo) { 216 | console.log(chalk.gray(` Analyzing file content...`)); 217 | // Show the sed command that will be used 218 | const start = 1; // Since we want first 200 lines, starting from line 1 219 | const end = 200; // Read first 200 lines 220 | const sedCmd = `sed -n '${start},${end}p' ${filePath}`; 221 | echoCommand(sedCmd); 222 | 223 | // Use readFileContext to get the first 200 lines (using line 100 as center with ctx=100) 224 | fileContentRaw = readFileContext(filePath, 100, 100); 225 | 226 | // Create a version with line numbers for analysis 227 | fileContentWithLineNumbers = fileContentRaw.split('\n') 228 | .map((line, index) => `${index + 1}: ${line}`) 229 | .join('\n'); 230 | 231 | // Summarize the content - use the version with line numbers for better context 232 | codeSummary = await summarizeCodeWithLLM(fileContentWithLineNumbers, currentModel); 233 | // Display summary as indented gray text instead of boxen 234 | console.log('\n' +' ' + chalk.gray(codeSummary) + '\n'); 235 | } 236 | } catch (error) { 237 | console.log(chalk.yellow(` Note: Could not analyze file content: ${error.message}`)); 238 | } 239 | 240 | // Display snippets from error traceback 241 | if (!ok || /error/i.test(output)) { 242 | displaySnippetsFromError(output); 243 | } 244 | 245 | /* eslint-disable no-await-in-loop */ 246 | while (true) { 247 | // First, run analysis like /analyze would do, but pass additional context 248 | const analysis = await analyzeWithLLM(output, currentModel, fileContentWithLineNumbers, codeSummary, filePath); 249 | // Display analysis as indented gray text instead of boxen 250 | console.log('\n' +' ' + chalk.gray(analysis.replace(/\n/g, '\n ')) + '\n'); 251 | 252 | // Determine if this is a terminal command issue using LLM 253 | const errorType = await determineErrorType(output, analysis, currentModel); 254 | // Display error type as indented gray text 255 | console.log(' ' + chalk.gray(errorType) + '\n'); 256 | 257 | if (errorType === "TERMINAL_COMMAND_ERROR") { 258 | // Generate a new command to fix the issue 259 | const prevCommands = iterations.map(i => i.patch).filter(Boolean); 260 | const newCommand = await generateTerminalCommandFix(prevCommands, analysis, currentModel); 261 | 262 | // Show the proposed command 263 | console.log(boxen(newCommand, { ...BOX.OUTPUT, title: 'Proposed Command' })); 264 | 265 | // Ask for confirmation 266 | if (!(await askYesNo('Run this command?'))) { 267 | console.log(chalk.yellow('Debug loop aborted by user.')); 268 | break; 269 | } 270 | 271 | // Update the command for the next iteration 272 | cmd = newCommand; 273 | iterations.push({ error: output, patch: newCommand, analysis: analysis }); 274 | } else { 275 | // Original code file patching logic 276 | const rawDiff = await generatePatch( 277 | output, 278 | iterations.map(i => i.patch), 279 | analysis, 280 | currentDir.trim(), 281 | currentModel, 282 | fileContentRaw, 283 | codeSummary 284 | ); 285 | 286 | // Just extract the diff without displaying it 287 | const cleanDiff = extractDiff(rawDiff); 288 | 289 | // Check if we have a valid diff 290 | const isValidDiff = 291 | // Standard unified diff format 292 | (cleanDiff.includes('---') && cleanDiff.includes('+++')) || 293 | // Path with @@ hunks and -/+ changes 294 | (cleanDiff.includes('@@') && cleanDiff.includes('-') && 
cleanDiff.includes('+')) ||
295 | // File path and -/+ lines without @@ marker (simpler format)
296 | (cleanDiff.includes('/') && cleanDiff.includes('-') && cleanDiff.includes('+'));
297 | 
298 | if (!isValidDiff) {
299 | console.error(chalk.red('LLM did not return a valid diff. Aborting debug loop.'));
300 | break;
301 | }
302 | 
303 | const applied = await confirmAndApply(cleanDiff, currentDir.trim());
304 | 
305 | if (!applied) {
306 | console.log(chalk.yellow('Debug loop aborted by user.'));
307 | break;
308 | }
309 | 
310 | iterations.push({ error: output, patch: cleanDiff, analysis: analysis });
311 | 
312 | // Write the debug log
313 | await writeDebugLog(iterations, logPath);
314 | console.log(chalk.gray(`Debug session saved to ${logPath}`));
315 | 
316 | // Exit the loop after applying the patch instead of running the command again
317 | console.log(chalk.green('Patch applied. Returning to main loop.'));
318 | break;
319 | }
320 | 
321 | await writeDebugLog(iterations, logPath);
322 | console.log(chalk.gray(`Debug session saved to ${logPath}`));
323 | }
324 | }
325 | 
326 | 
327 | /* ─────────────────────────────── Main ──────────────────────────────── */
328 | 
329 | /**
330 |  * Main entry point for the Cloi CLI application.
331 |  * Parses command line arguments using yargs, displays a banner,
332 |  * and routes execution based on the provided `--model` flag.
333 |  * Handles fetching the last command and initiating the interactive loop.
334 |  */
335 | (async function main() {
336 | const argv = yargs(hideBin(process.argv))
337 | .option('model', {
338 | alias: 'm',
339 | describe: 'Ollama model to use for completions',
340 | default: 'phi4:14b',
341 | type: 'string'
342 | })
343 | .help().alias('help', '?')
344 | .parse();
345 | 
346 | // Check if the specified model is installed, install if not
347 | let currentModel = argv.model;
348 | 
349 | if (currentModel) {
350 | const installedModels = readModels();
351 | 
352 | if (!installedModels.includes(currentModel)) {
353 | console.log(boxen(
354 | `Model ${currentModel} is not installed. Install now?\nThis may take a few minutes.\n\nProceed (y/N):`,
355 | { ...BOX.CONFIRM, title: 'Model Installation' }
356 | ));
357 | 
358 | const response = await askYesNo('', true);
359 | console.log(response ? 'y' : 'N');
360 | 
361 | if (response) {
362 | console.log(chalk.blue(`Installing ${currentModel}...`));
363 | const success = await installModel(currentModel);
364 | 
365 | if (!success) {
366 | console.log(chalk.yellow(`Failed to install ${currentModel}.
Using default model instead.`)); 367 | currentModel = 'phi4:latest'; 368 | } else { 369 | console.log(chalk.green(`Successfully installed ${currentModel}.`)); 370 | } 371 | } else { 372 | console.log(chalk.yellow(`Using default model instead.`)); 373 | currentModel = 'phi4:latest'; 374 | } 375 | } 376 | } 377 | 378 | const banner = chalk.blueBright.bold('Cloi') + ' — secure agentic debugging tool'; 379 | console.log(boxen( 380 | `${banner}\n↳ model: ${currentModel}\n↳ completely local and secure`, 381 | BOX.WELCOME 382 | )); 383 | 384 | const lastCmd = await lastRealCommand(); 385 | if (!lastCmd) { 386 | console.log(chalk.yellow('No commands found in history.')); 387 | return; 388 | } 389 | 390 | console.log(boxen(lastCmd, { ...BOX.WELCOME, title: 'Last Command'})); 391 | await interactiveLoop(lastCmd, 15, currentModel); 392 | })().catch(err => { 393 | console.error(chalk.red(`Fatal: ${err.message}`)); 394 | process.exit(1); 395 | }); -------------------------------------------------------------------------------- /src/core/patch.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Patch Generation and Application Module 3 | * 4 | * Provides utilities for creating, extracting, displaying, and applying code patches. 5 | * This module handles the core functionality of generating unified diff patches and 6 | * safely applying them to the codebase. It includes robust error handling and fallback 7 | * mechanisms to ensure patches can be applied successfully across different environments. 8 | */ 9 | 10 | import { execSync, spawnSync } from 'child_process'; 11 | import { askYesNo } from '../ui/prompt.js'; 12 | import chalk from 'chalk'; 13 | import boxen from 'boxen'; 14 | import { BOX } from '../ui/boxen.js'; 15 | import fs from 'fs/promises'; 16 | 17 | 18 | /* ───────────────────────── Directory Creation Helper ────────────────────────────── */ 19 | async function ensureDir(dir) { 20 | try { await fs.mkdir(dir, { recursive: true }); } catch {} 21 | } 22 | 23 | /* ───────────────────────── Git Repository Detection ────────────────────────────── */ 24 | function inGitRepo() { 25 | try { 26 | execSync('git rev-parse --is-inside-work-tree', { stdio: 'ignore' }); 27 | return true; 28 | } catch { 29 | return false; 30 | } 31 | } 32 | 33 | /* ───────────────────────── Patch Level Detection ────────────────────────────── */ 34 | function detectStripLevel(firstPath) { 35 | if (firstPath.startsWith('/')) return 0; // absolute → keep whole path 36 | if (firstPath.startsWith('a/')) return 1; // git-style a/ b/ 37 | return 0; // plain relative file 38 | } 39 | 40 | /* ────────────────────────────── Show Patch ─────────────────────────────────── */ 41 | /** 42 | * Displays a diff/patch in a formatted way. 43 | * @param {string} diff - The diff/patch text to display. 
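 * Usage (a sketch; `rawLlmOutput` is an illustrative variable):
 *   showPatch(extractDiff(rawLlmOutput));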
44 | */ 45 | export function showPatch(diff) { 46 | // Check if the diff has proper hunk headers, add if missing 47 | if (diff.includes('---') && diff.includes('+++') && !diff.includes('@@')) { 48 | // Simple heuristic to add hunk headers if missing 49 | const lines = diff.split('\n'); 50 | let headerEndIndex = -1; 51 | 52 | // Find where the header ends (after +++ line) 53 | for (let i = 0; i < lines.length; i++) { 54 | if (lines[i].startsWith('+++')) { 55 | headerEndIndex = i + 1; 56 | break; 57 | } 58 | } 59 | 60 | // If we found the header end and there are changes after it 61 | if (headerEndIndex >= 0 && headerEndIndex < lines.length) { 62 | // Simple header with line 1 and appropriate context 63 | const hunkHeader = '@@ -1,1 +1,1 @@'; 64 | lines.splice(headerEndIndex, 0, hunkHeader); 65 | diff = lines.join('\n'); 66 | } 67 | } 68 | 69 | // Colorize diff: '+' lines are green, '-' lines are red 70 | const colorizedDiff = diff.split('\n').map(line => { 71 | if (line.startsWith('+')) { 72 | return chalk.green(line); 73 | } else if (line.startsWith('-')) { 74 | return chalk.red(line); 75 | } else if (line.startsWith('@@')) { 76 | return chalk.cyan(line); 77 | } else { 78 | return line; 79 | } 80 | }).join('\n'); 81 | 82 | console.log(boxen(colorizedDiff.trim() || '(empty diff)', { ...BOX.OUTPUT, title: 'Proposed Patch' })); 83 | } 84 | 85 | /* ───────────────────────── Unified Patch Application ────────────────────────────── */ 86 | /** 87 | * Applies a unified diff patch to the codebase. 88 | * Tries applying the patch using the standard `patch` command with different 89 | * strip levels (`-p`) for robustness. Falls back to `git apply --3way` if 90 | * inside a Git repository and initial attempts fail. 91 | * @param {string} diff - The unified diff content. 92 | * @param {string} [cwd=process.cwd()] - The working directory where paths are relative to. 93 | * @throws {Error} If the patch command fails after all attempts. 94 | */ 95 | function applyPatch(diff, cwd = process.cwd()) { 96 | // 1) Normalise line endings 97 | diff = diff.replace(/\r\n/g, '\n'); 98 | 99 | // 2) Remove Git metadata lines that confuse patch(1) 100 | diff = diff.replace(/^index [0-9a-f]+\.\.[0-9a-f]+ [0-9]+$/gm, ''); 101 | diff = diff.replace(/^diff --git .+$/gm, ''); 102 | 103 | // 3) Ensure long header lines aren't wrapped 104 | diff = diff.replace(/^(---|\+\+\+)\s+([^\n]+)\n[ \t]+([^\n]+)/gm, 105 | (_, pfx, a, b) => `${pfx} ${a}${b}`); 106 | 107 | // 4) Quick fix: add missing "+" to comment lines inside hunks 108 | diff = fixMissingCommentPlusSign(diff).diff; 109 | 110 | // 5) Ensure diff ends with a newline 111 | if (!diff.endsWith('\n')) diff += '\n'; 112 | 113 | // 6) Try patch(1) with a range of strip levels. 114 | // Paths produced by LLMs can include deep prefixes like 115 | // a/Users//Desktop/…/file.py which require -p3 or -p4. 116 | // We therefore try a broader range (0-8) instead of just 0-2. 
117 | const pLevels = Array.from({ length: 9 }, (_, i) => i); // [0,1,2,3,4,5,6,7,8] 118 | for (const p of pLevels) { 119 | const res = spawnSync('patch', [ 120 | `-p${p}`, '--batch', '--forward', '--fuzz', '3', '--reject-file=-' 121 | ], { cwd, input: diff, stdio: ['pipe', 'inherit', 'inherit'] }); 122 | if (res.status === 0) return; // success 123 | } 124 | 125 | // 7) Fallback: git apply --3way if inside a repo 126 | if (inGitRepo()) { 127 | const git = spawnSync('git', ['apply', '--3way', '--whitespace=nowarn', '-'], 128 | { cwd, input: diff, stdio: ['pipe', 'inherit', 'inherit'] }); 129 | if (git.status === 0) return; 130 | } 131 | 132 | throw new Error('Patch command failed'); 133 | } 134 | 135 | /** 136 | * Fixes missing + signs on comment lines in the diff 137 | * @param {string} diff - The diff content to fix 138 | * @returns {Object} - Object containing fixed diff and whether it was fixed 139 | */ 140 | function fixMissingCommentPlusSign(diff) { 141 | const lines = diff.split('\n'); 142 | let inHunk = false; 143 | let wasModified = false; 144 | 145 | for (let i = 0; i < lines.length; i++) { 146 | const line = lines[i]; 147 | 148 | // Check if we're in a hunk 149 | if (line.startsWith('@@')) { 150 | inHunk = true; 151 | continue; 152 | } 153 | 154 | // If we're in a hunk and find a line that looks like a comment 155 | // but doesn't have a prefix (-, +, or space), assume it should be a + line 156 | if (inHunk && !line.startsWith('-') && !line.startsWith('+') && !line.startsWith(' ') && line.trim().startsWith('#')) { 157 | lines[i] = '+ ' + line.trim(); 158 | wasModified = true; 159 | } 160 | } 161 | 162 | return { 163 | diff: lines.join('\n'), 164 | fixed: wasModified 165 | }; 166 | } 167 | 168 | /* ───────────────────────── Diff Text Extraction ────────────────────────────── */ 169 | /** 170 | * Extracts the unified diff portion from a larger text that may contain explanations or other text. 171 | * @param {string} diffText - The text containing a diff. 172 | * @returns {string} - The cleaned diff text. 
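 * Inputs it recognizes (illustrative): a bare unified diff, a diff wrapped in a
 * markdown code fence, or prose containing a "FINAL DIFF" marker followed by the
 * diff body.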
173 | */ 174 | export function extractDiff(diffText) { 175 | // First, check if this is already a clean diff from our structured generation 176 | if (diffText.trim().startsWith('--- a/') && 177 | diffText.includes('\n+++ b/')) { 178 | return diffText.trim(); 179 | } 180 | 181 | // Handle the new format with FINAL DIFF marker 182 | const finalDiffMatch = diffText.match(/FINAL DIFF\s*([\s\S]+?)(?:```\s*\n\n|$)/i); 183 | if (finalDiffMatch) { 184 | // Capture everything after FINAL DIFF but before the closing code block if present 185 | let diffContent = finalDiffMatch[1].trim(); 186 | 187 | // Remove any trailing markdown code block markers and text after them 188 | diffContent = diffContent.replace(/```[\s\S]*$/, '').trim(); 189 | 190 | return diffContent; 191 | } 192 | 193 | // Handle markdown code block format with diff language 194 | const markdownMatch = diffText.match(/```(?:diff)?\n([\s\S]+?)```/); 195 | if (markdownMatch) { 196 | const content = markdownMatch[1].trim(); 197 | 198 | return content; 199 | } 200 | 201 | // Handle plain diff format that might include explanation text 202 | const diffMatch = diffText.match(/^(---[\s\S]+?(?:\n\+\+\+[\s\S]+?)(?:\n@@[\s\S]+?))/m); 203 | if (diffMatch) { 204 | return diffMatch[1].trim(); 205 | } 206 | 207 | // If the response already seems to be a clean diff, return it as is 208 | if (diffText.trim().startsWith('---') && diffText.includes('+++')) { 209 | // Extract just the unified diff part 210 | const endOfDiffIndex = diffText.indexOf('```', diffText.indexOf('+++')); 211 | if (endOfDiffIndex > 0) { 212 | return diffText.substring(0, endOfDiffIndex).trim(); 213 | } 214 | 215 | return diffText.trim(); 216 | } 217 | 218 | // Additional handling for specific formats we've seen 219 | if (diffText.includes('```') && diffText.includes('@@ -')) { 220 | // Extract the content between the first @@ and the closing ``` 221 | const match = diffText.match(/(@@[\s\S]+?)(?:```|$)/); 222 | if (match) { 223 | // Get everything from the file paths before @@ to the match 224 | const headerMatch = diffText.match(/(---[\s\S]+?\+\+\+[\s\S]+?)(@@)/); 225 | if (headerMatch) { 226 | return (headerMatch[1] + match[1]).trim(); 227 | } 228 | return match[1].trim(); 229 | } 230 | } 231 | 232 | // Fall back to returning as is after trimming 233 | return diffText.trim(); 234 | } 235 | 236 | /* ───────────────────────── Patch Confirmation And Apply ────────────────────────────── */ 237 | /** 238 | * Displays a generated patch, asks the user for confirmation, and applies it if confirmed. 239 | * @param {string} diff - The unified diff patch content. 240 | * @param {string} [cwd=process.cwd()] - The working directory for applying the patch. 241 | * @returns {Promise} - True if the patch was successfully applied, false otherwise. 
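 * Usage (a sketch; `cleanDiff` is an illustrative variable):
 *   const applied = await confirmAndApply(cleanDiff, process.cwd());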
242 | */ 243 | export async function confirmAndApply(diff, cwd = process.cwd()) { 244 | // Display the patch 245 | showPatch(diff); 246 | 247 | // Ask for confirmation 248 | if (!(await askYesNo('Apply this patch?'))) return false; 249 | 250 | try { 251 | // Apply the patch if confirmed 252 | applyPatch(diff, cwd); 253 | console.log(chalk.green('✓ Patch applied\n')); 254 | return true; 255 | } catch (e) { 256 | console.error(chalk.red(`Patch failed: ${e.message}`)); 257 | return false; 258 | } 259 | } 260 | 261 | /* ────────────────────────── Structured Diff Generation ─────────────────────────── */ 262 | /** 263 | * Converts structured patch data to unified diff format 264 | * @param {Object} patchData - The structured patch data 265 | * @param {string} currentDir - The current working directory 266 | * @returns {string} - Unified diff format text 267 | */ 268 | export function convertToUnifiedDiff(patchData, currentDir) { 269 | // Group changes by file 270 | const fileChanges = {}; 271 | 272 | // Normalize structure and handle any string processing before grouping 273 | patchData.changes.forEach(change => { 274 | // Normalize the file path 275 | const filePath = change.file_path; 276 | 277 | if (!fileChanges[filePath]) { 278 | fileChanges[filePath] = []; 279 | } 280 | 281 | // Clone the change to avoid modifying original 282 | const normalizedChange = { ...change }; 283 | 284 | // Normalize the change to ensure consistent handling 285 | // Make sure old_line and new_line are properly stored as single entities 286 | // even if they contain newlines 287 | fileChanges[filePath].push(normalizedChange); 288 | }); 289 | 290 | // Generate diff for each file 291 | let diffOutput = ''; 292 | 293 | for (const [filePath, changes] of Object.entries(fileChanges)) { 294 | // Sort changes by line number 295 | changes.sort((a, b) => a.line_number - b.line_number); 296 | 297 | // Add file headers 298 | diffOutput += `--- a/${filePath}\n`; 299 | diffOutput += `+++ b/${filePath}\n`; 300 | 301 | // Generate hunks 302 | const hunks = generateHunks(changes, filePath); 303 | diffOutput += hunks; 304 | } 305 | 306 | return diffOutput; 307 | } 308 | 309 | /** 310 | * Generates hunks for a unified diff from a list of changes 311 | * @param {Array} changes - List of changes for a single file 312 | * @param {string} filePath - The path of the file being changed 313 | * @returns {string} - Formatted hunks 314 | */ 315 | function generateHunks(changes, filePath) { 316 | // Add logging for debugging 317 | //console.log('Generating hunks from changes:', JSON.stringify(changes, null, 2)); 318 | 319 | if (!changes || changes.length === 0) { 320 | console.log('No changes to process'); 321 | return ''; 322 | } 323 | 324 | try { 325 | // Create separate hunks for each change instead of grouping them 326 | let output = ''; 327 | 328 | // Sort changes by line number for consistent ordering 329 | changes.sort((a, b) => a.line_number - b.line_number); 330 | 331 | // Process each change as its own hunk 332 | changes.forEach(change => { 333 | const lineNumber = change.line_number; 334 | 335 | // Count additions and deletions for this change 336 | let deletions = (change.old_line && change.old_line.trim()) ? 1 : 0; 337 | let additions = (change.new_line !== null) ? 
1 : 0; 338 | 339 | // Make sure we always have valid counts 340 | if (deletions === 0) deletions = 1; 341 | if (additions === 0) additions = 1; 342 | 343 | // Create the hunk header for this single change 344 | const hunkHeader = `@@ -${lineNumber},${deletions} +${lineNumber},${additions} @@`; 345 | output += hunkHeader + '\n'; 346 | 347 | //console.log('Generated hunk header:', hunkHeader); 348 | 349 | // Add the change 350 | if (change.old_line !== undefined && change.old_line !== null) { 351 | output += processCodeLine(change.old_line, '-', filePath); 352 | } 353 | if (change.new_line !== null) { 354 | output += processCodeLine(change.new_line, '+', filePath); 355 | } 356 | }); 357 | 358 | // Log the final output for debugging 359 | // console.log('Generated diff output:'); 360 | // console.log(output); 361 | 362 | return output; 363 | } catch (error) { 364 | console.log(chalk.red('Error generating hunks:', error.message)); 365 | return ''; // Return empty string on error 366 | } 367 | } 368 | 369 | /** 370 | * Process a line of code for diff output, handling both literal \n and actual newlines 371 | * @param {string} codeText - The code text to process 372 | * @param {string} prefix - The prefix to use (+ or -) 373 | * @param {string} filePath - The file path for language detection 374 | * @returns {string} - Formatted diff lines with prefix 375 | */ 376 | function processCodeLine(codeText, prefix, filePath) { 377 | if (typeof codeText !== 'string') return ''; 378 | 379 | // Handle both cases: escaped \n sequences and actual newlines in the input 380 | let lines = []; 381 | 382 | // First, split on literal \n sequences 383 | if (codeText.includes('\\n')) { 384 | lines = codeText.split('\\n'); 385 | } else if (codeText.includes('\n')) { 386 | // If not escaped but has actual newlines 387 | lines = codeText.split('\n'); 388 | } else { 389 | // Single line 390 | return prefix + codeText + '\n'; 391 | } 392 | 393 | // Get file extension for language detection 394 | const fileExt = filePath.split('.').pop().toLowerCase(); 395 | 396 | // Check if this is a string literal that should be preserved as-is 397 | const isStringLiteral = detectStringLiteral(codeText, fileExt); 398 | 399 | // If it's a string literal with \n, preserve it as a single line 400 | if (isStringLiteral && lines.length > 1 && codeText.includes('\\n')) { 401 | // This is a string with \n that should be treated as literal in source code 402 | return prefix + codeText + '\n'; 403 | } 404 | 405 | // For normal multi-line scenarios 406 | // Find indentation of the first line to preserve for subsequent lines 407 | const indentMatch = lines[0].match(/^(\s+)/); 408 | const baseIndent = indentMatch ? 
indentMatch[1] : ''; 409 | 410 | // Format each line with the appropriate prefix and indentation 411 | return lines.map((l, i) => { 412 | // First line keeps original indentation 413 | if (i === 0) { 414 | return prefix + l; 415 | } 416 | // Subsequent lines get prefix plus base indentation plus content 417 | return prefix + baseIndent + l; 418 | }).join('\n') + '\n'; 419 | } 420 | 421 | /** 422 | * Detects if text contains a string literal that should preserve \n escapes 423 | * @param {string} text - The code text to analyze 424 | * @param {string} fileExt - File extension for language detection 425 | * @returns {boolean} - Whether this appears to be a string literal 426 | */ 427 | function detectStringLiteral(text, fileExt) { 428 | // Check for common string patterns based on language 429 | switch (fileExt) { 430 | case 'py': 431 | // Python strings: f-strings, triple quotes, regular quotes 432 | return ( 433 | text.includes('f"') || text.includes("f'") || 434 | text.includes('"""') || text.includes("'''") || 435 | (text.includes('"') && text.match(/"/g).length >= 2) || 436 | (text.includes("'") && text.match(/'/g).length >= 2) 437 | ); 438 | 439 | case 'c': case 'h': case 'cpp': case 'hpp': case 'cc': case 'cxx': 440 | // C/C++ strings: double quotes or character literals 441 | return ( 442 | (text.includes('"') && text.match(/"/g).length >= 2) || 443 | (text.includes("'") && text.match(/'/g).length >= 2) 444 | ); 445 | 446 | case 'rs': 447 | // Rust strings: raw strings r"..." or regular strings 448 | return ( 449 | text.includes('r#"') || 450 | text.includes('r"') || 451 | (text.includes('"') && text.match(/"/g).length >= 2) 452 | ); 453 | 454 | case 'go': 455 | // Go strings: backtick strings or regular strings 456 | return ( 457 | text.includes('`') || 458 | (text.includes('"') && text.match(/"/g).length >= 2) 459 | ); 460 | 461 | case 'js': case 'ts': case 'jsx': case 'tsx': 462 | // JavaScript/TypeScript: template literals, regular strings 463 | return ( 464 | text.includes('`') || 465 | (text.includes('"') && text.match(/"/g).length >= 2) || 466 | (text.includes("'") && text.match(/'/g).length >= 2) 467 | ); 468 | 469 | case 'java': case 'kt': case 'scala': 470 | // Java/Kotlin/Scala strings 471 | return ( 472 | (text.includes('"') && text.match(/"/g).length >= 2) || 473 | (text.includes("'") && text.match(/'/g).length >= 2) 474 | ); 475 | 476 | default: 477 | // General string detection as fallback 478 | return ( 479 | (text.includes('"') && text.match(/"/g).length >= 2) || 480 | (text.includes("'") && text.match(/'/g).length >= 2) || 481 | text.includes('`') || 482 | text.includes('"""') || 483 | text.includes("'''") 484 | ); 485 | } 486 | } -------------------------------------------------------------------------------- /src/core/llm.js: -------------------------------------------------------------------------------- 1 | /** 2 | * LLM Integration and Error Analysis Module 3 | * 4 | * Manages interactions with Ollama language models for error analysis and code fixes. 5 | * This module handles model selection, installation, error classification, and 6 | * generation of solutions (both terminal commands and code patches). It includes 7 | * UI feedback elements like progress indicators and thinking animations to enhance 8 | * the user experience during potentially lengthy LLM operations. 
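 *
 * A typical flow through this module (a sketch; these functions are defined below):
 *   const installed = readModels();         // names parsed from `ollama list`
 *   const model = await selectModelItem();  // picker; offers to install if missing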
9 |  */
10 | 
11 | import { execSync, spawn } from 'child_process';
12 | import { makePicker } from '../ui/prompt.js';
13 | import { askYesNo, closeReadline, getReadline } from '../ui/prompt.js';
14 | import { checkNetwork } from './command.js';
15 | import chalk from 'chalk';
16 | import { buildErrorContext, extractFilesFromTraceback, getErrorLines } from './traceback.js';
17 | import { BOX } from '../ui/boxen.js';
18 | import boxen from 'boxen';
19 | import ollama from 'ollama';
20 | import { runLLMWithTempScript } from '../utils/tempscript.js';
21 | import { promises as fs } from 'fs';
22 | 
23 | import { cpus } from 'os';
24 | import { convertToUnifiedDiff, extractDiff } from './patch.js';
25 | 
26 | /* ───────────────────────── Available Models Provider ────────────────────────────── */
27 | /**
28 |  * Returns a static list of recommended Ollama models for use with CLOI.
29 |  * @returns {string[]} - Array of model names (e.g., 'phi4:14b').
30 |  */
31 | export function getAvailableModels() {
32 |   return [
33 |     'llama3.1:8b',
34 |     'gemma3:4b',
35 |     'gemma3:12b',
36 |     'gemma3:27b',
37 |     'qwen3:8b',
38 |     'qwen3:14b',
39 |     'qwen3:30b',
40 |     'phi4:14b'
41 |   ];
42 | }
43 | 
44 | /* ───────────────────────── Installed Models Reader ────────────────────────────── */
45 | /**
46 |  * Reads the list of currently installed Ollama models using `ollama list`.
47 |  * @returns {string[]} - An array of installed model names, or empty array on error.
48 |  */
49 | export function readModels() {
50 |   try {
51 |     const output = execSync('ollama list', { encoding: 'utf8' });
52 | 
53 |     // Log the raw output for debugging
54 |     //console.log(chalk.gray('Detected installed models:'));
55 |     //console.log(chalk.gray(output));
56 | 
57 |     const models = output
58 |       .split(/\r?\n/)
59 |       .slice(1) // drop header line: NAME  SIZE
60 |       .filter(Boolean)
61 |       .map(l => l.split(/\s+/)[0]); // first token is the model name
62 | 
63 |     //console.log(chalk.gray('Detected installed models:'), models);
64 |     return models;
65 |   } catch (error) {
66 |     console.error(chalk.red('Error reading models:'), error.message);
67 |     return [];
68 |   }
69 | }
70 | 
71 | /* ───────────────────────── Interactive Model Selector ────────────────────────────── */
72 | /**
73 |  * Allows the user to select an Ollama model using the interactive picker.
74 |  * Shows available models if online, or only installed models if offline.
75 |  * Marks installed models visually and prompts for installation if an uninstalled model is chosen.
76 |  * @returns {Promise<string|null>} - The selected model name, or null if cancelled or installation failed.
77 |  */
78 | export async function selectModelItem() {
79 |   const isOnline = checkNetwork();
80 |   let models;
81 |   let title;
82 | 
83 |   if (isOnline) {
84 |     models = getAvailableModels();
85 |     title = 'Available Models';
86 |     // Get installed models to check status
87 |     const installedModels = readModels();
88 | 
89 |     // To ensure we don't miss any models, create a combined list
90 |     const allModels = [...new Set([...models, ...installedModels])];
91 | 
92 |     // Create display-friendly versions with installation status
93 |     const displayNames = allModels.map(model => {
94 |       const isInstalled = installedModels.includes(model);
95 |       const displayName = model.replace(/:latest$/, '');
96 |       return `${displayName} ${isInstalled ?
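        // renders e.g. "llama3.1:8b ✓" when installed, "qwen3:14b -" when not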
chalk.green('✓') : chalk.gray('-')}`; 97 | }); 98 | 99 | // Create pairs with install status for sorting (installed first, then alphabetical) 100 | const modelPairs = displayNames.map((display, i) => { 101 | const isInstalled = installedModels.includes(allModels[i]); 102 | return [display, allModels[i], isInstalled]; 103 | }); 104 | 105 | // Sort installed models first, then alphabetically 106 | modelPairs.sort((a, b) => { 107 | // First sort by installation status 108 | if (a[2] && !b[2]) return -1; 109 | if (!a[2] && b[2]) return 1; 110 | // Then sort alphabetically 111 | return a[0].localeCompare(b[0]); 112 | }); 113 | 114 | // Extract sorted display names and original models 115 | const sortedDisplayNames = modelPairs.map(pair => pair[0]); 116 | const sortedModels = modelPairs.map(pair => pair[1]); 117 | 118 | // Create picker with sorted display names 119 | const picker = makePicker(sortedDisplayNames, title); 120 | const selected = await picker(); 121 | 122 | if (!selected) return null; 123 | 124 | const selectedModel = sortedModels[sortedDisplayNames.indexOf(selected)]; 125 | const isInstalled = installedModels.includes(selectedModel); 126 | 127 | if (!isInstalled) { 128 | console.log(boxen( 129 | `Install ${selectedModel}?\nThis may take a few minutes.\n\nProceed (y/N):`, 130 | { ...BOX.CONFIRM, title: 'Confirm Installation' } 131 | )); 132 | const response = await askYesNo('', true); 133 | console.log(response ? 'y' : 'N'); 134 | if (response) { 135 | const success = await installModel(selectedModel); 136 | if (!success) return null; 137 | } else { 138 | return null; 139 | } 140 | } 141 | 142 | return selectedModel; 143 | } else { 144 | models = readModels(); 145 | title = 'Installed Models'; 146 | 147 | // Create display-friendly versions of model names (strip ":latest" suffix) 148 | const displayNames = models.map(model => model.replace(/:latest$/, '')); 149 | 150 | // Create sorted pairs of [displayName, originalModel] for sorting and maintaining mapping 151 | const modelPairs = displayNames.map((display, i) => [display, models[i]]); 152 | modelPairs.sort((a, b) => a[0].localeCompare(b[0])); 153 | 154 | // Extract sorted display names and original models 155 | const sortedDisplayNames = modelPairs.map(pair => pair[0]); 156 | const sortedModels = modelPairs.map(pair => pair[1]); 157 | 158 | // Create picker with sorted display names 159 | const picker = makePicker(sortedDisplayNames, title); 160 | const selected = await picker(); 161 | // Map back to the original model name if something was selected 162 | return selected ? sortedModels[sortedDisplayNames.indexOf(selected)] : null; 163 | } 164 | } 165 | 166 | /* ───────────────────────── Model Installation Handler ────────────────────────────── */ 167 | /** 168 | * Installs an Ollama model using `ollama pull`, displaying progress. 169 | * @param {string} modelName - The name of the model to install (e.g., 'phi4:latest'). 170 | * @returns {Promise} - True if installation succeeded, false otherwise. 
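 * Progress percentages are scraped from `ollama pull`'s stdout/stderr stream;
 * the returned promise resolves false (rather than rejecting) on any failure.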
171 |  */
172 | export async function installModel(modelName) {
173 |   const downloader = startDownloading(modelName);
174 |   try {
175 |     const child = spawn('ollama', ['pull', modelName]);
176 | 
177 |     child.stdout.on('data', (data) => {
178 |       const output = data.toString();
179 |       // Extract progress information from Ollama's output
180 |       const progressMatch = output.match(/(\d+%)/);
181 |       if (progressMatch) {
182 |         downloader.updateProgress(chalk.blue(progressMatch[0]));
183 |       }
184 |     });
185 | 
186 |     child.stderr.on('data', (data) => {
187 |       const output = data.toString();
188 |       // Extract progress information from Ollama's error output
189 |       const progressMatch = output.match(/(\d+%)/);
190 |       if (progressMatch) {
191 |         downloader.updateProgress(chalk.blue(progressMatch[0]));
192 |       }
193 |     });
194 | 
195 |     return new Promise((resolve) => {
196 |       // `spawn` reports a missing binary via the 'error' event rather than by
197 |       // throwing, so handle it here instead of relying on the outer try/catch.
198 |       child.on('error', () => { downloader.stop(); resolve(false); });
199 |       child.on('close', (code) => { downloader.stop(); resolve(code === 0); });
200 |     });
201 |   } catch (error) {
202 |     downloader.stop();
203 |     console.error(chalk.red(`Failed to install model ${modelName}: ${error.message}`));
204 |     return false;
205 |   }
206 | }
207 | 
208 | /* ───────────────────────── Model Uninstallation Handler ────────────────────────────── */
209 | /**
210 |  * Uninstalls a specified Ollama model after confirmation.
211 |  * @param {string} modelName - The name of the model to remove.
212 |  */
213 | export async function uninstallModel(modelName) {
214 |   try {
215 |     console.log(boxen(
216 |       `Uninstall ${modelName}?\nThis will remove the model from your system.\n\nProceed (y/N):`,
217 |       { ...BOX.CONFIRM, title: 'Confirm Uninstallation' }
218 |     ));
219 |     const response = await askYesNo('', true);
220 | 
221 |     if (response) {
222 |       execSync(`ollama rm ${modelName}`, { stdio: 'ignore' });
223 |       process.stdout.write('\n');
224 |       console.log(chalk.green(`Model ${modelName} has been uninstalled.`));
225 |     }
226 |     // Reset terminal state and readline
227 |     // process.stdout.write('\n');
228 |     closeReadline();
229 |     // Don't directly modify rl here
230 |     getReadline();
231 |   } catch (error) {
232 |     process.stdout.write('\n');
233 |     console.error(chalk.red(`Failed to uninstall model ${modelName}: ${error.message}`));
234 |     // Reset terminal state and readline
235 |     process.stdout.write('\n');
236 |     closeReadline();
237 |     // Don't directly modify rl here
238 |     getReadline();
239 |   }
240 | }
241 | 
242 | /* ────────────────────────── Thinking Animation ─────────────────────────── */
243 | 
244 | /**
245 |  * Starts and manages a terminal spinner animation with changing text phrases.
246 |  * Indicates that a potentially long-running operation (like LLM interaction) is in progress.
247 |  * @param {string[]} [customPhrases] - Optional custom phrases to display during thinking
248 |  * @returns {function(): void} - A function to stop the animation and clear the line.
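 * @example
 *   const stop = startThinking(); // ...await some slow LLM call...; then stop() clears the line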
249 | */ 250 | export function startThinking(customPhrases) { 251 | const defaultPhrases = [ 252 | 'Brewing ideas','Cooking up something','Putting it together', 253 | 'Low-key figuring it out','Thoughts are thoughting', 254 | 'Prompt engineering in progress','Summoning tokens', 255 | 'Reasoning like a transformer','Tokens are tokening', 256 | 'Forking the universe','Ctrl+C won\'t help here' 257 | ]; 258 | 259 | const phrases = customPhrases || defaultPhrases.sort(() => Math.random() - 0.5); 260 | let seconds = 0; 261 | let spinnerFrame = 0; 262 | let currentPhrase = phrases[0]; 263 | const spinner = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']; 264 | const startTime = Date.now(); 265 | 266 | const updateDisplay = () => { 267 | process.stdout.clearLine(0); 268 | process.stdout.cursorTo(0); 269 | process.stdout.write(`${spinner[spinnerFrame]} ${currentPhrase} (${seconds}s)`); 270 | }; 271 | 272 | // Spinner animation 273 | const spinnerInterval = setInterval(() => { 274 | spinnerFrame = (spinnerFrame + 1) % spinner.length; 275 | updateDisplay(); 276 | }, 80); 277 | 278 | const tick = () => { 279 | currentPhrase = phrases[Math.floor(seconds / 10) % phrases.length]; 280 | seconds++; 281 | updateDisplay(); 282 | }; 283 | tick(); 284 | const id = setInterval(tick, 1000); 285 | return () => { 286 | clearInterval(id); 287 | clearInterval(spinnerInterval); 288 | process.stdout.write('\n'); 289 | }; 290 | } 291 | 292 | /** 293 | * Returns thinking phrases specifically for error analysis 294 | * @returns {string[]} - Array of thinking phrases for analysis 295 | */ 296 | export function getThinkingPhrasesForAnalysis() { 297 | return [ 298 | 'Parsing the error, line by line...', 299 | 'Locating the point of failure...', 300 | 'Trying to make sense of the red text...', 301 | 'This terminal error looks familiar...', 302 | 'Analyzing what went wrong, precisely...', 303 | 'Diagnosing the issue like a seasoned dev...', 304 | 'Unraveling the terminal\'s last cry...', 305 | 'Let\'s see why the shell screamed this time...' 306 | ].sort(() => Math.random() - 0.5); 307 | } 308 | 309 | /** 310 | * Returns thinking phrases specifically for patch generation 311 | * @returns {string[]} - Array of thinking phrases for patch generation 312 | */ 313 | export function getThinkingPhrasesForPatch() { 314 | return [ 315 | 'Locating the offending lines...', 316 | 'Composing a surgical code fix...', 317 | 'Patching with precision...', 318 | 'Rewriting history, one `+` at a time...', 319 | 'Turning errors into green text...', 320 | 'Looking for the cleanest possible fix...', 321 | 'Coding like it\'s commit time...', 322 | 'Preparing a fix you can actually `git apply`...', 323 | ].sort(() => Math.random() - 0.5); 324 | } 325 | 326 | /** 327 | * Returns thinking phrases specifically for code summarization 328 | * @returns {string[]} - Array of thinking phrases for code summarization 329 | */ 330 | export function getThinkingPhrasesForSummarization() { 331 | return [ 332 | 'Reading the codebase...', 333 | 'Parsing code structures...', 334 | 'Understanding the logic flow...', 335 | 'Extracting core concepts...', 336 | 'Identifying key components...', 337 | 'Mapping functions and relationships...', 338 | 'Distilling essential patterns...', 339 | 'Compressing code into concepts...', 340 | 'Finding the signal in the syntax...', 341 | 'Translating code to human language...' 
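    // (the Math.random() comparator applied below is a quick-and-dirty shuffle;
    //  slight ordering bias is acceptable for these cosmetic status phrases)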
342 |   ].sort(() => Math.random() - 0.5);
343 | }
344 | 
345 | /* ────────────────────────── Download Progress Indicator ─────────────────────────── */
346 | /**
347 |  * Starts and manages a terminal spinner animation specifically for model downloads.
348 |  * Includes updating progress text based on Ollama output.
349 |  * @param {string} modelName - The name of the model being downloaded.
350 |  * @returns {{stop: function(): void, updateProgress: function(string): void}} - An object with
351 |  *          functions to stop the animation and update the displayed progress percentage.
352 |  */
353 | export function startDownloading(modelName) {
354 |   // let seconds = 0;
355 |   let spinnerFrame = 0;
356 |   let progress = '';
357 |   const spinner = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
358 |   const startTime = Date.now();
359 | 
360 |   const updateDisplay = () => {
361 |     process.stdout.clearLine(0);
362 |     process.stdout.cursorTo(0);
363 |     process.stdout.write(`${spinner[spinnerFrame]} Installing ${modelName}… ${progress}`);
364 |   };
365 | 
366 |   // Spinner animation
367 |   const spinnerInterval = setInterval(() => {
368 |     spinnerFrame = (spinnerFrame + 1) % spinner.length;
369 |     updateDisplay();
370 |   }, 80);
371 | 
372 |   const tick = () => {
373 |     // seconds++;
374 |     updateDisplay();
375 |   };
376 |   tick();
377 |   const id = setInterval(tick, 1000);
378 |   return {
379 |     stop: () => {
380 |       clearInterval(id);
381 |       clearInterval(spinnerInterval);
382 |       const totalTime = ((Date.now() - startTime) / 1000).toFixed(1);
383 |       // process.stdout.write(`\n  Model ${modelName} installed in ${totalTime}s\n`);
384 |     },
385 |     updateProgress: (newProgress) => {
386 |       progress = newProgress;
387 |       updateDisplay();
388 |     }
389 |   };
390 | }
391 | 
392 | /* ────────────────────────── Error Analysis With LLM ─────────────────────────── */
393 | /**
394 |  * Analyzes error output with an LLM using a Python script with optimized settings.
395 |  * @param {string} errorOutput - The error output to analyze.
396 |  * @param {string} [model='phi4:latest'] - The Ollama model to use.
397 |  * @param {string} [fileContent=''] - Optional file content for additional context.
398 |  * @param {string} [codeSummary=''] - Optional code summary for additional context.
399 |  * @param {string} [filePath] - Path of the file associated with the error.
400 |  * @returns {Promise<string>} - The analysis text from the LLM.
401 |  */
402 | export async function analyzeWithLLM(errorOutput, model = 'phi4:latest', fileContent = '', codeSummary = '', filePath) {
403 |   // Start thinking spinner to provide visual feedback
404 |   const stopThinking = startThinking(getThinkingPhrasesForAnalysis());
405 |   try {
406 |     // Handle missing Ollama or model by ensuring it's there
407 |     try {
408 |       execSync('which ollama', { stdio: 'ignore' });
409 |     } catch {
410 |       stopThinking(); // Stop spinner before exiting
411 |       console.error(chalk.red('Ollama CLI not found. Please install Ollama first.'));
412 |       process.exit(1);
413 |     }
414 | 
415 |     // Ensure the model exists
416 |     const models = readModels();
417 |     if (!models.includes(model)) {
418 |       console.log(chalk.yellow(`Model ${model} not found. Installing...`));
419 |       if (!(await installModel(model))) {
420 |         stopThinking(); // Stop spinner before exiting
421 |         console.error(chalk.red(`Failed to install ${model}. Please install it manually.`));
422 |         process.exit(1);
423 |       }
424 |     }
425 | 
426 |     // Set up the prompt for analysis with full context (±30 lines)
427 |     const context = buildErrorContext(errorOutput, 30);
428 | 
429 |     // Build the prompt with additional context if provided
430 |     let promptParts = [
431 |       'You are a helpful terminal assistant analyzing command errors.',
432 |       '',
433 |       'ERROR OUTPUT:',
434 |       errorOutput,
435 |       '',
436 |       'FILE PATH:',
437 |       filePath,
438 |     ];
439 | 
440 |     // Add code summary if provided
441 |     if (codeSummary) {
442 |       promptParts.push('CODE SUMMARY:');
443 |       promptParts.push(codeSummary);
444 |       promptParts.push('');
445 |     }
446 | 
447 |     // Add file content if provided
448 |     if (fileContent) {
449 |       promptParts.push('FILE CONTENT (First ~200 lines with line numbers):');
450 |       promptParts.push(fileContent);
451 |       promptParts.push('');
452 |     }
453 | 
454 |     // Add traceback context if available
455 |     if (context) {
456 |       promptParts.push('TRACEBACK CONTEXT (±30 lines):');
457 |       promptParts.push(context);
458 |       promptParts.push('');
459 |     }
460 | 
461 |     // Add instructions
462 |     promptParts.push('Your response MUST include these sections:');
463 |     promptParts.push('1. ERROR LOCATION: Specify the file name (example.py) and the exact line number (line 45) where you believe the error is occurring. Nothing else.');
464 |     promptParts.push('2. Explain **VERY** concisely what went wrong.');
465 |     promptParts.push('3. FIX: Propose a concrete solution to fix the error. There might be multiple fixes required for the same error, so put all in one code chunk. Do not move onto another error. No alternatives. Be final with your solution.');
466 |     promptParts.push('');
467 |     promptParts.push('Be precise about the error line number, even if it\'s not explicitly mentioned in the traceback.');
468 |     promptParts.push('Little to no explanation; stay focused on the root cause and the solution. Keep it **VERY** concise.');
469 | 
470 |     const prompt = promptParts.join('\n');
471 | 
472 |     // Run the analysis using the TempScript approach
473 |     const output = await runLLMWithTempScript(prompt, model, 'error_analysis');
474 | 
475 |     // Stop thinking spinner before returning
476 |     stopThinking();
477 | 
478 |     return output.trim();
479 |   } catch (error) {
480 |     // Make sure we stop the spinner even if there's an error
481 |     stopThinking();
482 |     return `Error during analysis: ${error.message}`;
483 |   }
484 | }
485 | 
486 | /* ────────────────────────── Code Summarization With LLM ─────────────────────────── */
487 | /**
488 |  * Summarizes code context with an LLM using a Python script with optimized settings.
489 |  * @param {string} codeContent - The code content to summarize.
490 |  * @param {string} [model='phi4:latest'] - The Ollama model to use.
491 |  * @returns {Promise<string>} - The summary text from the LLM.
492 |  */
493 | export async function summarizeCodeWithLLM(codeContent, model = 'phi4:latest') {
494 |   // Start thinking spinner to provide visual feedback
495 |   const stopThinking = startThinking(getThinkingPhrasesForSummarization());
496 | 
497 |   try {
498 |     // Handle missing Ollama or model by ensuring it's there
499 |     try {
500 |       execSync('which ollama', { stdio: 'ignore' });
501 |     } catch {
502 |       stopThinking(); // Stop spinner before exiting
503 |       console.error(chalk.red('Ollama CLI not found. Please install Ollama first.'));
504 |       process.exit(1);
505 |     }
506 | 
507 |     // Ensure the model exists
508 |     const models = readModels();
509 |     if (!models.includes(model)) {
510 |       console.log(chalk.yellow(`Model ${model} not found. Installing...`));
511 |       if (!(await installModel(model))) {
512 |         stopThinking(); // Stop spinner before exiting
513 |         console.error(chalk.red(`Failed to install ${model}. Please install it manually.`));
514 |         process.exit(1);
515 |       }
516 |     }
517 | 
518 |     const prompt = `
519 | You are a concise code summarization assistant.
520 | 
521 | CODE:
522 | ${codeContent}
523 | 
524 | Provide an ultra-concise summary of this code in EXACTLY 1-2 lines maximum. Your summary must:
525 | - Describe the main purpose/functionality
526 | - Mention key components or patterns if relevant
527 | - Be immediately useful to a developer skimming the code
528 | - Not exceed 2 lines under any circumstances
529 | 
530 | Your entire response should be 1-2 lines only. No introductions, explanations, or lists. Make it concise and to the point.
531 | `.trim();
532 | 
533 |     // Run the summarization using the TempScript approach
534 |     const output = await runLLMWithTempScript(prompt, model, 'error_analysis');
535 | 
536 |     // Stop thinking spinner before returning
537 |     stopThinking();
538 | 
539 |     return output.trim();
540 |   } catch (error) {
541 |     // Make sure we stop the spinner even if there's an error
542 |     stopThinking();
543 |     return `Error during summarization: ${error.message}`;
544 |   }
545 | }
546 | 
547 | /* ────────────────────────── Terminal Error Pattern Detection ─────────────────────────── */
548 | /**
549 |  * Checks if an error is likely related to a terminal command issue.
550 |  * @param {string} errorOutput - The error output to analyze.
551 |  * @returns {boolean} - True if it seems to be a terminal command issue.
552 |  */
553 | export function isTerminalCommandError(errorOutput) {
554 |   const terminalErrors = [
555 |     /command not found/i,
556 |     /no such file or directory/i,
557 |     /permission denied/i,
558 |     /not installed/i,
559 |     /invalid option/i,
560 |     /unknown option/i,
561 |     /missing argument/i,
562 |     /too many arguments/i,
563 |     /not recognized as an internal or external command/i,
564 |     /is not recognized as a command/i,
565 |   ];
566 | 
567 |   return terminalErrors.some(pattern => pattern.test(errorOutput));
568 | }
569 | 
570 | /* ────────────────────────── Error Classification ─────────────────────────── */
571 | /**
572 |  * Determines the type of error (terminal command or code issue) using LLM.
573 |  * @param {string} errorOutput - The error output to analyze.
574 |  * @param {string} analysis - Previous analysis of the error.
575 |  * @param {string} model - The model to use.
576 |  * @returns {Promise<string>} - Either "TERMINAL_COMMAND_ERROR" or "CODE_FILE_ISSUE"
577 |  */
578 | export async function determineErrorType(errorOutput, analysis, model) {
579 |   // First do a quick check for obvious terminal errors
580 |   if (isTerminalCommandError(errorOutput)) {
581 |     return "TERMINAL_COMMAND_ERROR";
582 |   }
583 | 
584 |   // Start thinking spinner to provide visual feedback
585 |   const stopThinking = startThinking(getThinkingPhrasesForAnalysis());
586 | 
587 |   try {
588 |     // Use LLM to determine more complex cases
589 |     const prompt = `
590 | You are a binary classifier AI. Your ONLY task is to classify if a fix requires code changes or terminal commands.
591 | 
592 | ANALYSIS:
593 | ${analysis}
594 | 
595 | INSTRUCTIONS:
596 | 1. Look at the Proposed Fix section
597 | 2. If the fix requires running a command (like pip install, npm install, etc.), output: TERMINAL_COMMAND_ERROR
598 | 3. If the fix requires changing code files, output: CODE_FILE_ISSUE
599 | 4. You MUST output ONLY ONE of these exact phrases, and no additional thoughts: "TERMINAL_COMMAND_ERROR" or "CODE_FILE_ISSUE"
600 | 
601 | Output ONLY ONE of these exact phrases. No need for long explanations. Just a single-phrase output:
602 | 'TERMINAL_COMMAND_ERROR',
603 | 'CODE_FILE_ISSUE'
604 | `.trim();
605 | 
606 |     // Run the analysis using the TempScript approach
607 |     const output = await runLLMWithTempScript(prompt, model, 'error_determination');
608 | 
609 |     // Stop thinking spinner before processing result
610 |     stopThinking();
611 | 
612 |     const cleanOutput = output.trim();
613 |     const isTerminal = cleanOutput.includes('TERMINAL_COMMAND_ERROR');
614 | 
615 |     return isTerminal ? "TERMINAL_COMMAND_ERROR" : "CODE_FILE_ISSUE";
616 |   } catch (error) {
617 |     // Make sure we stop the spinner even if there's an error
618 |     stopThinking();
619 |     return "CODE_FILE_ISSUE"; // Default to code issue if error
620 |   }
621 | }
622 | 
623 | /* ────────────────────────── Terminal Command Generator ─────────────────────────── */
624 | /**
625 |  * Generates a new terminal command to fix an error using LLM.
626 |  * @param {string[]} prevCommands - Previous attempted fix commands.
627 |  * @param {string} analysis - Previous error analysis.
628 |  * @param {string} model - The model to use.
629 |  * @returns {Promise<string>} - The generated command.
630 |  */
631 | export async function generateTerminalCommandFix(prevCommands, analysis, model) {
632 |   // Start thinking spinner to provide visual feedback
633 |   const stopThinking = startThinking(getThinkingPhrasesForAnalysis());
634 | 
635 |   try {
636 |     // Format previous commands as a plain list (the header is added in the prompt below)
637 |     const prevCommandsText = Array.isArray(prevCommands) && prevCommands.length > 0
638 |       ? prevCommands.map(cmd => `- ${cmd}`).join('\n')
639 |       : '(none)';
640 | 
641 |     const prompt = `
642 | You are a terminal command fixing AI. Given an analysis, extract a new command to fix it.
643 | 
644 | Error Analysis:
645 | ${analysis}
646 | 
647 | Previous Commands:
648 | ${prevCommandsText}
649 | 
650 | Instructions:
651 | 1. Analyze the Proposed Fix section carefully
652 | 2. Extract a single command that will fix the issue
653 | 3. The command should be complete and ready to run
654 | 4. Do not include any explanations, commentary, or markdown formatting
655 | 5. Only output the command itself
656 | 
657 | Example Format:
658 | pip install missing-package
659 | 
660 | Generate ONLY the command, nothing else. No explanations, no markdown, just the raw command.
661 | Make sure it's valid syntax that can be directly executed in a terminal.
662 | `.trim();
663 | 
664 |     // Run the analysis using the TempScript approach
665 |     const output = await runLLMWithTempScript(prompt, model, 'command_generation');
666 | 
667 |     // Stop thinking spinner before processing result
668 |     stopThinking();
669 | 
670 |     // Clean the output to get just the command
671 |     let command = output.trim();
672 | 
673 |     // Remove markdown code blocks if present
674 |     command = command.replace(/^```[a-z]*\n/, '').replace(/\n```$/, '');
675 | 
676 |     // Remove any leading "Run: " or similar text
677 |     command = command.replace(/^(Run|Execute|Type|Use|Try):\s*/i, '');
678 | 
679 |     // Remove any $ prefix (common in examples)
680 |     command = command.replace(/^\$\s*/, '');
681 | 
682 |     // Ensure we have a valid command
683 |     if (!command || command.startsWith('Error')) {
684 |       throw new Error('Failed to generate a valid command');
685 |     }
686 | 
687 |     return command;
688 |   } catch (error) {
689 |     // Make sure we stop the spinner even if there's an error
690 |     stopThinking();
691 |     return `echo "Error generating command: ${error.message}"`;
692 |   }
693 | }
694 | 
695 | /* ────────────────────────── Code Patch Generator ─────────────────────────── */
696 | /**
697 |  * Generates a patch to fix code issues using LLM with structured outputs.
698 |  * @param {string} errorOutput - The error output.
699 |  * @param {string[]} prevPatches - Previous attempted patches.
700 |  * @param {string} currentDir - The current working directory.
701 |  * @param {string} analysis - Previous error analysis.
702 |  * @param {string} model - The model to use.
703 |  * @param {string} [fileContent=''] - Optional file content for additional context.
704 |  * @param {string} [codeSummary=''] - Optional code summary for additional context.
705 |  * @returns {Promise<string>} - The generated diff.
706 |  */
707 | export async function generatePatch(errorOutput, prevPatches, analysis, currentDir = process.cwd(), model, fileContent = '', codeSummary = '') {
708 |   // Start thinking spinner to provide visual feedback
709 |   const stopThinking = startThinking(getThinkingPhrasesForPatch());
710 | 
711 |   try {
712 |     const prevPatchesText = prevPatches.length
713 |       ? `\n\nPreviously attempted patches:\n${prevPatches.join('\n\n')}`
714 |       : '';
715 | 
716 |     // Extract file paths and line numbers from the traceback
717 |     const filesWithErrors = extractFilesFromTraceback(errorOutput);
718 |     const errorFiles = Array.from(filesWithErrors.keys()).join('\n');
719 |     const errorLines = Array.from(filesWithErrors.values()).join('\n');
720 | 
721 |     // Get the exact lines of code where errors occur
722 |     const exactErrorCode = getErrorLines(errorOutput);
723 | 
724 |     // Get only the last two lines of the error output
725 |     const errorOutputLines = errorOutput.split('\n');
726 |     const lastTwoLines = errorOutputLines.slice(-2).join('\n');
727 | 
728 |     // Get the code context with reduced context size (±3 lines) for generatePatch
729 |     // Don't include file path and line headers in the context
730 |     const context = buildErrorContext(errorOutput, 3, false);
731 | 
732 |     const parts = [
733 |       // ───── Intro
734 |       'You are a code-fixing AI. Given an analysis, extract a structured patch to fix it.',
735 |       '',
736 | 
737 |       // ───── Analysis
738 |       '### Error Analysis',
739 |       analysis,
740 |       '',
741 | 
742 |       // ───── Current Directory (for context)
743 |       '### Current Working Directory',
744 |       currentDir,
745 |       '',
746 |     ];
747 | 
748 |     // Add code summary if provided
749 |     if (codeSummary) {
750 |       parts.push('### Code Summary');
751 |       parts.push(codeSummary);
752 |       parts.push('');
753 |     }
754 | 
755 |     // Add file content if provided - use raw content for patch generation to avoid line number issues
756 |     if (fileContent) {
757 |       parts.push('### File Content (First ~200 lines)');
758 |       parts.push(fileContent);
759 |       parts.push('');
760 |     }
761 | 
762 |     // Continue with the rest of the sections
763 |     parts.push(
764 |       // ───── Error Files (separate section)
765 |       '### Error File:',
766 |       errorFiles || '(none)',
767 |       '',
768 | 
769 |       // ───── Error Lines (separate section)
770 |       '### Error Line:',
771 |       errorLines || '(none)',
772 |       '',
773 | 
774 |       // ───── Error Code (exact line with error)
775 |       '### Error Code:',
776 |       exactErrorCode || '(none)',
777 |       '',
778 | 
779 |       // ───── Code Context (with reduced context)
780 |       '### Code Context (±3 lines from error locations)',
781 |       context || '(none)',
782 |       '',
783 | 
784 |       // ───── Previous patches (optional)
785 |       '### Previous Patches',
786 |       prevPatchesText || '(none)',
787 |       '',
788 |     );
789 | 
790 |     // Add structured output instructions
791 |     parts.push(
792 |       // ───── Instructions
793 |       '### Instructions',
794 |       'Analyze the error and generate a structured patch in JSON format with the following schema:',
795 |       '{',
796 |       '  "changes": [',
797 |       '    {',
798 |       '      "file_path": "relative/path/to/file.py",',
799 |       '      "line_number": 42,',
800 |       '      "old_line": "  z = x + yy",',
801 |       '      "new_line": "  z = x + y"',
802 |       '    },',
803 |       '    ...',
804 |       '  ],',
805 |       '  "description": "Fixed typo in variable name and syntax error"',
806 |       '}',
807 |       '',
808 |       '1. Ensure the file_path is relative to: ' + currentDir,
809 |       '2. Include the ENTIRE line for both old_line and new_line',
810 |       '3. For deletions, include old_line but set new_line to null',
811 |       '4. For additions, set line_number to the line that comes before and set old_line to ""',
812 |       '',
813 |       'Make sure to:',
814 |       '1. Only include lines that are actually changed',
815 |       '2. Never modify the same line twice',
816 |       '3. Keep the changes as minimal as possible',
817 |       '4. Maintain the correct order of operations',
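      // small, single-purpose line edits are far easier to convert into a valid
      // unified diff (via convertToUnifiedDiff) than free-form rewrites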
818 |       '',
819 |       '### JSON Output'
820 |     );
821 | 
822 |     const prompt = parts.join('\n');
823 | 
824 |     // Define the schema for the patch response
825 |     const patchSchema = {
826 |       type: "object",
827 |       properties: {
828 |         changes: {
829 |           type: "array",
830 |           items: {
831 |             type: "object",
832 |             properties: {
833 |               file_path: { type: "string" },
834 |               line_number: { type: "integer" },
835 |               old_line: { type: "string" },
836 |               new_line: { type: ["string", "null"] } // JSON Schema null union ("nullable" is OpenAPI, not JSON Schema)
837 |             },
838 |             required: ["file_path", "line_number", "old_line"]
839 |           }
840 |         },
841 |         description: { type: "string" }
842 |       },
843 |       required: ["changes"]
844 |     };
845 | 
846 |     let output;
847 |     try {
848 |       // Add diagnostic logs
849 |       //console.log(chalk.gray('Starting Ollama API call with model:', model));
850 |       const cpuThreads = Math.min(8, (cpus()?.length || 2));
851 | 
852 |       // Try using the structured output API if available
853 |       const response = await ollama.chat({
854 |         model: model,
855 |         messages: [{ role: 'user', content: prompt }],
856 |         format: patchSchema, // Pass the schema to the format parameter
857 |         stream: false, // Structured outputs don't work with streaming
858 |         options: {
859 |           // Use the same optimized settings from patch_generation preset
860 |           temperature: 0.1,
861 |           num_predict: 768,
862 |           num_thread: cpuThreads,
863 |           num_batch: 32,
864 |           mmap: true,
865 |           int8: true,
866 |           f16: false,
867 |           repeat_penalty: 1.0,
868 |           top_k: 40,
869 |           top_p: 0.95,
870 |           cache_mode: "all",
871 |           use_mmap: true,
872 |           use_mlock: true
873 |         }
874 |       });
875 | 
876 | 
877 | 
878 |       // The response will automatically be in the schema format
879 |       const structuredPatch = JSON.parse(response.message.content);
880 | 
881 |       console.log(chalk.gray(JSON.stringify(structuredPatch, null, 2)));
882 | 
883 |       // Convert the structured patch to unified diff format
884 |       output = convertToUnifiedDiff(structuredPatch, currentDir);
885 | 
886 | 
887 |     } catch (error) {
888 |       // Add detailed error logging
889 |       console.log(chalk.red('Ollama API call error details:'));
890 |       console.log(chalk.red('Error name:', error.name));
891 |       //console.log(chalk.red('Error message:', error.message));
892 |       //console.log(chalk.red('Error stack:', error.stack));
893 | 
894 |       // Fall back to the traditional approach if structured outputs fail
895 |       console.log(chalk.yellow(`Structured format unavailable - reverting to standard text output: ${error.message}`));
896 |       output = await runLLMWithTempScript(prompt, model, 'patch_generation');
897 |     }
898 | 
899 |     // Stop thinking spinner before processing result
900 |     stopThinking();
901 | 
902 |     return output.trim();
903 |   } catch (error) {
904 |     // Make sure we stop the spinner even if there's an error
905 |     stopThinking();
906 |     console.error(chalk.red(`Error generating patch: ${error.message}`));
907 |     return '';
908 |   }
909 | }
--------------------------------------------------------------------------------