├── update.sh
├── .vscode
│   ├── settings.json
│   └── launch.json
├── src
│   ├── declarations
│   │   └── inquirer_interactive_list_prompt.d.ts
│   ├── api
│   │   ├── stream.ts
│   │   ├── ollama.ts
│   │   ├── api.ts
│   │   ├── cmdh.ts
│   │   ├── openai.ts
│   │   └── text_generation_web_ui.ts
│   ├── system.ts
│   ├── parseResponse.ts
│   ├── configure.ts
│   └── cmdh.ts
├── .gitignore
├── .github
│   └── workflows
│       └── gitleaks.yml
├── tsconfig.json
├── LICENSE
├── package.json
├── system.prompt
├── install.sh
└── README.md

/update.sh:
--------------------------------------------------------------------------------
1 | git pull && npx tsc
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |   "files.associations": {
3 |     "*.prompt": "markdown"
4 |   }
5 | }
--------------------------------------------------------------------------------
/src/declarations/inquirer_interactive_list_prompt.d.ts:
--------------------------------------------------------------------------------
1 | declare module 'inquirer-interactive-list-prompt';
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Node.js build output and dependencies
2 | node_modules/
3 | dist/
4 | 
5 | # Environment variables
6 | .env
7 | 
8 | # Log files
9 | *.log
10 | 
11 | # Editor-specific files
12 | *.sublime*
13 | *.iml
14 | .idea/
15 | *.swp
16 | *.swo
17 | 
18 | # Operating System-specific files
19 | .DS_Store
20 | Thumbs.db
21 | *.lock
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 |   "version": "0.2.0",
3 |   "configurations": [
4 |     {
5 |       "type": "node",
6 |       "request": "launch",
7 |       "name": "Run cmdh",
8 |       "sourceMaps": true,
9 |       "program": "${workspaceFolder}/src/cmdh.ts",
10 |       "console": "integratedTerminal",
11 |       "preLaunchTask": "npm: build",
12 |       "args": [],
13 |       "outFiles": ["${workspaceFolder}/dist/**/*.js"]
14 |     }
15 |   ]
16 | }
--------------------------------------------------------------------------------
/.github/workflows/gitleaks.yml:
--------------------------------------------------------------------------------
1 | name: gitleaks
2 | on:
3 |   pull_request:
4 |   push:
5 |   workflow_dispatch:
6 |   schedule:
7 |     - cron: "0 4 * * *" # run once a day at 4 AM
8 | jobs:
9 |   scan:
10 |     name: gitleaks
11 |     runs-on: ubuntu-latest
12 |     steps:
13 |       - uses: actions/checkout@v3
14 |         with:
15 |           fetch-depth: 0
16 |       - uses: gitleaks/gitleaks-action@v2
17 |         env:
18 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 |   "compilerOptions": {
3 |     "emitDecoratorMetadata": true,
4 |     "experimentalDecorators": true,
5 |     "allowSyntheticDefaultImports": true,
6 |     "esModuleInterop": true,
7 |     "sourceMap": true,
8 |     "module": "ESNext",
9 |     "moduleResolution": "node",
10 |     "target": "ES2020",
11 |     "outDir": "dist",
12 |     "rootDir": "./",
13 |     "strict": true,
14 |     "skipLibCheck": true,
15 |     "forceConsistentCasingInFileNames": true,
16 |     "types": ["node"]
17 |   },
18 |   "exclude": [
19 |     "node_modules"
20 |   ]
21 | }
--------------------------------------------------------------------------------
/src/api/stream.ts:
--------------------------------------------------------------------------------
1 | export const readStream = async (
2 |   stream: ReadableStream,
3 |   onStream: (value: string) => string) => {
4 |   const reader = stream.getReader();
5 |   const decoder = new TextDecoder();
6 |   let done = false;
7 |   let innerBuffer = '';
8 | 
9 |   try {
10 |     while (!done) {
11 |       const { value, done: doneReading } = await reader.read();
12 |       done = doneReading;
13 |       const decodedValue = decoder.decode(value, { stream: true }); // stream: true keeps multi-byte characters intact across chunk boundaries
14 |       innerBuffer += onStream(decodedValue);
15 |     }
16 |   } catch (e) {
17 |     console.error(e);
18 |   }
19 | 
20 |   return {
21 |     done,
22 |     value: innerBuffer,
23 |   };
24 | };
25 | 
--------------------------------------------------------------------------------
/src/system.ts:
--------------------------------------------------------------------------------
1 | import { exec } from 'child_process';
2 | import { promisify } from 'util';
3 | 
4 | const execAsync = promisify(exec);
5 | 
6 | export async function getSystemInfo() {
7 |   try {
8 |     // PRETTY_NAME is present in /etc/os-release on virtually all modern distros; DISTRIB_DESCRIPTION only exists on LSB systems
9 |     const { stdout: distroOutput } = await execAsync(`grep -h PRETTY_NAME /etc/*-release | head -n 1 | cut -d '=' -f 2 | sed 's/"//g'`);
10 |     const { stdout: archOutput } = await execAsync('uname -m');
11 | 
12 |     const distro = distroOutput.trim();
13 |     const arch = archOutput.trim();
14 | 
15 |     return {
16 |       distro,
17 |       arch,
18 |     };
19 |   } catch (error: any) {
20 |     console.error(`Error executing command: ${error.message}`);
21 |   }
22 | }
--------------------------------------------------------------------------------
/src/api/ollama.ts:
--------------------------------------------------------------------------------
1 | import { Ollama } from 'ollama';
2 | 
3 | // Generate a response from ollama
4 | export async function generate(prompt: string, system: string) {
5 |   const { OLLAMA_HOST } = process.env;
6 | 
7 |   const ollama = new Ollama({ host: OLLAMA_HOST })
8 | 
9 |   const response = await ollama.chat({
10 |     model: process.env.OLLAMA_MODEL_NAME ?? '',
11 |     messages: [{
12 |       'role': 'system',
13 |       'content': system,
14 |     }, {
15 |       'role': 'user',
16 |       'content': prompt
17 |     }],
18 |     stream: true,
19 |   });
20 | 
21 |   let buffer = '';
22 |   for await (const part of response) {
23 |     if (part.message) {
24 |       buffer += part.message.content
25 |     }
26 |   }
27 |   return buffer;
28 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) cmdh
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/api/api.ts: -------------------------------------------------------------------------------- 1 | import { oraPromise } from 'ora'; 2 | import { generate as generateCmdh } from './cmdh.js'; 3 | import { generate as generateOpenAI } from './openai.js'; 4 | import { generate as generateOllama } from './ollama.js'; 5 | import { generate as generateTextGenerationWebUI } from './text_generation_web_ui.js'; 6 | 7 | const generateFunctionMap = { 8 | 'cmdh': generateCmdh, 9 | 'OpenAI': generateOpenAI, 10 | 'ollama': generateOllama, 11 | 'text-generation-webui': generateTextGenerationWebUI 12 | } 13 | 14 | export async function startChat(prompt: string, system: string, showOra = true) { 15 | const generateFunction = generateFunctionMap[process.env.LLM_HOST as keyof typeof generateFunctionMap]; 16 | if (showOra) { 17 | const promise = generateFunction(prompt, system); 18 | 19 | return oraPromise(promise, { 20 | text: truncate(`Retrieving command... ${prompt}`, 75) 21 | }) 22 | } else { 23 | return await generateFunction(prompt, system); 24 | } 25 | } 26 | 27 | function truncate(input: string, length: number) { 28 | if (input.length > length) { 29 | const truncated = input.substring(0, length).replace(/\n/g, ' ') + '...'; 30 | return truncated; 31 | } 32 | return input; 33 | } -------------------------------------------------------------------------------- /src/api/cmdh.ts: -------------------------------------------------------------------------------- 1 | import { readStream } from "./stream.js"; 2 | 3 | export async function generate(prompt: string, system: string) { 4 | try { 5 | const { CMDH_API_BASE, CMDH_API_KEY } = process.env; 6 | const endpoint = '/api/generate'; // API endpoint 7 | 8 | const url = new URL(endpoint, CMDH_API_BASE).toString(); // Construct the full URL 9 | 10 | const requestBody = { 11 | prompt, 12 | system, 13 | apiKey: CMDH_API_KEY, 14 | model: process.env.MODEL_NAME, 15 | }; 16 | 17 | const response = await fetch(url, { 18 | method: 'POST', 19 | body: JSON.stringify(requestBody), 20 | headers: { 21 | 'Content-Type': 'application/json' 22 | } 23 | }); 24 | 25 | if (!response.ok) { 26 | throw new Error(`HTTP error! Status: ${response.status}`); 27 | } 28 | 29 | // Handle the stream 30 | const reader = response.body; 31 | if(!reader) { 32 | throw new Error("Reader was null."); 33 | } 34 | const streamResponse = await readStream( 35 | reader, 36 | value => value 37 | ); 38 | 39 | return streamResponse.value 40 | } catch (e) { 41 | console.log('An error occurred while communicating with the Cmdh API. 
Please try again later.');
42 |   }
43 | }
--------------------------------------------------------------------------------
/src/api/openai.ts:
--------------------------------------------------------------------------------
1 | import OpenAI from "openai";
2 | 
3 | export async function generate(prompt: string, system: string) {
4 |   try {
5 |     const openai = new OpenAI();
6 | 
7 |     const stream = await openai.chat.completions.create({
8 |       model: process.env.OPENAI_MODEL_NAME || '',
9 |       messages: [
10 |         { role: 'system', content: system },
11 |         { role: 'user', content: prompt }
12 |       ],
13 |       stream: true,
14 |     });
15 | 
16 |     try {
17 |       let buffer = '';
18 |       // Collecting data from the stream
19 |       for await (const chunk of stream) {
20 |         // Each chunk is a completion delta; append whatever content it carries
21 |         const content = chunk.choices[0].delta.content
22 |         if (content) {
23 |           buffer += content;
24 |         }
25 |       }
26 |       return buffer;
27 |     } catch (e) {
28 |       console.error("Failed to read stream: ", e);
29 |     }
30 |   } catch (e: any) {
31 |     if (e.message.includes('OPENAI_API_KEY')) {
32 |       console.error('You must set your OpenAI API key using "cmdh configure" before using the OpenAI mode.');
33 |     } else {
34 |       console.error('An error occurred while communicating with the OpenAI API. Please try again later.');
35 |       if (e.message) {
36 |         console.error(`Error message: ${e.message}`)
37 |       }
38 |     }
39 |   }
40 | }
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "cmdh",
3 |   "type": "module",
4 |   "dependencies": {
5 |     "chalk": "^5.2.0",
6 |     "clipboardy": "^3.0.0",
7 |     "dotenv": "^16.0.3",
8 |     "form-data": "^4.0.0",
9 |     "inquirer": "^9.3.7",
10 |     "inquirer-interactive-list-prompt": "^1.0.4",
11 |     "node-fetch": "^3.3.2",
12 |     "ollama": "^0.4.4",
13 |     "openai": "^4.24.0",
14 |     "ora": "^6.3.0",
15 |     "ts-loader": "^9.5.1"
16 |   },
17 |   "description": "cmdh (i.e. command helper) is a program that invokes an LLM model to convert a command request into a desired command, which you can then execute.",
18 |   "version": "0.0.1",
19 |   "main": "index.js",
20 |   "scripts": {
21 |     "build": "tsc",
22 |     "test": "jest",
23 |     "test:debug": "node --inspect-brk node_modules/.bin/jest --runInBand"
24 |   },
25 |   "repository": {
26 |     "type": "git",
27 |     "url": "git+https://github.com/pgibler/cmdh.git"
28 |   },
29 |   "keywords": [
30 |     "ai",
31 |     "terminal",
32 |     "shell",
33 |     "linux",
34 |     "command",
35 |     "assistant"
36 |   ],
37 |   "author": "gibler",
38 |   "license": "MIT",
39 |   "bugs": {
40 |     "url": "https://github.com/pgibler/cmdh/issues"
41 |   },
42 |   "homepage": "https://github.com/pgibler/cmdh#readme",
43 |   "devDependencies": {
44 |     "@babel/preset-typescript": "^7.25.7",
45 |     "@types/inquirer": "^9.0.7",
46 |     "@types/jest": "^29.5.13",
47 |     "@types/node": "^20.0.0",
48 |     "jest": "^29.7.0",
49 |     "ts-jest": "^29.2.5",
50 |     "typescript": "^5.6.3"
51 |   }
52 | }
--------------------------------------------------------------------------------
/system.prompt:
--------------------------------------------------------------------------------
1 | You are a helpful assistant that interprets user command descriptions and responds with the shell command in the JSON protocol specified in this document.
2 | 
3 | JSON output format:
4 | {
5 |   "setupCommands": string[],
6 |   "desiredCommand": string,
7 |   "nonInteractive": "true" | "false",
8 |   "safetyLevel": "delete" | "overwrite" | "safe",
9 |   "assistantMessage": string,
10 | }
11 | 
12 | Only respond with valid JSON. Do not include helper text or surround the JSON with quotes. The JSON must be readable as-is by a JSON parser that will ingest it.
13 | 
14 | Set the "setupCommands" field to an array containing all package installation commands. If there are no setup commands, make it an empty array.
15 | 
16 | Set the "desiredCommand" field to the Linux command the user is describing.
17 | 
18 | Set "nonInteractive" to "true" if the command runs in a non-interactive shell exactly as it would in an interactive one. Set it to "false" if the command requires an interactive shell to run.
19 | 
20 | Set the "safetyLevel" field to:
21 | 
22 | - "delete" if the command deletes one or more files.
23 | - "overwrite" if the command overwrites a file or adds content to an existing file.
24 | - "safe" if the command does not delete, modify, or overwrite any files or configurations.
25 | 
26 | Set the "assistantMessage" field to a message explaining the functionality of the Linux command that you set in the "desiredCommand" field.
27 | 
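28 | For example, given the request "Install and run htop" on a Debian-based machine, a valid response would be:
29 | 
30 | {
31 |   "setupCommands": ["sudo apt-get install -y htop"],
32 |   "desiredCommand": "htop",
33 |   "nonInteractive": "false",
34 |   "safetyLevel": "safe",
35 |   "assistantMessage": "Installs htop with apt, then launches it. htop is an interactive process viewer, so it must run in an interactive shell."
36 | }
37 | 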
38 | The machine running the Linux commands has the following configuration:
39 | Distro: {distro}
40 | Architecture: {arch}
41 | The Linux command you return must be runnable on the machine.
--------------------------------------------------------------------------------
/src/parseResponse.ts:
--------------------------------------------------------------------------------
1 | type CommandRequestResponse = {
2 |   setupCommands: string[],
3 |   desiredCommand: string,
4 |   nonInteractive: boolean,
5 |   safetyLevel: 'delete' | 'overwrite' | 'safe',
6 |   assistantMessage: string
7 | }
8 | 
9 | export function parseResponse(responseData: string): CommandRequestResponse | null {
10 |   try {
11 |     const escapedResponse = responseData.replace(/\\n/g, "\n");
12 |     const data = JSON.parse(escapedResponse);
13 | 
14 |     const safetyLevelValid = data.safetyLevel === 'delete'
15 |       || data.safetyLevel === 'overwrite'
16 |       || data.safetyLevel === 'safe';
17 | 
18 |     // Accept booleans as well as the "true"/"false" strings that the prompt's schema specifies
19 |     const nonInteractiveValid = typeof data.nonInteractive === 'boolean' || data.nonInteractive === 'true' || data.nonInteractive === 'false';
20 | 
21 |     const desiredCommandValid = typeof data.desiredCommand === 'string';
22 | 
23 |     const assistantMessageValid = typeof data.assistantMessage === 'string';
24 | 
25 |     if (!(desiredCommandValid
26 |       && nonInteractiveValid
27 |       && safetyLevelValid
28 |       && assistantMessageValid)) {
29 |       console.error('Invalid response structure:', data);
30 |       throw new Error('Invalid response structure detected.');
31 |     }
32 | 
33 |     return {
34 |       setupCommands: data.setupCommands ?? [],
35 |       desiredCommand: data.desiredCommand,
36 |       nonInteractive: data.nonInteractive === true || data.nonInteractive === 'true', // Boolean('false') would be true, so compare explicitly
37 |       safetyLevel: data.safetyLevel,
38 |       assistantMessage: data.assistantMessage
39 |     };
40 |   } catch (e) {
41 |     console.error(`Failed to parse response:\n${JSON.stringify(responseData)}`, e);
42 |     throw new Error(`Failed to parse response.\n\n${JSON.stringify(responseData)}`);
43 |   }
44 | }
45 | 
--------------------------------------------------------------------------------
/src/api/text_generation_web_ui.ts:
--------------------------------------------------------------------------------
1 | import fetch, { Headers } from 'node-fetch';
2 | 
3 | export async function generate(prompt: string, system: string) {
4 |   try {
5 |     const headers = new Headers({
6 |       'Content-Type': 'application/json',
7 |     });
8 |     const body = JSON.stringify({
9 |       messages: [
10 |         { role: 'system', content: system },
11 |         { role: 'user', content: prompt }
12 |       ],
13 |       mode: 'instruct',
14 |       instruction_template: 'Alpaca',
15 |       stream: true,
16 |     });
17 | 
18 |     const {
19 |       TEXT_GENERATION_WEBUI_HOST,
20 |       TEXT_GENERATION_WEBUI_PORT
21 |     } = process.env;
22 | 
23 |     const response = await fetch(`${TEXT_GENERATION_WEBUI_HOST}:${TEXT_GENERATION_WEBUI_PORT}/v1/chat/completions`, {
24 |       method: 'POST',
25 |       headers: headers,
26 |       body: body,
27 |     });
28 | 
29 |     if (response.ok && response.body) {
30 |       let buffer = '';
31 | 
32 |       // Return a promise that resolves when the stream ends
33 |       return new Promise((resolve, reject) => {
34 |         response.body.on('data', (chunk: Buffer) => {
35 |           const textChunk = chunk.toString();
36 |           const dataPattern = /^data: (.*)$/gm;
37 |           let match;
38 | 
39 |           while ((match = dataPattern.exec(textChunk)) !== null) {
40 |             // OpenAI-compatible streams typically end with a "data: [DONE]" sentinel, which is not JSON
41 |             if (match[1].trim() === '[DONE]') {
42 |               continue;
43 |             }
44 |             const data = JSON.parse(match[1]);
45 |             const content = data.choices[0]?.delta?.content;
46 |             if (content) {
47 |               buffer += content;
48 |             }
49 |           }
50 |         });
51 | 
52 |         response.body.on('end', () => {
53 |           resolve(buffer);
54 |         });
55 | 
56 |         response.body.on('error', (err: Error) => {
57 |           reject('Stream reading error: ' + err.message);
58 |         });
59 |       });
60 |     } else {
61 |       console.log('Failed to fetch completions: ', response.statusText);
62 |     }
63 |   } catch (e) {
64 |     console.log('An error occurred while fetching completions: ', e);
65 |   }
66 | }
--------------------------------------------------------------------------------
/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Check if npm is available
4 | if ! command -v npm &> /dev/null; then
5 |   echo "npm could not be found, please install Node.js and npm"
6 |   exit 1
7 | fi
8 | 
9 | if ! command -v node &> /dev/null; then
10 |   echo "node could not be found, please install Node.js and npm"
11 |   exit 1
12 | fi
13 | 
14 | if ! command -v tsc &> /dev/null; then
15 |   echo "tsc could not be found, please install TypeScript globally (npm install -g typescript)"
16 |   exit 1
17 | fi
18 | 
19 | NODE_VERSION="$(node --version)"
20 | NPM_VERSION="$(npm --version)"
21 | TSC_VERSION="$(npx tsc -v)"
22 | 
23 | echo "Node version: $NODE_VERSION"
24 | echo "npm version: $NPM_VERSION"
25 | echo "tsc version: $TSC_VERSION"
26 | 
27 | # Install npm dependencies
28 | npm install
29 | 
30 | npx tsc
31 | 
32 | # Setup .env file
33 | node ./dist/src/cmdh.js configure
34 | 
35 | # Get the directory where the install script is located
36 | CMDH_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
37 | 
38 | # Alias command using the dynamically determined path
39 | alias_cmd="alias cmdh='node $CMDH_DIR/dist/src/cmdh.js'"
40 | 
41 | # Function to add alias and reload config for Bash and Zsh
42 | add_alias_and_reload() {
43 |   shell_rc=$1
44 |   grep -qxF "$alias_cmd" "$shell_rc" || echo "$alias_cmd" >> "$shell_rc" # skip if the alias is already present
45 |   source "$shell_rc"
46 | }
47 | 
48 | # Detect the shell and update the corresponding config file
49 | if [ -n "$BASH_VERSION" ]; then
50 |   # Bash shell
51 |   add_alias_and_reload "${HOME}/.bashrc"
52 |   echo "Alias added to .bashrc."
53 | elif [ -n "$ZSH_VERSION" ]; then
54 |   # Zsh shell
55 |   add_alias_and_reload "${HOME}/.zshrc"
56 |   echo "Alias added to .zshrc."
57 | elif [ -n "$FISH_VERSION" ]; then
58 |   # Fish shell (alias --save requires fish 3.0+)
59 |   echo "alias --save cmdh='node $CMDH_DIR/dist/src/cmdh.js'" | fish
60 |   echo "Alias saved as a Fish function."
61 | else
62 |   echo "Unsupported shell. Please add the alias manually to your shell initializer."
63 |   echo "$alias_cmd"
64 |   exit 1
65 | fi
66 | 
67 | echo "Installation complete!"
68 | echo "Reload your shell config file or open a new terminal session to run cmdh."
69 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # cmdh - Generate Linux commands using an LLM
2 | 
3 | cmdh (short for Command Helper) is a tool that invokes LLM models provided by OpenAI, ollama, or text-generation-webui to convert a command request into a desired command.
4 | 
5 | Use it to look up commands and flags that you don't know offhand or to generate complex commands with chaining.
6 | 
7 | [cmdh_demonstration_video.webm](https://user-images.githubusercontent.com/119892/233747166-552339ef-f3fe-4eb5-9161-db574b6f96fc.webm)
8 | 
9 | ## Features
10 | 
11 | - Generate Linux commands from natural language
12 | - Interactively run the commands using a hotkey menu system
13 | - Differentiates between shell command types: interactive and non-interactive
14 | - Supports [ChatGPT](https://platform.openai.com/docs/overview), [ollama](https://ollama.ai/), and [text-generation-webui](https://github.com/oobabooga/text-generation-webui)
15 | 
16 | ## Prerequisites
17 | 
18 | - Node.js - [installation guide](https://nodejs.org/en/download/package-manager)
19 | - npm - [installation guide](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
20 | - tsc - [installation guide](https://www.npmjs.com/package/typescript)
21 | 
22 | ## Installation
23 | 
24 | 1. Set up and configure cmdh using the following command:
25 | ```
26 | git clone https://github.com/pgibler/cmdh.git && cd cmdh && ./install.sh
27 | ```
28 | 2. Run it like so:
29 | ```
30 | cmdh 'Output the number of lines of code committed to git last month'
31 | ```
32 | 3. Interact with the result interface to run the setup commands, the desired command, all of the commands, or quit.
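33 | 
34 | For illustration, a run looks roughly like this. The labels and menu come from `src/cmdh.ts`; the generated command and assistant message are model output, so yours will vary:
35 | 
36 | ```
37 | $ cmdh 'Output available hard drive space'
38 | ✔ Retrieving command... Output available hard drive space
39 | desired command: df -h
40 | assistant message: Displays disk usage and available space for all mounted filesystems in human-readable units.
41 | Choose an option: [d] Run desired command  [q] Quit
42 | ```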
43 | 
44 | **NOTE**: You will have to reload your .bashrc / .zshrc / etc. or open a new terminal to make the cmdh command available in the shell. In Debian / Ubuntu, this is done by running `source ~/.bashrc`.
45 | 
46 | ## Configuring
47 | 
48 | Before running cmdh, you will need to configure an LLM host and set the configuration options required by that host.
49 | 
50 | - Run `cmdh configure` to start the configuration wizard. You will be asked to select an LLM host and input the settings required by that host.
51 | - Run `cmdh configure show` to display your current configuration.
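52 | 
53 | For example, with an OpenAI setup, `cmdh configure show` prints something along these lines (the values come from your own `.env`):
54 | 
55 | ```
56 | LLM Host: OpenAI
57 | Model: gpt-4o
58 | API key: sk-...
59 | ```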
60 | 
61 | ### OpenAI
62 | 
63 | 1. Generate an OpenAI key [here](https://platform.openai.com/api-keys).
64 | 2. Run `cmdh configure` and select the OpenAI option.
65 | 3. Select a model & input your OpenAI key.
66 | 
67 | ### ollama
68 | 
69 | 1. Install & run the ollama service & pull the codellama model using the following commands:
70 | ```
71 | curl https://ollama.ai/install.sh | sh
72 | ollama pull codellama
73 | ```
74 | 2. Run `cmdh configure`, select the ollama option, and set 'codellama' as the model.
75 | 
76 | ### text-generation-webui
77 | 
78 | 1. Clone the repo: `git clone https://github.com/oobabooga/text-generation-webui`
79 | 2. Navigate to the cloned text-generation-webui folder and start the server by running `./start_linux.sh --api --listen`.
80 | 3. Open the web UI for text-generation-webui (http://localhost:7860), open the "Model" tab, and in the "Download model or LoRA" form, input `Trelis/Llama-2-7b-chat-hf-function-calling-v2` and press "Download".
81 | 4. Click the reload button next to the Model dropdown menu under the model tab & select `llama-2-7b-function-calling.Q3_K_M.gguf`. Then click "Load" to load the model.
82 | 5. Run `cmdh configure` and choose the 'text-generation-webui' option.
83 | 
84 | cmdh will automatically send the prompts to whichever model is loaded by text-generation-webui.
85 | 
86 | HuggingFace model URL: [https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2](https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2)
87 | 
88 | ## Roadmap
89 | 
90 | The issue tracker is mostly feature requests I have put in so I don't forget them. If you have any bug reports or good ideas, please include them in the tracker.
91 | 
92 | ## Issues
93 | 
94 | If you run into any issues installing or running cmdh, please open a ticket in the project tracker. Include a detailed bug report with stack traces, inputs, and the mode of operation (OpenAI, ollama, or text-generation-webui).
95 | 
--------------------------------------------------------------------------------
/src/configure.ts:
--------------------------------------------------------------------------------
1 | import fs from 'fs'
2 | import inquirer from 'inquirer';
3 | 
4 | export default async function configure(promptArg: string) {
5 |   if (promptArg === 'show') {
6 |     await showConfiguration();
7 |   } else {
8 |     await modify();
9 |   }
10 | }
11 | 
12 | async function showConfiguration() {
13 |   const config = {
14 |     OPENAI_API_KEY: process.env.OPENAI_API_KEY,
15 |     OPENAI_MODEL_NAME: process.env.OPENAI_MODEL_NAME,
16 |     OLLAMA_MODEL_NAME: process.env.OLLAMA_MODEL_NAME,
17 |     CMDH_MODEL_NAME: process.env.CMDH_MODEL_NAME,
18 |     TEXT_GENERATION_WEBUI_MODEL_NAME: process.env.TEXT_GENERATION_WEBUI_MODEL_NAME,
19 |     OLLAMA_HOST: process.env.OLLAMA_HOST,
20 |     CMDH_API_KEY: process.env.CMDH_API_KEY,
21 |     CMDH_API_BASE: process.env.CMDH_API_BASE,
22 |     LLM_HOST: process.env.LLM_HOST,
23 |   };
24 | 
25 |   console.log(`LLM Host: ${config.LLM_HOST}`);
26 | 
27 |   // Display the model name and host-specific settings based on LLM_HOST
28 |   switch (config.LLM_HOST) {
29 |     case 'OpenAI':
30 |       console.log(`Model: ${config.OPENAI_MODEL_NAME}`);
31 |       console.log(`API key: ${config.OPENAI_API_KEY}`);
32 |       break;
33 |     case 'ollama':
34 |       console.log(`Model: ${config.OLLAMA_MODEL_NAME}`);
35 |       console.log(`ollama host URL: ${config.OLLAMA_HOST}`);
36 |       break;
37 |     case 'cmdh':
38 |       console.log(`Model: ${config.CMDH_MODEL_NAME}`);
39 |       console.log(`API key: ${config.CMDH_API_KEY}`);
40 |       console.log(`API base URL: ${config.CMDH_API_BASE}`);
41 |       break;
42 |     case 'text-generation-webui':
43 |       console.log(`Model: ${config.TEXT_GENERATION_WEBUI_MODEL_NAME}`);
44 |       // Additional configuration for text-generation-webui can be displayed here
45 |       break;
46 |     default:
47 |       console.log('Unknown LLM Host.');
48 |   }
49 | }
50 | 
51 | async function modify() {
52 |   // Current configuration, with a distinct model name for each host
53 |   const currentConfig = {
54 |     OPENAI_API_KEY: process.env.OPENAI_API_KEY || '',
55 |     OPENAI_MODEL_NAME: process.env.OPENAI_MODEL_NAME || 'gpt-4', // Default for OpenAI
56 |     OLLAMA_MODEL_NAME: process.env.OLLAMA_MODEL_NAME || 'custom-model', // Assuming a default
57 |     CMDH_MODEL_NAME: process.env.CMDH_MODEL_NAME || 'custom-model', // Assuming a default
58 |     TEXT_GENERATION_WEBUI_MODEL_NAME: process.env.TEXT_GENERATION_WEBUI_MODEL_NAME || 'custom-model', // Assuming a default
59 |     OLLAMA_HOST: process.env.OLLAMA_HOST || 'http://localhost:11434',
60 |     CMDH_API_KEY: process.env.CMDH_API_KEY || '',
61 |     CMDH_API_BASE: process.env.CMDH_API_BASE || 'https://cmdh.ai/',
62 |     TEXT_GENERATION_WEBUI_HOST: process.env.TEXT_GENERATION_WEBUI_HOST || 'http://127.0.0.1',
63 |     TEXT_GENERATION_WEBUI_PORT: process.env.TEXT_GENERATION_WEBUI_PORT || '5000'
64 |   };
65 | 
66 |   const llmHostPrompt = await inquirer.prompt({
67 |     name: 'LLM_HOST',
68 |     type: 'list',
69 |     message: 'Which LLM host do you want to use?',
70 |     choices: ['OpenAI', 'ollama', 'cmdh', 'text-generation-webui'], // Every host wired up in src/api/api.ts, including the hosted cmdh API
71 |   });
72 | 
73 |   const llmHost = llmHostPrompt.LLM_HOST;
74 | 
75 |   async function getQuestions() {
76 |     if (llmHost === 'OpenAI') {
77 |       console.log("Configure the OpenAI API key and model to use.")
78 |       return [{
79 |         name: 'OPENAI_API_KEY',
80 |         type: 'input',
81 |         message: 'Enter your OpenAI API Key:',
82 |         default: currentConfig.OPENAI_API_KEY,
83 |       }, {
84 |         name: 'OPENAI_MODEL_NAME',
85 |         type: 'list',
86 |         message: 'Which model do you want to use?',
87 |         choices: ['gpt-4o', 'gpt-4', 'gpt-3.5-turbo'],
88 |         default: 'gpt-4o',
89 |       }]
90 |     } else if (llmHost === 'ollama') {
91 |       console.log("Configure the ollama URL and model to use.")
92 |       return [{
93 |         name: 'OLLAMA_MODEL_NAME',
94 |         type: 'input',
95 |         message: 'Enter the model name:',
96 |         default: 'llama3',
97 |       }, {
98 |         name: 'OLLAMA_HOST',
99 |         type: 'input',
100 |         message: 'Enter the ollama URL:',
101 |         default: currentConfig.OLLAMA_HOST,
102 |       }]
103 |     } else if (llmHost === 'cmdh') {
104 |       console.log("Configure the cmdh-ai API key and model to use.")
105 |       return [{
106 |         name: 'CMDH_MODEL_NAME',
107 |         type: 'input',
108 |         message: 'Enter the model name:',
109 |         default: 'llama3', // Use the host-specific default
110 |       }, {
111 |         name: 'CMDH_API_KEY',
112 |         type: 'input',
113 |         message: 'Enter your cmdh API key:',
114 |         default: currentConfig.CMDH_API_KEY,
115 |       }];
116 |     } else if (llmHost === 'text-generation-webui') {
117 |       console.log("Configure the text-generation-webui host and port.")
118 |       return [{ // No model name for text-generation-webui, configure host and port only
119 |         name: 'TEXT_GENERATION_WEBUI_HOST',
120 |         type: 'input',
121 |         message: 'Enter the text-generation-webui URL:',
122 |         default: currentConfig.TEXT_GENERATION_WEBUI_HOST,
123 |       }, {
124 |         name: 'TEXT_GENERATION_WEBUI_PORT',
125 |         type: 'input',
126 |         message: 'Enter the text-generation-webui port:',
127 |         default: currentConfig.TEXT_GENERATION_WEBUI_PORT,
128 |       }];
129 |     }
130 |   }
131 | 
132 |   const questions = await getQuestions();
133 | 
134 |   if (!questions) {
135 |     throw new Error('Could not get questions');
136 |   }
137 | 
138 |   const answers = await inquirer.prompt(questions);
139 | 
140 |   // Merge the current config with the answers; each question already uses its host-prefixed variable name
141 |   const combined = { ...currentConfig, ...answers };
142 | 
143 |   // Serialize the full configuration in .env format
144 |   let newConfig = [
145 |     `OPENAI_API_KEY=${combined.OPENAI_API_KEY}`,
146 |     `OPENAI_MODEL_NAME=${combined.OPENAI_MODEL_NAME || ''}`,
147 |     `OLLAMA_MODEL_NAME=${combined.OLLAMA_MODEL_NAME || ''}`,
148 |     `CMDH_MODEL_NAME=${combined.CMDH_MODEL_NAME || ''}`,
149 |     `TEXT_GENERATION_WEBUI_MODEL_NAME=${combined.TEXT_GENERATION_WEBUI_MODEL_NAME || ''}`,
150 |     `OLLAMA_HOST=${combined.OLLAMA_HOST}`,
151 |     `CMDH_API_KEY=${combined.CMDH_API_KEY}`,
152 |     `CMDH_API_BASE=${combined.CMDH_API_BASE}`,
153 |     `TEXT_GENERATION_WEBUI_HOST=${combined.TEXT_GENERATION_WEBUI_HOST}`,
154 |     `TEXT_GENERATION_WEBUI_PORT=${combined.TEXT_GENERATION_WEBUI_PORT}`,
155 |     `LLM_HOST=${llmHost}`
156 |   ].join('\n');
157 | 
158 |   fs.writeFileSync('.env', newConfig);
159 |   console.log('Configuration updated.');
160 | }
--------------------------------------------------------------------------------
/src/cmdh.ts:
--------------------------------------------------------------------------------
1 | import * as fs from 'fs';
2 | import * as path from 'path';
3 | import { promisify } from 'util';
4 | import { startChat } from './api/api.js';
5 | import { getSystemInfo } from './system.js';
6 | import chalk from 'chalk';
7 | import prompt from 'inquirer-interactive-list-prompt';
8 | import inquirer from 'inquirer';
9 | import configure from './configure.js'
10 | 
11 | const readFile = promisify(fs.readFile);
12 | 
13 | import * as dotenv from 'dotenv';
14 | dotenv.config();
15 | 
16 | async function main() {
17 |   // Forward the command line arguments to this function
18 |   const command = process.argv.slice(2);
19 |   await run(command);
20 | }
21 | 
22 | main();
23 | 
24 | async function run(promptArgs: string[]) {
25 |   if (promptArgs.length > 0) {
26 |     if (promptArgs[0] === 'configure') {
27 |       await configure(promptArgs[1]);
28 |       return;
29 |     } else {
30 |       await handlePrompt(promptArgs.join(' ')); // Join the args so unquoted multi-word prompts still work
31 |     }
32 |   } else {
33 |     const executionMode = await inquirer.prompt<{ EXECUTION_MODE: string }>({
34 |       name: 'EXECUTION_MODE',
35 |       type: 'list',
36 |       message: 'Select an action:',
37 |       choices: ['Input a prompt', 'Manage configuration'],
38 |     });
39 | 
40 |     const executionModeSelection = executionMode.EXECUTION_MODE;
41 | 
42 |     if (executionModeSelection === 'Input a prompt') {
43 |       async function questionAsync(message: string) {
44 |         const answers = await inquirer.prompt<{ COMMAND_REQUEST: string }>({
45 |           name: 'COMMAND_REQUEST',
46 |           type: 'input',
47 |           message,
48 |           default: 'Output available hard drive space',
49 |         });
50 | 
51 |         return answers.COMMAND_REQUEST;
52 |       }
53 |       const input = await questionAsync('Enter a prompt to request a command: ');
54 |       await handlePrompt(input);
55 |     } else if (executionModeSelection === 'Manage configuration') {
56 |       const configurationMode = await inquirer.prompt<{ CONFIGURATION_MODE: string }>({
57 |         name: 'CONFIGURATION_MODE',
58 |         type: 'list',
59 |         message: 'Select an action:',
60 |         choices: ['Modify', 'Show'],
61 |       });
62 | 
63 |       const configurationModeSelection = configurationMode.CONFIGURATION_MODE;
64 |       if (configurationModeSelection === 'Modify') {
65 |         await configure('')
66 |       } else if (configurationModeSelection === 'Show') {
67 |         await configure('show')
68 |       }
69 |     }
70 |   }
71 | 
72 |   async function handlePrompt(input: string) {
73 |     if (!process.env.LLM_HOST) {
74 |       console.error("Run `cmdh configure` to set your LLM host.");
75 |       return;
76 |     }
77 | 
78 |     async function getSystemMessage() {
79 |       const dirname = path.dirname(new URL(import.meta.url).pathname)
80 |       const systemMessageTemplate = await readFile(path.resolve(dirname, '../../system.prompt'), 'utf-8');
81 |       const systemInfo = await getSystemInfo();
82 |       if (!systemInfo) {
83 |         throw new Error('Could not retrieve system info');
84 |       }
85 |       const { distro, arch } = systemInfo;
86 |       return systemMessageTemplate.replace('{distro}', distro).replace('{arch}', arch);
87 |     }
88 | 
89 |     const systemMessage = await getSystemMessage();
90 | 
91 |     const response = await startChat(input, systemMessage);
92 |     if (!response) {
93 |       throw new Error('Could not get response');
94 |     }
95 |     const parsedResponse = parseResponse(response);
96 |     if (!parsedResponse) {
97 |       throw new Error('Could not parse response.');
98 |     }
99 |     const { setupCommands, desiredCommand, nonInteractive, safetyLevel, assistantMessage } = parsedResponse;
100 | 
101 |     if (setupCommands.length > 0) {
102 |       console.log(chalk.green('setup commands:'), `[ ${setupCommands.map((command: string) => chalk.blue(command)).join(', ')} ]`);
103 |     }
104 |     console.log(chalk.green('desired command:'), chalk.yellow(desiredCommand));
105 |     console.log(chalk.cyan('assistant message:'), assistantMessage);
106 | 
107 |     if (safetyLevel === 'overwrite') {
108 |       console.log(chalk.red('WARNING: This command will overwrite files on your system.'))
109 |     } else if (safetyLevel === 'delete') {
110 |       console.log(chalk.redBright('WARNING: This command will delete files on your system.'))
111 |     }
112 | 
113 |     await promptAndExecute(setupCommands, desiredCommand, nonInteractive);
114 |   }
115 | }
116 | 
117 | async function promptAndExecute(setupCommands: string[], desiredCommand: string, nonInteractive: boolean) {
118 |   let choice = await getPromptChoice(nonInteractive, setupCommands);
119 | 
120 |   switch (choice.toLowerCase()) {
121 |     case 'all': // Run the setup commands followed by the desired command
122 |       await runAllCommands(setupCommands, desiredCommand);
123 |       break;
124 |     case 'desired':
125 |       await runCommands([desiredCommand]);
126 |       break;
127 |     case 'copy': // Matches the 'copy' choice value defined in getPromptChoice
128 |       await copyCommand(desiredCommand);
129 |       break;
130 |     case 'setup':
131 |       await runCommands(setupCommands);
132 |       break;
133 |     case 'quit':
134 |       console.log('👋');
135 |       break;
136 |     default:
137 |       console.log('No option selected. Exiting program.')
138 |       break;
139 |   }
140 | }
141 | 
142 | async function getPromptChoice(nonInteractive: boolean, setupCommands: string[]): Promise<string> {
143 |   const options = [
144 |     {
145 |       name: 'Run desired command',
146 |       value: 'desired',
147 |       key: 'd',
148 |     },
149 |     {
150 |       name: 'Quit',
151 |       value: 'quit',
152 |       key: 'q',
153 |     },
154 |   ];
155 | 
156 |   const defaultOption = !nonInteractive ? 'copy' : (
157 |     nonInteractive && setupCommands.length > 0 ?
'all' : 'desired' 158 | ); 159 | 160 | if (setupCommands.length > 0) { 161 | options.unshift({ name: 'Run setup commands', value: 'setup', key: 's' }) 162 | options.unshift({ name: 'Run all commands', value: 'all', key: 'a' }); 163 | } 164 | if (!nonInteractive) { 165 | const spliceIndex = options.findIndex(option => option.value === 'desired') + 1 166 | const copyChoice = { name: 'Copy command to clipboard', value: 'copy', key: 'c', }; 167 | options.splice(spliceIndex, 0, copyChoice); 168 | } 169 | 170 | const answer = await prompt({ 171 | message: 'Choose an option:', 172 | choices: options, 173 | default: defaultOption, 174 | }); 175 | 176 | return answer; 177 | } 178 | 179 | import { spawn } from 'child_process'; 180 | import { parseResponse } from './parseResponse.js'; 181 | 182 | async function runAllCommands(setupCommands: string[], desiredCommand: string) { 183 | await runCommands(setupCommands); 184 | await runCommands([desiredCommand]); 185 | } 186 | 187 | async function runCommands(commands: string[]) { 188 | for (const command of commands) { 189 | // Run each command 190 | console.log(`Running: ${command}`); 191 | try { 192 | await runCommandWithSpawn(command); 193 | } catch (error: any) { 194 | console.error(`Error executing command: ${error.message}`); 195 | } 196 | } 197 | } 198 | 199 | function runCommandWithSpawn(command: string) { 200 | return new Promise((resolve, reject) => { 201 | const spawnedCommand = spawn(command, { stdio: 'inherit', shell: true }); 202 | 203 | spawnedCommand.on('error', (error) => { 204 | reject(error); 205 | }); 206 | 207 | spawnedCommand.on('close', (code) => { 208 | if (code === 0) { 209 | resolve(true); 210 | } else { 211 | reject(new Error(`Command exited with code ${code}`)); 212 | } 213 | }); 214 | }); 215 | } 216 | 217 | async function copyCommand(command: string, showImportErrorMessage = false) { 218 | try { 219 | // Dynamically import clipboardy 220 | const clipboardy = (await import('clipboardy')).default; 221 | 222 | // If import succeeds, use clipboardy to copy the command 223 | clipboardy.writeSync(command); 224 | console.log('Command copied to clipboard'); 225 | } catch (error) { 226 | // Fallback to logging the command if import or execution fails 227 | if (showImportErrorMessage) { 228 | console.log('Failed to copy command using clipboardy. Please manually copy the following command:'); 229 | } 230 | console.log(command); 231 | } 232 | } 233 | 234 | --------------------------------------------------------------------------------