├── ai-terminal ├── .npmrc ├── src-tauri │ ├── src │ │ ├── command │ │ │ ├── git_commands │ │ │ │ ├── mod.rs │ │ │ │ └── git.rs │ │ │ ├── autocomplete │ │ │ │ ├── mod.rs │ │ │ │ └── autocomplete_command.rs │ │ │ ├── types │ │ │ │ ├── mod.rs │ │ │ │ ├── command_state.rs │ │ │ │ └── command_manager.rs │ │ │ ├── core │ │ │ │ ├── mod.rs │ │ │ │ ├── terminate_command.rs │ │ │ │ └── execute_command.rs │ │ │ └── mod.rs │ │ ├── ollama │ │ │ ├── model_request │ │ │ │ ├── mod.rs │ │ │ │ └── request.rs │ │ │ ├── mod.rs │ │ │ └── types │ │ │ │ ├── mod.rs │ │ │ │ ├── ollama_state.rs │ │ │ │ ├── ollama_response.rs │ │ │ │ ├── ollama_request.rs │ │ │ │ ├── ollama_model_list.rs │ │ │ │ └── ollama_model.rs │ │ ├── lib.rs │ │ ├── utils │ │ │ ├── mod.rs │ │ │ ├── operating_system_utils.rs │ │ │ ├── file_system_utils.rs │ │ │ └── command.rs │ │ └── main.rs │ ├── build.rs │ ├── icons │ │ ├── 32x32.png │ │ ├── icon.icns │ │ ├── icon.ico │ │ ├── icon.png │ │ ├── 128x128.png │ │ ├── 128x128@2x.png │ │ ├── StoreLogo.png │ │ ├── Square30x30Logo.png │ │ ├── Square44x44Logo.png │ │ ├── Square71x71Logo.png │ │ ├── Square89x89Logo.png │ │ ├── Square107x107Logo.png │ │ ├── Square142x142Logo.png │ │ ├── Square150x150Logo.png │ │ ├── Square284x284Logo.png │ │ └── Square310x310Logo.png │ ├── .gitignore │ ├── capabilities │ │ └── default.json │ ├── entitlements.plist │ ├── Cargo.toml │ └── tauri.conf.json ├── .DS_Store ├── src │ ├── app │ │ ├── app.routes.ts │ │ ├── app.config.ts │ │ ├── app.component.html │ │ └── app.component.css │ ├── types │ │ └── tauri.d.ts │ ├── main.ts │ ├── index.html │ ├── styles.css │ └── assets │ │ ├── angular.svg │ │ └── tauri.svg ├── ai-terminal_0.2.0_amd64.deb ├── proxy.conf.json ├── .idea │ ├── vcs.xml │ ├── .gitignore │ ├── modules.xml │ └── ai-terminal.iml ├── tsconfig.app.json ├── tsconfig.json ├── package.json ├── angular.json ├── README.md ├── setup-homebrew-tap.sh └── build-macos.sh ├── .github ├── workflows │ ├── CODEOWNERS │ └── release.yml └── ISSUE_TEMPLATE 
│ └── bug_report.md ├── .DS_Store ├── demo.gif ├── .gitattributes ├── requirements.txt ├── FineTuned ├── README.md ├── export_for_ollama.py ├── test_model.py └── train.py ├── .gitignore ├── test_session_isolation.md └── README.md /ai-terminal/.npmrc: -------------------------------------------------------------------------------- 1 | registry=https://registry.npmjs.org/ -------------------------------------------------------------------------------- /.github/workflows/CODEOWNERS: -------------------------------------------------------------------------------- 1 | 2 | * @MicheleVerriello @Hitomamacs 3 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/git_commands/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod git; 2 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/ollama/model_request/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod request; 2 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | tauri_build::build() 3 | } 4 | -------------------------------------------------------------------------------- /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/.DS_Store -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/autocomplete/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod autocomplete_command; 2 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/ollama/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub mod model_request; 2 | pub mod types; 3 | -------------------------------------------------------------------------------- /demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/demo.gif -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | WebSite/ai-terminal[[:space:]]demo.mov filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod command; 2 | pub mod ollama; 3 | pub mod utils; 4 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/types/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod command_manager; 2 | pub mod command_state; 3 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/core/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod execute_command; 2 | pub mod terminate_command; 3 | -------------------------------------------------------------------------------- /ai-terminal/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/.DS_Store -------------------------------------------------------------------------------- /ai-terminal/src/app/app.routes.ts: -------------------------------------------------------------------------------- 1 | import { Routes } from "@angular/router"; 2 | 3 | export const 
routes: Routes = []; 4 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod command; 2 | pub mod file_system_utils; 3 | pub mod operating_system_utils; 4 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod autocomplete; 2 | pub mod core; 3 | pub mod git_commands; 4 | pub mod types; 5 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/32x32.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/icon.icns: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/icon.icns -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/icon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/icon.ico -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/icon.png -------------------------------------------------------------------------------- /ai-terminal/ai-terminal_0.2.0_amd64.deb: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/ai-terminal_0.2.0_amd64.deb -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/128x128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/128x128.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/128x128@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/128x128@2x.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/StoreLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/StoreLogo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square30x30Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square30x30Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square44x44Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square44x44Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square71x71Logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square71x71Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square89x89Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square89x89Logo.png -------------------------------------------------------------------------------- /ai-terminal/src/types/tauri.d.ts: -------------------------------------------------------------------------------- 1 | declare module '@tauri-apps/api' { 2 | export function invoke(cmd: string, args?: Record): Promise; 3 | } -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square107x107Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square107x107Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square142x142Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square142x142Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square150x150Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square150x150Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square284x284Logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square284x284Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square310x310Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/HEAD/ai-terminal/src-tauri/icons/Square310x310Logo.png -------------------------------------------------------------------------------- /ai-terminal/proxy.conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "/api": { 3 | "target": "http://localhost:11434", 4 | "secure": false, 5 | "changeOrigin": true, 6 | "logLevel": "debug" 7 | } 8 | } -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/ollama/types/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod ollama_model; 2 | pub mod ollama_model_list; 3 | pub mod ollama_request; 4 | pub mod ollama_response; 5 | pub mod ollama_state; 6 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/ollama/types/ollama_state.rs: -------------------------------------------------------------------------------- 1 | // Add Ollama state management 2 | pub struct OllamaState { 3 | pub current_model: String, 4 | pub api_host: String, 5 | } 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=2.0.0 2 | transformers>=4.30.0 3 | datasets>=2.12.0 4 | peft>=0.4.0 5 | accelerate>=0.20.0 6 | tensorboard>=2.12.0 7 | huggingface-hub>=0.16.0 8 | bitsandbytes>=0.40.0 
-------------------------------------------------------------------------------- /ai-terminal/.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Generated by Tauri 6 | # will have schema files for capabilities auto-completion 7 | /gen/schemas 8 | -------------------------------------------------------------------------------- /ai-terminal/.idea/.gitignore: -------------------------------------------------------------------------------- 1 | # Default ignored files 2 | /shelf/ 3 | /workspace.xml 4 | # Editor-based HTTP Client requests 5 | /httpRequests/ 6 | # Datasource local storage ignored files 7 | /dataSources/ 8 | /dataSources.local.xml 9 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/ollama/types/ollama_response.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Serialize, Deserialize)] 4 | pub struct OllamaResponse { 5 | model: String, 6 | pub response: String, 7 | done: bool, 8 | } 9 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/ollama/types/ollama_request.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Serialize, Deserialize)] 4 | pub struct OllamaRequest { 5 | pub model: String, 6 | pub prompt: String, 7 | pub stream: bool, 8 | } 9 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/ollama/types/ollama_model_list.rs: 
-------------------------------------------------------------------------------- 1 | use crate::ollama::types::ollama_model::OllamaModel; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | #[derive(Debug, Serialize, Deserialize)] 5 | pub struct OllamaModelList { 6 | pub models: Vec, 7 | } 8 | -------------------------------------------------------------------------------- /ai-terminal/src/app/app.config.ts: -------------------------------------------------------------------------------- 1 | import { ApplicationConfig } from "@angular/core"; 2 | import { provideRouter } from "@angular/router"; 3 | 4 | import { routes } from "./app.routes"; 5 | 6 | export const appConfig: ApplicationConfig = { 7 | providers: [provideRouter(routes)], 8 | }; 9 | -------------------------------------------------------------------------------- /ai-terminal/src/main.ts: -------------------------------------------------------------------------------- 1 | import { bootstrapApplication } from "@angular/platform-browser"; 2 | import { appConfig } from "./app/app.config"; 3 | import { AppComponent } from "./app/app.component"; 4 | 5 | bootstrapApplication(AppComponent, appConfig).catch((err) => 6 | console.error(err), 7 | ); 8 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/ollama/types/ollama_model.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Serialize, Deserialize)] 4 | pub struct OllamaModel { 5 | pub name: String, 6 | pub size: u64, 7 | modified_at: String, 8 | // Add other fields as needed 9 | } 10 | -------------------------------------------------------------------------------- /ai-terminal/tsconfig.app.json: -------------------------------------------------------------------------------- 1 | /* To learn more about this file see: https://angular.io/config/tsconfig. 
*/ 2 | { 3 | "extends": "./tsconfig.json", 4 | "compilerOptions": { 5 | "outDir": "./out-tsc/app", 6 | "types": [] 7 | }, 8 | "files": ["src/main.ts"], 9 | "include": ["src/**/*.d.ts"] 10 | } 11 | -------------------------------------------------------------------------------- /ai-terminal/.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/capabilities/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "../gen/schemas/desktop-schema.json", 3 | "identifier": "default", 4 | "description": "Capability for the main window", 5 | "windows": ["main"], 6 | "permissions": [ 7 | "core:default", 8 | "opener:default", 9 | { 10 | "identifier": "shell:allow-execute", 11 | "allow": [ 12 | { 13 | "name": "exec-any", 14 | "cmd": "*", 15 | "args": ["*"], 16 | "sidecar": false 17 | } 18 | ] 19 | } 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/types/command_state.rs: -------------------------------------------------------------------------------- 1 | use std::process::Child; 2 | use std::sync::{Arc, Mutex}; 3 | 4 | // Store the current working directory for each command 5 | #[derive(Clone)] 6 | pub struct CommandState { 7 | pub current_dir: String, 8 | pub child_wait_handle: Option>>, // For wait() and kill() 9 | pub child_stdin: Option>>, // For writing 10 | pub pid: Option, 11 | pub is_ssh_session_active: bool, // Added for persistent SSH 12 | pub remote_current_dir: Option, // New field for remote SSH path 13 | } 14 | -------------------------------------------------------------------------------- /ai-terminal/.idea/ai-terminal.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 
10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/entitlements.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | com.apple.security.cs.allow-jit 6 | 7 | com.apple.security.cs.allow-unsigned-executable-memory 8 | 9 | com.apple.security.cs.disable-library-validation 10 | 11 | com.apple.security.automation.apple-events 12 | 13 | com.apple.security.get-task-allow 14 | 15 | com.apple.security.inherit 16 | 17 | 18 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS Version [Ex: macOS 12.x] 28 | - Architecture [Ex: intel] 29 | - Ai terminal version [Ex: 0.9] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 
33 | -------------------------------------------------------------------------------- /ai-terminal/src/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Tauri + Angular 6 | 7 | 8 | 9 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /ai-terminal/src/styles.css: -------------------------------------------------------------------------------- 1 | /* Global styles */ 2 | html, body { 3 | margin: 0; 4 | padding: 0; 5 | height: 100%; 6 | width: 100%; 7 | overflow: hidden; 8 | } 9 | 10 | /* Make sure app-root doesn't overflow */ 11 | app-root { 12 | display: block; 13 | height: 100%; 14 | width: 100%; 15 | overflow: hidden; 16 | } 17 | 18 | /* Prevent horizontal scrollbars in all panels */ 19 | .panel, .panel-content, .output-area, .command-history, .command-entry, .command-output { 20 | max-width: 100%; 21 | overflow-x: hidden; 22 | } 23 | 24 | /* Ensure all pre-wrapped text properly wraps */ 25 | pre, code, .command-text, .command-output div { 26 | white-space: pre-wrap !important; 27 | word-wrap: break-word !important; 28 | word-break: break-word !important; 29 | max-width: 100%; 30 | } 31 | 32 | /* Import component styles */ 33 | @import './app/app.component.css'; -------------------------------------------------------------------------------- /ai-terminal/src/assets/angular.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 10 | 11 | 12 | 13 | 15 | 16 | -------------------------------------------------------------------------------- /ai-terminal/tsconfig.json: -------------------------------------------------------------------------------- 1 | /* To learn more about this file see: https://angular.io/config/tsconfig. 
*/ 2 | { 3 | "compileOnSave": false, 4 | "compilerOptions": { 5 | "outDir": "./dist/out-tsc", 6 | "forceConsistentCasingInFileNames": true, 7 | "strict": true, 8 | "noImplicitOverride": true, 9 | "noPropertyAccessFromIndexSignature": true, 10 | "noImplicitReturns": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "skipLibCheck": true, 13 | "esModuleInterop": true, 14 | "sourceMap": true, 15 | "declaration": false, 16 | "experimentalDecorators": true, 17 | "moduleResolution": "bundler", 18 | "importHelpers": true, 19 | "target": "ES2022", 20 | "module": "ES2022", 21 | "useDefineForClassFields": false, 22 | "lib": ["ES2022", "dom"], 23 | "typeRoots": ["./node_modules/@types", "./src/types"] 24 | }, 25 | "angularCompilerOptions": { 26 | "enableI18nLegacyMessageIdFormat": false, 27 | "strictInjectionParameters": true, 28 | "strictInputAccessModifiers": true, 29 | "strictTemplates": true 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/utils/operating_system_utils.rs: -------------------------------------------------------------------------------- 1 | use crate::command::types::command_manager::CommandManager; 2 | use tauri::State; 3 | 4 | // Add a helper function to get the OS information 5 | pub fn get_operating_system() -> String { 6 | #[cfg(target_os = "windows")] 7 | return "Windows".to_string(); 8 | 9 | #[cfg(target_os = "macos")] 10 | return "macOS".to_string(); 11 | 12 | #[cfg(target_os = "linux")] 13 | return "Linux".to_string(); 14 | } 15 | 16 | #[tauri::command] 17 | pub fn get_system_environment_variables() -> Result, String> { 18 | let env_vars: Vec<(String, String)> = std::env::vars().collect(); 19 | Ok(env_vars) 20 | } 21 | 22 | #[tauri::command] 23 | pub fn get_current_pid( 24 | session_id: String, 25 | command_manager: State<'_, CommandManager>, 26 | ) -> Result { 27 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 28 | let key = session_id; 29 | 
30 | if let Some(state) = states.get(&key) { 31 | Ok(state.pid.unwrap_or(0)) 32 | } else { 33 | Ok(0) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ai-terminal" 3 | version = "1.0.6" 4 | description = "Your AI Mate Inside Your Favourite Terminal" 5 | authors = ["Michele Verriello", "Marco De Vellis"] 6 | edition = "2021" 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [lib] 11 | # The `_lib` suffix may seem redundant but it is necessary 12 | # to make the lib name unique and wouldn't conflict with the bin name. 13 | # This seems to be only an issue on Windows, see https://github.com/rust-lang/cargo/issues/8519 14 | name = "ai_terminal_lib" 15 | crate-type = ["staticlib", "cdylib", "rlib"] 16 | 17 | [build-dependencies] 18 | tauri-build = { version = "2", features = [] } 19 | 20 | [dependencies] 21 | tauri = { version = "2.4.0", features = ["macos-private-api"] } 22 | tauri-plugin-opener = "2" 23 | serde = { version = "1", features = ["derive"] } 24 | dirs = "6.0.0" 25 | reqwest = { version = "0.12.15", features = ["json"] } 26 | nix = { version = "0.30", features = ["signal"] } 27 | tauri-plugin-shell = "2" 28 | fix-path-env = { git = "https://github.com/tauri-apps/fix-path-env-rs" } 29 | serde_json = "1.0" 30 | -------------------------------------------------------------------------------- /ai-terminal/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ai-terminal", 3 | "version": "1.0.6", 4 | "scripts": { 5 | "ng": "ng", 6 | "start": "ng serve --proxy-config proxy.conf.json", 7 | "build": "ng build", 8 | "watch": "ng build --watch --configuration development", 9 | "tauri": "tauri", 10 | "dev": "tauri dev" 11 | }, 12 | "private": true, 13 | 
"dependencies": { 14 | "@angular/animations": "^19.2.13", 15 | "@angular/common": "^19.2.13", 16 | "@angular/compiler": "^19.2.13", 17 | "@angular/core": "^19.2.13", 18 | "@angular/forms": "^19.2.13", 19 | "@angular/platform-browser": "^19.2.13", 20 | "@angular/platform-browser-dynamic": "^19.2.13", 21 | "@angular/router": "^19.2.13", 22 | "@tauri-apps/api": "^2.4.0", 23 | "@tauri-apps/plugin-opener": "^2", 24 | "@tauri-apps/plugin-shell": "^2.2.1", 25 | "rxjs": "~7.8.0", 26 | "tslib": "^2.3.0", 27 | "zone.js": "~0.15.0" 28 | }, 29 | "devDependencies": { 30 | "@angular/build": "^19.2.13", 31 | "@angular/cli": "^19.2.13", 32 | "@angular/compiler-cli": "^19.2.13", 33 | "@npmcli/package-json": "^6.1.1", 34 | "@tauri-apps/cli": "^2", 35 | "@types/jasmine": "~5.1.0", 36 | "glob": "^11.0.1", 37 | "jasmine-core": "~5.1.0", 38 | "karma": "~6.4.0", 39 | "karma-chrome-launcher": "~3.2.0", 40 | "karma-coverage": "~2.2.0", 41 | "karma-jasmine": "~5.1.0", 42 | "karma-jasmine-html-reporter": "~2.1.0", 43 | "rimraf": "^6.0.1", 44 | "typescript": "~5.8.2" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /FineTuned/README.md: -------------------------------------------------------------------------------- 1 | # Fine-Tuning Guide for TinyLlama with nl2bash 2 | 3 | This guide explains how to fine-tune the **TinyLlama/TinyLlama-1.1B-Chat-v1.0** model using the **nl2bash** dataset. The dataset includes over 20,000 examples split into two files—one containing bash command descriptions and the other containing the corresponding commands. 4 | 5 | ## Prerequisites 6 | 7 | - **Python 3.8+** installed. 8 | - Clone this repository and the [llama2.cpp](https://github.com/ggerganov/llama.cpp) repository. 9 | - Install required dependencies (e.g., PyTorch, Transformers). 10 | - Ensure your environment is set up for Hugging Face LoRa format. 11 | 12 | ## Fine-Tuning Steps 13 | 14 | 1. 
**Run Fine-Tuning Script** 15 | Execute `llama2.py` to fine-tune the model using the nl2bash dataset. This script produces updated weights in the LoRa format. 16 | ```bash 17 | python llama2.py 18 | ``` 19 | 20 | 2. **Merge Weights for Ollama** 21 | Run the export process (e.g., via an export script) to merge the base model with the updated weights. This step prepares the model for conversion to the Ollama format. 22 | ```bash 23 | python export_for_ollama.sh 24 | ``` 25 | 26 | 27 | 3. **Convert to GGUF Format** 28 | Convert the merged model into a gguf model compatible with Ollama by running: 29 | ```bash 30 | python ~/llama.cpp/convert_hf_to_gguf.py --outfile ai-terminal/ai-terminal/FineTuned/ollama_model/gguf/opt_1.3b_f16.gguf ai-terminal/ai-terminal/FineTuned/ollama_model/merged_model 31 | ``` 32 | 33 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/types/command_manager.rs: -------------------------------------------------------------------------------- 1 | use crate::command::types::command_state::CommandState; 2 | use crate::ollama::types::ollama_state::OllamaState; 3 | use std::collections::HashMap; 4 | use std::env; 5 | use std::sync::Mutex; 6 | 7 | // Structure to handle command output streaming 8 | pub struct CommandManager { 9 | pub commands: Mutex>, 10 | pub ollama: Mutex, 11 | } 12 | 13 | impl CommandManager { 14 | pub fn new() -> Self { 15 | let mut initial_commands = HashMap::new(); 16 | initial_commands.insert( 17 | "default_state".to_string(), 18 | CommandState { 19 | current_dir: env::current_dir() 20 | .unwrap_or_default() 21 | .to_string_lossy() 22 | .to_string(), 23 | child_wait_handle: None, 24 | child_stdin: None, 25 | pid: None, 26 | is_ssh_session_active: false, // Initialize here 27 | remote_current_dir: None, // Initialize new field 28 | }, 29 | ); 30 | CommandManager { 31 | commands: Mutex::new(initial_commands), 32 | ollama: Mutex::new(OllamaState { 33 | 
current_model: "llama3.2:latest".to_string(), // Default model will now be overridden by frontend 34 | api_host: "http://localhost:11434".to_string(), // Default Ollama host 35 | }), 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # These are backup files generated by rustfmt 7 | **/*.rs.bk 8 | 9 | # MSVC Windows builds of rustc generate these, which store debugging information 10 | *.pdb 11 | 12 | # RustRover 13 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 14 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 15 | # and can be added to the global gitignore or merged into this file. For a more nuclear 16 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
17 | #.idea/ 18 | 19 | # Python bytecode 20 | __pycache__/ 21 | *.py[cod] 22 | *$py.class 23 | *.so 24 | .Python 25 | 26 | # Distribution / packaging 27 | dist/ 28 | build/ 29 | *.egg-info/ 30 | 31 | # Virtual environments 32 | venv/ 33 | env/ 34 | ENV/ 35 | .env/ 36 | 37 | # Training outputs 38 | results/ 39 | logs/ 40 | fine_tuned_model/ 41 | merged_opt_1.3b/ 42 | model_cache/ 43 | ollama_model/ 44 | exported_model/ 45 | llama2-7b-finetuned/ 46 | 47 | # Dataset files 48 | *.nl 49 | *.cm 50 | 51 | # Jupyter Notebook 52 | .ipynb_checkpoints 53 | 54 | # IDE specific files 55 | .idea/ 56 | .vscode/ 57 | *.swp 58 | *.swo 59 | 60 | # OS specific 61 | .DS_Store 62 | Thumbs.db 63 | 64 | # Large model files 65 | *.bin 66 | *.pt 67 | *.pth 68 | *.ckpt 69 | *.safetensors 70 | 71 | # Logs 72 | *.log 73 | .DS_Store 74 | /ai-terminal/.angular 75 | /ai-terminal/node_modules 76 | .DS_Store 77 | ai-terminal/.DS_Store 78 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/core/terminate_command.rs: -------------------------------------------------------------------------------- 1 | use crate::command::types::command_manager::CommandManager; 2 | use tauri::State; 3 | 4 | #[tauri::command] 5 | pub fn terminate_command( 6 | session_id: String, 7 | command_manager: State<'_, CommandManager>, 8 | ) -> Result<(), String> { 9 | let mut states = command_manager.commands.lock().map_err(|e| e.to_string())?; 10 | let key = session_id; 11 | 12 | let pid = if let Some(state) = states.get(&key) { 13 | state.pid.unwrap_or(0) 14 | } else { 15 | return Err("No active process found".to_string()); 16 | }; 17 | 18 | if pid == 0 { 19 | return Err("No active process to terminate".to_string()); 20 | } 21 | 22 | #[cfg(unix)] 23 | { 24 | use nix::sys::signal::{kill, Signal}; 25 | use nix::unistd::Pid; 26 | 27 | // Try to send SIGTERM first 28 | if let Err(err) = kill(Pid::from_raw(pid as i32), Signal::SIGTERM) { 29 | return 
Err(format!("Failed to send SIGTERM: {}", err)); 30 | } 31 | 32 | // Give the process a moment to terminate gracefully 33 | std::thread::sleep(std::time::Duration::from_millis(100)); 34 | 35 | // If it's still running, force kill with SIGKILL 36 | if let Err(err) = kill(Pid::from_raw(pid as i32), Signal::SIGKILL) { 37 | return Err(format!("Failed to send SIGKILL: {}", err)); 38 | } 39 | } 40 | 41 | // Clear the PID after successful termination 42 | if let Some(state) = states.get_mut(&key) { 43 | state.pid = None; 44 | } 45 | 46 | Ok(()) 47 | } 48 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/tauri.conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.tauri.app/config/2", 3 | "productName": "ai-terminal", 4 | "version": "1.0.6", 5 | "identifier": "com.ai-terminal.dev", 6 | "build": { 7 | "beforeDevCommand": "npm run start", 8 | "devUrl": "http://localhost:1420", 9 | "beforeBuildCommand": "npm run build", 10 | "frontendDist": "../dist/ai-terminal/browser" 11 | }, 12 | "app": { 13 | "windows": [ 14 | { 15 | "title": "AI Terminal", 16 | "width": 1024, 17 | "height": 768, 18 | "minWidth": 800, 19 | "minHeight": 600, 20 | "center": true, 21 | "fullscreen": false, 22 | "resizable": true 23 | } 24 | ], 25 | "security": { 26 | "csp": null 27 | }, 28 | "macOSPrivateApi": true 29 | }, 30 | "bundle": { 31 | "active": true, 32 | "targets": ["dmg", "app", "deb"], 33 | "publisher": "AI Terminal Foundation", 34 | "copyright": "© 2025 AI Terminal Foundation", 35 | "category": "DeveloperTool", 36 | "shortDescription": "AI-powered terminal assistant", 37 | "longDescription": "AI-powered terminal assistant with natural language support", 38 | "icon": [ 39 | "icons/32x32.png", 40 | "icons/128x128.png", 41 | "icons/128x128@2x.png", 42 | "icons/icon.icns", 43 | "icons/icon.ico" 44 | ], 45 | "macOS": { 46 | "frameworks": [], 47 | "minimumSystemVersion": 
"10.15", 48 | "exceptionDomain": "", 49 | "signingIdentity": null, 50 | "entitlements": null, 51 | "providerShortName": null 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate fix_path_env; 2 | 3 | use ai_terminal_lib::command::types::command_manager::CommandManager; 4 | use ai_terminal_lib::{command, ollama, utils}; 5 | use std::env; 6 | 7 | fn main() { 8 | let _ = fix_path_env::fix(); 9 | 10 | let command_manager = CommandManager::new(); 11 | 12 | tauri::Builder::default() 13 | .plugin(tauri_plugin_shell::init()) 14 | .setup(|_app| { 15 | // Add any setup logic here 16 | Ok(()) 17 | }) 18 | .manage(command_manager) 19 | .plugin(tauri_plugin_opener::init()) 20 | .invoke_handler(tauri::generate_handler![ 21 | command::core::execute_command::execute_command, 22 | command::core::execute_command::execute_sudo_command, 23 | command::core::terminate_command::terminate_command, 24 | utils::operating_system_utils::get_current_pid, 25 | command::autocomplete::autocomplete_command::autocomplete, 26 | utils::file_system_utils::get_working_directory, 27 | utils::file_system_utils::get_home_directory, 28 | ollama::model_request::request::ask_ai, 29 | ollama::model_request::request::get_models, 30 | ollama::model_request::request::switch_model, 31 | ollama::model_request::request::get_host, 32 | ollama::model_request::request::set_host, 33 | command::git_commands::git::get_git_branch, 34 | command::git_commands::git::get_git_branches, 35 | command::git_commands::git::switch_branch, 36 | utils::operating_system_utils::get_system_environment_variables, 37 | command::git_commands::git::git_fetch_and_pull, 38 | command::git_commands::git::git_commit_and_push, 39 | command::git_commands::git::get_github_remote_and_branch, 40 | ]) 41 | .run(tauri::generate_context!()) 42 | .expect("error while 
running tauri application"); 43 | } 44 | -------------------------------------------------------------------------------- /test_session_isolation.md: -------------------------------------------------------------------------------- 1 | # Terminal Session Isolation Test 2 | 3 | ## Overview 4 | This document outlines how to test the multiple terminal sessions feature with independent working directories. 5 | 6 | ## Test Steps 7 | 8 | 1. **Launch the Application** 9 | - The application should start with one terminal tab named "Terminal 1" 10 | - The current working directory should be displayed in the prompt 11 | 12 | 2. **Create Multiple Sessions** 13 | - Click the "+" button to create a new terminal tab 14 | - You should now have "Terminal 1" and "Terminal 2" 15 | 16 | 3. **Test Directory Independence** 17 | - In Terminal 1: 18 | ```bash 19 | cd /tmp 20 | pwd 21 | ``` 22 | This should show `/tmp` 23 | 24 | - Switch to Terminal 2 (click on the tab) 25 | - In Terminal 2: 26 | ```bash 27 | pwd 28 | ``` 29 | This should show the original directory (likely your home directory), NOT `/tmp` 30 | 31 | - In Terminal 2: 32 | ```bash 33 | cd /var 34 | pwd 35 | ``` 36 | This should show `/var` 37 | 38 | - Switch back to Terminal 1 39 | - In Terminal 1: 40 | ```bash 41 | pwd 42 | ``` 43 | This should still show `/tmp`, proving the sessions are isolated 44 | 45 | 4. 
**Test Session Persistence** 46 | - Create multiple directories in different sessions 47 | - Switch between tabs multiple times 48 | - Each session should maintain its own working directory 49 | 50 | ## Expected Results 51 | - Each terminal tab maintains its own independent working directory 52 | - Changing directory in one tab does not affect other tabs 53 | - Session state is preserved when switching between tabs 54 | - Git branch information is session-specific 55 | - SSH sessions are isolated per tab 56 | 57 | ## Success Criteria 58 | ✅ Multiple terminal tabs can be created 59 | ✅ Each tab has an independent working directory 60 | ✅ Directory changes in one tab don't affect others 61 | ✅ Session switching preserves state 62 | ✅ UI properly shows active tab and allows tab management 63 | -------------------------------------------------------------------------------- /ai-terminal/angular.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "./node_modules/@angular/cli/lib/config/schema.json", 3 | "version": 1, 4 | "newProjectRoot": "projects", 5 | "cli": { 6 | "analytics": false 7 | }, 8 | "projects": { 9 | "ai-terminal": { 10 | "projectType": "application", 11 | "root": "", 12 | "sourceRoot": "src", 13 | "prefix": "app", 14 | "architect": { 15 | "build": { 16 | "builder": "@angular/build:application", 17 | "options": { 18 | "outputPath": "dist/ai-terminal", 19 | "index": "src/index.html", 20 | "browser": "src/main.ts", 21 | "polyfills": ["zone.js"], 22 | "tsConfig": "tsconfig.app.json", 23 | "assets": ["src/assets"] 24 | }, 25 | "configurations": { 26 | "production": { 27 | "budgets": [ 28 | { 29 | "type": "initial", 30 | "maximumWarning": "500kb", 31 | "maximumError": "1mb" 32 | }, 33 | { 34 | "type": "anyComponentStyle", 35 | "maximumWarning": "300kb", 36 | "maximumError": "1mb" 37 | } 38 | ], 39 | "outputHashing": "all" 40 | }, 41 | "development": { 42 | "optimization": false, 43 | "extractLicenses": 
false, 44 | "sourceMap": true 45 | } 46 | }, 47 | "defaultConfiguration": "production" 48 | }, 49 | "serve": { 50 | "builder": "@angular/build:dev-server", 51 | "options": { 52 | "port": 1420 53 | }, 54 | "configurations": { 55 | "production": { 56 | "buildTarget": "ai-terminal:build:production" 57 | }, 58 | "development": { 59 | "buildTarget": "ai-terminal:build:development" 60 | } 61 | }, 62 | "defaultConfiguration": "development" 63 | } 64 | } 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /ai-terminal/README.md: -------------------------------------------------------------------------------- 1 | # Tauri + Angular 2 | 3 | This template should help get you started developing with Tauri and Angular. 4 | 5 | ## Recommended IDE Setup 6 | 7 | [VS Code](https://code.visualstudio.com/) + [Tauri](https://marketplace.visualstudio.com/items?itemName=tauri-apps.tauri-vscode) + [rust-analyzer](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer) + [Angular Language Service](https://marketplace.visualstudio.com/items?itemName=Angular.ng-template). 8 | 9 | # AI Terminal 10 | 11 | AI Terminal is a powerful terminal interface with AI capabilities. It allows you to interact with your terminal using natural language commands and provides an integrated AI assistant powered by Ollama. 
12 | 13 | ## Features 14 | 15 | - Natural language command interpretation 16 | - Integrated AI assistant 17 | - Command history and auto-completion 18 | - Cross-platform support (macOS, Windows, Linux) 19 | 20 | ## Installation 21 | 22 | ### macOS (Homebrew) 23 | 24 | You can install AI Terminal using Homebrew: 25 | ```bash 26 | brew tap AiTerminalFoundation/ai-terminal 27 | brew install ai-terminal 28 | ``` 29 | 30 | After installation, you can launch the application from Spotlight or run it from the terminal: 31 | 32 | ```bash 33 | ai-terminal 34 | ``` 35 | 36 | ### Requirements 37 | 38 | - For AI features: [Ollama](https://ollama.ai/) (can be installed with `brew install ollama`) 39 | 40 | ## Building from Source 41 | 42 | ### Prerequisites 43 | 44 | - Node.js 18+ 45 | - Rust and Cargo 46 | - Tauri CLI 47 | 48 | ### macOS Universal Binary 49 | 50 | To build a universal binary for macOS (arm64 + x86_64): 51 | 52 | ```bash 53 | # Install dependencies 54 | npm install 55 | 56 | # Install create-dmg tool for packaging 57 | brew install create-dmg 58 | 59 | # Run the build script 60 | chmod +x build-macos.sh 61 | ./build-macos.sh 62 | ``` 63 | 64 | This will create a universal binary DMG installer at `src-tauri/target/universal-apple-darwin/bundle/dmg/ai-terminal-[version].dmg`. 65 | 66 | ## Contributing 67 | 68 | Contributions are welcome! Please feel free to submit a Pull Request. 69 | 70 | ## License 71 | 72 | AI Terminal is licensed under the MIT License - see the LICENSE file for details. 73 | -------------------------------------------------------------------------------- /ai-terminal/setup-homebrew-tap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit on error 4 | set -e 5 | 6 | echo "🍺 Setting up Homebrew tap repository for ai-terminal..." 7 | 8 | # Check if GitHub CLI is installed 9 | if ! command -v gh &> /dev/null; then 10 | echo "GitHub CLI (gh) is not installed. 
Please install it with: brew install gh" 11 | exit 1 12 | fi 13 | 14 | # Check if logged in to GitHub 15 | if ! gh auth status &> /dev/null; then 16 | echo "Please log in to GitHub with: gh auth login" 17 | exit 1 18 | fi 19 | 20 | # Create the tap repository on GitHub 21 | echo "Creating GitHub repository for Homebrew tap..." 22 | REPO_NAME="ai-terminal" 23 | ORGANIZATION="AiTerminalFoundation" 24 | 25 | # Check if the repo already exists 26 | if gh repo view $ORGANIZATION/$REPO_NAME &> /dev/null; then 27 | echo "Repository $ORGANIZATION/$REPO_NAME already exists. Skipping creation." 28 | else 29 | echo "Creating repository $ORGANIZATION/$REPO_NAME..." 30 | gh repo create $ORGANIZATION/$REPO_NAME --public --description "Homebrew Tap for AI Terminal" || { 31 | echo "Failed to create repository. Please create it manually on GitHub." 32 | exit 1 33 | } 34 | fi 35 | 36 | # Clone the repo 37 | echo "Cloning the tap repository..." 38 | TMP_DIR=$(mktemp -d) 39 | cd $TMP_DIR 40 | gh repo clone $ORGANIZATION/$REPO_NAME || { 41 | echo "Failed to clone repository. Please check if it exists and you have access." 42 | exit 1 43 | } 44 | 45 | cd $REPO_NAME 46 | 47 | # Copy the formula to the repository 48 | echo "Copying formula to the repository..." 49 | cp "$OLDPWD/ai-terminal.rb" ./Formula/ 50 | 51 | # Commit and push changes 52 | echo "Committing and pushing changes..." 53 | git add ./Formula/ai-terminal.rb 54 | git commit -m "Update ai-terminal formula to version $(grep -m1 "version" $OLDPWD/package.json | cut -d '"' -f 4)" 55 | git push 56 | 57 | echo "✅ Homebrew tap repository setup complete!" 
58 | echo "Users can now install ai-terminal with:" 59 | echo " brew tap $ORGANIZATION/ai-terminal" 60 | echo " brew install ai-terminal" 61 | echo "" 62 | echo "To update the formula in the future, run:" 63 | echo " ./build-macos.sh" 64 | echo " ./setup-homebrew-tap.sh" 65 | 66 | # Clean up temporary directory 67 | cd $OLDPWD 68 | rm -rf $TMP_DIR -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI Terminal 2 | 3 | A Tauri + Angular terminal application with integrated AI capabilities. 4 | ![AI Terminal Demo](demo.gif) 5 | ## Features 6 | 7 | - Natural language command interpretation 8 | - Integrated AI assistant 9 | - Command history and auto-completion 10 | - Cross-platform support (macOS, Windows, Linux) 11 | - Modern UI built with Tauri and Angular 12 | 13 | ## Requirements 14 | 15 | - Node.js 18+ 16 | - Rust and Cargo 17 | - For AI features: [Ollama](https://ollama.ai/) (can be installed with `brew install ollama`) 18 | 19 | ## Development Setup 20 | 21 | 1. Clone the repository: 22 | ``` 23 | git clone https://github.com/your-username/ai-terminal.git 24 | cd ai-terminal 25 | ``` 26 | 27 | 2. Install dependencies and run the project: 28 | ``` 29 | cd ai-terminal 30 | npm install 31 | npm run tauri dev 32 | ``` 33 | 34 | ## Installation 35 | 36 | ### macOS (Homebrew) 37 | 38 | You can install AI Terminal using Homebrew: 39 | 40 | ```bash 41 | brew tap AiTerminalFoundation/ai-terminal 42 | brew install --cask ai-terminal 43 | ``` 44 | 45 | After installation, you can launch the application from Spotlight or run it from the terminal: 46 | 47 | ```bash 48 | ai-terminal 49 | ``` 50 | 51 | ## Quick Guide to Using Ollama to Download `macsdeve/BetterBash3` Model 52 | 53 | ### Linux 54 | 55 | 1. **Install Ollama** 56 | 57 | Open your terminal and run: 58 | 59 | ```bash 60 | curl -fsSL https://ollama.com/install.sh | sh 61 | ``` 62 | 63 | 2. 
**Download the Model** 64 | 65 | Run the following command: 66 | 67 | ```bash 68 | ollama pull macsdeve/BetterBash3 69 | ``` 70 | 71 | ### macOS 72 | 73 | 1. **Download Ollama** 74 | 75 | - Visit [Ollama download page](https://ollama.com/download/mac). 76 | - Click **Download for macOS**. 77 | 78 | 2. **Install Ollama** 79 | 80 | - Open the downloaded `.zip` file from your `Downloads` folder. 81 | - Drag the `Ollama.app` into your `Applications` folder. 82 | - Open `Ollama.app` and follow any prompts. 83 | 84 | 3. **Download the Model** 85 | 86 | Open Terminal and execute: 87 | 88 | ```bash 89 | ollama pull macsdeve/BetterBash3 90 | ``` 91 | 92 | ## Contributing 93 | 94 | Contributions are welcome! Please feel free to submit a Pull Request. 95 | 96 | ## License 97 | 98 | [MIT License](LICENSE) 99 | -------------------------------------------------------------------------------- /ai-terminal/src/assets/tauri.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/utils/file_system_utils.rs: -------------------------------------------------------------------------------- 1 | use crate::command::types::command_manager::CommandManager; 2 | use std::env; 3 | use std::process::Command; 4 | use tauri::{command, State}; 5 | 6 | pub fn get_shell_path() -> Option { 7 | // First try to get the user's default shell 8 | let shell = if cfg!(target_os = "windows") { 9 | "cmd" 10 | } else { 11 | // Try to get the user's default shell from /etc/shells or fallback to common shells 12 | let shells = ["/bin/zsh", "/bin/bash", "/bin/sh"]; 13 | for shell in shells.iter() { 14 | if std::path::Path::new(shell).exists() { 15 | return Some(shell.to_string()); 16 | } 17 | } 18 | "sh" // Fallback 19 | }; 20 | 21 | // Try to get PATH using the shell's login mode and sourcing initialization files 22 | let command = if 
shell.contains("zsh") { 23 | "source ~/.zshrc 2>/dev/null || true; source ~/.zshenv 2>/dev/null || true; echo $PATH" 24 | } else if shell.contains("bash") { 25 | "source ~/.bashrc 2>/dev/null || true; source ~/.bash_profile 2>/dev/null || true; echo $PATH" 26 | } else { 27 | "echo $PATH" 28 | }; 29 | 30 | let output = Command::new(shell) 31 | .arg("-l") // Login shell to get proper environment 32 | .arg("-c") 33 | .arg(command) 34 | .output() 35 | .ok()?; 36 | 37 | if output.status.success() { 38 | let path = String::from_utf8_lossy(&output.stdout).trim().to_string(); 39 | if !path.is_empty() { 40 | return Some(path); 41 | } 42 | } 43 | 44 | // If the shell method fails, try to get PATH from the environment 45 | env::var("PATH").ok() 46 | } 47 | 48 | #[command] 49 | pub fn get_working_directory( 50 | session_id: String, 51 | command_manager: State<'_, CommandManager>, 52 | ) -> Result { 53 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 54 | let key = session_id; 55 | 56 | if let Some(state) = states.get(&key) { 57 | if state.is_ssh_session_active { 58 | // Return the stored remote CWD, or a default if not yet known 59 | Ok(state 60 | .remote_current_dir 61 | .clone() 62 | .unwrap_or_else(|| "remote:~".to_string())) 63 | } else { 64 | Ok(state.current_dir.clone()) 65 | } 66 | } else { 67 | // Fallback if the session doesn't exist - create a new default state 68 | Ok(env::current_dir() 69 | .unwrap_or_default() 70 | .to_string_lossy() 71 | .to_string()) 72 | } 73 | } 74 | 75 | #[command] 76 | pub fn get_home_directory() -> Result { 77 | dirs::home_dir() 78 | .map(|path| path.to_string_lossy().to_string()) 79 | .ok_or_else(|| "Could not determine home directory".to_string()) 80 | } 81 | 82 | // Helper function to split a path into directory and file prefix parts 83 | pub fn split_path_prefix(path: &str) -> (&str, &str) { 84 | match path.rfind('/') { 85 | Some(index) => { 86 | let (dir, file) = path.split_at(index + 1); 87 | (dir, file) 88 
| } 89 | None => ("", path), 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /FineTuned/export_for_ollama.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import argparse 4 | from transformers import AutoModelForCausalLM, AutoTokenizer 5 | from peft import PeftModel, PeftConfig 6 | import shutil 7 | import subprocess 8 | 9 | def merge_and_export( 10 | lora_model_path="./llama2-1.1b-finetuned", 11 | output_dir="./ollama_model", 12 | model_name="my-finetuned-model" 13 | ): 14 | """Merge LoRA weights with base model and export for Ollama.""" 15 | print(f"Loading LoRA model from {lora_model_path}...") 16 | 17 | os.makedirs(output_dir, exist_ok=True) 18 | 19 | config = PeftConfig.from_pretrained(lora_model_path) 20 | base_model_name = "meta-llama/Llama-2-7b-chat-hf" 21 | print(f"Base model: {base_model_name}") 22 | 23 | print("Loading base model...") 24 | base_model = AutoModelForCausalLM.from_pretrained( 25 | base_model_name, 26 | torch_dtype=torch.float16, 27 | low_cpu_mem_usage=True, 28 | device_map="cpu" 29 | ) 30 | 31 | print("Loading LoRA adapters...") 32 | model = PeftModel.from_pretrained(base_model, lora_model_path) 33 | 34 | print("Merging weights...") 35 | model = model.merge_and_unload() 36 | 37 | merged_model_path = os.path.join(output_dir, "merged_model") 38 | print(f"Saving merged model to {merged_model_path}...") 39 | model.save_pretrained(merged_model_path) 40 | 41 | tokenizer = AutoTokenizer.from_pretrained(base_model_name) 42 | tokenizer.save_pretrained(merged_model_path) 43 | 44 | model_name_for_file = base_model_name.split('/')[-1] 45 | 46 | modelfile_content = "FROM " + model_name_for_file + "\n" 47 | modelfile_content += "PARAMETER temperature 0.7\n" 48 | modelfile_content += "PARAMETER top_p 0.9\n" 49 | modelfile_content += "PARAMETER stop \"### Instruction:\"\n" 50 | modelfile_content += "PARAMETER stop \"### 
Response:\"\n\n" 51 | modelfile_content += "TEMPLATE \"\"\"\n" 52 | modelfile_content += "### Instruction:\n" 53 | modelfile_content += "{{.Input}}\n\n" 54 | modelfile_content += "### Response:\n" 55 | modelfile_content += "\"\"\"" 56 | 57 | modelfile_path = os.path.join(output_dir, "Modelfile") 58 | with open(modelfile_path, "w") as f: 59 | f.write(modelfile_content) 60 | 61 | print(f"Created Modelfile at {modelfile_path}") 62 | print("\nTo create the Ollama model, run:") 63 | print(f"ollama create {model_name} -f {modelfile_path}") 64 | print(f"\nThen convert the model to GGUF format using export_for_ollama_gguf.py") 65 | print(f"python export_for_ollama_gguf.py --model_dir {merged_model_path} --output_dir {output_dir}/gguf") 66 | print(f"\nFinally, import the GGUF model into Ollama:") 67 | print(f"ollama import {output_dir}/gguf/{model_name}.gguf") 68 | 69 | if __name__ == "__main__": 70 | parser = argparse.ArgumentParser(description="Export fine-tuned model for Ollama") 71 | parser.add_argument("--lora_model_path", type=str, default="./llama2-1.1b-finetuned2", 72 | help="Path to the LoRA model") 73 | parser.add_argument("--output_dir", type=str, default="./ollama_model2", 74 | help="Output directory for the exported model") 75 | parser.add_argument("--model_name", type=str, default="my-finetuned-model", 76 | help="Name for the Ollama model") 77 | 78 | args = parser.parse_args() 79 | 80 | merge_and_export(args.lora_model_path, args.output_dir, args.model_name) -------------------------------------------------------------------------------- /ai-terminal/build-macos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit on error 4 | set -e 5 | 6 | VERSION=$(grep -m1 "version" package.json | cut -d '"' -f 4) 7 | echo "🚀 Building ai-terminal v$VERSION for macOS Universal..." 8 | 9 | # Build frontend 10 | echo "📦 Building frontend..." 
11 | npm run build 12 | 13 | # Build Tauri app for multiple architectures 14 | echo "🔨 Building Universal binary for macOS..." 15 | # Build for Apple Silicon (arm64) 16 | rustup target add aarch64-apple-darwin 17 | # Build for Intel (x86_64) 18 | rustup target add x86_64-apple-darwin 19 | 20 | # Build both architectures 21 | echo "Building for ARM64..." 22 | npm run tauri build -- --target aarch64-apple-darwin 23 | echo "Building for x86_64..." 24 | npm run tauri build -- --target x86_64-apple-darwin 25 | 26 | # Create universal binary 27 | echo "Creating universal binary..." 28 | mkdir -p src-tauri/target/universal-apple-darwin/release 29 | lipo -create \ 30 | src-tauri/target/aarch64-apple-darwin/release/ai-terminal \ 31 | src-tauri/target/x86_64-apple-darwin/release/ai-terminal \ 32 | -output src-tauri/target/universal-apple-darwin/release/ai-terminal 33 | 34 | # Create app bundle with universal binary 35 | echo "Creating universal app bundle..." 36 | APP_PATH="src-tauri/target/universal-apple-darwin/bundle/macos/ai-terminal.app" 37 | mkdir -p "$APP_PATH/Contents/MacOS" 38 | # Copy the universal binary 39 | cp src-tauri/target/universal-apple-darwin/release/ai-terminal "$APP_PATH/Contents/MacOS/" 40 | # Copy app bundle contents from one of the architectures 41 | cp -R src-tauri/target/aarch64-apple-darwin/release/bundle/macos/ai-terminal.app/Contents/Resources "$APP_PATH/Contents/" 42 | cp src-tauri/target/aarch64-apple-darwin/release/bundle/macos/ai-terminal.app/Contents/Info.plist "$APP_PATH/Contents/" 43 | 44 | # Sign the application bundle 45 | echo "🔑 Signing application bundle..." 46 | codesign --force --options runtime --sign "$APPLE_DEVELOPER_ID" \ 47 | --entitlements src-tauri/entitlements.plist \ 48 | "$APP_PATH" --deep --timestamp 49 | 50 | # Create DMG 51 | echo "📦 Creating DMG installer..." 
52 | DMG_PATH="src-tauri/target/universal-apple-darwin/bundle/dmg/ai-terminal-$VERSION.dmg" 53 | mkdir -p "$(dirname "$DMG_PATH")" 54 | 55 | # Check if create-dmg is available 56 | if command -v create-dmg &> /dev/null; then 57 | echo "Using create-dmg for DMG creation..." 58 | create-dmg \ 59 | --volname "ai-terminal" \ 60 | --volicon "src-tauri/icons/icon.icns" \ 61 | --window-pos 200 120 \ 62 | --window-size 800 400 \ 63 | --icon-size 100 \ 64 | --icon "ai-terminal.app" 200 190 \ 65 | --hide-extension "ai-terminal.app" \ 66 | --app-drop-link 600 185 \ 67 | "$DMG_PATH" \ 68 | "$APP_PATH" 69 | else 70 | echo "create-dmg not found, using hdiutil..." 71 | # Create a temporary directory for DMG creation 72 | TMP_DMG_DIR=$(mktemp -d) 73 | cp -R "$APP_PATH" "$TMP_DMG_DIR/" 74 | 75 | # Create a symlink to Applications folder 76 | ln -s /Applications "$TMP_DMG_DIR/Applications" 77 | 78 | # Create the DMG 79 | hdiutil create -volname "ai-terminal" -srcfolder "$TMP_DMG_DIR" -ov -format UDZO "$DMG_PATH" 80 | 81 | # Clean up 82 | rm -rf "$TMP_DMG_DIR" 83 | fi 84 | 85 | # Sign the DMG 86 | echo "🔑 Signing DMG..." 87 | codesign --force --sign "$APPLE_DEVELOPER_ID" "$DMG_PATH" --timestamp 88 | 89 | # Notarize the DMG 90 | echo "📝 Notarizing DMG..." 91 | xcrun notarytool submit "$DMG_PATH" \ 92 | --key "$APPLE_API_KEY" \ 93 | --key-id "$APPLE_API_KEY_ID" \ 94 | --issuer "$APPLE_API_ISSUER" \ 95 | --wait 96 | 97 | # Staple the notarization ticket 98 | echo "📎 Stapling notarization ticket to DMG..." 99 | xcrun stapler staple "$DMG_PATH" 100 | 101 | # Calculate SHA256 for Homebrew 102 | SHA256=$(shasum -a 256 "$DMG_PATH" | awk '{print $1}') 103 | 104 | echo "✅ Build complete! 
DMG is available at: $DMG_PATH" 105 | echo "✅ SHA256: $SHA256" -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/utils/command.rs: -------------------------------------------------------------------------------- 1 | use crate::command::types::command_manager::CommandManager; 2 | use crate::ollama::types::ollama_model_list::OllamaModelList; 3 | use tauri::State; 4 | 5 | // Handle special commands like /help, /models, /model 6 | pub async fn handle_special_command( 7 | command: String, 8 | command_manager: State<'_, CommandManager>, 9 | ) -> Result { 10 | match command.as_str() { 11 | "/help" => Ok("Available commands:\n\ 12 | /help - Show this help message\n\ 13 | /models - List available models\n\ 14 | /model [name] - Show current model or switch to a different model\n\ 15 | /host [url] - Show current API host or set a new one" 16 | .to_string()), 17 | "/models" => { 18 | // Get list of available models from Ollama API 19 | let api_host; 20 | 21 | // Scope the mutex lock to drop it before any async operations 22 | { 23 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 24 | api_host = ollama_state.api_host.clone(); 25 | // MutexGuard is dropped here 26 | } 27 | 28 | let client = reqwest::Client::new(); 29 | let res = client 30 | .get(format!("{}/api/tags", api_host)) 31 | .send() 32 | .await 33 | .map_err(|e| format!("Failed to get models from Ollama API: {}", e))?; 34 | 35 | if !res.status().is_success() { 36 | return Err(format!("Ollama API error: {}", res.status())); 37 | } 38 | 39 | let models: OllamaModelList = res 40 | .json() 41 | .await 42 | .map_err(|e| format!("Failed to parse models list: {}", e))?; 43 | 44 | let mut result = String::from("Available models:\n"); 45 | for model in models.models { 46 | result.push_str(&format!("- {} ({} bytes)\n", model.name, model.size)); 47 | } 48 | Ok(result) 49 | } 50 | cmd if cmd.starts_with("/model") => { 51 | let parts: Vec<&str> 
= cmd.split_whitespace().collect(); 52 | 53 | // Handle showing current model 54 | if parts.len() == 1 { 55 | let current_model; 56 | { 57 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 58 | current_model = ollama_state.current_model.clone(); 59 | } 60 | Ok(format!("Current model: {}", current_model)) 61 | } 62 | // Handle switching model 63 | else if parts.len() >= 2 { 64 | let new_model = parts[1].to_string(); 65 | { 66 | let mut ollama_state = 67 | command_manager.ollama.lock().map_err(|e| e.to_string())?; 68 | ollama_state.current_model = new_model.clone(); 69 | } 70 | Ok(format!("Switched to model: {}", new_model)) 71 | } else { 72 | Err("Invalid model command. Use /model [name] to switch models.".to_string()) 73 | } 74 | } 75 | cmd if cmd.starts_with("/host") => { 76 | let parts: Vec<&str> = cmd.split_whitespace().collect(); 77 | 78 | // Handle showing current host 79 | if parts.len() == 1 { 80 | let current_host; 81 | { 82 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 83 | current_host = ollama_state.api_host.clone(); 84 | } 85 | Ok(format!("Current Ollama API host: {}", current_host)) 86 | } 87 | // Handle changing host 88 | else if parts.len() >= 2 { 89 | let new_host = parts[1].to_string(); 90 | { 91 | let mut ollama_state = 92 | command_manager.ollama.lock().map_err(|e| e.to_string())?; 93 | ollama_state.api_host = new_host.clone(); 94 | } 95 | Ok(format!("Changed Ollama API host to: {}", new_host)) 96 | } else { 97 | Err("Invalid host command. Use /host [url] to change the API host.".to_string()) 98 | } 99 | } 100 | _ => Err(format!( 101 | "Unknown command: {}. 
import os
import torch
import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel, PeftConfig

def load_model(model_path="./llama2-7b-quantized"):
    """Load the fine-tuned PEFT model and its tokenizer.

    NOTE(review): this default path differs from the CLI default
    ("./llama2-7b-finetuned" in __main__) — callers should pass an explicit
    path; confirm which default is intended.

    Returns (model, tokenizer) on success, (None, None) on any failure.
    """
    try:
        # The adapter config records which base model the adapter was trained on.
        config = PeftConfig.from_pretrained(model_path)

        # Load the base model; device_map="auto" spreads it across devices.
        base_model = AutoModelForCausalLM.from_pretrained(
            config.base_model_name_or_path,
            trust_remote_code=True,
            low_cpu_mem_usage=True,
            device_map="auto"
        )

        # Attach the fine-tuned adapter weights on top of the base model.
        model = PeftModel.from_pretrained(base_model, model_path)

        # The tokenizer comes from the base model, not the adapter directory.
        tokenizer = AutoTokenizer.from_pretrained(
            config.base_model_name_or_path,  # Use base model for tokenizer
            use_fast=True
        )

        # Many causal-LM tokenizers ship without a pad token; reuse EOS.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        return model, tokenizer
    except Exception as e:
        print(f"Error loading model: {e}")
        return None, None

def generate_response(model, tokenizer, prompt, max_length=100, temperature=0.7):
    """Generate a response for the given prompt.

    max_length counts *new* tokens only (the prompt is excluded).
    """
    formatted_prompt = f"### Instruction:\n{prompt}\n\n### Response:"

    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)

    # max_new_tokens expresses "up to N tokens beyond the prompt" directly,
    # replacing the old max_length + prompt-length arithmetic (equivalent
    # budget, clearer intent).
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_length,
        temperature=temperature,
        top_p=0.9,
        do_sample=True,
        num_return_sequences=1
    )

    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # The decoded text echoes the prompt; keep only the model's answer.
    if "### Response:" in response:
        response = response.split("### Response:")[1].strip()

    return response

def interactive_mode(model, tokenizer, max_length=100, temperature=0.7):
    """Run an interactive session with the model.

    Generation settings are parameters (with the old defaults) so the CLI
    --max_length/--temperature flags are honored instead of being ignored.
    """
    print("\n=== Interactive Mode ===")
    print("Type 'exit' to quit")

    while True:
        prompt = input("\nEnter your prompt: ")
        if prompt.lower() == 'exit':
            break

        response = generate_response(model, tokenizer, prompt,
                                     max_length, temperature)
        print(f"\nResponse: {response}")

def test_with_examples(model, tokenizer, examples=None, max_length=100, temperature=0.7):
    """Test the model with a list of example prompts.

    Falls back to a built-in set of shell-related prompts when examples is
    None. Generation settings default to the previous hard-coded values.
    """
    if examples is None:
        examples = [
            "How do I list all files in a directory?",
            "How can I find the largest files in a directory?",
            "What's the command to check disk space?",
            "How do I search for text in files?",
            "How to compress a folder in Linux?"
        ]

    print("\n=== Testing with Examples ===")
    for prompt in examples:
        response = generate_response(model, tokenizer, prompt,
                                     max_length, temperature)
        print(f"\nPrompt: {prompt}")
        print(f"Response: {response}")
        print("-" * 50)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Test a fine-tuned language model")
    parser.add_argument("--model_path", type=str, default="./llama2-7b-finetuned",
                        help="Path to the fine-tuned model (default: ./llama2-7b-finetuned)")
    parser.add_argument("--interactive", action="store_true",
                        help="Run in interactive mode")
    parser.add_argument("--examples", action="store_true",
                        help="Test with example prompts")
    parser.add_argument("--prompt", type=str,
                        help="Single prompt to test")
    parser.add_argument("--temperature", type=float, default=0.7,
                        help="Temperature for generation (default: 0.7)")
    parser.add_argument("--max_length", type=int, default=100,
                        help="Maximum length for generation (default: 100)")

    args = parser.parse_args()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    print(f"Loading model from {args.model_path}...")
    model, tokenizer = load_model(args.model_path)

    if model is None or tokenizer is None:
        print("Failed to load model. Exiting.")
        raise SystemExit(1)

    print("Model loaded successfully")

    if args.prompt:
        response = generate_response(model, tokenizer, args.prompt,
                                     args.max_length, args.temperature)
        print(f"\nPrompt: {args.prompt}")
        print(f"Response: {response}")

    elif args.interactive:
        # Forward the CLI generation flags (previously silently ignored).
        interactive_mode(model, tokenizer, args.max_length, args.temperature)

    elif args.examples:
        test_with_examples(model, tokenizer,
                           max_length=args.max_length,
                           temperature=args.temperature)

    else:
        test_with_examples(model, tokenizer,
                           max_length=args.max_length,
                           temperature=args.temperature)
        interactive_mode(model, tokenizer, args.max_length, args.temperature)
// Implement the ask_ai function for Ollama integration.
/// Forward a user question to the Ollama /api/generate endpoint and return
/// the model's reply. Questions starting with '/' are routed to the local
/// special-command handler instead.
#[command]
pub async fn ask_ai(
    question: String,
    model_override: Option<String>,
    command_manager: State<'_, CommandManager>,
) -> Result<String, String> {
    // Check if this is a special command (handled locally, never sent to the LLM).
    if question.starts_with('/') {
        return handle_special_command(question, command_manager).await;
    }

    // Regular message to Ollama.
    let model;
    let api_host;

    // Scope the mutex lock to drop it before any async operations:
    // a std::sync MutexGuard must not be held across an .await point.
    {
        let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?;
        // Use the model_override if provided, otherwise use the default.
        model = model_override.unwrap_or_else(|| ollama_state.current_model.clone());
        api_host = ollama_state.api_host.clone();
        // MutexGuard is dropped here at the end of scope.
    }

    // Get the current operating system so suggested commands match the host.
    let os = get_operating_system();

    // Create a system prompt that includes OS information and formatting instructions.
    let system_prompt = format!(
        "You are a helpful terminal assistant. The user is using a {} operating system. \
        When providing terminal commands, ensure they are compatible with {}. \
        When asked for a command, respond with ONLY the command in this format: ```command```\
        The command should be a single line without any explanation or additional text.",
        os, os
    );

    // Combine the system prompt with the user's question.
    let combined_prompt = format!("{}\n\nUser: {}", system_prompt, question);

    let client = reqwest::Client::new();
    let res = client
        .post(format!("{}/api/generate", api_host))
        .json(&OllamaRequest {
            model,
            prompt: combined_prompt,
            stream: false, // request a single complete response, not a stream
        })
        .send()
        .await
        .map_err(|e| format!("Failed to send request to Ollama API: {}", e))?;

    if !res.status().is_success() {
        return Err(format!("Ollama API error: {}", res.status()));
    }

    let response: OllamaResponse = res
        .json()
        .await
        .map_err(|e| format!("Failed to parse Ollama response: {}", e))?;

    Ok(response.response)
}

// Add function to get models from Ollama API.
/// List the models available on the configured Ollama host, formatted as a
/// human-readable string ("- name (size bytes)" per line).
#[command]
pub async fn get_models(command_manager: State<'_, CommandManager>) -> Result<String, String> {
    // Get the API host from the Ollama state (scoped so the guard drops
    // before the .await below).
    let api_host;
    {
        let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?;
        api_host = ollama_state.api_host.clone();
    }

    // Request the list of models from Ollama.
    let client = reqwest::Client::new();
    let res = client
        .get(format!("{}/api/tags", api_host))
        .send()
        .await
        .map_err(|e| format!("Failed to get models from Ollama API: {}", e))?;

    if !res.status().is_success() {
        return Err(format!("Ollama API error: {}", res.status()));
    }

    // Parse the response.
    let models: OllamaModelList = res
        .json()
        .await
        .map_err(|e| format!("Failed to parse models list: {}", e))?;

    // Format the response.
    let mut result = String::from("Available models:\n");
    for model in models.models {
        result.push_str(&format!("- {} ({} bytes)\n", model.name, model.size));
    }
    Ok(result)
}

// Add function to switch model.
/// Set the model used for subsequent ask_ai calls.
#[command]
pub fn switch_model(
    model: String,
    command_manager: State<'_, CommandManager>,
) -> Result<String, String> {
    let mut ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?;
    ollama_state.current_model = model.clone();
    Ok(format!("Switched to model: {}", model))
}

// Add function to get current API host.
/// Report the Ollama API host currently in use.
#[command]
pub fn get_host(command_manager: State<'_, CommandManager>) -> Result<String, String> {
    let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?;
    Ok(format!(
        "Current Ollama API host: {}",
        ollama_state.api_host
    ))
}

// Add function to set API host.
/// Change the Ollama API host for subsequent requests. No validation is
/// performed here; a bad host surfaces as a request error later.
#[command]
pub fn set_host(
    host: String,
    command_manager: State<'_, CommandManager>,
) -> Result<String, String> {
    let mut ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?;
    ollama_state.api_host = host.clone();
    Ok(format!("Changed Ollama API host to: {}", host))
}
use crate::command::types::command_manager::CommandManager;
use crate::utils::file_system_utils::split_path_prefix;
use std::fs;
use std::path::{Path, PathBuf};
use tauri::{command, State};

/// Suggest completions for the current input line of a terminal session.
///
/// Two completion modes:
/// 1. Command-name completion: when the user is still typing the first word
///    (and it is not "cd"), match against a fixed list of common commands.
/// 2. Path completion: for "cd", for any first token containing '/', or for
///    a second-or-later token, complete file/directory names relative to the
///    session's current directory (with '~' and absolute paths supported).
///
/// Returns an empty Vec when nothing matches; Err only when the session's
/// current directory cannot be determined or the directory cannot be read.
#[command]
pub fn autocomplete(
    input: String,
    session_id: String,
    command_manager: State<'_, CommandManager>,
) -> Result<Vec<String>, String> {
    let states = command_manager.commands.lock().map_err(|e| e.to_string())?;
    let key = session_id;

    // Resolve the session's working directory; relative paths complete
    // against this, not the process CWD.
    let current_dir = if let Some(state) = states.get(&key) {
        &state.current_dir
    } else {
        return Err("Could not determine current directory".to_string());
    };

    let input_parts: Vec<&str> = input.split_whitespace().collect();

    // Autocomplete commands if it's the first word.
    // "cd" is excluded so it falls through to directory completion below.
    if input_parts.len() <= 1 && input_parts.first() != Some(&"cd") {
        // Common shell commands to suggest.
        let common_commands = vec![
            "cd", "ls", "pwd", "mkdir", "touch", "cat", "echo", "grep", "find", "cp", "mv", "rm",
            "tar", "gzip", "ssh", "curl", "wget", "history", "exit", "clear", "top", "ps", "kill",
            "ping",
        ];

        // Filter commands that match input prefix (empty input matches all).
        let input_prefix = input_parts.first().unwrap_or(&"");

        // Case-insensitive filtering for commands.
        let matches: Vec<String> = common_commands
            .iter()
            .filter(|&cmd| cmd.to_lowercase().starts_with(&input_prefix.to_lowercase()))
            .map(|&cmd| cmd.to_string())
            .collect();

        if !matches.is_empty() {
            return Ok(matches);
        }
        // No command matched: fall through and try path completion.
    }

    // Decide which token (if any) should be treated as a path to complete.
    let path_to_complete = if input_parts.first() == Some(&"cd") {
        if input_parts.len() > 1 {
            // Handle cd command with argument.
            input_parts.last().unwrap_or(&"")
        } else {
            // Handle cd with no argument - show all directories in current folder.
            ""
        }
    } else if !input_parts.is_empty() && input_parts[0].contains('/') {
        // Handle path directly (the first token already looks like a path).
        input_parts[0]
    } else if input_parts.len() > 1 {
        // Handle second argument as path for any command.
        input_parts.last().unwrap_or(&"")
    } else {
        // Default to empty string if no path found.
        ""
    };

    // If input starts with cd, or we have a potential path to complete.
    if input_parts.first() == Some(&"cd") || !path_to_complete.is_empty() {
        // Split "dir/part" into the directory to scan and the name prefix.
        let (dir_to_search, prefix) = split_path_prefix(path_to_complete);

        // Create a Path for the directory to search:
        // absolute ('/') and home-relative ('~') paths bypass current_dir.
        let search_path = if dir_to_search.starts_with('/') || dir_to_search.starts_with('~') {
            if dir_to_search.starts_with('~') {
                let home = dirs::home_dir().ok_or("Could not determine home directory")?;
                let without_tilde = dir_to_search.trim_start_matches('~');
                let rel_path = without_tilde.trim_start_matches('/');
                if rel_path.is_empty() {
                    home
                } else {
                    home.join(rel_path)
                }
            } else {
                PathBuf::from(dir_to_search)
            }
        } else {
            Path::new(current_dir).join(dir_to_search)
        };

        if search_path.exists() && search_path.is_dir() {
            let entries = fs::read_dir(search_path).map_err(|e| e.to_string())?;

            let mut matches = Vec::new();
            for entry in entries.flatten() {
                let file_name = entry.file_name();
                let file_name_str = file_name.to_string_lossy();

                // Include all entries for empty prefix, otherwise filter by prefix (case-insensitive).
                if prefix.is_empty()
                    || file_name_str
                        .to_lowercase()
                        .starts_with(&prefix.to_lowercase())
                {
                    let is_dir = entry.file_type().map(|ft| ft.is_dir()).unwrap_or(false);

                    // For the 'cd' command, only show directories.
                    if !input_parts.is_empty() && input_parts[0] == "cd" && !is_dir {
                        continue;
                    }

                    // Add trailing slash for directories.
                    let suggestion = if is_dir {
                        format!("{}/", file_name_str)
                    } else {
                        file_name_str.to_string()
                    };

                    // Construct the full path suggestion for the command,
                    // re-attaching the directory part the user already typed.
                    let base_path = if dir_to_search.is_empty() {
                        "".to_string()
                    } else {
                        format!("{}/", dir_to_search.trim_end_matches('/'))
                    };

                    matches.push(format!("{}{}", base_path, suggestion));
                }
            }

            if !matches.is_empty() {
                // Sort matches alphabetically, case-insensitive.
                matches.sort_by_key(|a| a.to_lowercase());
                return Ok(matches);
            }
        }
    }

    Ok(Vec::new())
}
ai-terminal 47 | chmod +x build-macos.sh 48 | ./build-macos.sh 49 | env: 50 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 51 | APPLE_DEVELOPER_ID: ${{ secrets.APPLE_DEVELOPER_ID }} 52 | APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }} 53 | APPLE_API_KEY_ID: ${{ secrets.APPLE_API_KEY_ID }} 54 | APPLE_API_ISSUER: ${{ secrets.APPLE_API_ISSUER }} 55 | 56 | - name: Upload macOS Artifact 57 | uses: actions/upload-artifact@v4 58 | with: 59 | name: macos-dmg 60 | path: ai-terminal/src-tauri/target/universal-apple-darwin/bundle/dmg/*.dmg 61 | 62 | build-linux: 63 | runs-on: ubuntu-latest 64 | permissions: 65 | contents: write 66 | strategy: 67 | matrix: 68 | architecture: [x86_64] # , aarch64] # Temporarily disabled ARM builds 69 | steps: 70 | - uses: actions/checkout@v4 71 | 72 | - name: Setup Node.js 73 | uses: actions/setup-node@v4 74 | with: 75 | node-version: '20' 76 | 77 | - name: Install Rust stable 78 | uses: dtolnay/rust-toolchain@stable 79 | 80 | - name: Install dependencies 81 | run: | 82 | cd ai-terminal 83 | npm ci 84 | 85 | - name: Install Linux build dependencies 86 | run: | 87 | sudo apt-get update 88 | # Try installing the newer version first, fall back to older version if needed 89 | if ! 
sudo apt-get install -y libwebkit2gtk-4.1-dev; then 90 | sudo apt-get install -y libwebkit2gtk-4.0-dev 91 | fi 92 | sudo apt-get install -y build-essential curl wget libssl-dev libgtk-3-dev libayatana-appindicator3-dev 93 | 94 | - name: Build Linux DEB 95 | run: | 96 | cd ai-terminal 97 | npm run tauri build 98 | env: 99 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 100 | 101 | - name: Upload Linux Artifact 102 | uses: actions/upload-artifact@v4 103 | with: 104 | name: linux-deb 105 | path: ai-terminal/src-tauri/target/release/bundle/deb/*.deb 106 | 107 | release: 108 | needs: [build-macos, build-linux] 109 | runs-on: ubuntu-latest 110 | permissions: 111 | contents: write 112 | steps: 113 | - uses: actions/checkout@v4 114 | 115 | - name: Download macOS Artifact 116 | uses: actions/download-artifact@v4 117 | with: 118 | name: macos-dmg 119 | path: artifacts/macos 120 | 121 | - name: Download Linux Artifact 122 | uses: actions/download-artifact@v4 123 | with: 124 | name: linux-deb 125 | path: artifacts/linux 126 | 127 | - name: Generate tag 128 | run: | 129 | cd ai-terminal 130 | VERSION=$(node -p "require('./package.json').version") 131 | echo "RELEASE_TAG=v${VERSION}" >> $GITHUB_ENV 132 | echo "RELEASE_VERSION=${VERSION}" >> $GITHUB_ENV 133 | 134 | - name: Create and push tag 135 | run: | 136 | git tag ${{ env.RELEASE_TAG }} 137 | git push origin ${{ env.RELEASE_TAG }} 138 | 139 | - name: Create release 140 | uses: softprops/action-gh-release@v1 141 | with: 142 | tag_name: ${{ env.RELEASE_TAG }} 143 | files: | 144 | artifacts/macos/*.dmg 145 | artifacts/linux/*.deb 146 | env: 147 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 148 | 149 | - name: Update Homebrew Formula and Cask 150 | env: 151 | HOMEBREW_TAP_REPO: AiTerminalFoundation/homebrew-ai-terminal 152 | GITHUB_TOKEN: ${{ secrets.HOMEBREW_TOKEN }} 153 | run: | 154 | # Clone the tap repository 155 | git clone https://x-access-token:${GITHUB_TOKEN}@github.com/AiTerminalFoundation/homebrew-ai-terminal.git 
homebrew-tap 156 | cd homebrew-tap 157 | 158 | # Use the version from the environment 159 | VERSION="${{ env.RELEASE_VERSION }}" 160 | DMG_URL="https://github.com/AiTerminalFoundation/ai-terminal/releases/download/v${VERSION}/ai-terminal-${VERSION}.dmg" 161 | SHA256=$(curl -sL "${DMG_URL}" | shasum -a 256 | awk '{print $1}') 162 | 163 | # Update Formula/ai-terminal.rb 164 | sed -i.bak "s/version \".*\"/version \"${VERSION}\"/" Formula/ai-terminal.rb 165 | sed -i.bak "s|url \".*\"|url \"${DMG_URL}\"|" Formula/ai-terminal.rb 166 | sed -i.bak "s/sha256 \".*\"/sha256 \"${SHA256}\"/" Formula/ai-terminal.rb 167 | rm Formula/ai-terminal.rb.bak 168 | 169 | # Update Casks/ai-terminal.rb 170 | sed -i.bak "s/version \".*\"/version \"${VERSION}\"/" Casks/ai-terminal.rb 171 | sed -i.bak "s|url \".*\"|url \"${DMG_URL}\"|" Casks/ai-terminal.rb 172 | sed -i.bak "s/sha256 \".*\"/sha256 \"${SHA256}\"/" Casks/ai-terminal.rb 173 | rm Casks/ai-terminal.rb.bak 174 | 175 | # Commit and push changes 176 | git config user.name "GitHub Actions" 177 | git config user.email "actions@github.com" 178 | git add Formula/ai-terminal.rb Casks/ai-terminal.rb 179 | git commit -m "Update ai-terminal to v${VERSION}" 180 | git push 181 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/git_commands/git.rs: -------------------------------------------------------------------------------- 1 | use crate::command::types::command_manager::CommandManager; 2 | use crate::utils::file_system_utils::get_shell_path; 3 | use std::process::Command; 4 | use tauri::{command, State}; 5 | 6 | pub fn new_git_command() -> Command { 7 | let mut cmd = Command::new("git"); 8 | if let Some(path_val) = get_shell_path() { 9 | if let Ok(current_path) = std::env::var("PATH") { 10 | let new_path = format!("{}{}{}", path_val, std::path::MAIN_SEPARATOR, current_path); 11 | cmd.env("PATH", new_path); 12 | } else { 13 | cmd.env("PATH", path_val); 14 | } 15 | } 16 | cmd 
17 | } 18 | 19 | #[command] 20 | pub fn get_git_branch( 21 | session_id: String, 22 | command_manager: State<'_, CommandManager>, 23 | ) -> Result { 24 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 25 | let key = session_id; 26 | 27 | let current_dir = if let Some(state) = states.get(&key) { 28 | &state.current_dir 29 | } else { 30 | return Ok("".to_string()); 31 | }; 32 | 33 | // Get current branch 34 | let mut cmd = new_git_command(); 35 | cmd.arg("rev-parse") 36 | .arg("--abbrev-ref") 37 | .arg("HEAD") 38 | .current_dir(current_dir); 39 | 40 | let output = cmd.output().map_err(|e| e.to_string())?; 41 | 42 | if output.status.success() { 43 | let branch = String::from_utf8_lossy(&output.stdout).trim().to_string(); 44 | Ok(branch) 45 | } else { 46 | Ok("".to_string()) 47 | } 48 | } 49 | 50 | #[command] 51 | pub fn get_git_branches( 52 | session_id: String, 53 | command_manager: State<'_, CommandManager>, 54 | ) -> Result, String> { 55 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 56 | let key = session_id; 57 | 58 | let current_dir = if let Some(state) = states.get(&key) { 59 | &state.current_dir 60 | } else { 61 | return Err("Could not determine current directory for session".to_string()); 62 | }; 63 | 64 | let mut cmd = new_git_command(); 65 | cmd.arg("branch") 66 | .arg("-a") 67 | .arg("--no-color") 68 | .current_dir(current_dir); 69 | 70 | let output = cmd 71 | .output() 72 | .map_err(|e| format!("Failed to execute git branch: {}", e))?; 73 | 74 | if !output.status.success() { 75 | return Err(String::from_utf8_lossy(&output.stderr).to_string()); 76 | } 77 | 78 | let branches = String::from_utf8_lossy(&output.stdout) 79 | .lines() 80 | .map(|line| line.trim().replace("* ", "").to_string()) 81 | .filter(|line| !line.contains("->")) // Filter out HEAD pointers 82 | .collect::>(); 83 | 84 | Ok(branches) 85 | } 86 | 87 | #[command] 88 | pub fn switch_branch( 89 | branch_name: String, 90 | session_id: 
String, 91 | command_manager: State<'_, CommandManager>, 92 | ) -> Result<(), String> { 93 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 94 | let key = session_id; 95 | 96 | let current_dir = if let Some(state) = states.get(&key) { 97 | state.current_dir.clone() 98 | } else { 99 | return Err("Could not determine current directory for session".to_string()); 100 | }; 101 | 102 | // 1. Check for local changes 103 | let mut status_cmd = new_git_command(); 104 | status_cmd 105 | .arg("status") 106 | .arg("--porcelain") 107 | .current_dir(current_dir.clone()); 108 | 109 | let status_output = status_cmd 110 | .output() 111 | .map_err(|e| format!("Failed to execute git status: {}", e))?; 112 | 113 | let needs_stash = !status_output.stdout.is_empty(); 114 | 115 | if needs_stash { 116 | // 2. Stash changes if necessary 117 | let mut stash_cmd = new_git_command(); 118 | stash_cmd.arg("stash").current_dir(current_dir.clone()); 119 | 120 | let stash_output = stash_cmd 121 | .output() 122 | .map_err(|e| format!("Failed to execute git stash: {}", e))?; 123 | if !stash_output.status.success() { 124 | return Err(String::from_utf8_lossy(&stash_output.stderr).to_string()); 125 | } 126 | } 127 | 128 | // 3. 
Checkout the new branch 129 | let mut checkout_cmd = new_git_command(); 130 | checkout_cmd 131 | .arg("checkout") 132 | .arg(branch_name.clone()) 133 | .current_dir(current_dir.clone()); 134 | 135 | let checkout_output = checkout_cmd 136 | .output() 137 | .map_err(|e| format!("Failed to execute git checkout: {}", e))?; 138 | 139 | if !checkout_output.status.success() { 140 | // If checkout fails, try to pop stash if we created one 141 | if needs_stash { 142 | let mut stash_pop_cmd = new_git_command(); 143 | stash_pop_cmd 144 | .arg("stash") 145 | .arg("pop") 146 | .current_dir(current_dir.clone()); 147 | 148 | stash_pop_cmd.output().map_err(|e| { 149 | format!( 150 | "Failed to execute git stash pop after failed checkout: {}", 151 | e 152 | ) 153 | })?; 154 | } 155 | return Err(String::from_utf8_lossy(&checkout_output.stderr).to_string()); 156 | } 157 | 158 | // 4. Pop stash if changes were stashed 159 | if needs_stash { 160 | let mut stash_pop_cmd = new_git_command(); 161 | stash_pop_cmd 162 | .arg("stash") 163 | .arg("pop") 164 | .current_dir(current_dir); 165 | 166 | let stash_pop_output = stash_pop_cmd 167 | .output() 168 | .map_err(|e| format!("Failed to execute git stash pop: {}", e))?; 169 | 170 | if !stash_pop_output.status.success() { 171 | // This is not ideal, the user has switched branch but stash pop failed. 172 | // We can return an error message to inform the user. 
173 | let error_message = String::from_utf8_lossy(&stash_pop_output.stderr).to_string(); 174 | return Err(format!( 175 | "Branch switched to {}, but 'git stash pop' failed: {}", 176 | branch_name, error_message 177 | )); 178 | } 179 | } 180 | 181 | Ok(()) 182 | } 183 | 184 | #[tauri::command] 185 | pub fn git_fetch_and_pull( 186 | session_id: String, 187 | command_manager: State<'_, CommandManager>, 188 | ) -> Result { 189 | let mut command_manager_guard = command_manager.commands.lock().unwrap(); 190 | let command_state = command_manager_guard 191 | .get_mut(&session_id) 192 | .ok_or_else(|| "Session not found".to_string())?; 193 | 194 | let mut fetch_cmd = new_git_command(); 195 | fetch_cmd.current_dir(&command_state.current_dir); 196 | fetch_cmd.arg("fetch"); 197 | 198 | let fetch_output = fetch_cmd.output().map_err(|e| e.to_string())?; 199 | if !fetch_output.status.success() { 200 | return Err(String::from_utf8_lossy(&fetch_output.stderr).to_string()); 201 | } 202 | 203 | let mut pull_cmd = new_git_command(); 204 | pull_cmd.current_dir(&command_state.current_dir); 205 | pull_cmd.arg("pull"); 206 | 207 | let pull_output = pull_cmd.output().map_err(|e| e.to_string())?; 208 | if !pull_output.status.success() { 209 | return Err(String::from_utf8_lossy(&pull_output.stderr).to_string()); 210 | } 211 | 212 | let mut output = String::new(); 213 | output.push_str("Fetch output:\\n"); 214 | output.push_str(&String::from_utf8_lossy(&fetch_output.stdout)); 215 | output.push_str(&String::from_utf8_lossy(&fetch_output.stderr)); 216 | output.push_str("\\nPull output:\\n"); 217 | output.push_str(&String::from_utf8_lossy(&pull_output.stdout)); 218 | output.push_str(&String::from_utf8_lossy(&pull_output.stderr)); 219 | 220 | Ok(output) 221 | } 222 | 223 | #[tauri::command] 224 | pub fn git_commit_and_push( 225 | session_id: String, 226 | message: String, 227 | command_manager: State<'_, CommandManager>, 228 | ) -> Result { 229 | let mut command_manager_guard = 
command_manager.commands.lock().unwrap(); 230 | let command_state = command_manager_guard 231 | .get_mut(&session_id) 232 | .ok_or_else(|| "Session not found".to_string())?; 233 | 234 | let mut add_cmd = new_git_command(); 235 | add_cmd.current_dir(&command_state.current_dir); 236 | add_cmd.arg("add").arg("."); 237 | let add_output = add_cmd.output().map_err(|e| e.to_string())?; 238 | if !add_output.status.success() { 239 | return Err(String::from_utf8_lossy(&add_output.stderr).to_string()); 240 | } 241 | 242 | let mut commit_cmd = new_git_command(); 243 | commit_cmd.current_dir(&command_state.current_dir); 244 | commit_cmd.arg("commit").arg("-m").arg(&message); 245 | let commit_output = commit_cmd.output().map_err(|e| e.to_string())?; 246 | if !commit_output.status.success() { 247 | return Err(String::from_utf8_lossy(&commit_output.stderr).to_string()); 248 | } 249 | 250 | let mut push_cmd = new_git_command(); 251 | push_cmd.current_dir(&command_state.current_dir); 252 | push_cmd.arg("push"); 253 | let push_output = push_cmd.output().map_err(|e| e.to_string())?; 254 | if !push_output.status.success() { 255 | return Err(String::from_utf8_lossy(&push_output.stderr).to_string()); 256 | } 257 | 258 | let mut output = String::new(); 259 | output.push_str("Commit output:\\n"); 260 | output.push_str(&String::from_utf8_lossy(&commit_output.stdout)); 261 | output.push_str(&String::from_utf8_lossy(&commit_output.stderr)); 262 | output.push_str("\\nPush output:\\n"); 263 | output.push_str(&String::from_utf8_lossy(&push_output.stdout)); 264 | output.push_str(&String::from_utf8_lossy(&push_output.stderr)); 265 | 266 | Ok(output) 267 | } 268 | 269 | #[tauri::command] 270 | pub fn get_github_remote_and_branch( 271 | session_id: String, 272 | command_manager: tauri::State<'_, CommandManager>, 273 | ) -> Result { 274 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 275 | let key = session_id; 276 | let current_dir = if let Some(state) = states.get(&key) 
{ 277 | &state.current_dir 278 | } else { 279 | return Err("Could not determine current directory for session".to_string()); 280 | }; 281 | 282 | // Get remote URL 283 | let mut remote_cmd = new_git_command(); 284 | remote_cmd 285 | .arg("remote") 286 | .arg("get-url") 287 | .arg("origin") 288 | .current_dir(current_dir); 289 | let remote_output = remote_cmd.output().map_err(|e| e.to_string())?; 290 | if !remote_output.status.success() { 291 | return Err(String::from_utf8_lossy(&remote_output.stderr).to_string()); 292 | } 293 | let remote_url = String::from_utf8_lossy(&remote_output.stdout) 294 | .trim() 295 | .to_string(); 296 | 297 | // Get branch name 298 | let mut branch_cmd = new_git_command(); 299 | branch_cmd 300 | .arg("rev-parse") 301 | .arg("--abbrev-ref") 302 | .arg("HEAD") 303 | .current_dir(current_dir); 304 | let branch_output = branch_cmd.output().map_err(|e| e.to_string())?; 305 | if !branch_output.status.success() { 306 | return Err(String::from_utf8_lossy(&branch_output.stderr).to_string()); 307 | } 308 | let branch = String::from_utf8_lossy(&branch_output.stdout) 309 | .trim() 310 | .to_string(); 311 | 312 | Ok(serde_json::json!({ "remoteUrl": remote_url, "branch": branch })) 313 | } 314 | -------------------------------------------------------------------------------- /FineTuned/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | # Set CUDA allocation configuration to allow expandable segments 4 | os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" 5 | 6 | # --- Colab-specific: authenticate and mount Google Drive --- 7 | # try: 8 | # from google.colab import drive, auth 9 | # print("Authenticating user...") 10 | # auth.authenticate_user() # Explicitly authenticate 11 | # print("Mounting Google Drive...") 12 | # drive.mount('/content/drive', force_remount=True) 13 | # DEFAULT_OUTPUT_DIR = "/content/drive/MyDrive/llama2-improved" 14 | # except 
# except Exception as e:
#     print("Google Drive mounting failed:", e)
#     DEFAULT_OUTPUT_DIR = "./llama2-improved"

import torch
import argparse
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
    EarlyStoppingCallback
)
from datasets import Dataset
from peft import LoraConfig, get_peft_model
import wandb
import random
import numpy as np

# NOTE(review): DEFAULT_OUTPUT_DIR is only assigned inside the commented-out
# Colab block above, but main() uses it as an argparse default — running this
# script as-is will raise NameError. Confirm and define a module-level default.

def seed_everything(seed=42):
    """Set seeds for reproducibility.

    Seeds Python's random, NumPy, and torch (including all CUDA devices when
    available), and pins PYTHONHASHSEED.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)

def load_data(input_file, output_file, test_size=0.05):
    """Load data with improved formatting for context handling.

    Reads parallel line-aligned files (one instruction per line in
    input_file, one response per line in output_file), formats each pair in
    "### Instruction:/### Response:" style, shuffles, and returns a
    (train Dataset, validation Dataset) split with `test_size` held out.

    Raises AssertionError if the two files have different line counts.
    """
    with open(input_file, 'r', encoding='utf-8') as f_in:
        inputs = [line.strip() for line in f_in]
    with open(output_file, 'r', encoding='utf-8') as f_out:
        outputs = [line.strip() for line in f_out]
    assert len(inputs) == len(outputs), "Input and output counts must match"

    data = []
    for i in range(len(inputs)):
        input_text = inputs[i]
        # The "text" field is what actually gets tokenized for training.
        formatted_text = f"### Instruction:\n{input_text}\n\n### Response:\n{outputs[i]}"
        data.append({
            "input": inputs[i],
            "output": outputs[i],
            "text": formatted_text
        })

    # Shuffle before splitting so the validation set is a random sample.
    # NOTE(review): relies on the caller having seeded `random` (via
    # seed_everything) for a reproducible split.
    random.shuffle(data)
    split_idx = int(len(data) * (1 - test_size))
    train_data = data[:split_idx]
    val_data = data[split_idx:]

    return Dataset.from_list(train_data), Dataset.from_list(val_data)

def load_model(model_id, device="cpu"):
    """Load model with optimized configuration for context learning.

    Loads `model_id` with a device-appropriate dtype/device_map, applies
    LoRA adapters (target modules chosen by model family), and enables
    gradient checkpointing. On any failure, falls back to
    TinyLlama-1.1B-Chat with an equivalent setup.

    Returns (model, tokenizer).
    """
    try:
        print(f"Loading tokenizer from {model_id}")
        tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=True)
        # Causal-LM tokenizers often lack a pad token; reuse EOS.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        load_options = {
            "cache_dir": "./model_cache",
            "low_cpu_mem_usage": True,
            "trust_remote_code": True,
        }

        # Use a sequential device map to offload parts of the model if necessary.
        if device == "cuda" and torch.cuda.is_available():
            load_options.update({
                "torch_dtype": torch.float16,
                "device_map": "sequential",
            })
        else:
            load_options.update({
                "torch_dtype": torch.float32,
            })

        # Free cached GPU memory before loading a large model.
        if device == "cuda" and torch.cuda.is_available():
            torch.cuda.empty_cache()

        print(f"Loading model from {model_id}")
        model = AutoModelForCausalLM.from_pretrained(model_id, **load_options)
        # KV cache is incompatible with gradient checkpointing during training.
        model.config.use_cache = False

        # LoRA target modules differ by architecture (attention projection
        # names vary between model families).
        print("Determining target modules for LoRA")
        if "llama" in model_id.lower():
            target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"]
        elif "mistral" in model_id.lower():
            target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"]
        elif "falcon" in model_id.lower():
            target_modules = ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"]
        elif "tinyllama" in model_id.lower():
            target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
        else:
            # Default to the common attention projections.
            target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"]

        print(f"Using target modules: {target_modules}")
        model.train()

        lora_config = LoraConfig(
            r=8,
            lora_alpha=16,
            target_modules=target_modules,
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM",
        )

        print("Applying LoRA adapters")
        model = get_peft_model(model, lora_config)

        # Required so gradients flow into the (frozen) embedding inputs when
        # gradient checkpointing is enabled.
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        if hasattr(model, "gradient_checkpointing_enable"):
            print("Enabling gradient checkpointing")
            model.gradient_checkpointing_enable()

        model.train()

        print("Trainable parameters:")
        model.print_trainable_parameters()
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print(f"Total trainable parameters: {trainable_params}")

        # Guard against a silently frozen model (e.g. wrong target modules).
        if trainable_params == 0:
            raise ValueError("No trainable parameters found in the model")

        return model, tokenizer

    except Exception as e:
        # Fallback path: mirror the setup above with a small known-good model.
        print(f"Error loading model: {e}")
        print("Attempting to load a fallback model...")
        fallback_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        tokenizer = AutoTokenizer.from_pretrained(fallback_model, trust_remote_code=True)
        tokenizer.pad_token = tokenizer.eos_token

        fallback_options = {
            "cache_dir": "./model_cache",
            "torch_dtype": torch.float16 if device == "cuda" else torch.float32,
            "low_cpu_mem_usage": True,
            "trust_remote_code": True,
        }
        if device == "cuda":
            fallback_options["device_map"] = "sequential"

        model = AutoModelForCausalLM.from_pretrained(fallback_model, **fallback_options)
        model.config.use_cache = False
        model.train()

        lora_config = LoraConfig(
            r=8,
            lora_alpha=16,
            target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM"
        )
        model = get_peft_model(model, lora_config)
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        if hasattr(model, "gradient_checkpointing_enable"):
            model.gradient_checkpointing_enable()

        model.train()
        print("Trainable parameters in fallback model:")
        model.print_trainable_parameters()

        return model, tokenizer

def tokenize_function(examples, tokenizer, max_length=512):
    """Tokenize with improved handling for context examples.

    Pads/truncates each "text" field to max_length and builds labels that
    mirror input_ids, with padding positions replaced by -100 so the loss
    ignores them.
    """
    results = tokenizer(
        examples["text"],
        padding="max_length",
        truncation=True,
        max_length=max_length,
        return_tensors="pt"
    )
    # Standard causal-LM setup: labels are the inputs themselves.
    results["labels"] = results["input_ids"].clone()
    pad_token_id = tokenizer.pad_token_id
    # NOTE(review): iterating a 2-D tensor yields rows of 0-d tensors, so
    # these lists mix 0-d tensors and the int -100 — confirm the collator /
    # Trainer normalizes this as intended.
    results["labels"] = [
        [(label if label != pad_token_id else -100) for label in labels]
        for labels in results["labels"]
    ]
    return results

def compute_metrics(eval_preds):
    """Custom metrics for evaluating context-aware performance.

    Takes (predictions, labels) as logits and ids, applies argmax over the
    vocab axis, masks out -100 (ignored) positions, and returns token-level
    accuracy.
    """
    predictions, labels = eval_preds
    predictions = np.argmax(predictions, axis=-1)
    # -100 marks positions excluded from the loss; exclude them here too.
    mask = labels != -100
    labels = labels[mask]
    predictions = predictions[mask]
    accuracy = (predictions == labels).mean()
    return {"accuracy": accuracy}

# main() continues past the end of this chunk; its visible head is kept as-is.
def main():
    parser = argparse.ArgumentParser(description="Improved training process for context handling")
    parser.add_argument("--input_file", type=str, default="improved.nl", help="Path to input file")
    parser.add_argument("--output_file", type=str, default="improved.cm", help="Path to output file")
    parser.add_argument("--model_id", type=str, default="meta-llama/Llama-3.2-3B", help="Hugging Face model ID")
    parser.add_argument("--output_dir", type=str, default=DEFAULT_OUTPUT_DIR, help="Output directory (Google Drive folder if in Colab)")
    parser.add_argument("--batch_size", type=int, default=10, help="Batch size for training")
    parser.add_argument("--learning_rate", type=float, default=3e-4, help="Learning rate")
    parser.add_argument("--num_epochs", type=int, default=2, help="Number of training epochs")
    parser.add_argument("--warmup_ratio", type=float, default=0.1, help="Warmup ratio")
    parser.add_argument("--max_length", type=int, default=512, help="Max length for tokenization")
    parser.add_argument("--seed", type=int, default=42, help="Random seed")
Weights & Biases for tracking") 225 | parser.add_argument("--no_gradient_checkpointing", action="store_true", help="Disable gradient checkpointing") 226 | parser.add_argument("--force_cpu", action="store_true", help="Force the use of CPU even if a GPU is available") 227 | 228 | args, _ = parser.parse_known_args() 229 | 230 | seed_everything(args.seed) 231 | 232 | if args.use_wandb: 233 | wandb.init(project="llama2-terminal-commands", name="context-improved") 234 | 235 | if args.force_cpu: 236 | device = "cpu" 237 | else: 238 | device = "cuda" if torch.cuda.is_available() else "cpu" 239 | print(f"Using device: {device}") 240 | 241 | print("Loading data...") 242 | train_dataset, val_dataset = load_data(args.input_file, args.output_file) 243 | print(f"Loaded {len(train_dataset)} training examples and {len(val_dataset)} validation examples") 244 | 245 | print(f"Loading model {args.model_id}...") 246 | model, tokenizer = load_model(args.model_id, device) 247 | 248 | print("Tokenizing datasets...") 249 | tokenized_train = train_dataset.map( 250 | lambda examples: tokenize_function(examples, tokenizer, args.max_length), 251 | batched=True, 252 | remove_columns=train_dataset.column_names 253 | ) 254 | tokenized_val = val_dataset.map( 255 | lambda examples: tokenize_function(examples, tokenizer, args.max_length), 256 | batched=True, 257 | remove_columns=val_dataset.column_names 258 | ) 259 | 260 | data_collator = DataCollatorForLanguageModeling( 261 | tokenizer=tokenizer, 262 | mlm=False 263 | ) 264 | 265 | training_args = TrainingArguments( 266 | output_dir=args.output_dir, 267 | per_device_train_batch_size=args.batch_size, 268 | per_device_eval_batch_size=args.batch_size, 269 | evaluation_strategy="steps", 270 | eval_steps=500, 271 | logging_steps=50, 272 | gradient_accumulation_steps=4, 273 | num_train_epochs=args.num_epochs, 274 | weight_decay=0.01, 275 | warmup_ratio=args.warmup_ratio, 276 | lr_scheduler_type="cosine", 277 | learning_rate=args.learning_rate, 278 | 
save_steps=200, 279 | save_total_limit=3, 280 | load_best_model_at_end=True, 281 | metric_for_best_model="eval_loss", 282 | greater_is_better=False, 283 | push_to_hub=False, 284 | report_to="wandb" if args.use_wandb else "none", 285 | gradient_checkpointing=not args.no_gradient_checkpointing, 286 | fp16=device == "cuda", 287 | ddp_find_unused_parameters=False, 288 | dataloader_drop_last=True, 289 | optim="adamw_torch", 290 | remove_unused_columns=False, 291 | ) 292 | 293 | from transformers.trainer import Trainer as BaseTrainer 294 | original_move_model = BaseTrainer._move_model_to_device 295 | def safe_move_model(self, model, device): 296 | if any(p.device.type == "meta" for p in model.parameters()): 297 | print("Detected meta tensors, using to_empty() to move model") 298 | return model.to_empty(device=device) 299 | return original_move_model(self, model, device) 300 | BaseTrainer._move_model_to_device = safe_move_model 301 | 302 | trainer = Trainer( 303 | model=model, 304 | args=training_args, 305 | train_dataset=tokenized_train, 306 | eval_dataset=tokenized_val, 307 | tokenizer=tokenizer, 308 | data_collator=data_collator, 309 | callbacks=[EarlyStoppingCallback(early_stopping_patience=3)], 310 | ) 311 | 312 | print("Starting training...") 313 | trainer.train() 314 | 315 | print(f"Saving model to {args.output_dir}") 316 | trainer.save_model(args.output_dir) 317 | tokenizer.save_pretrained(args.output_dir) 318 | 319 | print("Training complete!") 320 | 321 | if args.use_wandb: 322 | wandb.finish() 323 | 324 | if __name__ == "__main__": 325 | main() 326 | -------------------------------------------------------------------------------- /ai-terminal/src/app/app.component.html: -------------------------------------------------------------------------------- 1 |
2 | 3 |
4 |
5 |
6 | Terminal v{{version}} 7 | 8 | 11 |
12 | 13 | 14 |
15 |
16 |
18 | 20 | {{ session.name }} 21 | 22 | 26 |
27 | 30 |
31 |
32 | 33 |
34 |
35 |
37 |
38 | $ 39 | 44 | {{ entry.command }} 45 | 46 |
47 | 56 | 62 | 68 |
69 |
70 |
71 | 72 | 73 | 74 |
{{ line }}
77 |
78 |
79 |
80 |
81 |
82 |
83 | {{ currentWorkingDirectory }} 84 |
85 | 88 | 91 | 94 | 97 |
98 |
99 |
103 | {{ branch }} 104 |
105 |
106 |
107 |
108 |
109 |
110 | 111 |
117 |
118 |
121 | {{ suggestion }} 122 |
123 |
124 |
125 |
126 | {{ isHistorySearchActive ? '🔍' : '$' 127 | }} 128 | 132 |
133 |
134 |
135 |
136 | 137 | 138 |
140 |
141 | 142 | 143 |
144 |
145 |
146 | AI Assistant 147 | 148 | 151 |
152 |
153 |
154 |
155 | 156 |
157 | > 158 | {{ entry.message }} 159 | 168 |
169 |
170 | 171 | 172 | 173 | 174 | 175 | 176 |
177 |
178 | 179 | 180 | {{ transformCodeForDisplay(entry.codeBlocks[getCodeBlockIndex(segment)].code) }} 181 | 182 | 183 | 191 | 199 | 206 | 207 | 208 | 210 | {{ getCommandExplanation(entry.codeBlocks[getCodeBlockIndex(segment)].code) }} 211 | 212 |
213 |
214 |
215 |
216 | 217 | 218 |
219 |
220 | 221 | 222 | 224 | 226 | 227 | 228 | 229 | 230 |
231 |
232 |
233 |
234 |
235 |
236 | > 237 | 239 |
240 |
241 |
242 |
243 |
244 | 245 | 246 |
247 |
248 |

Enter Commit Message

249 | 251 |
252 | 253 | 254 |
255 |
256 |
257 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/src/command/core/execute_command.rs: -------------------------------------------------------------------------------- 1 | use crate::command::types::command_manager::CommandManager; 2 | use crate::command::types::command_state::CommandState; 3 | use crate::utils::file_system_utils::get_shell_path; 4 | use std::collections::HashMap; 5 | use std::io::{BufReader, Read, Write}; 6 | use std::os::unix::process::CommandExt; 7 | use std::path::Path; 8 | use std::process::{Child, Command, Stdio}; 9 | use std::sync::{Arc, Mutex, MutexGuard}; 10 | use std::{env, thread}; 11 | use tauri::{command, AppHandle, Emitter, Manager, State}; 12 | 13 | #[command] 14 | pub fn execute_command( 15 | command: String, 16 | session_id: String, 17 | ssh_password: Option, 18 | app_handle: AppHandle, 19 | command_manager: State<'_, CommandManager>, 20 | ) -> Result { 21 | const SSH_NEEDS_PASSWORD_MARKER: &str = "SSH_INTERACTIVE_PASSWORD_PROMPT_REQUESTED"; 22 | const SSH_PRE_EXEC_PASSWORD_EVENT: &str = "ssh_pre_exec_password_request"; 23 | const COMMAND_FORWARDED_TO_ACTIVE_SSH_MARKER: &str = "COMMAND_FORWARDED_TO_ACTIVE_SSH"; 24 | 25 | // Phase 1: Check and handle active SSH session 26 | { 27 | let mut states_guard = command_manager.commands.lock().map_err(|e| e.to_string())?; 28 | 29 | let mut state = get_command_state(&mut states_guard, session_id.clone()); 30 | 31 | if state.is_ssh_session_active { 32 | if let Some(stdin_arc_for_thread) = state.child_stdin.clone() { 33 | let active_pid_for_log = state.pid.unwrap_or(0); 34 | 35 | if let Err(e) = app_handle.emit("command_forwarded_to_ssh", command.clone()) { 36 | eprintln!( 37 | "[Rust EXEC DEBUG] Failed to emit command_forwarded_to_ssh: {}", 38 | e 39 | ); 40 | } 41 | 42 | let app_handle_clone_for_thread = app_handle.clone(); 43 | let command_clone_for_thread = command.clone(); 44 | let session_id_clone_for_thread = session_id.clone(); 
45 | 46 | thread::spawn(move || { 47 | let command_manager_state_for_thread = 48 | app_handle_clone_for_thread.state::(); 49 | 50 | let mut stdin_guard = match stdin_arc_for_thread.lock() { 51 | Ok(guard) => guard, 52 | Err(e) => { 53 | if let Ok(mut states_lock_in_thread) = 54 | command_manager_state_for_thread.commands.lock() 55 | { 56 | if let Some(s) = 57 | states_lock_in_thread.get_mut(&session_id_clone_for_thread) 58 | { 59 | if s.pid == Some(active_pid_for_log) && s.is_ssh_session_active 60 | { 61 | s.is_ssh_session_active = false; 62 | s.child_stdin = None; 63 | s.remote_current_dir = None; 64 | } 65 | } 66 | } 67 | let _ = app_handle_clone_for_thread.emit("ssh_session_ended", serde_json::json!({ "pid": active_pid_for_log, "reason": format!("SSH session error (stdin lock): {}", e)})); 68 | let _ = app_handle_clone_for_thread.emit( 69 | "command_error", 70 | format!( 71 | "Failed to send to SSH (stdin lock '{}'): {}", 72 | command_clone_for_thread, e 73 | ), 74 | ); 75 | let _ = 76 | app_handle_clone_for_thread.emit("command_end", "Command failed."); 77 | return; 78 | } 79 | }; 80 | 81 | let is_remote_cd = command_clone_for_thread.trim().starts_with("cd "); 82 | let actual_command_to_write_ssh = if is_remote_cd { 83 | let marker = format!( 84 | "__REMOTE_CD_PWD_MARKER_{}__", 85 | std::time::SystemTime::now() 86 | .duration_since(std::time::UNIX_EPOCH) 87 | .unwrap() 88 | .as_secs_f64() 89 | .to_string() 90 | .replace('.', "") 91 | ); 92 | let cd_command_part = command_clone_for_thread.trim(); 93 | format!( 94 | "{} && printf '%s\\n' '{}' && pwd && printf '%s\\n' '{}'\n", 95 | cd_command_part, marker, marker 96 | ) 97 | } else { 98 | format!("{}\n", command_clone_for_thread) 99 | }; 100 | 101 | let write_attempt = 102 | stdin_guard.write_all(actual_command_to_write_ssh.as_bytes()); 103 | 104 | let final_result = if write_attempt.is_ok() { 105 | stdin_guard.flush() 106 | } else { 107 | write_attempt 108 | }; 109 | 110 | if let Err(e) = final_result { 111 | if 
let Ok(mut states_lock_in_thread) = 112 | command_manager_state_for_thread.commands.lock() 113 | { 114 | if let Some(s) = 115 | states_lock_in_thread.get_mut(&session_id_clone_for_thread) 116 | { 117 | if s.pid == Some(active_pid_for_log) && s.is_ssh_session_active { 118 | s.is_ssh_session_active = false; 119 | s.child_stdin = None; 120 | s.remote_current_dir = None; 121 | } 122 | } 123 | } 124 | let _ = app_handle_clone_for_thread.emit("ssh_session_ended", serde_json::json!({ "pid": active_pid_for_log, "reason": format!("SSH session ended (stdin write/flush error): {}", e)})); 125 | let _ = app_handle_clone_for_thread.emit( 126 | "command_error", 127 | format!( 128 | "Failed to send to SSH (stdin write/flush '{}'): {}", 129 | command_clone_for_thread, e 130 | ), 131 | ); 132 | let _ = app_handle_clone_for_thread.emit("command_end", "Command failed."); 133 | } 134 | }); 135 | 136 | drop(states_guard); 137 | return Ok(COMMAND_FORWARDED_TO_ACTIVE_SSH_MARKER.to_string()); 138 | } else { 139 | // state.child_stdin is None, but state.is_ssh_session_active was true 140 | let active_pid_for_log = state.pid.unwrap_or(0); 141 | state.is_ssh_session_active = false; 142 | state.pid = None; // Clear PID as session is now considered broken 143 | state.remote_current_dir = None; 144 | drop(states_guard); 145 | let _ = app_handle.emit("ssh_session_ended", serde_json::json!({ "pid": active_pid_for_log, "reason": "SSH session inconsistency: active but no stdin."})); 146 | return Err("SSH session conflict: active but no stdin. Please retry.".to_string()); 147 | } 148 | } 149 | } 150 | 151 | // Phase 2: Handle 'cd' command (if not in an SSH session) 152 | // The `cd` command logic remains largely the same, it acquires its own lock. 153 | if command.starts_with("cd ") || command == "cd" { 154 | // This block is the original 'cd' handling logic. 155 | // It will lock `command_manager.commands` internally. 
156 | let mut states_guard_cd = command_manager.commands.lock().map_err(|e| e.to_string())?; 157 | let mut command_state_cd = get_command_state(&mut states_guard_cd, session_id.clone()); 158 | 159 | let path = command.trim_start_matches("cd").trim(); 160 | if path.is_empty() || path == "~" || path == "~/" { 161 | return if let Some(home_dir) = dirs::home_dir() { 162 | let home_path = home_dir.to_string_lossy().to_string(); 163 | command_state_cd.current_dir = home_path.clone(); 164 | drop(states_guard_cd); // Release lock before emitting and returning 165 | let _ = app_handle.emit("command_end", "Command completed successfully."); 166 | Ok(format!("Changed directory to {}", home_path)) 167 | } else { 168 | drop(states_guard_cd); 169 | let _ = app_handle.emit("command_end", "Command failed."); 170 | Err("Could not determine home directory".to_string()) 171 | }; 172 | } 173 | let current_path = Path::new(&command_state_cd.current_dir); 174 | let new_path = if path.starts_with('~') { 175 | if let Some(home_dir) = dirs::home_dir() { 176 | let without_tilde = path.trim_start_matches('~'); 177 | let rel_path = without_tilde.trim_start_matches('/'); 178 | if rel_path.is_empty() { 179 | home_dir 180 | } else { 181 | home_dir.join(rel_path) 182 | } 183 | } else { 184 | drop(states_guard_cd); 185 | return Err("Could not determine home directory".to_string()); 186 | } 187 | } else if path.starts_with('/') { 188 | std::path::PathBuf::from(path) 189 | } else { 190 | let mut result_path = current_path.to_path_buf(); 191 | let path_components: Vec<&str> = path.split('/').collect(); 192 | for component in path_components { 193 | if component == ".." { 194 | if let Some(parent) = result_path.parent() { 195 | result_path = parent.to_path_buf(); 196 | } else { 197 | drop(states_guard_cd); 198 | let _ = app_handle.emit("command_end", "Command failed."); 199 | return Err("Already at root directory".to_string()); 200 | } 201 | } else if component != "." 
&& !component.is_empty() { 202 | result_path = result_path.join(component); 203 | } 204 | } 205 | result_path 206 | }; 207 | return if new_path.exists() { 208 | command_state_cd.current_dir = new_path.to_string_lossy().to_string(); 209 | let current_dir_for_ok = command_state_cd.current_dir.clone(); 210 | drop(states_guard_cd); 211 | let _ = app_handle.emit("command_end", "Command completed successfully."); 212 | Ok(format!("Changed directory to {}", current_dir_for_ok)) 213 | } else { 214 | drop(states_guard_cd); 215 | let _ = app_handle.emit("command_end", "Command failed."); 216 | Err(format!("Directory not found: {}", path)) 217 | }; 218 | } 219 | 220 | // Phase 3: Prepare for and execute new command (local or new SSH) 221 | let current_dir_clone = { 222 | let mut states_guard_dir = command_manager.commands.lock().map_err(|e| e.to_string())?; 223 | let state_dir = get_command_state(&mut states_guard_dir, session_id.clone()); 224 | state_dir.current_dir.clone() 225 | }; // Lock for current_dir released. 
226 | 227 | // Proactive SSH password handling (if not in an SSH session) 228 | let is_plain_ssh_attempt = 229 | command.contains("ssh ") && !command.trim_start().starts_with("sudo ssh "); 230 | if is_plain_ssh_attempt && ssh_password.is_none() { 231 | app_handle 232 | .emit(SSH_PRE_EXEC_PASSWORD_EVENT, command.clone()) 233 | .map_err(|e| e.to_string())?; 234 | return Ok(SSH_NEEDS_PASSWORD_MARKER.to_string()); 235 | } 236 | 237 | let mut command_to_run = command.clone(); 238 | let app_handle_clone = app_handle.clone(); 239 | 240 | let mut env_map: HashMap = std::env::vars().collect(); 241 | if !env_map.contains_key("PATH") { 242 | if let Some(path_val) = get_shell_path() { 243 | env_map.insert("PATH".to_string(), path_val); 244 | } 245 | } 246 | 247 | // let script_path_option: Option = None; // Removed unused variable 248 | 249 | // This flag determines if the command we are about to spawn *could* start a persistent SSH session 250 | let is_potential_ssh_session_starter = is_plain_ssh_attempt; 251 | 252 | let original_command_is_sudo = command.trim_start().starts_with("sudo "); 253 | let original_command_is_sudo_ssh = command.trim_start().starts_with("sudo ssh "); 254 | 255 | let mut cmd_to_spawn: Command; 256 | let mut child: Child; 257 | 258 | // Prepare command_to_run if it's an SSH command, before deciding on sshpass 259 | if is_potential_ssh_session_starter && !original_command_is_sudo_ssh { 260 | // Avoid mangling "sudo ssh ..." 
here 261 | let original_command_parts: Vec<&str> = command.split_whitespace().collect(); 262 | let mut first_non_option_idx_after_ssh: Option = None; 263 | 264 | // Find the first argument after "ssh" that doesn't start with '-' 265 | // This helps distinguish `ssh host` from `ssh host remote_command` 266 | let ssh_keyword_idx = original_command_parts.iter().position(|&p| p == "ssh"); 267 | 268 | if let Some(idx_ssh) = ssh_keyword_idx { 269 | for i in (idx_ssh + 1)..original_command_parts.len() { 270 | if !original_command_parts[i].starts_with('-') { 271 | first_non_option_idx_after_ssh = Some(i); 272 | break; 273 | } 274 | } 275 | 276 | let is_likely_interactive_ssh = match first_non_option_idx_after_ssh { 277 | Some(idx) => idx == original_command_parts.len() - 1, // True if the first non-option (host) is the last part 278 | None => false, // e.g., "ssh -p 22" without host, or just "ssh" 279 | }; 280 | 281 | let ssh_options_prefix = "ssh -t -t -o StrictHostKeyChecking=accept-new"; 282 | // Arguments are everything after "ssh" in the original command 283 | let args_after_ssh_keyword_in_original = original_command_parts 284 | .iter() 285 | .skip(idx_ssh + 1) 286 | .cloned() 287 | .collect::>() 288 | .join(" "); 289 | 290 | if is_likely_interactive_ssh { 291 | // For interactive: ssh -options user@host 292 | command_to_run = format!( 293 | "{} {}", 294 | ssh_options_prefix, 295 | args_after_ssh_keyword_in_original.trim_end() 296 | ); 297 | } else if first_non_option_idx_after_ssh.is_some() { 298 | // For non-interactive (ssh user@host remote_command): ssh -options user@host remote_command 299 | command_to_run = format!( 300 | "{} {}", 301 | ssh_options_prefix, args_after_ssh_keyword_in_original 302 | ); 303 | } else { 304 | // Could be just "ssh" or "ssh -options", keep as is but with prefix, though likely won't connect 305 | command_to_run = format!( 306 | "{} {}", 307 | ssh_options_prefix, args_after_ssh_keyword_in_original 308 | ); 309 | } 310 | } 311 | } 312 | 
313 | // Now, use the (potentially transformed) command_to_run for direct/sshpass spawning 314 | if is_potential_ssh_session_starter && !original_command_is_sudo { 315 | let executable_name: String; 316 | let mut arguments: Vec = Vec::new(); 317 | 318 | if let Some(password_value) = ssh_password { 319 | executable_name = "sshpass".to_string(); 320 | arguments.push("-p".to_string()); 321 | arguments.push(password_value); // password_value is a String, gets moved here 322 | // command_to_run is the full "ssh -t -t ..." string 323 | arguments.extend(command_to_run.split_whitespace().map(String::from)); 324 | } else { 325 | // No password provided: use plain ssh 326 | // command_to_run is already "ssh -t -t ..." 327 | let parts: Vec = command_to_run 328 | .split_whitespace() 329 | .map(String::from) 330 | .collect(); 331 | if parts.is_empty() || parts[0] != "ssh" { 332 | return Err(format!( 333 | "Failed to parse SSH command for direct execution: {}", 334 | command_to_run 335 | )); 336 | } 337 | executable_name = parts[0].clone(); // Should be "ssh" 338 | arguments.extend(parts.iter().skip(1).cloned()); 339 | } 340 | 341 | cmd_to_spawn = Command::new(&executable_name); 342 | for arg in &arguments { 343 | cmd_to_spawn.arg(arg); 344 | } 345 | 346 | // env_map is passed as is. If SSH_ASKPASS was in it from a broader environment, 347 | // sshpass should take precedence or ssh (in key auth) would ignore it if not needed. 348 | cmd_to_spawn 349 | .current_dir(¤t_dir_clone) 350 | .envs(&env_map) 351 | .stdout(Stdio::piped()) 352 | .stderr(Stdio::piped()) 353 | .stdin(Stdio::piped()); 354 | 355 | // setsid() was removed here in a previous step, which is good. 
356 | 357 | child = match cmd_to_spawn.spawn() { 358 | Ok(c) => c, 359 | Err(e) => { 360 | return Err(format!( 361 | "Failed to start direct command ({}): {}", 362 | executable_name, e 363 | )) 364 | } 365 | }; 366 | } else { 367 | // Fallback to sh -c for non-SSH or sudo commands 368 | let final_shell_command = if original_command_is_sudo && !original_command_is_sudo_ssh { 369 | command_to_run.clone() 370 | } else { 371 | format!("exec {}", command_to_run) 372 | }; 373 | 374 | let mut sh_cmd_to_spawn = Command::new("sh"); 375 | sh_cmd_to_spawn 376 | .arg("-c") 377 | .arg(&final_shell_command) 378 | .current_dir(¤t_dir_clone) 379 | .envs(&env_map) 380 | .stdout(Stdio::piped()) 381 | .stderr(Stdio::piped()) 382 | .stdin(Stdio::piped()); // Ensure stdin is piped for sh -c as well 383 | 384 | #[cfg(unix)] 385 | unsafe { 386 | sh_cmd_to_spawn.pre_exec(|| match nix::unistd::setsid() { 387 | Ok(_) => Ok(()), 388 | Err(e) => Err(std::io::Error::new( 389 | std::io::ErrorKind::Other, 390 | format!("setsid failed: {}", e), 391 | )), 392 | }); 393 | } 394 | 395 | child = match sh_cmd_to_spawn.spawn() { 396 | Ok(c) => c, 397 | Err(e) => return Err(format!("Failed to start command via sh -c: {}", e)), 398 | }; 399 | } 400 | 401 | let pid = child.id(); 402 | // Take IO handles before moving child into Arc> 403 | let child_stdin_handle = child.stdin.take().map(|stdin| Arc::new(Mutex::new(stdin))); 404 | let child_stdout_handle = child.stdout.take(); 405 | let child_stderr_handle = child.stderr.take(); 406 | let child_wait_handle_arc = Arc::new(Mutex::new(child)); // Now 'child' has no IO handles 407 | let session_id_for_wait_thread = session_id.clone(); 408 | 409 | { 410 | let mut states_guard_update = command_manager.commands.lock().map_err(|e| e.to_string())?; 411 | let mut state_to_update = get_command_state(&mut states_guard_update, session_id.clone()); 412 | 413 | state_to_update.pid = Some(pid); 414 | state_to_update.child_wait_handle = Some(child_wait_handle_arc.clone()); 
// Store wait handle 415 | 416 | if is_potential_ssh_session_starter { 417 | state_to_update.child_stdin = child_stdin_handle; // Store stdin handle for SSH 418 | state_to_update.is_ssh_session_active = true; 419 | state_to_update.remote_current_dir = Some("remote:~".to_string()); // Initial placeholder 420 | let _ = app_handle_clone.emit("ssh_session_started", serde_json::json!({ "pid": pid })); 421 | 422 | // Attempt to send initial PWD command 423 | if let Some(stdin_arc_for_init_pwd) = state_to_update.child_stdin.clone() { 424 | let app_handle_for_init_pwd_thread = app_handle_clone.clone(); // Clone app_handle for the thread 425 | let initial_pid_for_init_pwd_error = pid; 426 | let session_id_for_init_pwd_thread = session_id.clone(); 427 | 428 | thread::spawn(move || { 429 | // Get CommandManager state inside the thread using the moved app_handle 430 | let command_manager_state_for_thread = 431 | app_handle_for_init_pwd_thread.state::(); 432 | 433 | let initial_pwd_marker = format!( 434 | "__INITIAL_REMOTE_PWD_MARKER_{}__", 435 | std::time::SystemTime::now() 436 | .duration_since(std::time::UNIX_EPOCH) 437 | .unwrap() 438 | .as_secs_f64() 439 | .to_string() 440 | .replace('.', "") 441 | ); 442 | let initial_pwd_command = format!( 443 | "echo '{}'; pwd; echo '{}'\n", 444 | initial_pwd_marker, initial_pwd_marker 445 | ); 446 | 447 | match stdin_arc_for_init_pwd.lock() { 448 | Ok(mut stdin_guard) => { 449 | if let Err(e) = stdin_guard 450 | .write_all(initial_pwd_command.as_bytes()) 451 | .and_then(|_| stdin_guard.flush()) 452 | { 453 | if let Ok(mut states_lock) = 454 | command_manager_state_for_thread.commands.lock() 455 | { 456 | // Use state obtained within the thread 457 | if let Some(s) = 458 | states_lock.get_mut(&session_id_for_init_pwd_thread) 459 | { 460 | if s.pid == Some(initial_pid_for_init_pwd_error) 461 | && s.is_ssh_session_active 462 | { 463 | s.is_ssh_session_active = false; 464 | s.child_stdin = None; 465 | s.remote_current_dir = None; 466 | let 
_ = app_handle_for_init_pwd_thread.emit("ssh_session_ended", serde_json::json!({ "pid": initial_pid_for_init_pwd_error, "reason": format!("SSH session error (initial PWD send for pid {}): {}", initial_pid_for_init_pwd_error, e)})); 467 | } 468 | } 469 | } 470 | } 471 | } 472 | Err(e) => { 473 | if let Ok(mut states_lock) = 474 | command_manager_state_for_thread.commands.lock() 475 | { 476 | // Use state obtained within the thread 477 | if let Some(s) = 478 | states_lock.get_mut(&session_id_for_init_pwd_thread) 479 | { 480 | if s.pid == Some(initial_pid_for_init_pwd_error) 481 | && s.is_ssh_session_active 482 | { 483 | s.is_ssh_session_active = false; 484 | s.child_stdin = None; 485 | s.remote_current_dir = None; 486 | let _ = app_handle_for_init_pwd_thread.emit("ssh_session_ended", serde_json::json!({ "pid": initial_pid_for_init_pwd_error, "reason": format!("SSH session error (initial PWD stdin lock for pid {}): {}", initial_pid_for_init_pwd_error, e)})); 487 | } 488 | } 489 | } 490 | } 491 | } 492 | }); 493 | } 494 | } else { 495 | state_to_update.is_ssh_session_active = false; 496 | state_to_update.child_stdin = None; // Ensure stdin is None for non-SSH commands 497 | state_to_update.remote_current_dir = None; // Ensure remote_dir is None for non-SSH 498 | } 499 | } // states_guard_update lock released 500 | 501 | if let Some(stdout_stream) = child_stdout_handle { 502 | // Use the taken stdout 503 | let app_handle_for_stdout_mgr = app_handle_clone.clone(); 504 | let app_handle_for_stdout_emit = app_handle_clone.clone(); 505 | let current_pid_for_stdout_context = pid; 506 | let session_id_for_stdout_thread = session_id.clone(); 507 | 508 | thread::spawn(move || { 509 | let mut reader = BufReader::new(stdout_stream); 510 | let mut buffer = [0; 2048]; 511 | let mut line_buffer = String::new(); 512 | 513 | enum PwdMarkerParseState { 514 | Idle, 515 | AwaitingPwd(String), 516 | AwaitingEndMarker(String), 517 | } 518 | let mut pwd_marker_state = 
PwdMarkerParseState::Idle; 519 | 520 | let current_thread_id = std::thread::current().id(); 521 | 522 | loop { 523 | match reader.read(&mut buffer) { 524 | Ok(0) => { 525 | if !line_buffer.is_empty() { 526 | if let Err(e) = app_handle_for_stdout_emit 527 | .emit("command_output", line_buffer.clone()) 528 | { 529 | println!("[Rust STDOUT Thread {:?} PID {}] Error emitting final command_output: {}", current_thread_id, current_pid_for_stdout_context, e); 530 | } 531 | } 532 | break; 533 | } 534 | Ok(n) => { 535 | let output_chunk_str = String::from_utf8_lossy(&buffer[..n]).to_string(); 536 | line_buffer.push_str(&output_chunk_str); 537 | 538 | while let Some(newline_pos) = line_buffer.find('\n') { 539 | let line_segment = 540 | line_buffer.drain(..=newline_pos).collect::(); 541 | let current_line_trimmed = line_segment.trim().to_string(); 542 | 543 | if current_line_trimmed.is_empty() { 544 | match pwd_marker_state { 545 | PwdMarkerParseState::Idle => { 546 | if let Err(e) = app_handle_for_stdout_emit 547 | .emit("command_output", line_segment.clone()) 548 | { 549 | println!("[Rust STDOUT Thread {:?} PID {}] Error emitting whitespace/newline: {}", current_thread_id, current_pid_for_stdout_context, e); 550 | } 551 | } 552 | _ => {} 553 | } 554 | continue; 555 | } 556 | 557 | let mut emit_this_segment_to_frontend = true; 558 | 559 | match pwd_marker_state { 560 | PwdMarkerParseState::Idle => { 561 | if current_line_trimmed.starts_with("__REMOTE_CD_PWD_MARKER_") 562 | || current_line_trimmed 563 | .starts_with("__INITIAL_REMOTE_PWD_MARKER_") 564 | { 565 | pwd_marker_state = PwdMarkerParseState::AwaitingPwd( 566 | current_line_trimmed.clone(), 567 | ); 568 | emit_this_segment_to_frontend = false; 569 | } 570 | } 571 | PwdMarkerParseState::AwaitingPwd(ref marker_val) => { 572 | let new_pwd = current_line_trimmed.clone(); 573 | 574 | let command_manager_state = 575 | app_handle_for_stdout_mgr.state::(); 576 | if let Ok(mut states_guard) = 577 | 
command_manager_state.commands.lock() 578 | { 579 | if let Some(state) = 580 | states_guard.get_mut(&session_id_for_stdout_thread) 581 | { 582 | if state.pid == Some(current_pid_for_stdout_context) 583 | && state.is_ssh_session_active 584 | { 585 | state.remote_current_dir = Some(new_pwd.clone()); 586 | if let Err(e) = app_handle_for_stdout_emit.emit( 587 | "remote_directory_updated", 588 | new_pwd.clone(), 589 | ) { 590 | eprintln!("[Rust STDOUT Thread {:?} PID {}] Failed to emit remote_directory_updated: {}", current_thread_id, current_pid_for_stdout_context, e); 591 | } 592 | } 593 | } 594 | } 595 | pwd_marker_state = 596 | PwdMarkerParseState::AwaitingEndMarker(marker_val.clone()); 597 | emit_this_segment_to_frontend = false; 598 | } 599 | PwdMarkerParseState::AwaitingEndMarker(ref marker_val) => { 600 | if current_line_trimmed == *marker_val { 601 | pwd_marker_state = PwdMarkerParseState::Idle; 602 | emit_this_segment_to_frontend = false; 603 | } else { 604 | pwd_marker_state = PwdMarkerParseState::Idle; 605 | if current_line_trimmed 606 | .starts_with("__REMOTE_CD_PWD_MARKER_") 607 | || current_line_trimmed 608 | .starts_with("__INITIAL_REMOTE_PWD_MARKER_") 609 | { 610 | pwd_marker_state = PwdMarkerParseState::AwaitingPwd( 611 | current_line_trimmed.clone(), 612 | ); 613 | emit_this_segment_to_frontend = false; 614 | } 615 | } 616 | } 617 | } 618 | 619 | if emit_this_segment_to_frontend { 620 | if let Err(e) = app_handle_for_stdout_emit 621 | .emit("command_output", line_segment.clone()) 622 | { 623 | println!("[Rust STDOUT Thread {:?} PID {}] Error emitting command_output: {}", current_thread_id, current_pid_for_stdout_context, e); 624 | } 625 | } 626 | } 627 | } 628 | Err(e) => { 629 | if e.kind() == std::io::ErrorKind::Interrupted { 630 | continue; 631 | } 632 | if !line_buffer.is_empty() { 633 | if let Err(emit_e) = app_handle_for_stdout_emit 634 | .emit("command_output", line_buffer.clone()) 635 | { 636 | println!("[Rust STDOUT Thread {:?} PID {}] Error 
emitting final command_output on error: {}", current_thread_id, current_pid_for_stdout_context, emit_e); 637 | } 638 | } 639 | break; 640 | } 641 | } 642 | } 643 | }); 644 | } 645 | 646 | if let Some(stderr_stream) = child_stderr_handle { 647 | // Use the taken stderr 648 | let app_handle_stderr = app_handle.clone(); 649 | thread::spawn(move || { 650 | let mut reader = BufReader::new(stderr_stream); 651 | let mut buffer = [0; 2048]; 652 | let current_thread_id = std::thread::current().id(); // Get thread ID once 653 | loop { 654 | match reader.read(&mut buffer) { 655 | Ok(0) => { 656 | break; 657 | } 658 | Ok(n) => { 659 | let error_chunk = String::from_utf8_lossy(&buffer[..n]).to_string(); 660 | if !error_chunk.contains("[sudo] password") { 661 | if let Err(e) = 662 | app_handle_stderr.emit("command_error", error_chunk.clone()) 663 | { 664 | println!( 665 | "[Rust STDERR Thread {:?}] Error emitting command_error: {}", 666 | current_thread_id, e 667 | ); 668 | } 669 | } 670 | } 671 | Err(e) => { 672 | if e.kind() == std::io::ErrorKind::Interrupted { 673 | continue; 674 | } 675 | break; 676 | } 677 | } 678 | } 679 | }); 680 | } 681 | 682 | // The wait thread now uses child_wait_handle_arc 683 | let app_handle_wait = app_handle_clone.clone(); 684 | let app_handle_for_thread_state = app_handle.clone(); 685 | let was_ssh_session_starter = is_potential_ssh_session_starter; 686 | let initial_child_pid_for_wait_thread = pid; 687 | 688 | thread::spawn(move || { 689 | let status_result = { 690 | // Lock the child_wait_handle_arc to wait on the child 691 | let mut child_guard = match child_wait_handle_arc.lock() { 692 | Ok(guard) => guard, 693 | Err(e) => { 694 | // Emit error and end messages 695 | let _ = app_handle_wait.emit( 696 | "command_error", 697 | format!("Error locking child for wait: {}", e), 698 | ); 699 | let _ = app_handle_wait 700 | .emit("command_end", "Command failed due to wait lock error."); 701 | return; 702 | } 703 | }; 704 | // child_guard is 
MutexGuard 705 | child_guard.wait() 706 | }; 707 | 708 | { 709 | // Cleanup block 710 | let command_manager_state_in_thread = 711 | app_handle_for_thread_state.state::(); 712 | let mut states_guard_cleanup = match command_manager_state_in_thread.commands.lock() { 713 | Ok(guard) => guard, 714 | Err(e) => { 715 | return; 716 | } 717 | }; 718 | 719 | let key_cleanup = session_id_for_wait_thread.clone(); 720 | if let Some(state_to_clear) = states_guard_cleanup.get_mut(&key_cleanup) { 721 | // Important: Only clear if the PID matches, to avoid race conditions 722 | // if another command started and this wait thread is for an older one. 723 | if state_to_clear.pid == Some(initial_child_pid_for_wait_thread) { 724 | state_to_clear.child_wait_handle = None; 725 | state_to_clear.pid = None; // PID is cleared here 726 | if was_ssh_session_starter && state_to_clear.is_ssh_session_active { 727 | state_to_clear.is_ssh_session_active = false; 728 | state_to_clear.child_stdin = None; // Also clear stdin if it was an SSH session 729 | state_to_clear.remote_current_dir = None; // Clear remote dir 730 | 731 | let _ = app_handle_wait.emit("ssh_session_ended", serde_json::json!({ "pid": initial_child_pid_for_wait_thread, "reason": "SSH session ended normally."})); 732 | } else if was_ssh_session_starter { 733 | // SSH session starter but was already marked inactive (e.g. by write thread error) 734 | // Ensure remote_current_dir is also cleared if it hasn't been. 735 | state_to_clear.remote_current_dir = None; 736 | state_to_clear.child_stdin = None; 737 | } 738 | } 739 | } 740 | } // states_guard_cleanup lock released 741 | 742 | match status_result { 743 | Ok(status) => { 744 | let exit_msg = if status.success() { 745 | "Command completed successfully." 746 | } else { 747 | "Command failed." 
748 | }; 749 | let _ = app_handle_wait.emit("command_end", exit_msg); 750 | } 751 | Err(e) => { 752 | let _ = app_handle_wait 753 | .emit("command_error", format!("Error waiting for command: {}", e)); 754 | // Also emit command_end because the command effectively ended, albeit with an error during wait 755 | let _ = app_handle_wait.emit("command_end", "Command failed due to wait error."); 756 | } 757 | } 758 | }); 759 | 760 | Ok("Command started. Output will stream in real-time.".to_string()) 761 | } 762 | 763 | #[command] 764 | pub fn execute_sudo_command( 765 | command: String, 766 | session_id: String, 767 | password: String, 768 | app_handle: AppHandle, 769 | command_manager: State<'_, CommandManager>, 770 | ) -> Result { 771 | let mut states = command_manager.commands.lock().map_err(|e| e.to_string())?; 772 | 773 | let key = session_id; 774 | let state = states.entry(key.clone()).or_insert_with(|| CommandState { 775 | current_dir: env::current_dir() 776 | .unwrap_or_default() 777 | .to_string_lossy() 778 | .to_string(), 779 | child_wait_handle: None, 780 | child_stdin: None, 781 | pid: None, 782 | is_ssh_session_active: false, 783 | remote_current_dir: None, 784 | }); 785 | 786 | let current_dir = state.current_dir.clone(); 787 | 788 | let mut child_process = match Command::new("sudo") 789 | .arg("-S") 790 | .arg("bash") 791 | .arg("-c") 792 | .arg( 793 | command 794 | .split_whitespace() 795 | .skip(1) 796 | .collect::>() 797 | .join(" "), 798 | ) // Skip "sudo" and join the rest 799 | .current_dir(¤t_dir) 800 | .stdin(Stdio::piped()) 801 | .stdout(Stdio::piped()) 802 | .stderr(Stdio::piped()) 803 | .spawn() 804 | { 805 | Ok(child) => child, 806 | Err(e) => { 807 | return Err(format!("Failed to start sudo command: {}", e)); 808 | } 809 | }; 810 | 811 | let child_pid = child_process.id(); // Get PID 812 | let sudo_stdin = child_process.stdin.take().map(|s| Arc::new(Mutex::new(s))); // Take stdin 813 | let sudo_stdout = child_process.stdout.take(); // Take 
stdout 814 | let sudo_stderr = child_process.stderr.take(); // Take stderr 815 | 816 | let child_arc = Arc::new(Mutex::new(child_process)); // Store the Child itself for waiting 817 | 818 | state.child_wait_handle = Some(child_arc.clone()); // Store wait handle 819 | state.pid = Some(child_pid); // Store PID 820 | // For sudo, is_ssh_session_active remains false, child_stdin for SSH is not set. 821 | 822 | // Send password to stdin 823 | if let Some(stdin_arc) = sudo_stdin { 824 | // Use the taken and Arc-wrapped stdin 825 | let app_handle_stdin = app_handle.clone(); 826 | thread::spawn(move || { 827 | let mut stdin_guard = match stdin_arc.lock() { 828 | Ok(guard) => guard, 829 | Err(e) => { 830 | let _ = app_handle_stdin.emit("command_error", e.to_string()); 831 | return; 832 | } 833 | }; 834 | if stdin_guard 835 | .write_all(format!("{}", password).as_bytes()) 836 | .is_err() 837 | { 838 | let _ = app_handle_stdin.emit("command_error", "Failed to send password to sudo"); 839 | } 840 | }); 841 | } 842 | 843 | // Use the taken stdout_stream 844 | if let Some(stdout_stream) = sudo_stdout { 845 | let app_handle_stdout = app_handle.clone(); 846 | thread::spawn(move || { 847 | let mut reader = BufReader::new(stdout_stream); 848 | let mut buffer = [0; 2048]; // Read in chunks 849 | loop { 850 | match reader.read(&mut buffer) { 851 | Ok(0) => break, // EOF 852 | Ok(n) => { 853 | let output_chunk = String::from_utf8_lossy(&buffer[..n]).to_string(); 854 | let _ = app_handle_stdout.emit("command_output", output_chunk); 855 | } 856 | Err(e) => { 857 | if e.kind() == std::io::ErrorKind::Interrupted { 858 | continue; 859 | } 860 | let _ = app_handle_stdout 861 | .emit("command_output", format!("Error reading stdout: {}", e)); 862 | break; 863 | } 864 | } 865 | } 866 | }); 867 | } 868 | 869 | // Use the taken stderr_stream 870 | if let Some(stderr_stream) = sudo_stderr { 871 | let app_handle_stderr = app_handle.clone(); 872 | thread::spawn(move || { 873 | let mut reader = 
BufReader::new(stderr_stream); 874 | let mut buffer = [0; 2048]; // Read in chunks 875 | loop { 876 | match reader.read(&mut buffer) { 877 | Ok(0) => break, // EOF 878 | Ok(n) => { 879 | let error_chunk = String::from_utf8_lossy(&buffer[..n]).to_string(); 880 | if !error_chunk.contains("[sudo] password") { 881 | let _ = app_handle_stderr.emit("command_error", error_chunk.clone()); 882 | } 883 | } 884 | Err(e) => { 885 | if e.kind() == std::io::ErrorKind::Interrupted { 886 | continue; 887 | } 888 | let _ = app_handle_stderr 889 | .emit("command_error", format!("Error reading stderr: {}", e)); 890 | break; 891 | } 892 | } 893 | } 894 | }); 895 | } 896 | 897 | let child_arc_clone = child_arc.clone(); 898 | let app_handle_wait = app_handle.clone(); 899 | thread::spawn(move || { 900 | let status = { 901 | let mut child_guard = child_arc_clone.lock().unwrap(); 902 | match child_guard.wait() { 903 | Ok(status) => status, 904 | Err(e) => { 905 | let _ = app_handle_wait 906 | .emit("command_error", format!("Error waiting for command: {}", e)); 907 | return; 908 | } 909 | } 910 | }; 911 | 912 | let _ = app_handle_wait.emit("command_end", format!("Success: {}", status.success())); 913 | }); 914 | 915 | Ok("Command started. 
Output will stream in realtime.".to_string()) 916 | } 917 | 918 | fn get_command_state( 919 | command_state_guard: &mut MutexGuard>, 920 | session_id: String, 921 | ) -> CommandState { 922 | command_state_guard 923 | .entry(session_id) 924 | .or_insert_with(|| CommandState { 925 | current_dir: env::current_dir() 926 | .unwrap_or_default() 927 | .to_string_lossy() 928 | .to_string(), 929 | child_wait_handle: None, 930 | child_stdin: None, 931 | pid: None, 932 | is_ssh_session_active: false, // ensure default 933 | remote_current_dir: None, 934 | }) 935 | .clone() 936 | } 937 | -------------------------------------------------------------------------------- /ai-terminal/src/app/app.component.css: -------------------------------------------------------------------------------- 1 | :host { 2 | display: block; 3 | height: 100vh; 4 | width: 100vw; 5 | overflow: hidden; 6 | background-color: #282a36; 7 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 8 | font-size: 14px; 9 | } 10 | 11 | /* Remove all other styles */ 12 | 13 | .logo.angular:hover { 14 | filter: drop-shadow(0 0 2em #e32727); 15 | } 16 | 17 | :root { 18 | font-family: Inter, Avenir, Helvetica, Arial, sans-serif; 19 | font-size: 12px; 20 | line-height: 24px; 21 | font-weight: 400; 22 | 23 | color: #0f0f0f; 24 | background-color: #f6f6f6; 25 | 26 | font-synthesis: none; 27 | text-rendering: optimizeLegibility; 28 | -webkit-font-smoothing: antialiased; 29 | -moz-osx-font-smoothing: grayscale; 30 | -webkit-text-size-adjust: 100%; 31 | } 32 | 33 | .container { 34 | margin: 0; 35 | padding-top: 10vh; 36 | display: flex; 37 | flex-direction: column; 38 | justify-content: center; 39 | text-align: center; 40 | } 41 | 42 | .logo { 43 | height: 6em; 44 | padding: 1.5em; 45 | will-change: filter; 46 | transition: 0.75s; 47 | } 48 | 49 | .logo.tauri:hover { 50 | filter: drop-shadow(0 0 2em #24c8db); 51 | } 52 | 53 | .row { 54 | display: flex; 55 | justify-content: center; 56 | } 57 | 58 | a { 59 | font-weight: 
500; 60 | color: #646cff; 61 | text-decoration: inherit; 62 | } 63 | 64 | a:hover { 65 | color: #535bf2; 66 | } 67 | 68 | h1 { 69 | text-align: center; 70 | } 71 | 72 | input, 73 | button { 74 | border-radius: 8px; 75 | border: 1px solid transparent; 76 | padding: 0.6em 1.2em; 77 | font-size: 12px; 78 | font-weight: 500; 79 | font-family: inherit; 80 | color: #0f0f0f; 81 | background-color: #ffffff; 82 | transition: border-color 0.25s; 83 | box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); 84 | } 85 | 86 | button { 87 | cursor: pointer; 88 | } 89 | 90 | button:hover { 91 | border-color: #396cd8; 92 | } 93 | 94 | button:active { 95 | border-color: #396cd8; 96 | background-color: #e8e8e8; 97 | } 98 | 99 | input, 100 | button { 101 | outline: none; 102 | } 103 | 104 | #greet-input { 105 | margin-right: 5px; 106 | } 107 | 108 | @media (prefers-color-scheme: dark) { 109 | :root { 110 | color: #f6f6f6; 111 | background-color: #2f2f2f; 112 | } 113 | 114 | a:hover { 115 | color: #24c8db; 116 | } 117 | 118 | input, 119 | button { 120 | color: #ffffff; 121 | background-color: #0f0f0f98; 122 | } 123 | 124 | button:active { 125 | background-color: #0f0f0f69; 126 | } 127 | } 128 | 129 | .split-container { 130 | display: flex; 131 | width: 100%; 132 | height: 100%; 133 | position: relative; 134 | user-select: none; 135 | } 136 | 137 | .panel { 138 | height: 100%; 139 | background-color: #282a36; 140 | color: #f8f8f2; 141 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 142 | display: flex; 143 | flex-direction: column; 144 | overflow: hidden; 145 | border-radius: 8px; 146 | } 147 | 148 | .terminal-panel { 149 | min-width: 200px; 150 | max-width: 80%; 151 | margin: 8px 4px 8px 8px; 152 | overflow-x: hidden; 153 | } 154 | 155 | .ai-panel { 156 | flex: 1; 157 | min-width: 200px; 158 | margin: 8px 8px 8px 4px; 159 | } 160 | 161 | .panel-content { 162 | height: 100%; 163 | display: flex; 164 | flex-direction: column; 165 | border-radius: 8px; 166 | overflow: hidden; 167 | padding: 
0; 168 | margin: 0; 169 | background-color: transparent; 170 | max-width: 100%; 171 | } 172 | 173 | .output-area { 174 | flex: 1; 175 | overflow-y: auto; 176 | overflow-x: hidden; 177 | /* Prevent horizontal scrollbar */ 178 | font-size: 12px; 179 | line-height: 1.5; 180 | border-radius: 0; 181 | scroll-behavior: smooth; 182 | margin-top: 0; 183 | background-color: #282a36; 184 | box-sizing: border-box; 185 | padding-bottom: 32px; 186 | position: relative; 187 | z-index: 1; 188 | user-select: text; 189 | width: 100%; 190 | /* Ensure it takes the full width */ 191 | } 192 | 193 | .terminal-panel .input-area { 194 | border: 1px solid rgba(80, 250, 123, 0.5); 195 | border-top: 1px solid rgba(139, 233, 253, 0.5); 196 | box-shadow: 0 4px 12px rgba(80, 250, 123, 0.1), 0 0 3px rgba(80, 250, 123, 0.15); 197 | } 198 | 199 | .terminal-panel .input-area:hover, 200 | .terminal-panel .input-area:focus-within { 201 | box-shadow: 0 6px 16px rgba(80, 250, 123, 0.15), 0 0 6px rgba(80, 250, 123, 0.2); 202 | } 203 | 204 | .ai-panel .input-area { 205 | border: 1px solid rgba(189, 147, 249, 0.5); 206 | border-top: 1px solid rgba(255, 121, 198, 0.5); 207 | box-shadow: 0 4px 12px rgba(189, 147, 249, 0.1), 0 0 3px rgba(189, 147, 249, 0.15); 208 | } 209 | 210 | .ai-panel .input-area:hover, 211 | .ai-panel .input-area:focus-within { 212 | box-shadow: 0 6px 16px rgba(189, 147, 249, 0.15), 0 0 6px rgba(189, 147, 249, 0.2); 213 | } 214 | 215 | .input-area { 216 | position: relative; 217 | padding: 12px 16px; 218 | display: flex; 219 | align-items: flex-start; 220 | background-color: #282a36; 221 | border-radius: 8px; 222 | margin: 4px 16px 16px 16px; 223 | flex-direction: column; 224 | width: calc(100% - 32px); 225 | box-sizing: border-box; 226 | transform: none; 227 | z-index: 5; 228 | transition: box-shadow 0.3s ease; 229 | } 230 | 231 | .input-area:hover, 232 | .input-area:focus-within { 233 | transform: none; 234 | } 235 | 236 | .terminal-panel .current-directory { 237 | color: rgba(80, 
250, 123, 0.8); 238 | } 239 | 240 | .ai-panel .current-directory { 241 | color: rgba(189, 147, 249, 0.8); 242 | } 243 | 244 | .current-directory { 245 | padding: 8px 15px; 246 | background-color: #282a36; 247 | color: #a9a9a9; 248 | font-size: 13px; 249 | border-top: 1px solid #444; 250 | display: flex; 251 | align-items: center; 252 | justify-content: space-between; 253 | } 254 | 255 | .git-branch { 256 | color: #ffc107; 257 | font-weight: normal; 258 | margin-left: auto; 259 | } 260 | 261 | .prompt-container { 262 | display: flex; 263 | width: 100%; 264 | align-items: center; 265 | padding: 0 4px; 266 | } 267 | 268 | .prompt { 269 | margin-right: 12px; 270 | white-space: nowrap; 271 | overflow: hidden; 272 | text-overflow: ellipsis; 273 | display: inline-block; 274 | line-height: 24px; 275 | } 276 | 277 | /* Terminal prompt is softer green */ 278 | .terminal-panel .prompt { 279 | color: rgba(80, 250, 123, 0.85); 280 | } 281 | 282 | /* AI prompt is softer purple */ 283 | .ai-panel .prompt { 284 | color: rgba(189, 147, 249, 0.85); 285 | } 286 | 287 | /* Command prompt is yellow/gold */ 288 | .prompt-command { 289 | color: #ffc107; 290 | } 291 | 292 | .input-area textarea { 293 | flex: 1; 294 | background: transparent; 295 | border: none; 296 | color: #c9c9c9; 297 | font-family: monospace; 298 | font-size: 12px; 299 | outline: none; 300 | resize: none; 301 | min-height: 24px; 302 | overflow-y: auto; 303 | line-height: 1.5; 304 | width: calc(100% - 24px); 305 | padding: 0; 306 | letter-spacing: 0.75px; 307 | } 308 | 309 | .input-area textarea:disabled { 310 | opacity: 0.5; 311 | cursor: not-allowed; 312 | } 313 | 314 | .input-area textarea::placeholder { 315 | color: #6272a4; 316 | opacity: 0.7; 317 | } 318 | 319 | /* Panel resizing */ 320 | .resizer { 321 | width: 8px; 322 | background-color: transparent; 323 | cursor: col-resize; 324 | transition: all 0.2s ease; 325 | position: relative; 326 | user-select: none; 327 | -webkit-user-select: none; 328 | 
-moz-user-select: none; 329 | -ms-user-select: none; 330 | margin: 8px 0; 331 | } 332 | 333 | .resizer::before { 334 | content: ''; 335 | position: absolute; 336 | left: 50%; 337 | transform: translateX(-50%); 338 | width: 2px; 339 | height: 100%; 340 | background-color: #44475a; 341 | border-radius: 1px; 342 | opacity: 0.5; 343 | transition: all 0.2s ease; 344 | } 345 | 346 | .resizer:hover::before { 347 | background-color: rgba(189, 147, 249, 0.6); 348 | width: 3px; 349 | box-shadow: 0 0 8px rgba(189, 147, 249, 0.3); 350 | opacity: 0.7; 351 | } 352 | 353 | .resizer.resizing::before { 354 | background-color: rgba(189, 147, 249, 0.7); 355 | width: 3px; 356 | box-shadow: 0 0 12px rgba(189, 147, 249, 0.4); 357 | opacity: 0.8; 358 | } 359 | 360 | /* Scrollbar styling */ 361 | .output-area::-webkit-scrollbar { 362 | width: 8px; 363 | } 364 | 365 | .output-area::-webkit-scrollbar-track { 366 | background: #282a36; 367 | border-radius: 4px; 368 | } 369 | 370 | .output-area::-webkit-scrollbar-thumb { 371 | background: #44475a; 372 | border-radius: 4px; 373 | opacity: 0.5; 374 | } 375 | 376 | .output-area::-webkit-scrollbar-thumb:hover { 377 | background: #6272a4; 378 | opacity: 0.8; 379 | } 380 | 381 | /* Command and chat entry styling */ 382 | .command-entry { 383 | border-top: 1px solid #44475a; 384 | transition: all 0.2s ease; 385 | max-width: 100%; 386 | box-sizing: border-box; 387 | } 388 | 389 | .command-entry-actions { 390 | display: flex; 391 | gap: 8px; 392 | margin-left: auto; 393 | } 394 | 395 | .output-action-button { 396 | background: none; 397 | border: 1px solid rgba(98, 114, 164, 0.3); 398 | padding: 4px; 399 | color: #f8f8f2; 400 | cursor: pointer; 401 | transition: all 0.2s ease; 402 | display: flex; 403 | align-items: center; 404 | justify-content: center; 405 | border-radius: 4px; 406 | opacity: 0.6; 407 | } 408 | 409 | .output-action-button:hover { 410 | opacity: 1; 411 | background-color: rgba(98, 114, 164, 0.2); 412 | border-color: rgba(98, 114, 
164, 0.8); 413 | } 414 | 415 | .output-action-button svg { 416 | stroke: currentColor; 417 | } 418 | 419 | .chat-entry { 420 | margin-bottom: 20px; 421 | padding: 8px; 422 | border: 1px solid #44475a; 423 | border-radius: 8px; 424 | opacity: 0.9; 425 | transition: all 0.2s ease; 426 | max-width: 100%; 427 | overflow-x: hidden; 428 | /* Prevent horizontal overflow */ 429 | box-sizing: border-box; 430 | position: relative; 431 | } 432 | 433 | .chat-entry:hover { 434 | border-color: rgba(189, 147, 249, 0.4); 435 | box-shadow: 0 4px 12px rgba(189, 147, 249, 0.15); 436 | opacity: 1; 437 | } 438 | 439 | .chat-line { 440 | display: flex; 441 | align-items: flex-start; 442 | margin-bottom: 4px; 443 | font-size: 12px; 444 | border-bottom: 1px solid rgba(139, 233, 253, 0.2); 445 | padding-bottom: 8px; 446 | margin-bottom: 8px; 447 | } 448 | 449 | .chat-line .prompt { 450 | color: #50fa7b; 451 | margin-right: 8px; 452 | font-weight: normal; 453 | } 454 | 455 | .chat-line .message { 456 | color: #f8f8f2; 457 | font-weight: normal; 458 | } 459 | 460 | .chat-output { 461 | color: #f8f8f2; 462 | line-height: 1.5; 463 | font-size: 12px; 464 | white-space: pre-wrap; 465 | word-wrap: break-word; 466 | margin-left: 8px; 467 | position: relative; 468 | padding-top: 8px; 469 | display: flex; 470 | flex-wrap: wrap; 471 | align-items: center; 472 | gap: 4px; 473 | transition: all 0.2s ease; 474 | border: 1px solid transparent; 475 | border-radius: 4px; 476 | padding: 8px; 477 | } 478 | 479 | .chat-output:hover { 480 | transform: none; 481 | border: 1px solid transparent; 482 | box-shadow: none; 483 | z-index: auto; 484 | } 485 | 486 | .chat-output>div { 487 | display: inline-flex; 488 | align-items: center; 489 | flex-wrap: wrap; 490 | gap: 4px; 491 | } 492 | 493 | .copy-icon { 494 | position: absolute; 495 | bottom: 8px; 496 | right: 8px; 497 | background: none; 498 | border: 1px solid rgba(255, 255, 255, 0); 499 | padding: 4px; 500 | color: transparent; 501 | cursor: pointer; 502 | 
transition: all 0.2s ease; 503 | display: flex; 504 | align-items: center; 505 | justify-content: center; 506 | border-radius: 4px; 507 | opacity: 0.6; 508 | z-index: 1; 509 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); 510 | } 511 | 512 | .copy-icon:hover { 513 | opacity: 1; 514 | background-color: rgba(98, 114, 164, 0.1); 515 | transform: none; 516 | box-shadow: none; 517 | border-color: rgba(98, 114, 164, 0.8); 518 | } 519 | 520 | .copy-icon svg { 521 | width: 14px; 522 | height: 14px; 523 | stroke: #fbfbfb; 524 | stroke-width: 1.5; 525 | fill: transparent; 526 | transition: all 0.2s ease; 527 | } 528 | 529 | .copy-icon:hover svg { 530 | stroke: #ffffff; 531 | width: 16px; 532 | height: 16px; 533 | } 534 | 535 | /* Add tooltip styles for copy icon */ 536 | .copy-icon::after { 537 | content: "Copy"; 538 | position: absolute; 539 | bottom: 100%; 540 | left: 50%; 541 | transform: translateX(-50%); 542 | padding: 4px 8px; 543 | background-color: #282a36; 544 | color: #f8f8f2; 545 | font-size: 12px; 546 | border-radius: 4px; 547 | white-space: nowrap; 548 | opacity: 0; 549 | visibility: hidden; 550 | transition: all 0.2s ease; 551 | pointer-events: none; 552 | border: 1px solid rgba(98, 114, 164, 0.2); 553 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2); 554 | z-index: 1000; 555 | } 556 | 557 | .copy-icon:hover::after { 558 | opacity: 1; 559 | visibility: visible; 560 | transform: translateX(-50%) translateY(0); 561 | } 562 | 563 | /* Add a small arrow to the tooltip */ 564 | .copy-icon::before { 565 | content: ''; 566 | position: absolute; 567 | bottom: 100%; 568 | left: 50%; 569 | transform: translateX(-50%); 570 | border: 4px solid transparent; 571 | border-top-color: rgba(98, 114, 164, 0.2); 572 | opacity: 0; 573 | visibility: hidden; 574 | transition: all 0.2s ease; 575 | } 576 | 577 | .copy-icon:hover::before { 578 | opacity: 1; 579 | visibility: visible; 580 | transform: translateX(-50%) translateY(0); 581 | } 582 | 583 | .command-output:hover .copy-icon, 584 | 
.chat-output:hover .copy-icon {
  opacity: 1;
}

/* Autocomplete styles */
.autocomplete-container {
  position: absolute;
  bottom: 100%;
  left: 0;
  width: 100%;
  max-width: 100%;
  max-height: 200px;
  background-color: #44475a;
  border-radius: 4px;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
  z-index: 20;
  margin-bottom: 8px;
  overflow-y: auto;
  outline: none;
  border: 1px solid rgba(80, 250, 123, 0.2);
}

.autocomplete-container:focus {
  outline: none;
  border: 1px solid rgba(189, 147, 249, 0.7);
  box-shadow: 0 0 0 2px rgba(189, 147, 249, 0.3);
}

.autocomplete-list {
  width: 100%;
}

.autocomplete-item {
  padding: 6px 12px;
  cursor: pointer;
  transition: all 0.2s;
  font-size: 11px;
}

.autocomplete-item:hover {
  background-color: #6272a4;
}

.autocomplete-item.selected {
  background-color: #bd93f9;
  color: #282a36;
}

/* Dracula theme colors:
  - Background: #282a36
  - Current Line: #44475a
  - Foreground: #f8f8f2
  - Comment: #6272a4
  - Red: #ff5555
  - Orange: #ffb86c
  - Yellow: #f1fa8c
  - Green: #50fa7b
  - Purple: #bd93f9
  - Pink: #ff79c6
  - Cyan: #8be9fd
*/

/* Dark mode support */
@media (prefers-color-scheme: dark) {
  .content-panel {
    background-color: #2f2f2f;
    color: #f6f6f6;
  }
}

.processing-indicator {
  color: #8be9fd;
  font-style: italic;
  margin-bottom: 8px;
  font-size: 11px;
  opacity: 0.8;
}

/* fix: `pulse` was declared twice in this file with conflicting opacities
   (0.5 here, 0.7 further down); the later declaration wins in the cascade,
   so only the effective 0.7 version is kept. Used by .command-running. */
@keyframes pulse {
  0% {
    opacity: 0.7;
  }

  50% {
    opacity: 1;
  }

  100% {
    opacity: 0.7;
  }
}

/* Make the terminal more responsive */
@media (max-width: 768px) {
  .prompt {
    max-width: 100px;
  }
}

/* Add spacing between command output lines */
.command-output div {
  white-space: pre-wrap !important;
  word-wrap: break-word !important;
  word-break: break-word !important;
  font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
  line-height: 1.4;
  user-select: text;
  display: block;
  max-width: 100%;
  box-sizing: border-box;
  overflow-x: hidden;
}

/* Ensure command output line wrapping in all environments */
.command-output div:not(.processing-indicator) {
  white-space: pre-wrap !important;
  overflow-wrap: break-word !important;
  word-break: break-word !important;
  display: block;
  max-width: 100%;
  box-sizing: border-box;
  margin: 2px 0;
}

/* Special styling for file listing outputs (like ls command) */
.command-output div.file-list-output {
  display: inline-block;
  vertical-align: top;
  margin: 2px 12px 2px 0;
  padding: 0 4px;
}

/* Command status colors */
.command-success {
  color: #50fa7b;
}

.command-error {
  color: #ff5555;
}

.command-running {
  color: #f1fa8c;
  /* Dracula yellow */
  animation: pulse 1.5s infinite;
}

/* Terminal panel full width when AI panel is hidden */
.terminal-panel.full-width {
  width: calc(100% - 16px) !important;
  /* Adjust for margins */
  max-width: 100%;
}

/* Terminal header styling */
.terminal-header,
.ai-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 8px 16px;
  background-color: #282a36;
  border-radius: 8px 8px 0 0;
  height: 40px;
  box-sizing: border-box;
}

/* Terminal Tabs Styling */
.terminal-tabs {
  /* fix: stray double semicolons removed from the two declarations below */
  background-color: rgba(40, 42, 54, 0.3);
  border-bottom: 1px solid rgba(40, 42, 54, 0.3);
  padding: 0;
  margin: 0;
}

.tabs-container {
  display: flex;
  align-items: center;
  padding: 0 8px;
  height: 36px;
  overflow-x: auto;
  overflow-y: hidden;
  scrollbar-width: thin;
  scrollbar-color: #44475a transparent;
}

.tabs-container::-webkit-scrollbar {
  height: 3px;
}

.tabs-container::-webkit-scrollbar-track {
  background: transparent;
}

.tabs-container::-webkit-scrollbar-thumb {
  background: #44475a;
  border-radius: 2px;
}

.tab {
  display: flex;
  align-items: center;
  padding: 6px 12px;
  margin-right: 2px;
  background-color: #282a36;
  border: 1px solid #44475a;
  border-bottom: none;
  border-radius: 6px 6px 0 0;
  cursor: pointer;
  transition: all 0.2s ease;
  min-width: 100px;
  max-width: 200px;
  position: relative;
  font-size: 12px;
  height: 28px;
  box-sizing: border-box;
}

.tab:hover {
  background-color: #44475a;
  border-color: rgba(189, 147, 249, 0.3);
}

.tab.active {
  background-color: #21222c;
  border-color: rgba(80, 250, 123, 0.5);
  color: #50fa7b;
  font-weight: 600;
  box-shadow: 0 2px 8px rgba(80, 250, 123, 0.1);
}

.tab.active:hover {
  background-color: #21222c;
  border-color: rgba(80, 250, 123, 0.7);
}

.tab-name {
  flex: 1;
  white-space: nowrap;
  overflow: hidden;
  text-overflow: ellipsis;
  color: #f8f8f2;
  font-size: 12px;
  line-height: 1;
  outline: none;
  border: none;
  background: transparent;
  cursor: inherit;
}

.tab.active .tab-name {
  color: #50fa7b;
}

/* Inline tab renaming: highlight the editable name field */
.tab-name[contenteditable="true"] {
  background-color: rgba(189, 147, 249, 0.1);
  border: 1px solid rgba(189, 147, 249, 0.3);
  border-radius: 3px;
  padding: 2px 4px;
  margin: -2px -4px;
  cursor: text;
}

/* Per-tab close ("x") button */
.close-tab {
  background: none;
  border: none;
  color: #6272a4;
  cursor: pointer;
  padding: 2px 4px;
  margin-left: 6px;
  border-radius: 3px;
  font-size: 14px;
  line-height: 1;
  width: 16px;
  height: 16px;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.2s ease;
  font-weight: bold;
}

.close-tab:hover {
  color: #ff5555;
  background-color: rgba(255, 85, 85, 0.1);
}

/* "+" button that opens a new terminal tab */
.new-tab {
  background: none;
  border: 1px solid #44475a;
  color: white;
  cursor: pointer;
  padding: 6px 12px;
  margin-left: 8px;
  border-radius: 6px;
  font-size: 16px;
  line-height: 1;
  width: 36px;
  height: 28px;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.2s ease;
  flex-shrink: 0;
}

.new-tab:hover {
  color: #50fa7b;
  background-color: rgba(80, 250, 123, 0.1);
  border-color: rgba(80, 250, 123, 0.3);
}

/* Panel titles styling */
.panel-title {
  font-weight: bold;
  font-size: 14px;
  line-height: 24px;
  color: rgba(139, 233, 253, 0.8);
}

.ai-title {
  font-weight: bold;
  font-size: 14px;
  line-height: 24px;
  color: rgba(189, 147, 249, 0.8);
}

/* Toggle AI button styling */
.toggle-ai-button {
  background-color: rgba(189, 147, 249, 0.1);
  color: rgba(189, 147, 249, 0.9);
  border: 1px solid rgba(189, 147, 249, 0.3);
  padding: 6px 12px;
  border-radius: 6px;
  cursor: pointer;
  font-size: 12px;
  height: 28px;
  line-height: 1;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.2s ease;
  font-weight: 500;
  letter-spacing: 0.3px;
  position: relative;
  overflow: hidden;
}

/* Gradient overlay, faded in on hover */
.toggle-ai-button::before {
  content: '';
  position: absolute;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
  background: linear-gradient(45deg, rgba(189, 147, 249, 0.1), rgba(255, 121, 198, 0.1));
  opacity: 0;
  transition: opacity 0.2s ease;
}

.toggle-ai-button:hover {
  background-color: rgba(189, 147, 249, 0.15);
  border-color: rgba(189, 147, 249, 0.5);
  box-shadow: 0 4px 12px rgba(189, 147, 249, 0.2);
  transform: none;
}

.toggle-ai-button:hover::before {
  opacity: 1;
}

.toggle-ai-button:active {
  transform: translateY(0);
  box-shadow: 0 2px 6px rgba(189, 147, 249, 0.15);
}

/* Pink variant of the toggle button while the AI panel is visible */
.ai-panel .toggle-ai-button {
  background-color: rgba(255, 121, 198, 0.1);
  color: rgba(255, 121, 198, 0.9);
  border-color: rgba(255, 121, 198, 0.3);
}

.ai-panel .toggle-ai-button:hover {
  background-color: rgba(255, 121, 198, 0.15);
  border-color: rgba(255, 121, 198, 0.5);
  box-shadow: 0 4px 12px rgba(255, 121, 198, 0.2);
}

.ai-panel .toggle-ai-button::before {
  background: linear-gradient(45deg, rgba(255, 121, 198, 0.1), rgba(189, 147, 249, 0.1));
}

/* Current-directory display sits in the header; no extra spacing below */
.terminal-header .current-directory {
  margin-bottom: 0;
}

/* Command messages and their captured output in the chat panel */
.message-command {
  color: #ffc107;
}

.chat-command-output {
  background-color: rgba(0, 0, 0, 0.2);
  padding: 8px;
  border-left: 2px solid #ffc107;
  white-space: pre-wrap;
  font-family: 'Courier New', monospace;
}

/* Code Block Styling */
.code-block-container {
  position: relative;
  margin: 8px 0;
  border-radius: 6px;
  background-color: rgba(80, 250, 123, 0.1);
  border: 1px solid rgba(80, 250, 123, 0.2);
  max-width: 100%;
  overflow: hidden;
  box-sizing: border-box;
  display: inline-flex;
  align-items: center;
  transition: all 0.2s ease;
}

.code-block-container:hover {
  transform: none;
  border-color: rgba(80, 250, 123, 0.2);
  box-shadow: none;
}

.code-block {
  position: relative;
  padding: 8px 12px;
  margin: 0;
  background-color: transparent;
  color: #6272a4;
  font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
  font-size: 12px;
  line-height: 1.5;
  overflow-x: auto;
  white-space: pre-wrap;
  word-wrap: break-word;
  box-sizing: border-box;
  display: flex;
  align-items: center;
  transition: all 0.2s ease;
  border: 1px solid transparent;
}

.code-block:hover {
  transform: none;
  border: 1px solid transparent;
  box-shadow: none;
  z-index: auto;
}

.code-block code {
  display: block;
  white-space: pre-wrap;
  word-wrap: break-word;
  max-width: 100%;
  box-sizing: border-box;
  font-family: inherit;
}

/* AI chat output: wrapping flex row of message fragments */
.chat-output {
  margin-top: 8px;
  color: #f8f8f2;
  font-size: 12px;
  line-height: 1.5;
  white-space: pre-wrap;
  word-wrap: break-word;
  max-width: 100%;
  box-sizing: border-box;
  display: flex;
  flex-wrap: wrap;
  align-items: center;
  gap: 4px;
}

.chat-output>div {
  display: inline-flex;
  align-items: center;
  flex-wrap: wrap;
  gap: 4px;
}

/* Single line code blocks (no newlines) */
.code-block-container.single-line {
  background-color: rgba(80, 250, 123, 0.1);
  display: inline-flex;
  margin: 4px 0;
  align-items: center;
  border-radius: 4px;
  border: 1px solid rgba(80, 250, 123, 0.2);
  width: fit-content;
  max-width: 100%;
  padding: 4px 8px;
}

.code-block-container.single-line .code-block {
  padding: 0;
  max-height: none;
  display: flex;
  align-items: center;
  white-space: nowrap;
  overflow-x: auto;
  width: fit-content;
  background-color: transparent;
  color: #6272a4;
}

.code-block-container.single-line .code-block-header {
  border: none;
  padding: 0 8px;
  background-color: transparent;
}

.code-block-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 4px 8px;
  background: none;
  border: none;
}

.code-language {
  font-size: 12px;
  color: #6272a4;
  font-weight: normal;
  text-transform: uppercase;
}

.copy-code-button {
  background: none;
  border: none;
  color: transparent;
  cursor: pointer;
  padding: 2px;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.2s ease;
}

.copy-code-button:hover {
  color: transparent;
  background: none;
}

.copy-code-button svg {
  width: 14px;
  height: 14px;
  stroke: #fefefe;
  stroke-width: 1.5;
  fill: transparent;
}

/* Code block actions container */
.command-actions {
  display: flex;
  gap: 8px;
  margin-left: 8px;
}

/* Icon-only action buttons attached to commands; icon is drawn via SVG
   stroke, text color stays transparent */
.command-action-button {
  background: none;
  border: none;
  padding: 4px;
  color: transparent;
  cursor: pointer;
  transition: all 0.2s ease;
  display: flex;
  align-items: center;
  justify-content: center;
  position: relative;
}

.command-action-button:hover {
  color: transparent;
  background: none;
}

.command-action-button svg {
  width: 14px;
  height: 14px;
  stroke: #ffffff;
  stroke-width: 1.5;
  fill: transparent;
  transition: all 0.2s ease;
}

/* Icons grow slightly on hover for feedback */
.command-action-button:hover svg {
  stroke: #ffffff;
  width: 18px;
  height: 18px;
}

/* Add tooltip styles */
.command-action-button::after {
  content: attr(data-tooltip);
  position: absolute;
  bottom: 100%;
  left: 50%;
  transform: translateX(-50%) translateY(10px);
  padding: 4px 8px;
  background-color: #282a36;
  color: #f8f8f2;
  font-size: 12px;
  border-radius: 4px;
  white-space: nowrap;
  opacity: 0;
  visibility: hidden;
  transition: all 0.3s ease;
  pointer-events: none;
  border: 1px solid rgba(189, 147, 249, 0.4);
  box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
  z-index: 1000;
  margin-bottom: 5px;
}

.command-action-button:hover::after {
  opacity: 1;
  visibility: visible;
  transform: translateX(-50%) translateY(0);
  transition-delay: 0.3s;
}

/* Add a small arrow to the tooltip */
.command-action-button::before {
  content: '';
  position: absolute;
  bottom: 100%;
  left: 50%;
  transform: translateX(-50%) translateY(10px);
  border: 5px solid transparent;
  border-top-color: rgba(189, 147, 249, 0.4);
  opacity: 0;
  visibility: hidden;
  transition: all 0.3s ease;
  z-index: 1000;
  margin-bottom: -2px;
}

.command-action-button:hover::before {
  opacity: 1;
  visibility: visible;
  transform: translateX(-50%) translateY(0);
  transition-delay: 0.3s;
}

/* Copy notification styling */
.copy-notification {
  position: fixed;
  bottom: 30px;
  right: 30px;
  background-color: rgba(80, 250, 123, 0.9);
  color: #282a36;
  padding: 8px 16px;
  border-radius: 4px;
  font-weight: bold;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
  opacity: 0;
  transform: translateY(20px);
  transition: all 0.3s ease;
  z-index: 1000;
}

.copy-notification.show {
  opacity: 1;
  transform: translateY(0);
}

/* Additional styling for terminal command-style code blocks */
.code-block-container.command-block {
  background-color: #282a36;
  border-color: rgba(139, 233, 253, 0.2);
}

.code-block-container.command-block .code-block {
  background-color: #282a36;
  font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
  color: #50fa7b;
}

.code-block-container.command-block .code-block-header {
  background-color: #282a36;
  border-bottom-color: rgba(139, 233, 253, 0.2);
}

/* Better styling for the inline single-line code blocks */
.code-block-container.single-line {
  background-color: rgba(80, 250, 123, 0.1);
  display: inline-flex;
  margin: 4px 0;
  max-width: 100%;
  /* fix: `max-width: 100%` was declared twice in this rule; duplicate removed */
  align-items: center;
  border-radius: 4px;
  border: 1px solid rgba(80, 250, 123, 0.2);
  overflow: hidden;
}

.command-history,
.chat-history {
  user-select: text;
  /* Allow selection in command and chat history */
}

/* Add a class for disabling selection during resize */
.resizing-active * {
  user-select: none !important;
  cursor: col-resize !important;
}

/* Command action buttons styling */
.command-actions {
  position: absolute;
  right: 8px;
  top: 50%;
  transform: translateY(-50%);
  display: flex;
  gap: 4px;
  padding: 2px 4px;
  background: none;
  z-index: 1;
}

.command-action-button {
  background: none;
  border: none;
  padding: 4px;
  color: transparent;
  cursor: pointer;
  transition: all 0.2s ease;
  display: flex;
  align-items: center;
  justify-content: center;
}

.command-action-button:hover {
  color: transparent;
  background: none;
}

.command-text {
  color: transparent;
  cursor: pointer;
  white-space: nowrap;
  font-weight: 500;
  margin-right: 4px;
  flex-shrink: 0;
}

/* Code block actions container */
.code-block-actions {
  display: flex;
  align-items: center;
  gap: 4px;
  background: none;
}

.code-block-actions .copy-code-button {
  margin-left: 0;
  opacity: 1;
}

.code-block-actions .copy-code-button:hover {
  opacity: 1;
}

/* Simple command styling */
.simple-command {
  display: inline-flex;
  align-items: center;
  padding: 6px 12px;
  margin: 0;
  font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
  background-color: rgba(35, 214, 81, 0.08);
  /* More transparent background */
  border: 1px solid rgba(80, 250, 123, 0.1);
  border-radius: 6px;
  transition: all 0.2s ease;
  width: fit-content;
  min-width: 100px;
  position: relative;
  padding-right: 84px;
  /* Space for icons — fix: this declaration appeared twice; duplicate removed */
  box-sizing: border-box;
  flex-shrink: 0;
  max-width: 100%;
  overflow: hidden;
}

.command-output {
  padding: 8px 12px;
  margin: 4px 0;
  border-radius: 0;
  background-color: rgba(40, 42, 54, 0.3);
  padding-bottom: 40px;
  /* Add padding to make room for the buttons */
  max-width: 100%;
  overflow-x: hidden;
  /* Explicitly prevent horizontal scrolling */
}

/* NOTE(fix): the `.command-output div`, `.command-output div:not(.processing-indicator)`
   and `.command-output div.file-list-output` rules that followed here were
   byte-for-byte duplicates of the identical rules earlier in this file and
   have been removed; the earlier copies remain in effect. */

.ai-message {
  position: relative;
  padding: 8px;
  margin: 4px 0;
  border-radius: 4px;
  background-color: rgba(40, 42, 54, 0.3);
  transition: all 0.2s ease;
  border: 1px solid transparent;
}

.ai-message:hover {
  transform: none;
  border: 1px solid transparent;
  box-shadow: none;
  z-index: auto;
}

.ai-message .copy-icon {
  background-color: rgba(189, 147, 249, 0.1);
}

.ai-message .copy-icon:hover {
  background-color: rgba(189, 147, 249, 0.2);
}

/* Sticky prompt line at the top of each command's output */
.command-line {
  position: -webkit-sticky;
  position: sticky;
  top: 0;
  background-color: #363948;
  z-index: 2;
  display: flex;
  align-items: center;
  gap: 8px;
  padding: 8px 12px;
  margin: 0;
  font-family: monospace;
  border-bottom: none;
  box-shadow: 0 1px 0 rgba(139, 233, 253, 0.2);
  letter-spacing: 0.5px;
}

.command-line .command {
  overflow-x: auto;
  white-space: nowrap;
}

.chat-question {
  display: flex;
  align-items: center;
  padding: 2px 0;
  margin: 2px 0;
  font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
  border-bottom: 1px solid rgba(139, 233, 253, 0.2);
  padding-bottom: 8px;
  margin-bottom: 8px;
  color: rgba(189, 147, 249, 0.85);
  margin-left: 8px;
  position: relative;
}

.chat-question .prompt {
  color: rgba(189, 147, 249, 0.85);
  margin-right: 8px;
}

.chat-question .message {
  color: rgba(189, 147, 249, 0.85);
  font-weight: normal;
  margin-right: 4px;
}

.refresh-button {
  background: none;
  border: none;
  padding: 2px;
  color: rgba(189, 147, 249, 0.6);
  cursor: pointer;
  transition: all 0.2s ease;
  display: flex;
  align-items: center;
  justify-content: center;
  border-radius: 4px;
  opacity: 1;
  margin-left: 2px;
}

.chat-question:hover .refresh-button {
  color: rgba(189, 147, 249, 0.8);
}

.refresh-button:hover {
  color: rgba(189, 147, 249, 1);
  background-color: rgba(189, 147, 249, 0.1);
  transform: none;
}

.refresh-button svg {
  width: 12px;
  height: 12px;
}

/* Command block styling */
.command-block {
  margin: 8px 0;
  width: 100%;
}

.command-content {
  display: flex;
  align-items: center;
  gap: 12px;
  width: 100%;
}

.command-text {
  color: #50fa7b;
  white-space: pre-wrap;
  word-break: break-word;
  font-weight: 500;
  margin-right: 4px;
  overflow-x: auto;
}

/* NOTE: intentionally re-declared — adds border-radius and overrides the
   earlier `background: none` with a fully transparent background-color */
.command-actions {
  position: absolute;
  right: 8px;
  top: 50%;
  transform: translateY(-50%);
  display: flex;
  gap: 4px;
  padding: 2px 4px;
  background-color: rgba(35, 214, 81, 0.00);
  border-radius: 4px;
  z-index: 1;
}

.command-explanation {
  color: #6272a4;
  font-size: 12px;
  padding-left: 12px;
  border-left: 2px solid rgba(80, 250, 123, 0.2);
  white-space: pre-wrap;
  word-break: break-word;
  flex: 1;
  min-width: 0;
}

/* Responsive adjustments */
@media (max-width: 768px) {
  .command-content {
    flex-direction: column;
    align-items: flex-start;
    gap: 8px;
  }

  .simple-command {
    width: 100%;
    padding-right: 92px;
    font-size: 11px;
    min-width: 80px;
  }

  .command-text {
    font-size: 12px;
  }

  .command-explanation {
    font-size: 11px;
    margin-left: 4px;
    padding-left: 8px;
    width: 100%;
  }
}

/* Improve command action buttons visibility
   NOTE(fix): the exact-duplicate `.command-action-button` and
   `.command-action-button:hover` rules that preceded these two svg rules
   were removed (identical copies exist earlier in this file); the svg
   stroke override below is kept because it differs (#f5f5f5). */
.command-action-button svg {
  width: 14px;
  height: 14px;
  stroke: #f5f5f5;
  stroke-width: 1.5;
  fill: transparent;
  transition: all 0.2s ease;
}

.command-action-button:hover svg {
  stroke: #ffffff;
  width: 18px;
  height: 18px;
}

.code-block-container.single-line .code-block-header {
  border-bottom: none;
  border-right: none;
  padding: 2px 4px;
  background-color: #282a36;
}

/* Scroll to top button styling */
.scroll-to-top-button {
  position: absolute;
  bottom: 8px;
  right: 40px;
  background: none;
  border: 1px solid rgba(98, 114, 164, 0.3);
  padding: 4px;
  color: transparent;
  cursor: pointer;
  transition: all 0.2s ease;
  display: flex;
  align-items: center;
  justify-content: center;
  border-radius: 4px;
  opacity: 0.6;
  z-index: 1;
  box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}

.scroll-to-top-button:hover {
  opacity: 1;
  background-color: rgba(98, 114, 164, 0.1);
  transform: none;
  box-shadow: none;
  border-color: rgba(98, 114, 164, 0.8);
}

.scroll-to-top-button svg {
  width: 14px;
  height: 14px;
  stroke: #ffffff;
  stroke-width: 1.5;
  fill: transparent;
  transition: all 0.2s ease;
}

.scroll-to-top-button:hover svg {
  stroke: #ffffff;
  width: 16px;
  height: 16px;
}

/* Add tooltip styles for scroll to top button */
.scroll-to-top-button::after {
  content: attr(data-tooltip);
  position: absolute;
  bottom: 100%;
  left: 50%;
  transform: translateX(-50%);
  padding: 4px 8px;
  background-color: #282a3600;
  color: #f8f8f2;
  font-size: 12px;
  border-radius: 4px;
  white-space: nowrap;
  opacity: 0;
  visibility: hidden;
  transition: all 0.2s ease;
  pointer-events: none;
  /* fix: color typo `98, 115, 164` normalized to the Dracula comment
     color `98, 114, 164` used by every sibling rule (alpha is 0 either way) */
  border: 1px solid rgba(98, 114, 164, 0);
  box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
  z-index: 1000;
}

.scroll-to-top-button:hover::after {
  opacity: 1;
  visibility: visible;
  transform: translateX(-50%) translateY(0);
}

/* Add a small arrow to the tooltip */
.scroll-to-top-button::before {
  content: '';
  position: absolute;
  bottom: 100%;
  left: 50%;
  transform: translateX(-50%);
  border: 4px solid transparent;
  border-top-color: rgba(98, 114, 164, 0.2);
  opacity: 0;
  visibility: hidden;
  transition: all 0.2s ease;
}

.scroll-to-top-button:hover::before {
  opacity: 1;
  visibility: visible;
  transform: translateX(-50%) translateY(0);
}

/* Password input styling */
.password-input {
  font-family: monospace;
  letter-spacing: 2px;
  color: #ffcc00 !important;
  /* More visible password dots */
}

/* History search mode styles */
.prompt.search-mode {
  color: #4a9eff;
}

.history-search {
  color: #ccc;
  font-family: 'Cascadia Code', 'Fira Code', monospace;
}

.history-search-highlight {
  background-color: rgba(74, 158, 255, 0.2);
  border-radius: 2px;
  padding: 0 2px;
}

.git-branch-container {
  position: relative;
  display: flex;
  align-items: center;
  gap: 4px;
}

.git-action-button {
  background-color: #333;
  color: #8be9fd;
  border: 1px solid #444;
  border-radius: 4px;
  padding: 2px 8px;
  cursor: pointer;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.2s ease;
  font-size: 12px;
}

.git-action-button:hover {
  background-color: #44475a;
  border-color: #8be9fd;
}

.git-branch-button {
  background-color: #333;
  color: #8ade96;
  border: 1px solid #444;
  border-radius: 4px;
  padding: 2px 8px;
  cursor: pointer;
  font-size: 12px;
  display: flex;
  align-items: center;
  gap: 5px;
}

.git-branch-button:hover {
  background-color: #444;
}

.branch-selector-popup {
  position: absolute;
  bottom: 100%;
  right: 0;
  margin-bottom: 8px;
  background-color: #44475a;
  border: 1px solid rgba(80, 250, 123, 0.2);
  border-radius: 4px;
  z-index: 1000;
  width: max-content;
  min-width: 200px;
  max-height: 300px;
  overflow-y: auto;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
}

.branch-list-container {
  display: flex;
  flex-direction: column;
}

.branch-item {
  padding: 6px 12px;
  cursor: pointer;
  color: #f8f8f2;
  font-size: 11px;
  border-bottom: none;
  transition: all 0.2s;
}

.branch-item:last-child {
  border-bottom: none;
}

.branch-item:hover {
  background-color: #6272a4;
}

.branch-item.active {
  background-color: #bd93f9;
  color: #282a36;
  font-weight: normal;
}

.branch-item.more-items {
  font-style: italic;
  color: #888;
  cursor: default;
}

.branch-item.more-items:hover {
  background-color: transparent;
}

/* Input Area Styles */
.input-area {
  padding: 5px;
  background-color: #1e1e1e;
}

/* NOTE(fix): an exact duplicate of `.command-line .command` that appeared
   here was removed; the identical rule earlier in this file remains. */

/* Commit Popup Styles */
.commit-popup-overlay {
  position: fixed;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
  background-color: rgba(0, 0, 0, 0.6);
  display: flex;
  align-items: center;
  justify-content: center;
  z-index: 2000;
}

.commit-popup {
  background-color: #282a36;
  padding: 20px;
  border-radius: 8px;
  box-shadow: 0 5px 15px rgba(0, 0, 0, 0.3);
  width: 400px;
  max-width: 90%;
  border: 1px solid #44475a;
}

.commit-popup h4 {
  margin-top: 0;
  color: #f8f8f2;
}

.commit-popup input {
  width: 100%;
  padding: 8px;
  box-sizing: border-box;
  background-color: #44475a;
  border: 1px solid #6272a4;
  color: #f8f8f2;
  border-radius: 4px;
}

.commit-popup-actions {
  margin-top: 15px;
  display: flex;
  justify-content: flex-end;
  gap: 10px;
}

.commit-popup-actions button {
  padding: 8px 16px;
  border-radius: 4px;
  border: none;
  cursor: pointer;
  background-color: #44475a;
  color: #f8f8f2;
  transition: background-color 0.2s;
}

.commit-popup-actions button:hover {
  background-color: #6272a4;
}

.commit-popup-actions button[disabled] {
  opacity: 0.5;
  cursor: not-allowed;
}

.commit-popup-actions button:last-child {
  background-color: #50fa7b;
  color: #282a36;
}

.commit-popup-actions button:last-child:hover {
  background-color: #61ff8c;
}

.commit-popup-actions button:last-child[disabled] {
  background-color: #50fa7b;
}