├── .DS_Store ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── CODEOWNERS │ └── release.yml ├── .gitignore ├── FineTuned ├── README.md ├── export_for_ollama.py ├── test_model.py └── train.py ├── README.md ├── ai-terminal ├── .DS_Store ├── .idea │ ├── .gitignore │ ├── ai-terminal.iml │ ├── modules.xml │ └── vcs.xml ├── .npmrc ├── README.md ├── ai-terminal_0.2.0_amd64.deb ├── angular.json ├── build-macos.sh ├── package-lock.json ├── package.json ├── proxy.conf.json ├── setup-homebrew-tap.sh ├── src-tauri │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ ├── build.rs │ ├── capabilities │ │ └── default.json │ ├── entitlements.plist │ ├── icons │ │ ├── 128x128.png │ │ ├── 128x128@2x.png │ │ ├── 32x32.png │ │ ├── Square107x107Logo.png │ │ ├── Square142x142Logo.png │ │ ├── Square150x150Logo.png │ │ ├── Square284x284Logo.png │ │ ├── Square30x30Logo.png │ │ ├── Square310x310Logo.png │ │ ├── Square44x44Logo.png │ │ ├── Square71x71Logo.png │ │ ├── Square89x89Logo.png │ │ ├── StoreLogo.png │ │ ├── icon.icns │ │ ├── icon.ico │ │ └── icon.png │ ├── src │ │ ├── lib.rs │ │ └── main.rs │ └── tauri.conf.json ├── src │ ├── app │ │ ├── app.component.css │ │ ├── app.component.html │ │ ├── app.component.ts │ │ ├── app.config.ts │ │ └── app.routes.ts │ ├── assets │ │ ├── angular.svg │ │ └── tauri.svg │ ├── index.html │ ├── main.ts │ ├── styles.css │ └── types │ │ └── tauri.d.ts ├── tsconfig.app.json └── tsconfig.json ├── demo.gif ├── requirements.txt └── test_session_isolation.md /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/.DS_Store -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | WebSite/ai-terminal[[:space:]]demo.mov filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS Version [Ex: macOS 12.x] 28 | - Architecture [Ex: intel] 29 | - Ai terminal version [Ex: 0.9] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 
33 | -------------------------------------------------------------------------------- /.github/workflows/CODEOWNERS: -------------------------------------------------------------------------------- 1 | 2 | * @MicheleVerriello @Hitomamacs 3 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | 8 | jobs: 9 | build-macos: 10 | runs-on: macos-latest 11 | permissions: 12 | contents: write 13 | steps: 14 | - uses: actions/checkout@v4 15 | 16 | - name: Setup Node.js 17 | uses: actions/setup-node@v4 18 | with: 19 | node-version: '20' 20 | 21 | - name: Install Rust stable 22 | uses: dtolnay/rust-toolchain@stable 23 | 24 | - name: Install create-dmg 25 | run: brew install create-dmg 26 | 27 | - name: Install dependencies 28 | run: | 29 | cd ai-terminal 30 | npm ci --force 31 | 32 | - name: Import Code-Signing Certificates 33 | uses: apple-actions/import-codesign-certs@v1 34 | with: 35 | p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }} 36 | p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }} 37 | 38 | - name: Prepare Apple API Key 39 | run: | 40 | echo "${{ secrets.APPLE_API_KEY }}" > /tmp/apple_api_key.p8 41 | chmod 600 /tmp/apple_api_key.p8 42 | echo "APPLE_API_KEY=/tmp/apple_api_key.p8" >> $GITHUB_ENV 43 | 44 | - name: Build macOS Universal 45 | run: | 46 | cd ai-terminal 47 | chmod +x build-macos.sh 48 | ./build-macos.sh 49 | env: 50 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 51 | APPLE_DEVELOPER_ID: ${{ secrets.APPLE_DEVELOPER_ID }} 52 | APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }} 53 | APPLE_API_KEY_ID: ${{ secrets.APPLE_API_KEY_ID }} 54 | APPLE_API_ISSUER: ${{ secrets.APPLE_API_ISSUER }} 55 | 56 | - name: Upload macOS Artifact 57 | uses: actions/upload-artifact@v4 58 | with: 59 | name: macos-dmg 60 | path: ai-terminal/src-tauri/target/universal-apple-darwin/bundle/dmg/*.dmg 61 | 62 | build-linux: 63 | runs-on: ubuntu-latest 64 | permissions: 65 | contents: write 66 | strategy: 67 | matrix: 68 | architecture: [x86_64] # , aarch64] # Temporarily disabled ARM builds 69 | steps: 70 | - uses: actions/checkout@v4 71 | 72 | - name: Setup Node.js 73 | uses: actions/setup-node@v4 74 | with: 75 | node-version: '20' 76 | 77 | - name: Install Rust stable 78 | uses: dtolnay/rust-toolchain@stable 79 | 80 | - name: Install dependencies 81 | run: | 82 | cd ai-terminal 83 | npm ci 84 | 85 | - name: Install Linux build dependencies 86 | run: | 87 | sudo apt-get update 88 | # Try installing the newer version first, fall back to older version if needed 89 | if ! 
sudo apt-get install -y libwebkit2gtk-4.1-dev; then 90 | sudo apt-get install -y libwebkit2gtk-4.0-dev 91 | fi 92 | sudo apt-get install -y build-essential curl wget libssl-dev libgtk-3-dev libayatana-appindicator3-dev 93 | 94 | - name: Build Linux DEB 95 | run: | 96 | cd ai-terminal 97 | npm run tauri build 98 | env: 99 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 100 | 101 | - name: Upload Linux Artifact 102 | uses: actions/upload-artifact@v4 103 | with: 104 | name: linux-deb 105 | path: ai-terminal/src-tauri/target/release/bundle/deb/*.deb 106 | 107 | release: 108 | needs: [build-macos, build-linux] 109 | runs-on: ubuntu-latest 110 | permissions: 111 | contents: write 112 | steps: 113 | - uses: actions/checkout@v4 114 | 115 | - name: Download macOS Artifact 116 | uses: actions/download-artifact@v4 117 | with: 118 | name: macos-dmg 119 | path: artifacts/macos 120 | 121 | - name: Download Linux Artifact 122 | uses: actions/download-artifact@v4 123 | with: 124 | name: linux-deb 125 | path: artifacts/linux 126 | 127 | - name: Generate tag 128 | run: | 129 | cd ai-terminal 130 | VERSION=$(node -p "require('./package.json').version") 131 | echo "RELEASE_TAG=v${VERSION}" >> $GITHUB_ENV 132 | echo "RELEASE_VERSION=${VERSION}" >> $GITHUB_ENV 133 | 134 | - name: Create and push tag 135 | run: | 136 | git tag ${{ env.RELEASE_TAG }} 137 | git push origin ${{ env.RELEASE_TAG }} 138 | 139 | - name: Create release 140 | uses: softprops/action-gh-release@v1 141 | with: 142 | tag_name: ${{ env.RELEASE_TAG }} 143 | files: | 144 | artifacts/macos/*.dmg 145 | artifacts/linux/*.deb 146 | env: 147 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 148 | 149 | - name: Update Website Versions 150 | env: 151 | WEBSITE_REPO: AiTerminalFoundation/website 152 | GITHUB_TOKEN: ${{ secrets.HOMEBREW_TOKEN }} 153 | run: | 154 | # Clone the website repository 155 | git clone https://x-access-token:${GITHUB_TOKEN}@github.com/${WEBSITE_REPO}.git website 156 | cd website 157 | 158 | # Update versions.js with new version info 159 | VERSION="${{ env.RELEASE_VERSION }}" 160 | DMG_URL="https://github.com/AiTerminalFoundation/ai-terminal/releases/download/v${VERSION}/ai-terminal-${VERSION}.dmg" 161 | DEB_X86_URL="https://github.com/AiTerminalFoundation/ai-terminal/releases/download/v${VERSION}/ai-terminal_${VERSION}_amd64.deb" 162 | DEB_ARM_URL="https://github.com/AiTerminalFoundation/ai-terminal/releases/download/v${VERSION}/ai-terminal_${VERSION}_arm64.deb" 163 | 164 | # Create a temporary file with the updated content 165 | cat > versions.js << EOL 166 | const VERSION_INFO = { 167 | macos: { 168 | version: '${VERSION}', 169 | downloadUrl: '${DMG_URL}' 170 | }, 171 | windows: { 172 | version: '${VERSION}', 173 | downloadUrl: '#', 174 | isComingSoon: true 175 | }, 176 | linux: { 177 | version: '${VERSION}', 178 | downloadUrl: '${DEB_X86_URL}', 179 | arm64DownloadUrl: '${DEB_ARM_URL}' 180 | } 181 | }; 182 | EOL 183 | 184 | # Commit and push changes 185 | git config user.name "GitHub Actions" 186 | git config user.email "actions@github.com" 187 | git add versions.js 188 | git commit -m "Update ai-terminal version to v${VERSION}" 189 | git push 190 | 191 | - name: Update Homebrew Formula and Cask 192 | env: 193 | HOMEBREW_TAP_REPO: AiTerminalFoundation/homebrew-ai-terminal 194 | GITHUB_TOKEN: ${{ secrets.HOMEBREW_TOKEN }} 195 | run: | 196 | # Clone the tap repository 197 | git clone https://x-access-token:${GITHUB_TOKEN}@github.com/AiTerminalFoundation/homebrew-ai-terminal.git homebrew-tap 198 | cd homebrew-tap 199 | 200 | # Use the 
version from the environment 201 | VERSION="${{ env.RELEASE_VERSION }}" 202 | DMG_URL="https://github.com/AiTerminalFoundation/ai-terminal/releases/download/v${VERSION}/ai-terminal-${VERSION}.dmg" 203 | SHA256=$(curl -sL "${DMG_URL}" | shasum -a 256 | awk '{print $1}') 204 | 205 | # Update Formula/ai-terminal.rb 206 | sed -i.bak "s/version \".*\"/version \"${VERSION}\"/" Formula/ai-terminal.rb 207 | sed -i.bak "s|url \".*\"|url \"${DMG_URL}\"|" Formula/ai-terminal.rb 208 | sed -i.bak "s/sha256 \".*\"/sha256 \"${SHA256}\"/" Formula/ai-terminal.rb 209 | rm Formula/ai-terminal.rb.bak 210 | 211 | # Update Casks/ai-terminal.rb 212 | sed -i.bak "s/version \".*\"/version \"${VERSION}\"/" Casks/ai-terminal.rb 213 | sed -i.bak "s|url \".*\"|url \"${DMG_URL}\"|" Casks/ai-terminal.rb 214 | sed -i.bak "s/sha256 \".*\"/sha256 \"${SHA256}\"/" Casks/ai-terminal.rb 215 | rm Casks/ai-terminal.rb.bak 216 | 217 | # Commit and push changes 218 | git config user.name "GitHub Actions" 219 | git config user.email "actions@github.com" 220 | git add Formula/ai-terminal.rb Casks/ai-terminal.rb 221 | git commit -m "Update ai-terminal to v${VERSION}" 222 | git push 223 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # These are backup files generated by rustfmt 7 | **/*.rs.bk 8 | 9 | # MSVC Windows builds of rustc generate these, which store debugging information 10 | *.pdb 11 | 12 | # RustRover 13 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 14 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 15 | # and can be added to the global gitignore or merged into this file. For a more nuclear 16 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 17 | #.idea/ 18 | 19 | # Python bytecode 20 | __pycache__/ 21 | *.py[cod] 22 | *$py.class 23 | *.so 24 | .Python 25 | 26 | # Distribution / packaging 27 | dist/ 28 | build/ 29 | *.egg-info/ 30 | 31 | # Virtual environments 32 | venv/ 33 | env/ 34 | ENV/ 35 | .env/ 36 | 37 | # Training outputs 38 | results/ 39 | logs/ 40 | fine_tuned_model/ 41 | merged_opt_1.3b/ 42 | model_cache/ 43 | ollama_model/ 44 | exported_model/ 45 | llama2-7b-finetuned/ 46 | 47 | # Dataset files 48 | *.nl 49 | *.cm 50 | 51 | # Jupyter Notebook 52 | .ipynb_checkpoints 53 | 54 | # IDE specific files 55 | .idea/ 56 | .vscode/ 57 | *.swp 58 | *.swo 59 | 60 | # OS specific 61 | .DS_Store 62 | Thumbs.db 63 | 64 | # Large model files 65 | *.bin 66 | *.pt 67 | *.pth 68 | *.ckpt 69 | *.safetensors 70 | 71 | # Logs 72 | *.log 73 | .DS_Store 74 | /ai-terminal/.angular 75 | /ai-terminal/node_modules 76 | .DS_Store 77 | ai-terminal/.DS_Store 78 | -------------------------------------------------------------------------------- /FineTuned/README.md: -------------------------------------------------------------------------------- 1 | # Fine-Tuning Guide for TinyLlama with nl2bash 2 | 3 | This guide explains how to fine-tune the **TinyLlama/TinyLlama-1.1B-Chat-v1.0** model using the **nl2bash** dataset. The dataset includes over 20,000 examples split into two files—one containing bash command descriptions and the other containing the corresponding commands. 4 | 5 | ## Prerequisites 6 | 7 | - **Python 3.8+** installed. 
8 | - Clone this repository and the [llama2.cpp](https://github.com/ggerganov/llama.cpp) repository. 9 | - Install required dependencies (e.g., PyTorch, Transformers). 10 | - Ensure your environment is set up for Hugging Face LoRa format. 11 | 12 | ## Fine-Tuning Steps 13 | 14 | 1. **Run Fine-Tuning Script** 15 | Execute `llama2.py` to fine-tune the model using the nl2bash dataset. This script produces updated weights in the LoRa format. 16 | ```bash 17 | python llama2.py 18 | ``` 19 | 20 | 2. **Merge Weights for Ollama** 21 | Run the export process (e.g., via an export script) to merge the base model with the updated weights. This step prepares the model for conversion to the Ollama format. 22 | ```bash 23 | python export_for_ollama.sh 24 | ``` 25 | 26 | 27 | 3. **Convert to GGUF Format** 28 | Convert the merged model into a gguf model compatible with Ollama by running: 29 | ```bash 30 | python ~/llama.cpp/convert_hf_to_gguf.py --outfile ai-terminal/ai-terminal/FineTuned/ollama_model/gguf/opt_1.3b_f16.gguf ai-terminal/ai-terminal/FineTuned/ollama_model/merged_model 31 | ``` 32 | 33 | -------------------------------------------------------------------------------- /FineTuned/export_for_ollama.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import argparse 4 | from transformers import AutoModelForCausalLM, AutoTokenizer 5 | from peft import PeftModel, PeftConfig 6 | import shutil 7 | import subprocess 8 | 9 | def merge_and_export( 10 | lora_model_path="./llama2-1.1b-finetuned", 11 | output_dir="./ollama_model", 12 | model_name="my-finetuned-model" 13 | ): 14 | """Merge LoRA weights with base model and export for Ollama.""" 15 | print(f"Loading LoRA model from {lora_model_path}...") 16 | 17 | os.makedirs(output_dir, exist_ok=True) 18 | 19 | config = PeftConfig.from_pretrained(lora_model_path) 20 | base_model_name = "meta-llama/Llama-2-7b-chat-hf" 21 | print(f"Base model: {base_model_name}") 22 | 23 | print("Loading base model...") 24 | base_model = AutoModelForCausalLM.from_pretrained( 25 | base_model_name, 26 | torch_dtype=torch.float16, 27 | low_cpu_mem_usage=True, 28 | device_map="cpu" 29 | ) 30 | 31 | print("Loading LoRA adapters...") 32 | model = PeftModel.from_pretrained(base_model, lora_model_path) 33 | 34 | print("Merging weights...") 35 | model = model.merge_and_unload() 36 | 37 | merged_model_path = os.path.join(output_dir, "merged_model") 38 | print(f"Saving merged model to {merged_model_path}...") 39 | model.save_pretrained(merged_model_path) 40 | 41 | tokenizer = AutoTokenizer.from_pretrained(base_model_name) 42 | tokenizer.save_pretrained(merged_model_path) 43 | 44 | model_name_for_file = base_model_name.split('/')[-1] 45 | 46 | modelfile_content = "FROM " + model_name_for_file + "\n" 47 | modelfile_content += "PARAMETER temperature 0.7\n" 48 | modelfile_content += "PARAMETER top_p 0.9\n" 49 | modelfile_content += "PARAMETER stop \"### Instruction:\"\n" 50 | modelfile_content += "PARAMETER stop \"### Response:\"\n\n" 51 | modelfile_content += "TEMPLATE \"\"\"\n" 52 | modelfile_content += "### Instruction:\n" 53 | modelfile_content += "{{.Input}}\n\n" 54 | modelfile_content += "### Response:\n" 55 | modelfile_content += "\"\"\"" 56 | 57 | modelfile_path = os.path.join(output_dir, "Modelfile") 58 | with open(modelfile_path, "w") as f: 59 | f.write(modelfile_content) 60 | 61 | print(f"Created Modelfile at {modelfile_path}") 62 | print("\nTo create the Ollama model, run:") 63 | print(f"ollama create 
{model_name} -f {modelfile_path}") 64 | print(f"\nThen convert the model to GGUF format using export_for_ollama_gguf.py") 65 | print(f"python export_for_ollama_gguf.py --model_dir {merged_model_path} --output_dir {output_dir}/gguf") 66 | print(f"\nFinally, import the GGUF model into Ollama:") 67 | print(f"ollama import {output_dir}/gguf/{model_name}.gguf") 68 | 69 | if __name__ == "__main__": 70 | parser = argparse.ArgumentParser(description="Export fine-tuned model for Ollama") 71 | parser.add_argument("--lora_model_path", type=str, default="./llama2-1.1b-finetuned2", 72 | help="Path to the LoRA model") 73 | parser.add_argument("--output_dir", type=str, default="./ollama_model2", 74 | help="Output directory for the exported model") 75 | parser.add_argument("--model_name", type=str, default="my-finetuned-model", 76 | help="Name for the Ollama model") 77 | 78 | args = parser.parse_args() 79 | 80 | merge_and_export(args.lora_model_path, args.output_dir, args.model_name) -------------------------------------------------------------------------------- /FineTuned/test_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import argparse 4 | from transformers import AutoTokenizer, AutoModelForCausalLM 5 | from peft import PeftModel, PeftConfig 6 | 7 | def load_model(model_path="./llama2-7b-quantized"): 8 | """Load the fine-tuned model and tokenizer.""" 9 | try: 10 | # Load the configuration 11 | config = PeftConfig.from_pretrained(model_path) 12 | 13 | # Load the base model 14 | base_model = AutoModelForCausalLM.from_pretrained( 15 | config.base_model_name_or_path, 16 | trust_remote_code=True, 17 | low_cpu_mem_usage=True, 18 | device_map="auto" 19 | ) 20 | 21 | # Load the fine-tuned model 22 | model = PeftModel.from_pretrained(base_model, model_path) 23 | 24 | # Load tokenizer 25 | tokenizer = AutoTokenizer.from_pretrained( 26 | config.base_model_name_or_path, # Use base model for tokenizer 27 | use_fast=True 28 | ) 29 | 30 | if tokenizer.pad_token is None: 31 | tokenizer.pad_token = tokenizer.eos_token 32 | 33 | return model, tokenizer 34 | except Exception as e: 35 | print(f"Error loading model: {e}") 36 | return None, None 37 | 38 | def generate_response(model, tokenizer, prompt, max_length=100, temperature=0.7): 39 | """Generate a response for the given prompt.""" 40 | formatted_prompt = f"### Instruction:\n{prompt}\n\n### Response:" 41 | 42 | inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device) 43 | 44 | outputs = model.generate( 45 | **inputs, 46 | max_length=max_length + inputs.input_ids.shape[1], # Account for prompt length 47 | temperature=temperature, 48 | top_p=0.9, 49 | do_sample=True, 50 | num_return_sequences=1 51 | ) 52 | 53 | response = tokenizer.decode(outputs[0], skip_special_tokens=True) 54 | 55 | if "### Response:" in response: 56 | response = response.split("### Response:")[1].strip() 57 | 58 | return response 59 | 60 | def interactive_mode(model, tokenizer): 61 | """Run an interactive session with the model.""" 62 | print("\n=== Interactive Mode ===") 63 | print("Type 'exit' to quit") 64 | 65 | while True: 66 | prompt = input("\nEnter your prompt: ") 67 | if prompt.lower() == 'exit': 68 | break 69 | 70 | response = generate_response(model, tokenizer, prompt) 71 | print(f"\nResponse: {response}") 72 | 73 | def test_with_examples(model, tokenizer, examples=None): 74 | """Test the model with a list of example prompts.""" 75 | if examples is None: 76 | examples = [ 77 | "How do I list 
all files in a directory?", 78 | "How can I find the largest files in a directory?", 79 | "What's the command to check disk space?", 80 | "How do I search for text in files?", 81 | "How to compress a folder in Linux?" 82 | ] 83 | 84 | print("\n=== Testing with Examples ===") 85 | for prompt in examples: 86 | response = generate_response(model, tokenizer, prompt) 87 | print(f"\nPrompt: {prompt}") 88 | print(f"Response: {response}") 89 | print("-" * 50) 90 | 91 | if __name__ == "__main__": 92 | parser = argparse.ArgumentParser(description="Test a fine-tuned language model") 93 | parser.add_argument("--model_path", type=str, default="./llama2-7b-finetuned", 94 | help="Path to the fine-tuned model (default: ./llama2-7b-finetuned)") 95 | parser.add_argument("--interactive", action="store_true", 96 | help="Run in interactive mode") 97 | parser.add_argument("--examples", action="store_true", 98 | help="Test with example prompts") 99 | parser.add_argument("--prompt", type=str, 100 | help="Single prompt to test") 101 | parser.add_argument("--temperature", type=float, default=0.7, 102 | help="Temperature for generation (default: 0.7)") 103 | parser.add_argument("--max_length", type=int, default=100, 104 | help="Maximum length for generation (default: 100)") 105 | 106 | args = parser.parse_args() 107 | 108 | device = "cuda" if torch.cuda.is_available() else "cpu" 109 | print(f"Using device: {device}") 110 | 111 | print(f"Loading model from {args.model_path}...") 112 | model, tokenizer = load_model(args.model_path) 113 | 114 | if model is None or tokenizer is None: 115 | print("Failed to load model. Exiting.") 116 | exit(1) 117 | 118 | print("Model loaded successfully") 119 | 120 | if args.prompt: 121 | response = generate_response(model, tokenizer, args.prompt, 122 | args.max_length, args.temperature) 123 | print(f"\nPrompt: {args.prompt}") 124 | print(f"Response: {response}") 125 | 126 | elif args.interactive: 127 | interactive_mode(model, tokenizer) 128 | 129 | elif args.examples: 130 | test_with_examples(model, tokenizer) 131 | 132 | else: 133 | test_with_examples(model, tokenizer) 134 | interactive_mode(model, tokenizer) -------------------------------------------------------------------------------- /FineTuned/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | # Set CUDA allocation configuration to allow expandable segments 4 | os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" 5 | 6 | # --- Colab-specific: authenticate and mount Google Drive --- 7 | # try: 8 | # from google.colab import drive, auth 9 | # print("Authenticating user...") 10 | # auth.authenticate_user() # Explicitly authenticate 11 | # print("Mounting Google Drive...") 12 | # drive.mount('/content/drive', force_remount=True) 13 | # DEFAULT_OUTPUT_DIR = "/content/drive/MyDrive/llama2-improved" 14 | # except Exception as e: 15 | # print("Google Drive mounting failed:", e) 16 | # DEFAULT_OUTPUT_DIR = "./llama2-improved" 17 | 18 | import torch 19 | import argparse 20 | from transformers import ( 21 | AutoModelForCausalLM, 22 | AutoTokenizer, 23 | TrainingArguments, 24 | Trainer, 25 | DataCollatorForLanguageModeling, 26 | EarlyStoppingCallback 27 | ) 28 | from datasets import Dataset 29 | from peft import LoraConfig, get_peft_model 30 | import wandb 31 | import random 32 | import numpy as np 33 | 34 | def seed_everything(seed=42): 35 | """Set seeds for reproducibility.""" 36 | random.seed(seed) 37 | np.random.seed(seed) 38 | 
torch.manual_seed(seed) 39 | if torch.cuda.is_available(): 40 | torch.cuda.manual_seed_all(seed) 41 | os.environ['PYTHONHASHSEED'] = str(seed) 42 | 43 | def load_data(input_file, output_file, test_size=0.05): 44 | """Load data with improved formatting for context handling.""" 45 | with open(input_file, 'r', encoding='utf-8') as f_in: 46 | inputs = [line.strip() for line in f_in] 47 | with open(output_file, 'r', encoding='utf-8') as f_out: 48 | outputs = [line.strip() for line in f_out] 49 | assert len(inputs) == len(outputs), "Input and output counts must match" 50 | 51 | data = [] 52 | for i in range(len(inputs)): 53 | input_text = inputs[i] 54 | formatted_text = f"### Instruction:\n{input_text}\n\n### Response:\n{outputs[i]}" 55 | data.append({ 56 | "input": inputs[i], 57 | "output": outputs[i], 58 | "text": formatted_text 59 | }) 60 | 61 | random.shuffle(data) 62 | split_idx = int(len(data) * (1 - test_size)) 63 | train_data = data[:split_idx] 64 | val_data = data[split_idx:] 65 | 66 | return Dataset.from_list(train_data), Dataset.from_list(val_data) 67 | 68 | def load_model(model_id, device="cpu"): 69 | """Load model with optimized configuration for context learning.""" 70 | try: 71 | print(f"Loading tokenizer from {model_id}") 72 | tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=True) 73 | if tokenizer.pad_token is None: 74 | tokenizer.pad_token = tokenizer.eos_token 75 | 76 | load_options = { 77 | "cache_dir": "./model_cache", 78 | "low_cpu_mem_usage": True, 79 | "trust_remote_code": True, 80 | } 81 | 82 | # Use a sequential device map to offload parts of the model if necessary. 83 | if device == "cuda" and torch.cuda.is_available(): 84 | load_options.update({ 85 | "torch_dtype": torch.float16, 86 | "device_map": "sequential", 87 | }) 88 | else: 89 | load_options.update({ 90 | "torch_dtype": torch.float32, 91 | }) 92 | 93 | if device == "cuda" and torch.cuda.is_available(): 94 | torch.cuda.empty_cache() 95 | 96 | print(f"Loading model from {model_id}") 97 | model = AutoModelForCausalLM.from_pretrained(model_id, **load_options) 98 | model.config.use_cache = False 99 | 100 | print("Determining target modules for LoRA") 101 | if "llama" in model_id.lower(): 102 | target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"] 103 | elif "mistral" in model_id.lower(): 104 | target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"] 105 | elif "falcon" in model_id.lower(): 106 | target_modules = ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"] 107 | elif "tinyllama" in model_id.lower(): 108 | target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"] 109 | else: 110 | target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"] 111 | 112 | print(f"Using target modules: {target_modules}") 113 | model.train() 114 | 115 | lora_config = LoraConfig( 116 | r=8, 117 | lora_alpha=16, 118 | target_modules=target_modules, 119 | lora_dropout=0.05, 120 | bias="none", 121 | task_type="CAUSAL_LM", 122 | ) 123 | 124 | print("Applying LoRA adapters") 125 | model = get_peft_model(model, lora_config) 126 | 127 | if hasattr(model, "enable_input_require_grads"): 128 | model.enable_input_require_grads() 129 | if hasattr(model, "gradient_checkpointing_enable"): 130 | print("Enabling gradient checkpointing") 131 | model.gradient_checkpointing_enable() 132 | 133 | model.train() 134 | 135 | print("Trainable parameters:") 136 | model.print_trainable_parameters() 137 | trainable_params = sum(p.numel() for p in model.parameters() if 
p.requires_grad) 138 | print(f"Total trainable parameters: {trainable_params}") 139 | 140 | if trainable_params == 0: 141 | raise ValueError("No trainable parameters found in the model") 142 | 143 | return model, tokenizer 144 | 145 | except Exception as e: 146 | print(f"Error loading model: {e}") 147 | print("Attempting to load a fallback model...") 148 | fallback_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" 149 | tokenizer = AutoTokenizer.from_pretrained(fallback_model, trust_remote_code=True) 150 | tokenizer.pad_token = tokenizer.eos_token 151 | 152 | fallback_options = { 153 | "cache_dir": "./model_cache", 154 | "torch_dtype": torch.float16 if device == "cuda" else torch.float32, 155 | "low_cpu_mem_usage": True, 156 | "trust_remote_code": True, 157 | } 158 | if device == "cuda": 159 | fallback_options["device_map"] = "sequential" 160 | 161 | model = AutoModelForCausalLM.from_pretrained(fallback_model, **fallback_options) 162 | model.config.use_cache = False 163 | model.train() 164 | 165 | lora_config = LoraConfig( 166 | r=8, 167 | lora_alpha=16, 168 | target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], 169 | lora_dropout=0.05, 170 | bias="none", 171 | task_type="CAUSAL_LM" 172 | ) 173 | model = get_peft_model(model, lora_config) 174 | if hasattr(model, "enable_input_require_grads"): 175 | model.enable_input_require_grads() 176 | if hasattr(model, "gradient_checkpointing_enable"): 177 | model.gradient_checkpointing_enable() 178 | 179 | model.train() 180 | print("Trainable parameters in fallback model:") 181 | model.print_trainable_parameters() 182 | 183 | return model, tokenizer 184 | 185 | def tokenize_function(examples, tokenizer, max_length=512): 186 | """Tokenize with improved handling for context examples.""" 187 | results = tokenizer( 188 | examples["text"], 189 | padding="max_length", 190 | truncation=True, 191 | max_length=max_length, 192 | return_tensors="pt" 193 | ) 194 | results["labels"] = results["input_ids"].clone() 195 | pad_token_id = tokenizer.pad_token_id 196 | results["labels"] = [ 197 | [(label if label != pad_token_id else -100) for label in labels] 198 | for labels in results["labels"] 199 | ] 200 | return results 201 | 202 | def compute_metrics(eval_preds): 203 | """Custom metrics for evaluating context-aware performance.""" 204 | predictions, labels = eval_preds 205 | predictions = np.argmax(predictions, axis=-1) 206 | mask = labels != -100 207 | labels = labels[mask] 208 | predictions = predictions[mask] 209 | accuracy = (predictions == labels).mean() 210 | return {"accuracy": accuracy} 211 | 212 | def main(): 213 | parser = argparse.ArgumentParser(description="Improved training process for context handling") 214 | parser.add_argument("--input_file", type=str, default="improved.nl", help="Path to input file") 215 | parser.add_argument("--output_file", type=str, default="improved.cm", help="Path to output file") 216 | parser.add_argument("--model_id", type=str, default="meta-llama/Llama-3.2-3B", help="Hugging Face model ID") 217 | parser.add_argument("--output_dir", type=str, default=DEFAULT_OUTPUT_DIR, help="Output directory (Google Drive folder if in Colab)") 218 | parser.add_argument("--batch_size", type=int, default=10, help="Batch size for training") 219 | parser.add_argument("--learning_rate", type=float, default=3e-4, help="Learning rate") 220 | parser.add_argument("--num_epochs", type=int, default=2, help="Number of training epochs") 221 | parser.add_argument("--warmup_ratio", type=float, default=0.1, help="Warmup ratio") 222 | 
parser.add_argument("--max_length", type=int, default=512, help="Max length for tokenization") 223 | parser.add_argument("--seed", type=int, default=42, help="Random seed") 224 | parser.add_argument("--use_wandb", action="store_true", help="Use Weights & Biases for tracking") 225 | parser.add_argument("--no_gradient_checkpointing", action="store_true", help="Disable gradient checkpointing") 226 | parser.add_argument("--force_cpu", action="store_true", help="Force the use of CPU even if a GPU is available") 227 | 228 | args, _ = parser.parse_known_args() 229 | 230 | seed_everything(args.seed) 231 | 232 | if args.use_wandb: 233 | wandb.init(project="llama2-terminal-commands", name="context-improved") 234 | 235 | if args.force_cpu: 236 | device = "cpu" 237 | else: 238 | device = "cuda" if torch.cuda.is_available() else "cpu" 239 | print(f"Using device: {device}") 240 | 241 | print("Loading data...") 242 | train_dataset, val_dataset = load_data(args.input_file, args.output_file) 243 | print(f"Loaded {len(train_dataset)} training examples and {len(val_dataset)} validation examples") 244 | 245 | print(f"Loading model {args.model_id}...") 246 | model, tokenizer = load_model(args.model_id, device) 247 | 248 | print("Tokenizing datasets...") 249 | tokenized_train = train_dataset.map( 250 | lambda examples: tokenize_function(examples, tokenizer, args.max_length), 251 | batched=True, 252 | remove_columns=train_dataset.column_names 253 | ) 254 | tokenized_val = val_dataset.map( 255 | lambda examples: tokenize_function(examples, tokenizer, args.max_length), 256 | batched=True, 257 | remove_columns=val_dataset.column_names 258 | ) 259 | 260 | data_collator = DataCollatorForLanguageModeling( 261 | tokenizer=tokenizer, 262 | mlm=False 263 | ) 264 | 265 | training_args = TrainingArguments( 266 | output_dir=args.output_dir, 267 | per_device_train_batch_size=args.batch_size, 268 | per_device_eval_batch_size=args.batch_size, 269 | evaluation_strategy="steps", 270 | eval_steps=500, 271 | logging_steps=50, 272 | gradient_accumulation_steps=4, 273 | num_train_epochs=args.num_epochs, 274 | weight_decay=0.01, 275 | warmup_ratio=args.warmup_ratio, 276 | lr_scheduler_type="cosine", 277 | learning_rate=args.learning_rate, 278 | save_steps=200, 279 | save_total_limit=3, 280 | load_best_model_at_end=True, 281 | metric_for_best_model="eval_loss", 282 | greater_is_better=False, 283 | push_to_hub=False, 284 | report_to="wandb" if args.use_wandb else "none", 285 | gradient_checkpointing=not args.no_gradient_checkpointing, 286 | fp16=device == "cuda", 287 | ddp_find_unused_parameters=False, 288 | dataloader_drop_last=True, 289 | optim="adamw_torch", 290 | remove_unused_columns=False, 291 | ) 292 | 293 | from transformers.trainer import Trainer as BaseTrainer 294 | original_move_model = BaseTrainer._move_model_to_device 295 | def safe_move_model(self, model, device): 296 | if any(p.device.type == "meta" for p in model.parameters()): 297 | print("Detected meta tensors, using to_empty() to move model") 298 | return model.to_empty(device=device) 299 | return original_move_model(self, model, device) 300 | BaseTrainer._move_model_to_device = safe_move_model 301 | 302 | trainer = Trainer( 303 | model=model, 304 | args=training_args, 305 | train_dataset=tokenized_train, 306 | eval_dataset=tokenized_val, 307 | tokenizer=tokenizer, 308 | data_collator=data_collator, 309 | callbacks=[EarlyStoppingCallback(early_stopping_patience=3)], 310 | ) 311 | 312 | print("Starting training...") 313 | trainer.train() 314 | 315 | print(f"Saving 
model to {args.output_dir}") 316 | trainer.save_model(args.output_dir) 317 | tokenizer.save_pretrained(args.output_dir) 318 | 319 | print("Training complete!") 320 | 321 | if args.use_wandb: 322 | wandb.finish() 323 | 324 | if __name__ == "__main__": 325 | main() 326 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI Terminal 2 | 3 | A Tauri + Angular terminal application with integrated AI capabilities. 4 | ![AI Terminal Demo](demo.gif) 5 | ## Features 6 | 7 | - Natural language command interpretation 8 | - Integrated AI assistant 9 | - Command history and auto-completion 10 | - Cross-platform support (macOS, Windows, Linux) 11 | - Modern UI built with Tauri and Angular 12 | 13 | ## Requirements 14 | 15 | - Node.js 18+ 16 | - Rust and Cargo 17 | - For AI features: [Ollama](https://ollama.ai/) (can be installed with `brew install ollama`) 18 | 19 | ## Development Setup 20 | 21 | 1. Clone the repository: 22 | ``` 23 | git clone https://github.com/your-username/ai-terminal.git 24 | cd ai-terminal 25 | ``` 26 | 27 | 2. Install dependencies and run the project: 28 | ``` 29 | cd ai-terminal 30 | npm install 31 | npm run tauri dev 32 | ``` 33 | 34 | ## Installation 35 | 36 | ### macOS (Homebrew) 37 | 38 | You can install AI Terminal using Homebrew: 39 | 40 | ```bash 41 | brew tap AiTerminalFoundation/ai-terminal 42 | brew install --cask ai-terminal 43 | ``` 44 | 45 | After installation, you can launch the application from Spotlight or run it from the terminal: 46 | 47 | ```bash 48 | ai-terminal 49 | ``` 50 | 51 | ## Quick Guide to Using Ollama to Download `macsdeve/BetterBash3` Model 52 | 53 | ### Linux 54 | 55 | 1. **Install Ollama** 56 | 57 | Open your terminal and run: 58 | 59 | ```bash 60 | curl -fsSL https://ollama.com/install.sh | sh 61 | ``` 62 | 63 | 2. **Download the Model** 64 | 65 | Run the following command: 66 | 67 | ```bash 68 | ollama pull macsdeve/BetterBash3 69 | ``` 70 | 71 | ### macOS 72 | 73 | 1. **Download Ollama** 74 | 75 | - Visit [Ollama download page](https://ollama.com/download/mac). 76 | - Click **Download for macOS**. 77 | 78 | 2. **Install Ollama** 79 | 80 | - Open the downloaded `.zip` file from your `Downloads` folder. 81 | - Drag the `Ollama.app` into your `Applications` folder. 82 | - Open `Ollama.app` and follow any prompts. 83 | 84 | 3. **Download the Model** 85 | 86 | Open Terminal and execute: 87 | 88 | ```bash 89 | ollama pull macsdeve/BetterBash3 90 | ``` 91 | 92 | ## Contributing 93 | 94 | Contributions are welcome! Please feel free to submit a Pull Request. 
95 | 96 | ## License 97 | 98 | [MIT License](LICENSE) 99 | -------------------------------------------------------------------------------- /ai-terminal/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/.DS_Store -------------------------------------------------------------------------------- /ai-terminal/.idea/.gitignore: -------------------------------------------------------------------------------- 1 | # Default ignored files 2 | /shelf/ 3 | /workspace.xml 4 | # Editor-based HTTP Client requests 5 | /httpRequests/ 6 | # Datasource local storage ignored files 7 | /dataSources/ 8 | /dataSources.local.xml 9 | -------------------------------------------------------------------------------- /ai-terminal/.idea/ai-terminal.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /ai-terminal/.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /ai-terminal/.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /ai-terminal/.npmrc: -------------------------------------------------------------------------------- 1 | registry=https://registry.npmjs.org/ -------------------------------------------------------------------------------- /ai-terminal/README.md: -------------------------------------------------------------------------------- 1 | # Tauri + Angular 2 | 3 | This template should help get you started developing with Tauri and Angular. 4 | 5 | ## Recommended IDE Setup 6 | 7 | [VS Code](https://code.visualstudio.com/) + [Tauri](https://marketplace.visualstudio.com/items?itemName=tauri-apps.tauri-vscode) + [rust-analyzer](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer) + [Angular Language Service](https://marketplace.visualstudio.com/items?itemName=Angular.ng-template). 8 | 9 | # AI Terminal 10 | 11 | AI Terminal is a powerful terminal interface with AI capabilities. It allows you to interact with your terminal using natural language commands and provides an integrated AI assistant powered by Ollama. 
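To get a feel for what powers the assistant, the snippet below is a minimal sketch of talking to a locally running Ollama server directly (the app proxies `/api` to `http://localhost:11434`, and its requests carry `model`, `prompt` and `stream` fields). The model name is only an illustrative assumption; substitute whatever `ollama list` shows on your machine.

```bash
# Minimal sketch: ask the local Ollama server to suggest a shell command (non-streaming).
# "macsdeve/BetterBash3" is assumed to be pulled already; any locally available model works.
curl -s http://localhost:11434/api/generate \
  -d '{"model": "macsdeve/BetterBash3", "prompt": "list all files in the current directory", "stream": false}'
```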
12 | 13 | ## Features 14 | 15 | - Natural language command interpretation 16 | - Integrated AI assistant 17 | - Command history and auto-completion 18 | - Cross-platform support (macOS, Windows, Linux) 19 | 20 | ## Installation 21 | 22 | ### macOS (Homebrew) 23 | 24 | You can install AI Terminal using Homebrew: 25 | ```bash 26 | brew tap AiTerminalFoundation/ai-terminal 27 | brew install ai-terminal 28 | ``` 29 | 30 | After installation, you can launch the application from Spotlight or run it from the terminal: 31 | 32 | ```bash 33 | ai-terminal 34 | ``` 35 | 36 | ### Requirements 37 | 38 | - For AI features: [Ollama](https://ollama.ai/) (can be installed with `brew install ollama`) 39 | 40 | ## Building from Source 41 | 42 | ### Prerequisites 43 | 44 | - Node.js 18+ 45 | - Rust and Cargo 46 | - Tauri CLI 47 | 48 | ### macOS Universal Binary 49 | 50 | To build a universal binary for macOS (arm64 + x86_64): 51 | 52 | ```bash 53 | # Install dependencies 54 | npm install 55 | 56 | # Install create-dmg tool for packaging 57 | brew install create-dmg 58 | 59 | # Run the build script 60 | chmod +x build-macos.sh 61 | ./build-macos.sh 62 | ``` 63 | 64 | This will create a universal binary DMG installer at `src-tauri/target/universal-apple-darwin/bundle/dmg/ai-terminal-[version].dmg`. 65 | 66 | ## Contributing 67 | 68 | Contributions are welcome! Please feel free to submit a Pull Request. 69 | 70 | ## License 71 | 72 | AI Terminal is licensed under the MIT License - see the LICENSE file for details. 73 | -------------------------------------------------------------------------------- /ai-terminal/ai-terminal_0.2.0_amd64.deb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/ai-terminal_0.2.0_amd64.deb -------------------------------------------------------------------------------- /ai-terminal/angular.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "./node_modules/@angular/cli/lib/config/schema.json", 3 | "version": 1, 4 | "newProjectRoot": "projects", 5 | "cli": { 6 | "analytics": false 7 | }, 8 | "projects": { 9 | "ai-terminal": { 10 | "projectType": "application", 11 | "root": "", 12 | "sourceRoot": "src", 13 | "prefix": "app", 14 | "architect": { 15 | "build": { 16 | "builder": "@angular/build:application", 17 | "options": { 18 | "outputPath": "dist/ai-terminal", 19 | "index": "src/index.html", 20 | "browser": "src/main.ts", 21 | "polyfills": ["zone.js"], 22 | "tsConfig": "tsconfig.app.json", 23 | "assets": ["src/assets"] 24 | }, 25 | "configurations": { 26 | "production": { 27 | "budgets": [ 28 | { 29 | "type": "initial", 30 | "maximumWarning": "500kb", 31 | "maximumError": "1mb" 32 | }, 33 | { 34 | "type": "anyComponentStyle", 35 | "maximumWarning": "300kb", 36 | "maximumError": "1mb" 37 | } 38 | ], 39 | "outputHashing": "all" 40 | }, 41 | "development": { 42 | "optimization": false, 43 | "extractLicenses": false, 44 | "sourceMap": true 45 | } 46 | }, 47 | "defaultConfiguration": "production" 48 | }, 49 | "serve": { 50 | "builder": "@angular/build:dev-server", 51 | "options": { 52 | "port": 1420 53 | }, 54 | "configurations": { 55 | "production": { 56 | "buildTarget": "ai-terminal:build:production" 57 | }, 58 | "development": { 59 | "buildTarget": "ai-terminal:build:development" 60 | } 61 | }, 62 | "defaultConfiguration": "development" 63 | } 64 | } 65 | } 66 | } 67 | } 68 | 
-------------------------------------------------------------------------------- /ai-terminal/build-macos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit on error 4 | set -e 5 | 6 | VERSION=$(grep -m1 "version" package.json | cut -d '"' -f 4) 7 | echo "🚀 Building ai-terminal v$VERSION for macOS Universal..." 8 | 9 | # Build frontend 10 | echo "📦 Building frontend..." 11 | npm run build 12 | 13 | # Build Tauri app for multiple architectures 14 | echo "🔨 Building Universal binary for macOS..." 15 | # Build for Apple Silicon (arm64) 16 | rustup target add aarch64-apple-darwin 17 | # Build for Intel (x86_64) 18 | rustup target add x86_64-apple-darwin 19 | 20 | # Build both architectures 21 | echo "Building for ARM64..." 22 | npm run tauri build -- --target aarch64-apple-darwin 23 | echo "Building for x86_64..." 24 | npm run tauri build -- --target x86_64-apple-darwin 25 | 26 | # Create universal binary 27 | echo "Creating universal binary..." 28 | mkdir -p src-tauri/target/universal-apple-darwin/release 29 | lipo -create \ 30 | src-tauri/target/aarch64-apple-darwin/release/ai-terminal \ 31 | src-tauri/target/x86_64-apple-darwin/release/ai-terminal \ 32 | -output src-tauri/target/universal-apple-darwin/release/ai-terminal 33 | 34 | # Create app bundle with universal binary 35 | echo "Creating universal app bundle..." 36 | APP_PATH="src-tauri/target/universal-apple-darwin/bundle/macos/ai-terminal.app" 37 | mkdir -p "$APP_PATH/Contents/MacOS" 38 | # Copy the universal binary 39 | cp src-tauri/target/universal-apple-darwin/release/ai-terminal "$APP_PATH/Contents/MacOS/" 40 | # Copy app bundle contents from one of the architectures 41 | cp -R src-tauri/target/aarch64-apple-darwin/release/bundle/macos/ai-terminal.app/Contents/Resources "$APP_PATH/Contents/" 42 | cp src-tauri/target/aarch64-apple-darwin/release/bundle/macos/ai-terminal.app/Contents/Info.plist "$APP_PATH/Contents/" 43 | 44 | # Sign the application bundle 45 | echo "🔑 Signing application bundle..." 46 | codesign --force --options runtime --sign "$APPLE_DEVELOPER_ID" \ 47 | --entitlements src-tauri/entitlements.plist \ 48 | "$APP_PATH" --deep --timestamp 49 | 50 | # Create DMG 51 | echo "📦 Creating DMG installer..." 52 | DMG_PATH="src-tauri/target/universal-apple-darwin/bundle/dmg/ai-terminal-$VERSION.dmg" 53 | mkdir -p "$(dirname "$DMG_PATH")" 54 | 55 | # Check if create-dmg is available 56 | if command -v create-dmg &> /dev/null; then 57 | echo "Using create-dmg for DMG creation..." 58 | create-dmg \ 59 | --volname "ai-terminal" \ 60 | --volicon "src-tauri/icons/icon.icns" \ 61 | --window-pos 200 120 \ 62 | --window-size 800 400 \ 63 | --icon-size 100 \ 64 | --icon "ai-terminal.app" 200 190 \ 65 | --hide-extension "ai-terminal.app" \ 66 | --app-drop-link 600 185 \ 67 | "$DMG_PATH" \ 68 | "$APP_PATH" 69 | else 70 | echo "create-dmg not found, using hdiutil..." 71 | # Create a temporary directory for DMG creation 72 | TMP_DMG_DIR=$(mktemp -d) 73 | cp -R "$APP_PATH" "$TMP_DMG_DIR/" 74 | 75 | # Create a symlink to Applications folder 76 | ln -s /Applications "$TMP_DMG_DIR/Applications" 77 | 78 | # Create the DMG 79 | hdiutil create -volname "ai-terminal" -srcfolder "$TMP_DMG_DIR" -ov -format UDZO "$DMG_PATH" 80 | 81 | # Clean up 82 | rm -rf "$TMP_DMG_DIR" 83 | fi 84 | 85 | # Sign the DMG 86 | echo "🔑 Signing DMG..." 87 | codesign --force --sign "$APPLE_DEVELOPER_ID" "$DMG_PATH" --timestamp 88 | 89 | # Notarize the DMG 90 | echo "📝 Notarizing DMG..." 
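# Note: notarytool authenticates with the App Store Connect API key whose path is in
# $APPLE_API_KEY (written by the release workflow), and --wait blocks until Apple
# returns a verdict for this submission. If a submission is rejected, the details can
# usually be retrieved with the same credentials, using the submission id printed by
# `submit`, e.g.:
#   xcrun notarytool log <submission-id> --key "$APPLE_API_KEY" --key-id "$APPLE_API_KEY_ID" --issuer "$APPLE_API_ISSUER"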
91 | xcrun notarytool submit "$DMG_PATH" \ 92 | --key "$APPLE_API_KEY" \ 93 | --key-id "$APPLE_API_KEY_ID" \ 94 | --issuer "$APPLE_API_ISSUER" \ 95 | --wait 96 | 97 | # Staple the notarization ticket 98 | echo "📎 Stapling notarization ticket to DMG..." 99 | xcrun stapler staple "$DMG_PATH" 100 | 101 | # Calculate SHA256 for Homebrew 102 | SHA256=$(shasum -a 256 "$DMG_PATH" | awk '{print $1}') 103 | 104 | echo "✅ Build complete! DMG is available at: $DMG_PATH" 105 | echo "✅ SHA256: $SHA256" -------------------------------------------------------------------------------- /ai-terminal/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ai-terminal", 3 | "version": "1.0.0", 4 | "scripts": { 5 | "ng": "ng", 6 | "start": "ng serve --proxy-config proxy.conf.json", 7 | "build": "ng build", 8 | "watch": "ng build --watch --configuration development", 9 | "tauri": "tauri", 10 | "dev": "tauri dev" 11 | }, 12 | "private": true, 13 | "dependencies": { 14 | "@angular/animations": "^19.2.13", 15 | "@angular/common": "^19.2.13", 16 | "@angular/compiler": "^19.2.13", 17 | "@angular/core": "^19.2.13", 18 | "@angular/forms": "^19.2.13", 19 | "@angular/platform-browser": "^19.2.13", 20 | "@angular/platform-browser-dynamic": "^19.2.13", 21 | "@angular/router": "^19.2.13", 22 | "@tauri-apps/api": "^2.4.0", 23 | "@tauri-apps/plugin-opener": "^2", 24 | "@tauri-apps/plugin-shell": "^2.2.1", 25 | "rxjs": "~7.8.0", 26 | "tslib": "^2.3.0", 27 | "zone.js": "~0.15.0" 28 | }, 29 | "devDependencies": { 30 | "@angular/build": "^19.2.13", 31 | "@angular/cli": "^19.2.13", 32 | "@angular/compiler-cli": "^19.2.13", 33 | "@npmcli/package-json": "^6.1.1", 34 | "@tauri-apps/cli": "^2", 35 | "@types/jasmine": "~5.1.0", 36 | "glob": "^11.0.1", 37 | "jasmine-core": "~5.1.0", 38 | "karma": "~6.4.0", 39 | "karma-chrome-launcher": "~3.2.0", 40 | "karma-coverage": "~2.2.0", 41 | "karma-jasmine": "~5.1.0", 42 | "karma-jasmine-html-reporter": "~2.1.0", 43 | "rimraf": "^6.0.1", 44 | "typescript": "~5.8.2" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /ai-terminal/proxy.conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "/api": { 3 | "target": "http://localhost:11434", 4 | "secure": false, 5 | "changeOrigin": true, 6 | "logLevel": "debug" 7 | } 8 | } -------------------------------------------------------------------------------- /ai-terminal/setup-homebrew-tap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit on error 4 | set -e 5 | 6 | echo "🍺 Setting up Homebrew tap repository for ai-terminal..." 7 | 8 | # Check if GitHub CLI is installed 9 | if ! command -v gh &> /dev/null; then 10 | echo "GitHub CLI (gh) is not installed. Please install it with: brew install gh" 11 | exit 1 12 | fi 13 | 14 | # Check if logged in to GitHub 15 | if ! gh auth status &> /dev/null; then 16 | echo "Please log in to GitHub with: gh auth login" 17 | exit 1 18 | fi 19 | 20 | # Create the tap repository on GitHub 21 | echo "Creating GitHub repository for Homebrew tap..." 22 | REPO_NAME="ai-terminal" 23 | ORGANIZATION="AiTerminalFoundation" 24 | 25 | # Check if the repo already exists 26 | if gh repo view $ORGANIZATION/$REPO_NAME &> /dev/null; then 27 | echo "Repository $ORGANIZATION/$REPO_NAME already exists. Skipping creation." 28 | else 29 | echo "Creating repository $ORGANIZATION/$REPO_NAME..." 
30 | gh repo create $ORGANIZATION/$REPO_NAME --public --description "Homebrew Tap for AI Terminal" || { 31 | echo "Failed to create repository. Please create it manually on GitHub." 32 | exit 1 33 | } 34 | fi 35 | 36 | # Clone the repo 37 | echo "Cloning the tap repository..." 38 | TMP_DIR=$(mktemp -d) 39 | cd $TMP_DIR 40 | gh repo clone $ORGANIZATION/$REPO_NAME || { 41 | echo "Failed to clone repository. Please check if it exists and you have access." 42 | exit 1 43 | } 44 | 45 | cd $REPO_NAME 46 | 47 | # Copy the formula to the repository 48 | echo "Copying formula to the repository..." 49 | cp "$OLDPWD/ai-terminal.rb" ./Formula/ 50 | 51 | # Commit and push changes 52 | echo "Committing and pushing changes..." 53 | git add ./Formula/ai-terminal.rb 54 | git commit -m "Update ai-terminal formula to version $(grep -m1 "version" $OLDPWD/package.json | cut -d '"' -f 4)" 55 | git push 56 | 57 | echo "✅ Homebrew tap repository setup complete!" 58 | echo "Users can now install ai-terminal with:" 59 | echo " brew tap $ORGANIZATION/ai-terminal" 60 | echo " brew install ai-terminal" 61 | echo "" 62 | echo "To update the formula in the future, run:" 63 | echo " ./build-macos.sh" 64 | echo " ./setup-homebrew-tap.sh" 65 | 66 | # Clean up temporary directory 67 | cd $OLDPWD 68 | rm -rf $TMP_DIR -------------------------------------------------------------------------------- /ai-terminal/src-tauri/.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Generated by Tauri 6 | # will have schema files for capabilities auto-completion 7 | /gen/schemas 8 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ai-terminal" 3 | version = "1.0.0" 4 | description = "Your AI Mate Inside Your Favourite Terminal" 5 | authors = ["Michele Verriello", "Marco De Vellis"] 6 | edition = "2021" 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [lib] 11 | # The `_lib` suffix may seem redundant but it is necessary 12 | # to make the lib name unique and wouldn't conflict with the bin name. 
13 | # This seems to be only an issue on Windows, see https://github.com/rust-lang/cargo/issues/8519 14 | name = "ai_terminal_lib" 15 | crate-type = ["staticlib", "cdylib", "rlib"] 16 | 17 | [build-dependencies] 18 | tauri-build = { version = "2", features = [] } 19 | 20 | [dependencies] 21 | tauri = { version = "2.4.0", features = ["macos-private-api"] } 22 | tauri-plugin-opener = "2" 23 | serde = { version = "1", features = ["derive"] } 24 | dirs = "6.0.0" 25 | reqwest = { version = "0.12.15", features = ["json"] } 26 | nix = { version = "0.30", features = ["signal"] } 27 | tauri-plugin-shell = "2" 28 | fix-path-env = { git = "https://github.com/tauri-apps/fix-path-env-rs" } 29 | serde_json = "1.0" 30 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | tauri_build::build() 3 | } 4 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/capabilities/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "../gen/schemas/desktop-schema.json", 3 | "identifier": "default", 4 | "description": "Capability for the main window", 5 | "windows": ["main"], 6 | "permissions": [ 7 | "core:default", 8 | "opener:default", 9 | { 10 | "identifier": "shell:allow-execute", 11 | "allow": [ 12 | { 13 | "name": "exec-any", 14 | "cmd": "*", 15 | "args": ["*"], 16 | "sidecar": false 17 | } 18 | ] 19 | } 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/entitlements.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | com.apple.security.cs.allow-jit 6 | 7 | com.apple.security.cs.allow-unsigned-executable-memory 8 | 9 | com.apple.security.cs.disable-library-validation 10 | 11 | com.apple.security.automation.apple-events 12 | 13 | com.apple.security.get-task-allow 14 | 15 | com.apple.security.inherit 16 | 17 | 18 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/128x128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/128x128.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/128x128@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/128x128@2x.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/32x32.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square107x107Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square107x107Logo.png 
-------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square142x142Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square142x142Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square150x150Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square150x150Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square284x284Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square284x284Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square30x30Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square30x30Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square310x310Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square310x310Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square44x44Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square44x44Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square71x71Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square71x71Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/Square89x89Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/Square89x89Logo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/StoreLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/StoreLogo.png -------------------------------------------------------------------------------- /ai-terminal/src-tauri/icons/icon.icns: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/icon.icns
--------------------------------------------------------------------------------
/ai-terminal/src-tauri/icons/icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/icon.ico
--------------------------------------------------------------------------------
/ai-terminal/src-tauri/icons/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/icons/icon.png
--------------------------------------------------------------------------------
/ai-terminal/src-tauri/src/lib.rs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/ai-terminal/src-tauri/src/lib.rs
--------------------------------------------------------------------------------
/ai-terminal/src-tauri/src/main.rs:
--------------------------------------------------------------------------------
1 | // Prevents additional console window on Windows in release, DO NOT REMOVE!!
2 | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
3 |
4 | extern crate fix_path_env;
5 |
6 | use serde::{Deserialize, Serialize};
7 | use serde_json;
8 | use std::collections::HashMap;
9 | use std::env;
10 | use std::fs;
11 | use std::io::{Read, BufReader, Write};
12 | use std::os::unix::process::CommandExt;
13 | use std::path::{Path, PathBuf};
14 | use std::process::{Child, Command, Stdio};
15 | use std::sync::{Arc, Mutex};
16 | use std::thread;
17 | use tauri::{command, AppHandle, Emitter, Manager, State};
18 |
19 | // Define Ollama API models and structures
20 | #[derive(Debug, Serialize, Deserialize)]
21 | struct OllamaRequest {
22 |     model: String,
23 |     prompt: String,
24 |     stream: bool,
25 | }
26 |
27 | #[derive(Debug, Serialize, Deserialize)]
28 | struct OllamaResponse {
29 |     model: String,
30 |     response: String,
31 |     done: bool,
32 | }
33 |
34 | #[derive(Debug, Serialize, Deserialize)]
35 | struct OllamaModel {
36 |     name: String,
37 |     size: u64,
38 |     modified_at: String,
39 |     // Add other fields as needed
40 | }
41 |
42 | #[derive(Debug, Serialize, Deserialize)]
43 | struct OllamaModelList {
44 |     models: Vec<OllamaModel>,
45 | }
46 |
47 | // Store the current working directory for each command
48 | struct CommandState {
49 |     current_dir: String,
50 |     child_wait_handle: Option<Arc<Mutex<Child>>>, // For wait() and kill()
51 |     child_stdin: Option<Arc<Mutex<std::process::ChildStdin>>>, // For writing
52 |     pid: Option<u32>,
53 |     is_ssh_session_active: bool, // Added for persistent SSH
54 |     remote_current_dir: Option<String>, // New field for remote SSH path
55 | }
56 |
57 | // Add Ollama state management
58 | struct OllamaState {
59 |     current_model: String,
60 |     api_host: String,
61 | }
62 |
63 | // Structure to handle command output streaming
64 | struct CommandManager {
65 |     commands: Mutex<HashMap<String, CommandState>>,
66 |     ollama: Mutex<OllamaState>,
67 | }
68 |
69 | impl CommandManager {
70 |     fn new() -> Self {
71 |         let mut initial_commands = HashMap::new();
72 |         initial_commands.insert(
73 |             "default_state".to_string(),
74 |             CommandState {
75 |                 current_dir: env::current_dir().unwrap_or_default().to_string_lossy().to_string(),
76 |                 child_wait_handle:
None, 77 | child_stdin: None, 78 | pid: None, 79 | is_ssh_session_active: false, // Initialize here 80 | remote_current_dir: None, // Initialize new field 81 | }, 82 | ); 83 | CommandManager { 84 | commands: Mutex::new(initial_commands), 85 | ollama: Mutex::new(OllamaState { 86 | current_model: "llama3.2:latest".to_string(), // Default model will now be overridden by frontend 87 | api_host: "http://localhost:11434".to_string(), // Default Ollama host 88 | }), 89 | } 90 | } 91 | } 92 | 93 | fn get_shell_path() -> Option { 94 | // First try to get the user's default shell 95 | let shell = if cfg!(target_os = "windows") { 96 | "cmd" 97 | } else { 98 | // Try to get the user's default shell from /etc/shells or fallback to common shells 99 | let shells = ["/bin/zsh", "/bin/bash", "/bin/sh"]; 100 | for shell in shells.iter() { 101 | if std::path::Path::new(shell).exists() { 102 | return Some(shell.to_string()); 103 | } 104 | } 105 | "sh" // Fallback 106 | }; 107 | 108 | // Try to get PATH using the shell's login mode and sourcing initialization files 109 | let command = if shell.contains("zsh") { 110 | "source ~/.zshrc 2>/dev/null || true; source ~/.zshenv 2>/dev/null || true; echo $PATH" 111 | } else if shell.contains("bash") { 112 | "source ~/.bashrc 2>/dev/null || true; source ~/.bash_profile 2>/dev/null || true; echo $PATH" 113 | } else { 114 | "echo $PATH" 115 | }; 116 | 117 | let output = Command::new(shell) 118 | .arg("-l") // Login shell to get proper environment 119 | .arg("-c") 120 | .arg(command) 121 | .output() 122 | .ok()?; 123 | 124 | if output.status.success() { 125 | let path = String::from_utf8_lossy(&output.stdout).trim().to_string(); 126 | if !path.is_empty() { 127 | return Some(path); 128 | } 129 | } 130 | 131 | // If shell method fails, try to get PATH from environment 132 | std::env::var("PATH").ok() 133 | } 134 | 135 | #[command] 136 | fn execute_command( 137 | command: String, 138 | session_id: String, 139 | ssh_password: Option, 140 | app_handle: AppHandle, 141 | command_manager: State<'_, CommandManager>, 142 | ) -> Result { 143 | const SSH_NEEDS_PASSWORD_MARKER: &str = "SSH_INTERACTIVE_PASSWORD_PROMPT_REQUESTED"; 144 | const SSH_PRE_EXEC_PASSWORD_EVENT: &str = "ssh_pre_exec_password_request"; 145 | const COMMAND_FORWARDED_TO_ACTIVE_SSH_MARKER: &str = "COMMAND_FORWARDED_TO_ACTIVE_SSH"; 146 | 147 | // Phase 1: Check and handle active SSH session 148 | { 149 | let mut states_guard = command_manager.commands.lock().map_err(|e| e.to_string())?; 150 | let key = session_id.clone(); 151 | println!("[Rust EXEC DEBUG] Phase 1: Checking for active SSH session for key: {}", key); 152 | 153 | let state = states_guard.entry(key.clone()).or_insert_with(|| { 154 | println!("[Rust EXEC DEBUG] No existing state for key {}, creating new.", key); 155 | CommandState { 156 | current_dir: env::current_dir().unwrap_or_default().to_string_lossy().to_string(), 157 | child_wait_handle: None, 158 | child_stdin: None, 159 | pid: None, 160 | is_ssh_session_active: false, 161 | remote_current_dir: None, 162 | } 163 | }); 164 | 165 | if state.is_ssh_session_active { 166 | println!("[Rust EXEC DEBUG] Active SSH session detected (is_ssh_session_active=true)."); 167 | if let Some(stdin_arc_for_thread) = state.child_stdin.clone() { 168 | let active_pid_for_log = state.pid.unwrap_or(0); 169 | println!("[Rust EXEC DEBUG] Found child_stdin for active SSH (Original PID: {}).", active_pid_for_log); 170 | 171 | if let Err(e) = app_handle.emit("command_forwarded_to_ssh", command.clone()) { 172 | 
eprintln!("[Rust EXEC DEBUG] Failed to emit command_forwarded_to_ssh: {}", e); 173 | } else { 174 | println!("[Rust EXEC DEBUG] Emitted command_forwarded_to_ssh for command: {}", command); 175 | } 176 | 177 | let app_handle_clone_for_thread = app_handle.clone(); 178 | let command_clone_for_thread = command.clone(); 179 | let session_id_clone_for_thread = session_id.clone(); 180 | 181 | println!("[Rust EXEC DEBUG] Attempting to forward command '{}' to active SSH session (Original PID: {})", command_clone_for_thread, active_pid_for_log); 182 | 183 | thread::spawn(move || { 184 | println!("[Rust EXEC DEBUG SSH-Write-Thread] Spawned for command: {}", command_clone_for_thread); 185 | let command_manager_state_for_thread = app_handle_clone_for_thread.state::(); 186 | 187 | let mut stdin_guard = match stdin_arc_for_thread.lock() { 188 | Ok(guard) => guard, 189 | Err(e) => { 190 | eprintln!("[Rust EXEC DEBUG SSH-Write-Thread] Failed to lock SSH ChildStdin: {}. Resetting SSH state.", e); 191 | if let Ok(mut states_lock_in_thread) = command_manager_state_for_thread.commands.lock() { 192 | if let Some(s) = states_lock_in_thread.get_mut(&session_id_clone_for_thread) { 193 | if s.pid == Some(active_pid_for_log) && s.is_ssh_session_active { 194 | println!("[Rust EXEC DEBUG SSH-Write-Thread] Resetting SSH active state (stdin, pid:{}) due to ChildStdin lock failure.", active_pid_for_log); 195 | s.is_ssh_session_active = false; 196 | s.child_stdin = None; 197 | s.remote_current_dir = None; 198 | } 199 | } 200 | } 201 | let _ = app_handle_clone_for_thread.emit("ssh_session_ended", serde_json::json!({ "pid": active_pid_for_log, "reason": format!("SSH session error (stdin lock): {}", e)})); 202 | let _ = app_handle_clone_for_thread.emit("command_error", format!("Failed to send to SSH (stdin lock '{}'): {}", command_clone_for_thread, e)); 203 | let _ = app_handle_clone_for_thread.emit("command_end", "Command failed."); 204 | return; 205 | } 206 | }; 207 | println!("[Rust EXEC DEBUG SSH-Write-Thread] Successfully locked SSH ChildStdin."); 208 | 209 | println!("[Rust EXEC DEBUG SSH-Write-Thread] Writing command to SSH ChildStdin: {}", command_clone_for_thread); 210 | 211 | let is_remote_cd = command_clone_for_thread.trim().starts_with("cd "); 212 | let actual_command_to_write_ssh = if is_remote_cd { 213 | let marker = format!("__REMOTE_CD_PWD_MARKER_{}__", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs_f64().to_string().replace('.', "")); 214 | let cd_command_part = command_clone_for_thread.trim(); 215 | format!("{} && printf '%s\\n' '{}' && pwd && printf '%s\\n' '{}'\n", cd_command_part, marker, marker) 216 | } else { 217 | format!("{}\n", command_clone_for_thread) 218 | }; 219 | 220 | let write_attempt = stdin_guard.write_all(actual_command_to_write_ssh.as_bytes()); 221 | 222 | let final_result = if write_attempt.is_ok() { 223 | println!("[Rust EXEC DEBUG SSH-Write-Thread] Write successful. Flushing ChildStdin."); 224 | stdin_guard.flush() 225 | } else { 226 | eprintln!("[Rust EXEC DEBUG SSH-Write-Thread] Write failed: {:?}. Won't flush.", write_attempt.as_ref().err()); 227 | write_attempt 228 | }; 229 | 230 | if let Err(e) = final_result { 231 | eprintln!("[Rust EXEC DEBUG SSH-Write-Thread] Failed to write/flush to SSH ChildStdin: {}. 
Resetting SSH state.", e); 232 | if let Ok(mut states_lock_in_thread) = command_manager_state_for_thread.commands.lock() { 233 | if let Some(s) = states_lock_in_thread.get_mut(&session_id_clone_for_thread) { 234 | if s.pid == Some(active_pid_for_log) && s.is_ssh_session_active { 235 | println!("[Rust EXEC DEBUG SSH-Write-Thread] Resetting SSH active state (stdin, pid:{}) due to write/flush failure.", active_pid_for_log); 236 | s.is_ssh_session_active = false; 237 | s.child_stdin = None; 238 | s.remote_current_dir = None; 239 | } 240 | } 241 | } 242 | let _ = app_handle_clone_for_thread.emit("ssh_session_ended", serde_json::json!({ "pid": active_pid_for_log, "reason": format!("SSH session ended (stdin write/flush error): {}", e)})); 243 | let _ = app_handle_clone_for_thread.emit("command_error", format!("Failed to send to SSH (stdin write/flush '{}'): {}", command_clone_for_thread, e)); 244 | let _ = app_handle_clone_for_thread.emit("command_end", "Command failed."); 245 | return; 246 | } 247 | println!("[Rust EXEC DEBUG SSH-Write-Thread] Write and flush successful for command: {}", command_clone_for_thread); 248 | println!("[Rust EXEC DEBUG SSH-Write-Thread] Exiting for command: {}", command_clone_for_thread); 249 | }); 250 | 251 | drop(states_guard); 252 | println!("[Rust EXEC DEBUG] Returning COMMAND_FORWARDED_TO_ACTIVE_SSH_MARKER for forwarded command (PID: {}).", active_pid_for_log); 253 | return Ok(COMMAND_FORWARDED_TO_ACTIVE_SSH_MARKER.to_string()); 254 | 255 | } else { // state.child_stdin is None, but state.is_ssh_session_active was true 256 | let active_pid_for_log = state.pid.unwrap_or(0); 257 | eprintln!("[Rust EXEC DEBUG] SSH session active but no child_stdin found (PID: {}). Resetting state.", active_pid_for_log); 258 | state.is_ssh_session_active = false; 259 | state.pid = None; // Clear PID as session is now considered broken 260 | state.remote_current_dir = None; 261 | drop(states_guard); 262 | let _ = app_handle.emit("ssh_session_ended", serde_json::json!({ "pid": active_pid_for_log, "reason": "SSH session inconsistency: active but no stdin."})); 263 | return Err("SSH session conflict: active but no stdin. Please retry.".to_string()); 264 | } 265 | } else { 266 | println!("[Rust EXEC DEBUG] Phase 1: Finished SSH check."); 267 | } 268 | } 269 | 270 | // Phase 2: Handle 'cd' command (if not in an SSH session) 271 | // The `cd` command logic remains largely the same, it acquires its own lock. 272 | if command.starts_with("cd ") || command == "cd" { 273 | // This block is the original 'cd' handling logic. 274 | // It will lock `command_manager.commands` internally. 
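    // The built-in `cd` handling below never spawns a shell: it resolves the target
    // against the per-session `current_dir` stored in CommandState, expanding `~`,
    // accepting absolute paths, and walking `..`/`.` components manually, then only
    // checks that the resulting path exists before updating the session state.
    // Illustrative resolution (example paths, assuming they exist on disk):
    //   current_dir = "/home/user/project", command = "cd ../src"
    //     -> new current_dir = "/home/user/src"
    //   current_dir = "/home/user/project", command = "cd ~"
    //     -> new current_dir = dirs::home_dir(), e.g. "/home/user"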
275 | let mut states_guard_cd = command_manager.commands.lock().map_err(|e| e.to_string())?; 276 | let key_cd = session_id.clone(); 277 | let state_cd = states_guard_cd.entry(key_cd.clone()).or_insert_with(|| CommandState { 278 | current_dir: env::current_dir().unwrap_or_default().to_string_lossy().to_string(), 279 | child_wait_handle: None, 280 | child_stdin: None, 281 | pid: None, 282 | is_ssh_session_active: false, // ensure default 283 | remote_current_dir: None, 284 | }); 285 | 286 | let path = command.trim_start_matches("cd").trim(); 287 | if path.is_empty() || path == "~" || path == "~/" { 288 | return if let Some(home_dir) = dirs::home_dir() { 289 | let home_path = home_dir.to_string_lossy().to_string(); 290 | state_cd.current_dir = home_path.clone(); 291 | drop(states_guard_cd); // Release lock before emitting and returning 292 | let _ = app_handle.emit("command_end", "Command completed successfully."); 293 | Ok(format!("Changed directory to {}", home_path)) 294 | } else { 295 | drop(states_guard_cd); 296 | let _ = app_handle.emit("command_end", "Command failed."); 297 | Err("Could not determine home directory".to_string()) 298 | }; 299 | } 300 | let current_path = Path::new(&state_cd.current_dir); 301 | let new_path = if path.starts_with('~') { 302 | if let Some(home_dir) = dirs::home_dir() { 303 | let without_tilde = path.trim_start_matches('~'); 304 | let rel_path = without_tilde.trim_start_matches('/'); 305 | if rel_path.is_empty() { home_dir } else { home_dir.join(rel_path) } 306 | } else { drop(states_guard_cd); return Err("Could not determine home directory".to_string()); } 307 | } else if path.starts_with('/') { 308 | std::path::PathBuf::from(path) 309 | } else { 310 | let mut result_path = current_path.to_path_buf(); 311 | let path_components: Vec<&str> = path.split('/').collect(); 312 | for component in path_components { 313 | if component == ".." { 314 | if let Some(parent) = result_path.parent() { result_path = parent.to_path_buf(); } 315 | else { drop(states_guard_cd); let _ = app_handle.emit("command_end", "Command failed."); return Err("Already at root directory".to_string()); } 316 | } else if component != "." && !component.is_empty() { 317 | result_path = result_path.join(component); 318 | } 319 | } 320 | result_path 321 | }; 322 | return if new_path.exists() { 323 | state_cd.current_dir = new_path.to_string_lossy().to_string(); 324 | let current_dir_for_ok = state_cd.current_dir.clone(); 325 | drop(states_guard_cd); 326 | let _ = app_handle.emit("command_end", "Command completed successfully."); 327 | Ok(format!("Changed directory to {}", current_dir_for_ok)) 328 | } else { 329 | drop(states_guard_cd); 330 | let _ = app_handle.emit("command_end", "Command failed."); 331 | Err(format!("Directory not found: {}", path)) 332 | }; 333 | } 334 | 335 | // Phase 3: Prepare for and execute new command (local or new SSH) 336 | let current_dir_clone = { 337 | let mut states_guard_dir = command_manager.commands.lock().map_err(|e| e.to_string())?; 338 | let key_dir = session_id.clone(); 339 | let state_dir = states_guard_dir.entry(key_dir.clone()).or_insert_with(|| CommandState { 340 | current_dir: env::current_dir().unwrap_or_default().to_string_lossy().to_string(), 341 | child_wait_handle: None, 342 | child_stdin: None, 343 | pid: None, 344 | is_ssh_session_active: false, 345 | remote_current_dir: None, 346 | }); 347 | state_dir.current_dir.clone() 348 | }; // Lock for current_dir released. 
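    // Note the scoping pattern above: the commands mutex is locked only long enough to
    // clone `current_dir`, and the guard is dropped at the end of the block, before the
    // (potentially slow) spawn and I/O work below. A minimal sketch of the same idiom
    // (names here are illustrative, not from this file):
    //
    //   let snapshot = {
    //       let guard = shared_state.lock().unwrap(); // lock acquired
    //       guard.field.clone()
    //   }; // guard dropped here; nothing slow runs while the lock is held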
349 | 350 | 351 | // Proactive SSH password handling (if not in an SSH session) 352 | let is_plain_ssh_attempt = command.contains("ssh ") && !command.trim_start().starts_with("sudo ssh "); 353 | if is_plain_ssh_attempt && ssh_password.is_none() { 354 | app_handle.emit(SSH_PRE_EXEC_PASSWORD_EVENT, command.clone()).map_err(|e| e.to_string())?; 355 | return Ok(SSH_NEEDS_PASSWORD_MARKER.to_string()); 356 | } 357 | 358 | let mut command_to_run = command.clone(); 359 | let app_handle_clone = app_handle.clone(); 360 | 361 | let mut env_map: HashMap = std::env::vars().collect(); 362 | if !env_map.contains_key("PATH") { 363 | if let Some(path_val) = get_shell_path() { 364 | env_map.insert("PATH".to_string(), path_val); 365 | } 366 | } 367 | 368 | // let script_path_option: Option = None; // Removed unused variable 369 | 370 | // This flag determines if the command we are about to spawn *could* start a persistent SSH session 371 | let is_potential_ssh_session_starter = is_plain_ssh_attempt; 372 | 373 | let original_command_is_sudo = command.trim_start().starts_with("sudo "); 374 | let original_command_is_sudo_ssh = command.trim_start().starts_with("sudo ssh "); 375 | 376 | let mut cmd_to_spawn: Command; 377 | let mut child: Child; 378 | 379 | // Prepare command_to_run if it's an SSH command, before deciding on sshpass 380 | if is_potential_ssh_session_starter && !original_command_is_sudo_ssh { // Avoid mangling "sudo ssh ..." here 381 | let original_command_parts: Vec<&str> = command.split_whitespace().collect(); 382 | let mut first_non_option_idx_after_ssh: Option = None; 383 | 384 | // Find the first argument after "ssh" that doesn't start with '-' 385 | // This helps distinguish `ssh host` from `ssh host remote_command` 386 | let ssh_keyword_idx = original_command_parts.iter().position(|&p| p == "ssh"); 387 | 388 | if let Some(idx_ssh) = ssh_keyword_idx { 389 | for i in (idx_ssh + 1)..original_command_parts.len() { 390 | if !original_command_parts[i].starts_with('-') { 391 | first_non_option_idx_after_ssh = Some(i); 392 | break; 393 | } 394 | } 395 | 396 | let is_likely_interactive_ssh = match first_non_option_idx_after_ssh { 397 | Some(idx) => idx == original_command_parts.len() - 1, // True if the first non-option (host) is the last part 398 | None => false, // e.g., "ssh -p 22" without host, or just "ssh" 399 | }; 400 | 401 | let ssh_options_prefix = "ssh -t -t -o StrictHostKeyChecking=accept-new"; 402 | // Arguments are everything after "ssh" in the original command 403 | let args_after_ssh_keyword_in_original = original_command_parts.iter().skip(idx_ssh + 1).cloned().collect::>().join(" "); 404 | 405 | if is_likely_interactive_ssh { 406 | // For interactive: ssh -options user@host 407 | command_to_run = format!("{} {}", ssh_options_prefix, args_after_ssh_keyword_in_original.trim_end()); 408 | } else if first_non_option_idx_after_ssh.is_some() { 409 | // For non-interactive (ssh user@host remote_command): ssh -options user@host remote_command 410 | command_to_run = format!("{} {}", ssh_options_prefix, args_after_ssh_keyword_in_original); 411 | } else { 412 | // Could be just "ssh" or "ssh -options", keep as is but with prefix, though likely won't connect 413 | command_to_run = format!("{} {}", ssh_options_prefix, args_after_ssh_keyword_in_original); 414 | } 415 | println!("[Rust EXEC] Transformed SSH command for execution: [{}]", command_to_run); 416 | } 417 | } 418 | 419 | // Now, use the (potentially transformed) command_to_run for direct/sshpass spawning 420 | if 
is_potential_ssh_session_starter && !original_command_is_sudo { 421 | println!("[Rust EXEC] Preparing to spawn SSH directly (potentially with sshpass). Original user command: [{}]", command); 422 | println!(" Internally prepared base ssh command (command_to_run): [{}]", command_to_run); 423 | println!(" Current dir: [{}]", current_dir_clone); 424 | 425 | let executable_name: String; 426 | let mut arguments: Vec = Vec::new(); 427 | 428 | if let Some(password_value) = ssh_password { 429 | executable_name = "sshpass".to_string(); 430 | arguments.push("-p".to_string()); 431 | arguments.push(password_value); // password_value is a String, gets moved here 432 | // command_to_run is the full "ssh -t -t ..." string 433 | arguments.extend(command_to_run.split_whitespace().map(String::from)); 434 | println!(" Using sshpass with provided password."); 435 | } else { 436 | // No password provided: use plain ssh 437 | // command_to_run is already "ssh -t -t ..." 438 | let parts: Vec = command_to_run.split_whitespace().map(String::from).collect(); 439 | if parts.is_empty() || parts[0] != "ssh" { 440 | return Err(format!("Failed to parse SSH command for direct execution: {}", command_to_run)); 441 | } 442 | executable_name = parts[0].clone(); // Should be "ssh" 443 | arguments.extend(parts.iter().skip(1).cloned()); 444 | println!(" Using plain ssh (no password provided to backend, will rely on key auth or agent)."); 445 | } 446 | 447 | cmd_to_spawn = Command::new(&executable_name); 448 | for arg in &arguments { 449 | cmd_to_spawn.arg(arg); 450 | } 451 | 452 | // env_map is passed as is. If SSH_ASKPASS was in it from a broader environment, 453 | // sshpass should take precedence or ssh (in key auth) would ignore it if not needed. 454 | cmd_to_spawn.current_dir(¤t_dir_clone) 455 | .envs(&env_map) 456 | .stdout(Stdio::piped()) 457 | .stderr(Stdio::piped()) 458 | .stdin(Stdio::piped()); 459 | 460 | // setsid() was removed here in a previous step, which is good. 
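    // For a new interactive SSH session the process is spawned directly below (not via
    // `sh -c`) with stdin/stdout/stderr all piped, so the app keeps a writable handle to
    // the child's stdin and can forward later commands into the live session (Phase 1).
    // With a password supplied, what gets spawned is roughly equivalent to running
    // (placeholders, not literal values):
    //
    //   sshpass -p '<password>' ssh -t -t -o StrictHostKeyChecking=accept-new user@host
    //
    // The doubled `-t` forces pseudo-TTY allocation even though stdin is a pipe, which
    // keeps remote prompts usable. Note that the argument list is built by splitting
    // `command_to_run` on whitespace, so quoted arguments containing spaces would not
    // survive this path.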
461 | 462 | child = match cmd_to_spawn.spawn() { 463 | Ok(c) => c, 464 | Err(e) => return Err(format!("Failed to start direct command ({}): {}", executable_name, e)), 465 | }; 466 | 467 | } else { // Fallback to sh -c for non-SSH or sudo commands 468 | let final_shell_command = if original_command_is_sudo && !original_command_is_sudo_ssh { 469 | command_to_run.clone() 470 | } else { 471 | format!("exec {}", command_to_run) 472 | }; 473 | 474 | println!("[Rust EXEC] Final shell command for sh -c: [{}]", final_shell_command); 475 | println!(" About to spawn for command: [{}] (Original: {})", command_to_run, command); 476 | println!(" Current dir: [{}]", current_dir_clone); 477 | println!(" Plain SSH attempt (via sh -c): {}", is_plain_ssh_attempt); 478 | println!(" Is potential SSH starter (via sh -c): {}", is_potential_ssh_session_starter); 479 | 480 | let mut sh_cmd_to_spawn = Command::new("sh"); 481 | sh_cmd_to_spawn.arg("-c") 482 | .arg(&final_shell_command) 483 | .current_dir(¤t_dir_clone) 484 | .envs(&env_map) 485 | .stdout(Stdio::piped()) 486 | .stderr(Stdio::piped()) 487 | .stdin(Stdio::piped()); // Ensure stdin is piped for sh -c as well 488 | 489 | #[cfg(unix)] 490 | unsafe { 491 | sh_cmd_to_spawn.pre_exec(|| { 492 | match nix::unistd::setsid() { 493 | Ok(_) => Ok(()), 494 | Err(e) => Err(std::io::Error::new(std::io::ErrorKind::Other, format!("setsid failed: {}", e))), 495 | } 496 | }); 497 | } 498 | 499 | child = match sh_cmd_to_spawn.spawn() { 500 | Ok(c) => c, 501 | Err(e) => return Err(format!("Failed to start command via sh -c: {}", e)), 502 | }; 503 | } 504 | 505 | let pid = child.id(); 506 | // Take IO handles before moving child into Arc> 507 | let child_stdin_handle = child.stdin.take().map(|stdin| Arc::new(Mutex::new(stdin))); 508 | let child_stdout_handle = child.stdout.take(); 509 | let child_stderr_handle = child.stderr.take(); let child_wait_handle_arc = Arc::new(Mutex::new(child)); // Now 'child' has no IO handles 510 | let session_id_for_wait_thread = session_id.clone(); 511 | 512 | { 513 | let mut states_guard_update = command_manager.commands.lock().map_err(|e| e.to_string())?; 514 | let key_update = session_id.clone(); 515 | let state_to_update = states_guard_update.entry(key_update).or_insert_with(|| CommandState { 516 | current_dir: current_dir_clone.clone(), 517 | child_wait_handle: None, 518 | child_stdin: None, 519 | pid: None, 520 | is_ssh_session_active: false, 521 | remote_current_dir: None, 522 | }); 523 | 524 | state_to_update.pid = Some(pid); 525 | state_to_update.child_wait_handle = Some(child_wait_handle_arc.clone()); // Store wait handle 526 | 527 | if is_potential_ssh_session_starter { 528 | state_to_update.child_stdin = child_stdin_handle; // Store stdin handle for SSH 529 | state_to_update.is_ssh_session_active = true; 530 | state_to_update.remote_current_dir = Some("remote:~".to_string()); // Initial placeholder 531 | println!("[Rust EXEC] SSH session (pid: {}) marked active.", pid); 532 | let _ = app_handle_clone.emit("ssh_session_started", serde_json::json!({ "pid": pid })); 533 | 534 | // Attempt to send initial PWD command 535 | if let Some(stdin_arc_for_init_pwd) = state_to_update.child_stdin.clone() { 536 | let app_handle_for_init_pwd_thread = app_handle_clone.clone(); // Clone app_handle for the thread 537 | let initial_pid_for_init_pwd_error = pid; 538 | let session_id_for_init_pwd_thread = session_id.clone(); 539 | 540 | thread::spawn(move || { 541 | // Get CommandManager state inside the thread using the moved app_handle 542 | let 
command_manager_state_for_thread = app_handle_for_init_pwd_thread.state::(); 543 | 544 | let initial_pwd_marker = format!("__INITIAL_REMOTE_PWD_MARKER_{}__", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs_f64().to_string().replace('.', "")); 545 | let initial_pwd_command = format!("echo '{}'; pwd; echo '{}'\n", initial_pwd_marker, initial_pwd_marker); 546 | 547 | println!("[Rust EXEC SSH-Init-PWD-Thread] Attempting to send initial PWD command for PID {}: {}", initial_pid_for_init_pwd_error, initial_pwd_command.trim()); 548 | 549 | match stdin_arc_for_init_pwd.lock() { 550 | Ok(mut stdin_guard) => { 551 | if let Err(e) = stdin_guard.write_all(initial_pwd_command.as_bytes()).and_then(|_| stdin_guard.flush()) { 552 | eprintln!("[Rust EXEC SSH-Init-PWD-Thread] Failed to write/flush initial PWD command for PID {}: {}. Resetting SSH state if still active.", initial_pid_for_init_pwd_error, e); 553 | if let Ok(mut states_lock) = command_manager_state_for_thread.commands.lock() { // Use state obtained within the thread 554 | if let Some(s) = states_lock.get_mut(&session_id_for_init_pwd_thread) { 555 | if s.pid == Some(initial_pid_for_init_pwd_error) && s.is_ssh_session_active { 556 | s.is_ssh_session_active = false; 557 | s.child_stdin = None; 558 | s.remote_current_dir = None; 559 | let _ = app_handle_for_init_pwd_thread.emit("ssh_session_ended", serde_json::json!({ "pid": initial_pid_for_init_pwd_error, "reason": format!("SSH session error (initial PWD send for pid {}): {}", initial_pid_for_init_pwd_error, e)})); 560 | } 561 | } 562 | } 563 | } else { 564 | println!("[Rust EXEC SSH-Init-PWD-Thread] Successfully sent initial PWD command for PID {}.", initial_pid_for_init_pwd_error); 565 | } 566 | } 567 | Err(e) => { 568 | eprintln!("[Rust EXEC SSH-Init-PWD-Thread] Failed to lock SSH ChildStdin for initial PWD command (PID {}): {}. Resetting SSH state if still active.", initial_pid_for_init_pwd_error, e); 569 | if let Ok(mut states_lock) = command_manager_state_for_thread.commands.lock() { // Use state obtained within the thread 570 | if let Some(s) = states_lock.get_mut(&session_id_for_init_pwd_thread) { 571 | if s.pid == Some(initial_pid_for_init_pwd_error) && s.is_ssh_session_active { 572 | s.is_ssh_session_active = false; 573 | s.child_stdin = None; 574 | s.remote_current_dir = None; 575 | let _ = app_handle_for_init_pwd_thread.emit("ssh_session_ended", serde_json::json!({ "pid": initial_pid_for_init_pwd_error, "reason": format!("SSH session error (initial PWD stdin lock for pid {}): {}", initial_pid_for_init_pwd_error, e)})); 576 | } 577 | } 578 | } 579 | } 580 | } 581 | }); 582 | } else { 583 | eprintln!("[Rust EXEC] New SSH session (pid: {}) started, but child_stdin was None. 
Cannot send initial PWD command.", pid); 584 | } 585 | } else { 586 | state_to_update.is_ssh_session_active = false; 587 | state_to_update.child_stdin = None; // Ensure stdin is None for non-SSH commands 588 | state_to_update.remote_current_dir = None; // Ensure remote_dir is None for non-SSH 589 | } 590 | } // states_guard_update lock released 591 | 592 | if let Some(stdout_stream) = child_stdout_handle { // Use the taken stdout 593 | let app_handle_for_stdout_mgr = app_handle_clone.clone(); 594 | let app_handle_for_stdout_emit = app_handle_clone.clone(); 595 | let current_pid_for_stdout_context = pid; 596 | let session_id_for_stdout_thread = session_id.clone(); 597 | 598 | thread::spawn(move || { 599 | let mut reader = BufReader::new(stdout_stream); 600 | let mut buffer = [0; 2048]; 601 | let mut line_buffer = String::new(); 602 | 603 | enum PwdMarkerParseState { Idle, AwaitingPwd(String), AwaitingEndMarker(String) } 604 | let mut pwd_marker_state = PwdMarkerParseState::Idle; 605 | 606 | let current_thread_id = std::thread::current().id(); 607 | println!("[Rust STDOUT Thread {:?} PID {}] Started.", current_thread_id, current_pid_for_stdout_context); 608 | loop { 609 | match reader.read(&mut buffer) { 610 | Ok(0) => { 611 | println!("[Rust STDOUT Thread {:?} PID {}] EOF reached.", current_thread_id, current_pid_for_stdout_context); 612 | if !line_buffer.is_empty() { 613 | println!("[Rust STDOUT Thread {:?} PID {}] Emitting remaining line_buffer: '{}'", current_thread_id, current_pid_for_stdout_context, line_buffer); 614 | if let Err(e) = app_handle_for_stdout_emit.emit("command_output", line_buffer.clone()) { 615 | println!("[Rust STDOUT Thread {:?} PID {}] Error emitting final command_output: {}", current_thread_id, current_pid_for_stdout_context, e); 616 | } 617 | } 618 | break; 619 | } 620 | Ok(n) => { 621 | let output_chunk_str = String::from_utf8_lossy(&buffer[..n]).to_string(); 622 | line_buffer.push_str(&output_chunk_str); 623 | 624 | while let Some(newline_pos) = line_buffer.find('\n') { 625 | let line_segment = line_buffer.drain(..=newline_pos).collect::(); 626 | let current_line_trimmed = line_segment.trim().to_string(); 627 | 628 | if current_line_trimmed.is_empty() { 629 | match pwd_marker_state { 630 | PwdMarkerParseState::Idle => { 631 | if let Err(e) = app_handle_for_stdout_emit.emit("command_output", line_segment.clone()) { 632 | println!("[Rust STDOUT Thread {:?} PID {}] Error emitting whitespace/newline: {}", current_thread_id, current_pid_for_stdout_context, e); 633 | } 634 | }, 635 | _ => {} 636 | } 637 | continue; 638 | } 639 | 640 | let mut emit_this_segment_to_frontend = true; 641 | 642 | match pwd_marker_state { 643 | PwdMarkerParseState::Idle => { 644 | if current_line_trimmed.starts_with("__REMOTE_CD_PWD_MARKER_") || current_line_trimmed.starts_with("__INITIAL_REMOTE_PWD_MARKER_") { 645 | println!("[Rust STDOUT Thread {:?} PID {}] PWD Start Marker detected: {}", current_thread_id, current_pid_for_stdout_context, current_line_trimmed); 646 | pwd_marker_state = PwdMarkerParseState::AwaitingPwd(current_line_trimmed.clone()); 647 | emit_this_segment_to_frontend = false; 648 | } 649 | } 650 | PwdMarkerParseState::AwaitingPwd(ref marker_val) => { 651 | let new_pwd = current_line_trimmed.clone(); 652 | println!("[Rust STDOUT Thread {:?} PID {}] Captured PWD: '{}' for marker: {}", current_thread_id, current_pid_for_stdout_context, new_pwd, marker_val); 653 | 654 | let command_manager_state = app_handle_for_stdout_mgr.state::(); 655 | if let Ok(mut states_guard) = 
command_manager_state.commands.lock() { 656 | if let Some(state) = states_guard.get_mut(&session_id_for_stdout_thread) { 657 | if state.pid == Some(current_pid_for_stdout_context) && state.is_ssh_session_active { 658 | state.remote_current_dir = Some(new_pwd.clone()); 659 | println!("[Rust STDOUT Thread {:?} PID {}] Updated remote_current_dir to: {}", current_thread_id, current_pid_for_stdout_context, new_pwd); 660 | if let Err(e) = app_handle_for_stdout_emit.emit("remote_directory_updated", new_pwd.clone()) { 661 | eprintln!("[Rust STDOUT Thread {:?} PID {}] Failed to emit remote_directory_updated: {}", current_thread_id, current_pid_for_stdout_context, e); 662 | } 663 | } else { 664 | println!("[Rust STDOUT Thread {:?} PID {}] SSH no longer active or PID mismatch for PWD update. State PID: {:?}, Active: {}", current_thread_id, current_pid_for_stdout_context, state.pid, state.is_ssh_session_active); 665 | } 666 | } 667 | } 668 | pwd_marker_state = PwdMarkerParseState::AwaitingEndMarker(marker_val.clone()); 669 | emit_this_segment_to_frontend = false; 670 | } 671 | PwdMarkerParseState::AwaitingEndMarker(ref marker_val) => { 672 | if current_line_trimmed == *marker_val { 673 | println!("[Rust STDOUT Thread {:?} PID {}] PWD End Marker detected: {}", current_thread_id, current_pid_for_stdout_context, current_line_trimmed); 674 | pwd_marker_state = PwdMarkerParseState::Idle; 675 | emit_this_segment_to_frontend = false; 676 | } else { 677 | println!("[Rust STDOUT Thread {:?} PID {}] WARNING: Expected PWD end marker '{}', got: '{}'. Resetting state and emitting line.", current_thread_id, current_pid_for_stdout_context, marker_val, current_line_trimmed); 678 | pwd_marker_state = PwdMarkerParseState::Idle; 679 | if current_line_trimmed.starts_with("__REMOTE_CD_PWD_MARKER_") || current_line_trimmed.starts_with("__INITIAL_REMOTE_PWD_MARKER_") { 680 | println!("[Rust STDOUT Thread {:?} PID {}] PWD Start Marker detected immediately after unexpected line: {}", current_thread_id, current_pid_for_stdout_context, current_line_trimmed); 681 | pwd_marker_state = PwdMarkerParseState::AwaitingPwd(current_line_trimmed.clone()); 682 | emit_this_segment_to_frontend = false; 683 | } 684 | } 685 | } 686 | } 687 | 688 | if emit_this_segment_to_frontend { 689 | if let Err(e) = app_handle_for_stdout_emit.emit("command_output", line_segment.clone()) { 690 | println!("[Rust STDOUT Thread {:?} PID {}] Error emitting command_output: {}", current_thread_id, current_pid_for_stdout_context, e); 691 | } 692 | } 693 | } 694 | } 695 | Err(e) => { 696 | println!("[Rust STDOUT Thread {:?} PID {}] Error reading stdout: {}", current_thread_id, current_pid_for_stdout_context, e); 697 | if e.kind() == std::io::ErrorKind::Interrupted { continue; } 698 | if !line_buffer.is_empty() { 699 | println!("[Rust STDOUT Thread {:?} PID {}] Emitting remaining line_buffer on error: '{}'", current_thread_id, current_pid_for_stdout_context, line_buffer); 700 | if let Err(emit_e) = app_handle_for_stdout_emit.emit("command_output", line_buffer.clone()) { 701 | println!("[Rust STDOUT Thread {:?} PID {}] Error emitting final command_output on error: {}", current_thread_id, current_pid_for_stdout_context, emit_e); 702 | } 703 | } 704 | break; 705 | } 706 | } 707 | } 708 | println!("[Rust STDOUT Thread {:?} PID {}] Exiting.", current_thread_id, current_pid_for_stdout_context); 709 | }); 710 | } 711 | 712 | if let Some(stderr_stream) = child_stderr_handle { // Use the taken stderr 713 | let app_handle_stderr = app_handle.clone(); 714 | thread::spawn(move 
|| { 715 | let mut reader = BufReader::new(stderr_stream); 716 | let mut buffer = [0; 2048]; 717 | let current_thread_id = std::thread::current().id(); // Get thread ID once 718 | println!("[Rust STDERR Thread {:?}] Started for command.", current_thread_id); // LOG thread start 719 | loop { 720 | match reader.read(&mut buffer) { 721 | Ok(0) => { 722 | println!("[Rust STDERR Thread {:?}] EOF reached.", current_thread_id); // LOG 723 | break; 724 | } 725 | Ok(n) => { 726 | let error_chunk = String::from_utf8_lossy(&buffer[..n]).to_string(); 727 | println!("[Rust STDERR Thread {:?}] Read chunk: '{}'", current_thread_id, error_chunk); // LOG 728 | if !error_chunk.contains("[sudo] password") { 729 | if let Err(e) = app_handle_stderr.emit("command_error", error_chunk.clone()) { // LOG event emission 730 | println!("[Rust STDERR Thread {:?}] Error emitting command_error: {}", current_thread_id, e); // LOG 731 | } 732 | } 733 | } 734 | Err(e) => { 735 | println!("[Rust STDERR Thread {:?}] Error reading stderr: {}", current_thread_id, e); // LOG 736 | if e.kind() == std::io::ErrorKind::Interrupted { continue; } 737 | break; 738 | } 739 | } 740 | } 741 | println!("[Rust STDERR Thread {:?}] Exiting.", current_thread_id); // LOG 742 | }); 743 | } 744 | 745 | // The wait thread now uses child_wait_handle_arc 746 | let app_handle_wait = app_handle_clone.clone(); 747 | let app_handle_for_thread_state = app_handle.clone(); 748 | let was_ssh_session_starter = is_potential_ssh_session_starter; 749 | let initial_child_pid_for_wait_thread = pid; 750 | 751 | thread::spawn(move || { 752 | println!("[Rust WAIT Thread] Started for PID: {}", initial_child_pid_for_wait_thread); 753 | 754 | let status_result = { 755 | // Lock the child_wait_handle_arc to wait on the child 756 | let mut child_guard = match child_wait_handle_arc.lock() { 757 | Ok(guard) => guard, 758 | Err(e) => { 759 | eprintln!("[Rust WAIT Thread] Failed to lock child_wait_handle for PID {}: {}", initial_child_pid_for_wait_thread, e); 760 | // Emit error and end messages 761 | let _ = app_handle_wait.emit("command_error", format!("Error locking child for wait: {}", e)); 762 | let _ = app_handle_wait.emit("command_end", "Command failed due to wait lock error."); 763 | return; 764 | } 765 | }; 766 | // child_guard is MutexGuard 767 | child_guard.wait() 768 | }; 769 | 770 | { // Cleanup block 771 | let command_manager_state_in_thread = app_handle_for_thread_state.state::(); 772 | let mut states_guard_cleanup = match command_manager_state_in_thread.commands.lock() { 773 | Ok(guard) => guard, 774 | Err(e) => { 775 | eprintln!("[Rust WAIT Thread] Error locking command_manager in wait thread for PID {}: {}", initial_child_pid_for_wait_thread, e); 776 | // Cannot panic here, just log and proceed if possible or return 777 | return; 778 | } 779 | }; 780 | 781 | let key_cleanup = session_id_for_wait_thread.clone(); 782 | if let Some(state_to_clear) = states_guard_cleanup.get_mut(&key_cleanup) { 783 | // Important: Only clear if the PID matches, to avoid race conditions 784 | // if another command started and this wait thread is for an older one. 
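                // Illustrative interleaving this guard protects against:
                //   t0: command A spawns with pid 100 -> state.pid = Some(100)
                //   t1: A exits; before this wait thread runs its cleanup, the user
                //       starts command B with pid 200 -> state.pid = Some(200)
                //   t2: A's wait thread wakes up here; without the pid comparison it
                //       would clear B's wait handle, stdin and SSH flags.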
785 | if state_to_clear.pid == Some(initial_child_pid_for_wait_thread) { 786 | state_to_clear.child_wait_handle = None; 787 | state_to_clear.pid = None; // PID is cleared here 788 | if was_ssh_session_starter && state_to_clear.is_ssh_session_active { 789 | state_to_clear.is_ssh_session_active = false; 790 | state_to_clear.child_stdin = None; // Also clear stdin if it was an SSH session 791 | state_to_clear.remote_current_dir = None; // Clear remote dir 792 | println!("SSH session (pid: {}) ended by wait thread. Marked inactive.", initial_child_pid_for_wait_thread); 793 | let _ = app_handle_wait.emit("ssh_session_ended", serde_json::json!({ "pid": initial_child_pid_for_wait_thread, "reason": "SSH session ended normally."})); 794 | } else if was_ssh_session_starter { 795 | // SSH session starter but was already marked inactive (e.g. by write thread error) 796 | // Ensure remote_current_dir is also cleared if it hasn't been. 797 | state_to_clear.remote_current_dir = None; 798 | println!("Wait thread: SSH session (pid: {}) was already inactive. Clearing handles.", initial_child_pid_for_wait_thread); 799 | state_to_clear.child_stdin = None; 800 | } 801 | } else { 802 | println!("[Rust WAIT Thread] PID mismatch during cleanup. Current state.pid: {:?}, waited_pid: {}. No cleanup performed by this thread.", state_to_clear.pid, initial_child_pid_for_wait_thread); 803 | } 804 | } 805 | } // states_guard_cleanup lock released 806 | 807 | match status_result { 808 | Ok(status) => { 809 | let exit_msg = if status.success() { 810 | "Command completed successfully." 811 | } else { 812 | "Command failed." 813 | }; 814 | let _ = app_handle_wait.emit("command_end", exit_msg); 815 | }, 816 | Err(e) => { 817 | let _ = app_handle_wait.emit("command_error", format!("Error waiting for command: {}", e)); 818 | // Also emit command_end because the command effectively ended, albeit with an error during wait 819 | let _ = app_handle_wait.emit("command_end", "Command failed due to wait error."); 820 | } 821 | } 822 | }); 823 | 824 | Ok("Command started. 
Output will stream in real-time.".to_string()) 825 | } 826 | 827 | #[command] 828 | fn execute_sudo_command( 829 | command: String, 830 | session_id: String, 831 | password: String, 832 | app_handle: AppHandle, 833 | command_manager: State<'_, CommandManager>, 834 | ) -> Result { 835 | let mut states = command_manager.commands.lock().map_err(|e| e.to_string())?; 836 | 837 | let key = session_id; 838 | let state = states.entry(key.clone()).or_insert_with(|| CommandState { 839 | current_dir: env::current_dir() 840 | .unwrap_or_default() 841 | .to_string_lossy() 842 | .to_string(), 843 | child_wait_handle: None, 844 | child_stdin: None, 845 | pid: None, 846 | is_ssh_session_active: false, 847 | remote_current_dir: None, 848 | }); 849 | 850 | let current_dir = state.current_dir.clone(); 851 | 852 | let mut child_process = match Command::new("sudo") 853 | .arg("-S") 854 | .arg("bash") 855 | .arg("-c") 856 | .arg( 857 | command 858 | .split_whitespace() 859 | .skip(1) 860 | .collect::>() 861 | .join(" "), 862 | ) // Skip "sudo" and join the rest 863 | .current_dir(¤t_dir) 864 | .stdin(Stdio::piped()) 865 | .stdout(Stdio::piped()) 866 | .stderr(Stdio::piped()) 867 | .spawn() 868 | { 869 | Ok(child) => child, 870 | Err(e) => { 871 | return Err(format!("Failed to start sudo command: {}", e)); 872 | } 873 | }; 874 | 875 | let child_pid = child_process.id(); // Get PID 876 | let sudo_stdin = child_process.stdin.take().map(|s| Arc::new(Mutex::new(s))); // Take stdin 877 | let sudo_stdout = child_process.stdout.take(); // Take stdout 878 | let sudo_stderr = child_process.stderr.take(); // Take stderr 879 | 880 | let child_arc = Arc::new(Mutex::new(child_process)); // Store the Child itself for waiting 881 | 882 | state.child_wait_handle = Some(child_arc.clone()); // Store wait handle 883 | state.pid = Some(child_pid); // Store PID 884 | // For sudo, is_ssh_session_active remains false, child_stdin for SSH is not set. 
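    // `sudo -S` makes sudo read the password from stdin instead of the controlling
    // terminal, which is why the helper thread below writes the password followed by a
    // newline into the piped stdin; the stderr reader further down drops chunks
    // containing "[sudo] password" so the prompt is not surfaced to the UI as an error.
    // Rough shell equivalent of what is spawned (placeholders, not literal values):
    //
    //   printf '%s\n' '<password>' | sudo -S bash -c '<command without the leading sudo>'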
885 | 886 | // Send password to stdin 887 | if let Some(stdin_arc) = sudo_stdin { // Use the taken and Arc-wrapped stdin 888 | let app_handle_stdin = app_handle.clone(); 889 | thread::spawn(move || { 890 | let mut stdin_guard = match stdin_arc.lock() { 891 | Ok(guard) => guard, 892 | Err(e) => { 893 | eprintln!("Failed to lock sudo stdin: {}", e); 894 | let _ = app_handle_stdin.emit("command_error", "Failed to lock sudo stdin"); 895 | return; 896 | } 897 | }; 898 | if stdin_guard 899 | .write_all(format!("{} 900 | ", password).as_bytes()) 901 | .is_err() 902 | { 903 | let _ = app_handle_stdin.emit("command_error", "Failed to send password to sudo"); 904 | } 905 | }); 906 | } 907 | 908 | // Use the taken stdout_stream 909 | if let Some(stdout_stream) = sudo_stdout { 910 | let app_handle_stdout = app_handle.clone(); 911 | thread::spawn(move || { 912 | let mut reader = BufReader::new(stdout_stream); 913 | let mut buffer = [0; 2048]; // Read in chunks 914 | loop { 915 | match reader.read(&mut buffer) { 916 | Ok(0) => break, // EOF 917 | Ok(n) => { 918 | let output_chunk = String::from_utf8_lossy(&buffer[..n]).to_string(); 919 | let _ = app_handle_stdout.emit("command_output", output_chunk); 920 | } 921 | Err(e) => { 922 | if e.kind() == std::io::ErrorKind::Interrupted { continue; } 923 | let _ = app_handle_stdout 924 | .emit("command_output", format!("Error reading stdout: {}", e)); 925 | break; 926 | } 927 | } 928 | } 929 | }); 930 | } 931 | 932 | // Use the taken stderr_stream 933 | if let Some(stderr_stream) = sudo_stderr { 934 | let app_handle_stderr = app_handle.clone(); 935 | thread::spawn(move || { 936 | let mut reader = BufReader::new(stderr_stream); 937 | let mut buffer = [0; 2048]; // Read in chunks 938 | loop { 939 | match reader.read(&mut buffer) { 940 | Ok(0) => break, // EOF 941 | Ok(n) => { 942 | let error_chunk = String::from_utf8_lossy(&buffer[..n]).to_string(); 943 | if !error_chunk.contains("[sudo] password") { 944 | let _ = app_handle_stderr.emit("command_error", error_chunk.clone()); 945 | } 946 | } 947 | Err(e) => { 948 | if e.kind() == std::io::ErrorKind::Interrupted { continue; } 949 | let _ = app_handle_stderr 950 | .emit("command_error", format!("Error reading stderr: {}", e)); 951 | break; 952 | } 953 | } 954 | } 955 | }); 956 | } 957 | 958 | let child_arc_clone = child_arc.clone(); 959 | let app_handle_wait = app_handle.clone(); 960 | thread::spawn(move || { 961 | let status = { 962 | let mut child_guard = child_arc_clone.lock().unwrap(); 963 | match child_guard.wait() { 964 | Ok(status) => status, 965 | Err(e) => { 966 | let _ = app_handle_wait 967 | .emit("command_error", format!("Error waiting for command: {}", e)); 968 | return; 969 | } 970 | } 971 | }; 972 | 973 | let exit_msg = if status.success() { 974 | "Command completed successfully." 975 | } else { 976 | "Command failed." 977 | }; 978 | let _ = app_handle_wait.emit("command_end", exit_msg); 979 | }); 980 | 981 | Ok("Sudo command started. 
Output will stream in real-time.".to_string()) 982 | } 983 | 984 | #[command] 985 | fn autocomplete( 986 | input: String, 987 | session_id: String, 988 | command_manager: State<'_, CommandManager>, 989 | ) -> Result, String> { 990 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 991 | let key = session_id; 992 | 993 | let current_dir = if let Some(state) = states.get(&key) { 994 | &state.current_dir 995 | } else { 996 | return Err("Could not determine current directory".to_string()); 997 | }; 998 | 999 | let input_parts: Vec<&str> = input.split_whitespace().collect(); 1000 | 1001 | // Autocomplete commands if it's the first word 1002 | if input_parts.len() <= 1 { 1003 | // Common shell commands to suggest 1004 | let common_commands = vec![ 1005 | "cd", "ls", "pwd", "mkdir", "touch", "cat", "echo", "grep", "find", "cp", "mv", "rm", 1006 | "tar", "gzip", "ssh", "curl", "wget", "history", "exit", "clear", "top", "ps", "kill", 1007 | "ping", 1008 | ]; 1009 | 1010 | // Filter commands that match input prefix 1011 | let input_prefix = input_parts.first().unwrap_or(&""); 1012 | 1013 | // Case-insensitive filtering for commands 1014 | let matches: Vec = common_commands 1015 | .iter() 1016 | .filter(|&cmd| cmd.to_lowercase().starts_with(&input_prefix.to_lowercase())) 1017 | .map(|&cmd| cmd.to_string()) 1018 | .collect(); 1019 | 1020 | if !matches.is_empty() { 1021 | return Ok(matches); 1022 | } 1023 | } 1024 | 1025 | // If we have a cd command, autocomplete directories 1026 | let path_to_complete = if input_parts.first() == Some(&"cd") { 1027 | if input_parts.len() > 1 { 1028 | // Handle cd command with argument 1029 | input_parts.last().unwrap_or(&"") 1030 | } else { 1031 | // Handle cd with no argument - show all directories in current folder 1032 | "" 1033 | } 1034 | } else if !input_parts.is_empty() && input_parts[0].contains('/') { 1035 | // Handle path directly 1036 | input_parts[0] 1037 | } else if input_parts.len() > 1 { 1038 | // Handle second argument as path for any command 1039 | input_parts.last().unwrap_or(&"") 1040 | } else { 1041 | // Default to empty string if no path found 1042 | "" 1043 | }; 1044 | 1045 | // If input starts with cd, or we have a potential path to complete 1046 | if input_parts.first() == Some(&"cd") || !path_to_complete.is_empty() { 1047 | let (dir_to_search, prefix) = split_path_prefix(path_to_complete); 1048 | 1049 | // Create a Path for the directory to search 1050 | let search_path = if dir_to_search.starts_with('/') || dir_to_search.starts_with('~') { 1051 | if dir_to_search.starts_with('~') { 1052 | let home = dirs::home_dir().ok_or("Could not determine home directory")?; 1053 | let without_tilde = dir_to_search.trim_start_matches('~'); 1054 | let rel_path = without_tilde.trim_start_matches('/'); 1055 | if rel_path.is_empty() { 1056 | home 1057 | } else { 1058 | home.join(rel_path) 1059 | } 1060 | } else { 1061 | PathBuf::from(dir_to_search) 1062 | } 1063 | } else { 1064 | Path::new(current_dir).join(dir_to_search) 1065 | }; 1066 | 1067 | if search_path.exists() && search_path.is_dir() { 1068 | let entries = fs::read_dir(search_path).map_err(|e| e.to_string())?; 1069 | 1070 | let mut matches = Vec::new(); 1071 | for entry in entries.flatten() { 1072 | let file_name = entry.file_name(); 1073 | let file_name_str = file_name.to_string_lossy(); 1074 | 1075 | // Include all entries for empty prefix, otherwise filter by prefix (case-insensitive) 1076 | if prefix.is_empty() 1077 | || file_name_str 1078 | .to_lowercase() 1079 | 
.starts_with(&prefix.to_lowercase()) 1080 | { 1081 | let is_dir = entry.file_type().map(|ft| ft.is_dir()).unwrap_or(false); 1082 | 1083 | // For 'cd' command, only show directories 1084 | if input_parts.first() == Some(&"cd") && !is_dir { 1085 | continue; 1086 | } 1087 | 1088 | // Add trailing slash for directories 1089 | let suggestion = if is_dir { 1090 | format!("{}/", file_name_str) 1091 | } else { 1092 | file_name_str.to_string() 1093 | }; 1094 | 1095 | // Construct the full path suggestion for the command 1096 | let base_path = if dir_to_search.is_empty() { 1097 | "".to_string() 1098 | } else { 1099 | format!("{}/", dir_to_search.trim_end_matches('/')) 1100 | }; 1101 | 1102 | matches.push(format!("{}{}", base_path, suggestion)); 1103 | } 1104 | } 1105 | 1106 | if !matches.is_empty() { 1107 | // Sort matches alphabetically, case-insensitive 1108 | matches.sort_by_key(|a| a.to_lowercase()); 1109 | return Ok(matches); 1110 | } 1111 | } 1112 | } 1113 | 1114 | Ok(Vec::new()) 1115 | } 1116 | 1117 | // Helper function to split a path into directory and file prefix parts 1118 | fn split_path_prefix(path: &str) -> (&str, &str) { 1119 | match path.rfind('/') { 1120 | Some(index) => { 1121 | let (dir, file) = path.split_at(index + 1); 1122 | (dir, file) 1123 | } 1124 | None => ("", path), 1125 | } 1126 | } 1127 | 1128 | #[command] 1129 | fn get_working_directory(session_id: String, command_manager: State<'_, CommandManager>) -> Result { 1130 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 1131 | let key = session_id; 1132 | 1133 | if let Some(state) = states.get(&key) { 1134 | if state.is_ssh_session_active { 1135 | // Return the stored remote CWD, or a default if not yet known 1136 | Ok(state.remote_current_dir.clone().unwrap_or_else(|| "remote:~".to_string())) 1137 | } else { 1138 | Ok(state.current_dir.clone()) 1139 | } 1140 | } else { 1141 | // Fallback if session doesn't exist - create new default state 1142 | Ok(env::current_dir().unwrap_or_default().to_string_lossy().to_string()) 1143 | } 1144 | } 1145 | 1146 | #[command] 1147 | fn get_home_directory() -> Result { 1148 | dirs::home_dir() 1149 | .map(|path| path.to_string_lossy().to_string()) 1150 | .ok_or_else(|| "Could not determine home directory".to_string()) 1151 | } 1152 | 1153 | // Add a helper function to get the OS information 1154 | fn get_operating_system() -> String { 1155 | #[cfg(target_os = "windows")] 1156 | return "Windows".to_string(); 1157 | 1158 | #[cfg(target_os = "macos")] 1159 | return "macOS".to_string(); 1160 | 1161 | #[cfg(target_os = "linux")] 1162 | return "Linux".to_string(); 1163 | 1164 | #[cfg(not(any(target_os = "windows", target_os = "macos", target_os = "linux")))] 1165 | return "Unknown".to_string(); 1166 | } 1167 | 1168 | // Implement the ask_ai function for Ollama integration 1169 | #[command] 1170 | async fn ask_ai( 1171 | question: String, 1172 | model_override: Option, 1173 | command_manager: State<'_, CommandManager>, 1174 | ) -> Result { 1175 | // Check if this is a special command 1176 | if question.starts_with('/') { 1177 | return handle_special_command(question, command_manager).await; 1178 | } 1179 | 1180 | // Regular message to Ollama 1181 | let model; 1182 | let api_host; 1183 | 1184 | // Scope the mutex lock to drop it before any async operations 1185 | { 1186 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 1187 | // Use the model_override if provided, otherwise use the default 1188 | model = model_override.unwrap_or_else(|| 
ollama_state.current_model.clone()); 1189 | api_host = ollama_state.api_host.clone(); 1190 | // MutexGuard is dropped here at the end of scope 1191 | } 1192 | 1193 | // Get the current operating system 1194 | let os = get_operating_system(); 1195 | 1196 | // Create a system prompt that includes OS information and formatting instructions 1197 | let system_prompt = format!( 1198 | "You are a helpful terminal assistant. The user is using a {} operating system. \ 1199 | When providing terminal commands, ensure they are compatible with {}. \ 1200 | When asked for a command, respond with ONLY the command in this format: ```command```\ 1201 | The command should be a single line without any explanation or additional text.", 1202 | os, os 1203 | ); 1204 | 1205 | // Combine the system prompt with the user's question 1206 | let combined_prompt = format!("{}\n\nUser: {}", system_prompt, question); 1207 | 1208 | let client = reqwest::Client::new(); 1209 | let res = client 1210 | .post(format!("{}/api/generate", api_host)) 1211 | .json(&OllamaRequest { 1212 | model, 1213 | prompt: combined_prompt, 1214 | stream: false, 1215 | }) 1216 | .send() 1217 | .await 1218 | .map_err(|e| format!("Failed to send request to Ollama API: {}", e))?; 1219 | 1220 | if !res.status().is_success() { 1221 | return Err(format!("Ollama API error: {}", res.status())); 1222 | } 1223 | 1224 | let response: OllamaResponse = res 1225 | .json() 1226 | .await 1227 | .map_err(|e| format!("Failed to parse Ollama response: {}", e))?; 1228 | 1229 | Ok(response.response) 1230 | } 1231 | 1232 | // Handle special commands like /help, /models, /model 1233 | async fn handle_special_command( 1234 | command: String, 1235 | command_manager: State<'_, CommandManager>, 1236 | ) -> Result { 1237 | match command.as_str() { 1238 | "/help" => Ok("Available commands:\n\ 1239 | /help - Show this help message\n\ 1240 | /models - List available models\n\ 1241 | /model [name] - Show current model or switch to a different model\n\ 1242 | /host [url] - Show current API host or set a new one" 1243 | .to_string()), 1244 | "/models" => { 1245 | // Get list of available models from Ollama API 1246 | let api_host; 1247 | 1248 | // Scope the mutex lock to drop it before any async operations 1249 | { 1250 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 1251 | api_host = ollama_state.api_host.clone(); 1252 | // MutexGuard is dropped here 1253 | } 1254 | 1255 | let client = reqwest::Client::new(); 1256 | let res = client 1257 | .get(format!("{}/api/tags", api_host)) 1258 | .send() 1259 | .await 1260 | .map_err(|e| format!("Failed to get models from Ollama API: {}", e))?; 1261 | 1262 | if !res.status().is_success() { 1263 | return Err(format!("Ollama API error: {}", res.status())); 1264 | } 1265 | 1266 | let models: OllamaModelList = res 1267 | .json() 1268 | .await 1269 | .map_err(|e| format!("Failed to parse models list: {}", e))?; 1270 | 1271 | let mut result = String::from("Available models:\n"); 1272 | for model in models.models { 1273 | result.push_str(&format!("- {} ({} bytes)\n", model.name, model.size)); 1274 | } 1275 | Ok(result) 1276 | } 1277 | cmd if cmd.starts_with("/model") => { 1278 | let parts: Vec<&str> = cmd.split_whitespace().collect(); 1279 | 1280 | // Handle showing current model 1281 | if parts.len() == 1 { 1282 | let current_model; 1283 | { 1284 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 1285 | current_model = ollama_state.current_model.clone(); 1286 | } 1287 | 
Ok(format!("Current model: {}", current_model)) 1288 | } 1289 | // Handle switching model 1290 | else if parts.len() >= 2 { 1291 | let new_model = parts[1].to_string(); 1292 | { 1293 | let mut ollama_state = 1294 | command_manager.ollama.lock().map_err(|e| e.to_string())?; 1295 | ollama_state.current_model = new_model.clone(); 1296 | } 1297 | Ok(format!("Switched to model: {}", new_model)) 1298 | } else { 1299 | Err("Invalid model command. Use /model [name] to switch models.".to_string()) 1300 | } 1301 | } 1302 | cmd if cmd.starts_with("/host") => { 1303 | let parts: Vec<&str> = cmd.split_whitespace().collect(); 1304 | 1305 | // Handle showing current host 1306 | if parts.len() == 1 { 1307 | let current_host; 1308 | { 1309 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 1310 | current_host = ollama_state.api_host.clone(); 1311 | } 1312 | Ok(format!("Current Ollama API host: {}", current_host)) 1313 | } 1314 | // Handle changing host 1315 | else if parts.len() >= 2 { 1316 | let new_host = parts[1].to_string(); 1317 | { 1318 | let mut ollama_state = 1319 | command_manager.ollama.lock().map_err(|e| e.to_string())?; 1320 | ollama_state.api_host = new_host.clone(); 1321 | } 1322 | Ok(format!("Changed Ollama API host to: {}", new_host)) 1323 | } else { 1324 | Err("Invalid host command. Use /host [url] to change the API host.".to_string()) 1325 | } 1326 | } 1327 | _ => Err(format!( 1328 | "Unknown command: {}. Type /help for available commands.", 1329 | command 1330 | )), 1331 | } 1332 | } 1333 | 1334 | // Add function to get models from Ollama API 1335 | #[command] 1336 | async fn get_models(command_manager: State<'_, CommandManager>) -> Result { 1337 | // Get the API host from the Ollama state 1338 | let api_host; 1339 | { 1340 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 1341 | api_host = ollama_state.api_host.clone(); 1342 | } 1343 | 1344 | // Request the list of models from Ollama 1345 | let client = reqwest::Client::new(); 1346 | let res = client 1347 | .get(format!("{}/api/tags", api_host)) 1348 | .send() 1349 | .await 1350 | .map_err(|e| format!("Failed to get models from Ollama API: {}", e))?; 1351 | 1352 | if !res.status().is_success() { 1353 | return Err(format!("Ollama API error: {}", res.status())); 1354 | } 1355 | 1356 | // Parse the response 1357 | let models: OllamaModelList = res 1358 | .json() 1359 | .await 1360 | .map_err(|e| format!("Failed to parse models list: {}", e))?; 1361 | 1362 | // Format the response 1363 | let mut result = String::from("Available models:\n"); 1364 | for model in models.models { 1365 | result.push_str(&format!("- {} ({} bytes)\n", model.name, model.size)); 1366 | } 1367 | Ok(result) 1368 | } 1369 | 1370 | // Add function to switch model 1371 | #[command] 1372 | fn switch_model( 1373 | model: String, 1374 | command_manager: State<'_, CommandManager>, 1375 | ) -> Result { 1376 | let mut ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 1377 | ollama_state.current_model = model.clone(); 1378 | Ok(format!("Switched to model: {}", model)) 1379 | } 1380 | 1381 | // Add function to get current API host 1382 | #[command] 1383 | fn get_host(command_manager: State<'_, CommandManager>) -> Result { 1384 | let ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 1385 | Ok(format!( 1386 | "Current Ollama API host: {}", 1387 | ollama_state.api_host 1388 | )) 1389 | } 1390 | 1391 | // Add function to set API host 1392 | #[command] 1393 | fn set_host(host: 
String, command_manager: State<'_, CommandManager>) -> Result<String, String> { 1394 | let mut ollama_state = command_manager.ollama.lock().map_err(|e| e.to_string())?; 1395 | ollama_state.api_host = host.clone(); 1396 | Ok(format!("Changed Ollama API host to: {}", host)) 1397 | } 1398 | 1399 | #[command] 1400 | fn get_git_branch(session_id: String, command_manager: State<'_, CommandManager>) -> Result<String, String> { 1401 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 1402 | let key = session_id; 1403 | 1404 | let current_dir = if let Some(state) = states.get(&key) { 1405 | &state.current_dir 1406 | } else { 1407 | return Ok("".to_string()); 1408 | }; 1409 | 1410 | // Check if .git directory exists 1411 | let git_dir = Path::new(current_dir).join(".git"); 1412 | if !git_dir.exists() { 1413 | return Ok("".to_string()); 1414 | } 1415 | 1416 | // Get current branch 1417 | let output = Command::new("git") 1418 | .arg("rev-parse") 1419 | .arg("--abbrev-ref") 1420 | .arg("HEAD") 1421 | .current_dir(current_dir) 1422 | .output() 1423 | .map_err(|e| e.to_string())?; 1424 | 1425 | if output.status.success() { 1426 | let branch = String::from_utf8_lossy(&output.stdout).trim().to_string(); 1427 | Ok(branch) 1428 | } else { 1429 | Ok("".to_string()) 1430 | } 1431 | } 1432 | 1433 | #[tauri::command] 1434 | fn get_current_pid(session_id: String, command_manager: State<'_, CommandManager>) -> Result<u32, String> { 1435 | let states = command_manager.commands.lock().map_err(|e| e.to_string())?; 1436 | let key = session_id; 1437 | 1438 | if let Some(state) = states.get(&key) { 1439 | Ok(state.pid.unwrap_or(0)) 1440 | } else { 1441 | Ok(0) 1442 | } 1443 | } 1444 | 1445 | #[tauri::command] 1446 | fn terminate_command(session_id: String, command_manager: State<'_, CommandManager>) -> Result<(), String> { 1447 | let mut states = command_manager.commands.lock().map_err(|e| e.to_string())?; 1448 | let key = session_id; 1449 | 1450 | let pid = if let Some(state) = states.get(&key) { 1451 | state.pid.unwrap_or(0) 1452 | } else { 1453 | return Err("No active process found".to_string()); 1454 | }; 1455 | 1456 | if pid == 0 { 1457 | return Err("No active process to terminate".to_string()); 1458 | } 1459 | 1460 | #[cfg(unix)] 1461 | { 1462 | use nix::sys::signal::{kill, Signal}; 1463 | use nix::unistd::Pid; 1464 | 1465 | // Try to send SIGTERM first 1466 | if let Err(err) = kill(Pid::from_raw(pid as i32), Signal::SIGTERM) { 1467 | return Err(format!("Failed to send SIGTERM: {}", err)); 1468 | } 1469 | 1470 | // Give the process a moment to terminate gracefully 1471 | std::thread::sleep(std::time::Duration::from_millis(100)); 1472 | 1473 | // If it's still running, force kill with SIGKILL 1474 | if let Err(err) = kill(Pid::from_raw(pid as i32), Signal::SIGKILL) { 1475 | return Err(format!("Failed to send SIGKILL: {}", err)); 1476 | } 1477 | } 1478 | 1479 | #[cfg(windows)] 1480 | { 1481 | use windows::Win32::Foundation::CloseHandle; 1482 | use windows::Win32::System::Threading::{OpenProcess, TerminateProcess, PROCESS_TERMINATE}; 1483 | 1484 | unsafe { 1485 | let handle = OpenProcess(PROCESS_TERMINATE, false, pid); 1486 | if handle.is_invalid() { 1487 | return Err("Failed to open process".to_string()); 1488 | } 1489 | 1490 | if !TerminateProcess(handle, 0).as_bool() { 1491 | CloseHandle(handle); 1492 | return Err("Failed to terminate process".to_string()); 1493 | } 1494 | 1495 | CloseHandle(handle); 1496 | } 1497 | } 1498 | 1499 | // Clear the PID after successful termination 1500 | if let Some(state) = states.get_mut(&key) { 1501 | 
state.pid = None; 1502 | } 1503 | 1504 | Ok(()) 1505 | } 1506 | 1507 | // Add a new command to get all system environment variables 1508 | #[tauri::command] 1509 | fn get_system_env() -> Result<Vec<(String, String)>, String> { 1510 | let env_vars: Vec<(String, String)> = std::env::vars().collect(); 1511 | Ok(env_vars) 1512 | } 1513 | 1514 | fn main() { 1515 | let _ = fix_path_env::fix(); 1516 | // Create a new command manager 1517 | let command_manager = CommandManager::new(); 1518 | 1519 | tauri::Builder::default() 1520 | .plugin(tauri_plugin_shell::init()) 1521 | .setup(|_app| { 1522 | // Add any setup logic here 1523 | Ok(()) 1524 | }) 1525 | .manage(command_manager) 1526 | .plugin(tauri_plugin_opener::init()) 1527 | .invoke_handler(tauri::generate_handler![ 1528 | execute_command, 1529 | execute_sudo_command, 1530 | terminate_command, 1531 | get_current_pid, 1532 | autocomplete, 1533 | get_working_directory, 1534 | get_home_directory, 1535 | ask_ai, 1536 | get_models, 1537 | switch_model, 1538 | get_host, 1539 | set_host, 1540 | get_git_branch, 1541 | get_system_env, 1542 | ]) 1543 | .run(tauri::generate_context!()) 1544 | .expect("error while running tauri application"); 1545 | } 1546 | -------------------------------------------------------------------------------- /ai-terminal/src-tauri/tauri.conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.tauri.app/config/2", 3 | "productName": "ai-terminal", 4 | "version": "1.0.0", 5 | "identifier": "com.ai-terminal.dev", 6 | "build": { 7 | "beforeDevCommand": "npm run start", 8 | "devUrl": "http://localhost:1420", 9 | "beforeBuildCommand": "npm run build", 10 | "frontendDist": "../dist/ai-terminal/browser" 11 | }, 12 | "app": { 13 | "windows": [ 14 | { 15 | "title": "AI Terminal", 16 | "width": 1024, 17 | "height": 768, 18 | "minWidth": 800, 19 | "minHeight": 600, 20 | "center": true, 21 | "fullscreen": false, 22 | "resizable": true 23 | } 24 | ], 25 | "security": { 26 | "csp": null 27 | }, 28 | "macOSPrivateApi": true 29 | }, 30 | "bundle": { 31 | "active": true, 32 | "targets": ["dmg", "app", "deb"], 33 | "publisher": "AI Terminal Foundation", 34 | "copyright": "© 2025 AI Terminal Foundation", 35 | "category": "DeveloperTool", 36 | "shortDescription": "AI-powered terminal assistant", 37 | "longDescription": "AI-powered terminal assistant with natural language support", 38 | "icon": [ 39 | "icons/32x32.png", 40 | "icons/128x128.png", 41 | "icons/128x128@2x.png", 42 | "icons/icon.icns", 43 | "icons/icon.ico" 44 | ], 45 | "macOS": { 46 | "frameworks": [], 47 | "minimumSystemVersion": "10.15", 48 | "exceptionDomain": "", 49 | "signingIdentity": null, 50 | "entitlements": null, 51 | "providerShortName": null 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /ai-terminal/src/app/app.component.css: -------------------------------------------------------------------------------- 1 | :host { 2 | display: block; 3 | height: 100vh; 4 | width: 100vw; 5 | overflow: hidden; 6 | background-color: #282a36; 7 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 8 | font-size: 12px; 9 | } 10 | 11 | /* Remove all other styles */ 12 | 13 | .logo.angular:hover { 14 | filter: drop-shadow(0 0 2em #e32727); 15 | } 16 | 17 | :root { 18 | font-family: Inter, Avenir, Helvetica, Arial, sans-serif; 19 | font-size: 16px; 20 | line-height: 24px; 21 | font-weight: 400; 22 | 23 | color: #0f0f0f; 24 | background-color: #f6f6f6; 25 | 26 | font-synthesis: 
none; 27 | text-rendering: optimizeLegibility; 28 | -webkit-font-smoothing: antialiased; 29 | -moz-osx-font-smoothing: grayscale; 30 | -webkit-text-size-adjust: 100%; 31 | } 32 | 33 | .container { 34 | margin: 0; 35 | padding-top: 10vh; 36 | display: flex; 37 | flex-direction: column; 38 | justify-content: center; 39 | text-align: center; 40 | } 41 | 42 | .logo { 43 | height: 6em; 44 | padding: 1.5em; 45 | will-change: filter; 46 | transition: 0.75s; 47 | } 48 | 49 | .logo.tauri:hover { 50 | filter: drop-shadow(0 0 2em #24c8db); 51 | } 52 | 53 | .row { 54 | display: flex; 55 | justify-content: center; 56 | } 57 | 58 | a { 59 | font-weight: 500; 60 | color: #646cff; 61 | text-decoration: inherit; 62 | } 63 | 64 | a:hover { 65 | color: #535bf2; 66 | } 67 | 68 | h1 { 69 | text-align: center; 70 | } 71 | 72 | input, 73 | button { 74 | border-radius: 8px; 75 | border: 1px solid transparent; 76 | padding: 0.6em 1.2em; 77 | font-size: 1em; 78 | font-weight: 500; 79 | font-family: inherit; 80 | color: #0f0f0f; 81 | background-color: #ffffff; 82 | transition: border-color 0.25s; 83 | box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); 84 | } 85 | 86 | button { 87 | cursor: pointer; 88 | } 89 | 90 | button:hover { 91 | border-color: #396cd8; 92 | } 93 | 94 | button:active { 95 | border-color: #396cd8; 96 | background-color: #e8e8e8; 97 | } 98 | 99 | input, 100 | button { 101 | outline: none; 102 | } 103 | 104 | #greet-input { 105 | margin-right: 5px; 106 | } 107 | 108 | @media (prefers-color-scheme: dark) { 109 | :root { 110 | color: #f6f6f6; 111 | background-color: #2f2f2f; 112 | } 113 | 114 | a:hover { 115 | color: #24c8db; 116 | } 117 | 118 | input, 119 | button { 120 | color: #ffffff; 121 | background-color: #0f0f0f98; 122 | } 123 | 124 | button:active { 125 | background-color: #0f0f0f69; 126 | } 127 | } 128 | 129 | .split-container { 130 | display: flex; 131 | width: 100%; 132 | height: 100%; 133 | position: relative; 134 | user-select: none; 135 | } 136 | 137 | .panel { 138 | height: 100%; 139 | background-color: #282a36; 140 | color: #f8f8f2; 141 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 142 | display: flex; 143 | flex-direction: column; 144 | overflow: hidden; 145 | border-radius: 8px; 146 | } 147 | 148 | .terminal-panel { 149 | min-width: 200px; 150 | max-width: 80%; 151 | margin: 8px 4px 8px 8px; 152 | overflow-x: hidden; 153 | } 154 | 155 | .ai-panel { 156 | flex: 1; 157 | min-width: 200px; 158 | margin: 8px 8px 8px 4px; 159 | } 160 | 161 | .panel-content { 162 | height: 100%; 163 | display: flex; 164 | flex-direction: column; 165 | border-radius: 8px; 166 | overflow: hidden; 167 | padding: 0; 168 | margin: 0; 169 | background-color: transparent; 170 | max-width: 100%; 171 | } 172 | 173 | .output-area { 174 | flex: 1; 175 | padding: 16px; 176 | overflow-y: auto; 177 | overflow-x: hidden; 178 | /* Prevent horizontal scrollbar */ 179 | font-size: 12px; 180 | line-height: 1.5; 181 | border-radius: 0; 182 | scroll-behavior: smooth; 183 | margin-top: 0; 184 | background-color: #282a36; 185 | box-sizing: border-box; 186 | padding-bottom: 32px; 187 | position: relative; 188 | z-index: 1; 189 | user-select: text; 190 | width: 100%; 191 | /* Ensure it takes the full width */ 192 | } 193 | 194 | .terminal-panel .input-area { 195 | border: 1px solid rgba(80, 250, 123, 0.5); 196 | border-top: 1px solid rgba(139, 233, 253, 0.5); 197 | box-shadow: 0 4px 12px rgba(80, 250, 123, 0.1), 0 0 3px rgba(80, 250, 123, 0.15); 198 | } 199 | 200 | .terminal-panel .input-area:hover, 201 | .terminal-panel 
.input-area:focus-within { 202 | box-shadow: 0 6px 16px rgba(80, 250, 123, 0.15), 0 0 6px rgba(80, 250, 123, 0.2); 203 | } 204 | 205 | .ai-panel .input-area { 206 | border: 1px solid rgba(189, 147, 249, 0.5); 207 | border-top: 1px solid rgba(255, 121, 198, 0.5); 208 | box-shadow: 0 4px 12px rgba(189, 147, 249, 0.1), 0 0 3px rgba(189, 147, 249, 0.15); 209 | } 210 | 211 | .ai-panel .input-area:hover, 212 | .ai-panel .input-area:focus-within { 213 | box-shadow: 0 6px 16px rgba(189, 147, 249, 0.15), 0 0 6px rgba(189, 147, 249, 0.2); 214 | } 215 | 216 | .input-area { 217 | position: relative; 218 | padding: 12px 16px; 219 | display: flex; 220 | align-items: flex-start; 221 | background-color: #282a36; 222 | border-radius: 8px; 223 | margin: 4px 16px 16px 16px; 224 | flex-direction: column; 225 | width: calc(100% - 32px); 226 | box-sizing: border-box; 227 | transform: none; 228 | z-index: 5; 229 | transition: box-shadow 0.3s ease; 230 | } 231 | 232 | .input-area:hover, 233 | .input-area:focus-within { 234 | transform: none; 235 | } 236 | 237 | .terminal-panel .current-directory { 238 | color: rgba(80, 250, 123, 0.8); 239 | } 240 | 241 | .ai-panel .current-directory { 242 | color: rgba(189, 147, 249, 0.8); 243 | } 244 | 245 | .current-directory { 246 | font-weight: bold; 247 | padding: 4px 16px; 248 | width: calc(100% - 16px); 249 | overflow: hidden; 250 | text-overflow: ellipsis; 251 | white-space: nowrap; 252 | background-color: #282a36; 253 | font-size: 12px; 254 | margin-bottom: 0; 255 | display: flex; 256 | align-items: center; 257 | justify-content: space-between; 258 | box-sizing: border-box; 259 | } 260 | 261 | .git-branch { 262 | color: #ffc107; 263 | font-weight: normal; 264 | margin-left: auto; 265 | } 266 | 267 | .prompt-container { 268 | display: flex; 269 | width: 100%; 270 | align-items: center; 271 | padding: 0 4px; 272 | } 273 | 274 | .prompt { 275 | margin-right: 12px; 276 | font-weight: bold; 277 | white-space: nowrap; 278 | overflow: hidden; 279 | text-overflow: ellipsis; 280 | display: inline-block; 281 | line-height: 24px; 282 | } 283 | 284 | /* Terminal prompt is softer green */ 285 | .terminal-panel .prompt { 286 | color: rgba(80, 250, 123, 0.85); 287 | } 288 | 289 | /* AI prompt is softer purple */ 290 | .ai-panel .prompt { 291 | color: rgba(189, 147, 249, 0.85); 292 | } 293 | 294 | /* Command prompt is yellow/gold */ 295 | .prompt-command { 296 | color: #ffc107; 297 | } 298 | 299 | .input-area textarea { 300 | flex: 1; 301 | background: transparent; 302 | border: none; 303 | color: #f8f8f2; 304 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 305 | font-size: 12px; 306 | outline: none; 307 | resize: none; 308 | min-height: 24px; 309 | overflow-y: auto; 310 | line-height: 1.5; 311 | width: calc(100% - 24px); 312 | padding: 0; 313 | } 314 | 315 | .input-area textarea:disabled { 316 | opacity: 0.5; 317 | cursor: not-allowed; 318 | } 319 | 320 | .input-area textarea::placeholder { 321 | color: #6272a4; 322 | opacity: 0.7; 323 | } 324 | 325 | /* Panel resizing */ 326 | .resizer { 327 | width: 8px; 328 | background-color: transparent; 329 | cursor: col-resize; 330 | transition: all 0.2s ease; 331 | position: relative; 332 | user-select: none; 333 | -webkit-user-select: none; 334 | -moz-user-select: none; 335 | -ms-user-select: none; 336 | margin: 8px 0; 337 | } 338 | 339 | .resizer::before { 340 | content: ''; 341 | position: absolute; 342 | left: 50%; 343 | transform: translateX(-50%); 344 | width: 2px; 345 | height: 100%; 346 | background-color: #44475a; 347 | 
border-radius: 1px; 348 | opacity: 0.5; 349 | transition: all 0.2s ease; 350 | } 351 | 352 | .resizer:hover::before { 353 | background-color: rgba(189, 147, 249, 0.6); 354 | width: 3px; 355 | box-shadow: 0 0 8px rgba(189, 147, 249, 0.3); 356 | opacity: 0.7; 357 | } 358 | 359 | .resizer.resizing::before { 360 | background-color: rgba(189, 147, 249, 0.7); 361 | width: 3px; 362 | box-shadow: 0 0 12px rgba(189, 147, 249, 0.4); 363 | opacity: 0.8; 364 | } 365 | 366 | /* Scrollbar styling */ 367 | .output-area::-webkit-scrollbar { 368 | width: 8px; 369 | } 370 | 371 | .output-area::-webkit-scrollbar-track { 372 | background: #282a36; 373 | border-radius: 4px; 374 | } 375 | 376 | .output-area::-webkit-scrollbar-thumb { 377 | background: #44475a; 378 | border-radius: 4px; 379 | opacity: 0.5; 380 | } 381 | 382 | .output-area::-webkit-scrollbar-thumb:hover { 383 | background: #6272a4; 384 | opacity: 0.8; 385 | } 386 | 387 | /* Command and chat entry styling */ 388 | .command-entry { 389 | margin-bottom: 20px; 390 | padding: 8px; 391 | border: 1px solid #44475a; 392 | border-radius: 8px; 393 | opacity: 0.9; 394 | transition: all 0.2s ease; 395 | max-width: 100%; 396 | overflow-x: hidden; 397 | /* Prevent horizontal overflow */ 398 | box-sizing: border-box; 399 | } 400 | 401 | .command-entry:hover { 402 | transform: scale(1.01); 403 | border-color: rgba(80, 250, 123, 0.4); 404 | box-shadow: 0 4px 12px rgba(80, 250, 123, 0.15); 405 | opacity: 1; 406 | } 407 | 408 | .chat-entry { 409 | margin-bottom: 20px; 410 | padding: 8px; 411 | border: 1px solid #44475a; 412 | border-radius: 8px; 413 | opacity: 0.9; 414 | transition: all 0.2s ease; 415 | } 416 | 417 | .chat-entry:hover { 418 | transform: scale(1.01); 419 | border-color: rgba(189, 147, 249, 0.4); 420 | box-shadow: 0 4px 12px rgba(189, 147, 249, 0.15); 421 | opacity: 1; 422 | } 423 | 424 | .chat-line { 425 | display: flex; 426 | align-items: flex-start; 427 | margin-bottom: 4px; 428 | font-size: 12px; 429 | border-bottom: 1px solid rgba(139, 233, 253, 0.2); 430 | padding-bottom: 8px; 431 | margin-bottom: 8px; 432 | } 433 | 434 | .chat-line .prompt { 435 | color: #50fa7b; 436 | margin-right: 8px; 437 | font-weight: normal; 438 | } 439 | 440 | .chat-line .message { 441 | color: #f8f8f2; 442 | font-weight: normal; 443 | } 444 | 445 | .chat-output { 446 | color: #f8f8f2; 447 | line-height: 1.5; 448 | font-size: 12px; 449 | white-space: pre-wrap; 450 | word-wrap: break-word; 451 | margin-left: 8px; 452 | position: relative; 453 | padding-top: 8px; 454 | display: flex; 455 | flex-wrap: wrap; 456 | align-items: center; 457 | gap: 4px; 458 | transition: all 0.2s ease; 459 | border: 1px solid transparent; 460 | border-radius: 4px; 461 | padding: 8px; 462 | } 463 | 464 | .chat-output:hover { 465 | transform: none; 466 | border: 1px solid transparent; 467 | box-shadow: none; 468 | z-index: auto; 469 | } 470 | 471 | .chat-output>div { 472 | display: inline-flex; 473 | align-items: center; 474 | flex-wrap: wrap; 475 | gap: 4px; 476 | } 477 | 478 | .copy-icon { 479 | position: absolute; 480 | bottom: 8px; 481 | right: 8px; 482 | background: none; 483 | border: 1px solid rgba(255, 255, 255, 0); 484 | padding: 4px; 485 | color: transparent; 486 | cursor: pointer; 487 | transition: all 0.2s ease; 488 | display: flex; 489 | align-items: center; 490 | justify-content: center; 491 | border-radius: 4px; 492 | opacity: 0.6; 493 | z-index: 1; 494 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); 495 | } 496 | 497 | .copy-icon:hover { 498 | opacity: 1; 499 | background-color: 
rgba(98, 114, 164, 0.1); 500 | transform: none; 501 | box-shadow: none; 502 | border-color: rgba(98, 114, 164, 0.8); 503 | } 504 | 505 | .copy-icon svg { 506 | width: 14px; 507 | height: 14px; 508 | stroke: #fbfbfb; 509 | stroke-width: 1.5; 510 | fill: transparent; 511 | transition: all 0.2s ease; 512 | } 513 | 514 | .copy-icon:hover svg { 515 | stroke: #ffffff; 516 | width: 16px; 517 | height: 16px; 518 | } 519 | 520 | /* Add tooltip styles for copy icon */ 521 | .copy-icon::after { 522 | content: "Copy"; 523 | position: absolute; 524 | bottom: 100%; 525 | left: 50%; 526 | transform: translateX(-50%); 527 | padding: 4px 8px; 528 | background-color: #282a36; 529 | color: #f8f8f2; 530 | font-size: 12px; 531 | border-radius: 4px; 532 | white-space: nowrap; 533 | opacity: 0; 534 | visibility: hidden; 535 | transition: all 0.2s ease; 536 | pointer-events: none; 537 | border: 1px solid rgba(98, 114, 164, 0.2); 538 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2); 539 | z-index: 1000; 540 | } 541 | 542 | .copy-icon:hover::after { 543 | opacity: 1; 544 | visibility: visible; 545 | transform: translateX(-50%) translateY(0); 546 | } 547 | 548 | /* Add a small arrow to the tooltip */ 549 | .copy-icon::before { 550 | content: ''; 551 | position: absolute; 552 | bottom: 100%; 553 | left: 50%; 554 | transform: translateX(-50%); 555 | border: 4px solid transparent; 556 | border-top-color: rgba(98, 114, 164, 0.2); 557 | opacity: 0; 558 | visibility: hidden; 559 | transition: all 0.2s ease; 560 | } 561 | 562 | .copy-icon:hover::before { 563 | opacity: 1; 564 | visibility: visible; 565 | transform: translateX(-50%) translateY(0); 566 | } 567 | 568 | .command-output:hover .copy-icon, 569 | .chat-output:hover .copy-icon { 570 | opacity: 1; 571 | } 572 | 573 | /* Autocomplete styles */ 574 | .autocomplete-container { 575 | position: absolute; 576 | bottom: 100%; 577 | left: 0; 578 | width: 100%; 579 | max-width: 100%; 580 | max-height: 200px; 581 | background-color: #44475a; 582 | border-radius: 4px; 583 | box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2); 584 | z-index: 20; 585 | margin-bottom: 8px; 586 | overflow-y: auto; 587 | outline: none; 588 | border: 1px solid rgba(80, 250, 123, 0.2); 589 | } 590 | 591 | .autocomplete-container:focus { 592 | outline: none; 593 | border: 1px solid rgba(189, 147, 249, 0.7); 594 | box-shadow: 0 0 0 2px rgba(189, 147, 249, 0.3); 595 | } 596 | 597 | .autocomplete-list { 598 | width: 100%; 599 | } 600 | 601 | .autocomplete-item { 602 | padding: 6px 12px; 603 | cursor: pointer; 604 | transition: all 0.2s; 605 | } 606 | 607 | .autocomplete-item:hover { 608 | background-color: #6272a4; 609 | } 610 | 611 | .autocomplete-item.selected { 612 | background-color: #bd93f9; 613 | color: #282a36; 614 | font-weight: bold; 615 | padding-left: 8px; 616 | border-left: 4px solid #ff79c6; 617 | position: relative; 618 | } 619 | 620 | /* Add an indicator to show this item is selected */ 621 | .autocomplete-item.selected:after { 622 | content: "▶"; 623 | position: absolute; 624 | right: 10px; 625 | font-size: 10px; 626 | color: #282a36; 627 | } 628 | 629 | /* Dracula theme colors: 630 | - Background: #282a36 631 | - Current Line: #44475a 632 | - Foreground: #f8f8f2 633 | - Comment: #6272a4 634 | - Red: #ff5555 635 | - Orange: #ffb86c 636 | - Yellow: #f1fa8c 637 | - Green: #50fa7b 638 | - Purple: #bd93f9 639 | - Pink: #ff79c6 640 | - Cyan: #8be9fd 641 | */ 642 | 643 | /* Dark mode support */ 644 | @media (prefers-color-scheme: dark) { 645 | .content-panel { 646 | background-color: #2f2f2f; 647 | color: 
#f6f6f6; 648 | } 649 | } 650 | 651 | .processing-indicator { 652 | color: #8be9fd; 653 | font-style: italic; 654 | margin-bottom: 8px; 655 | font-size: 11px; 656 | opacity: 0.8; 657 | } 658 | 659 | @keyframes pulse { 660 | 0% { 661 | opacity: 0.5; 662 | } 663 | 664 | 50% { 665 | opacity: 1; 666 | } 667 | 668 | 100% { 669 | opacity: 0.5; 670 | } 671 | } 672 | 673 | /* Make the terminal more responsive */ 674 | @media (max-width: 768px) { 675 | .prompt { 676 | max-width: 100px; 677 | } 678 | } 679 | 680 | /* Add spacing between command output lines */ 681 | .command-output div { 682 | white-space: pre-wrap !important; 683 | word-wrap: break-word !important; 684 | word-break: break-word !important; 685 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 686 | line-height: 1.4; 687 | user-select: text; 688 | display: block; 689 | max-width: 100%; 690 | box-sizing: border-box; 691 | overflow-x: hidden; 692 | } 693 | 694 | /* Ensure command output line wrapping in all environments */ 695 | .command-output div:not(.processing-indicator) { 696 | white-space: pre-wrap !important; 697 | overflow-wrap: break-word !important; 698 | word-break: break-word !important; 699 | display: block; 700 | max-width: 100%; 701 | box-sizing: border-box; 702 | margin: 2px 0; 703 | } 704 | 705 | /* Special styling for file listing outputs (like ls command) */ 706 | .command-output div.file-list-output { 707 | display: inline-block; 708 | vertical-align: top; 709 | margin: 2px 12px 2px 0; 710 | padding: 0 4px; 711 | } 712 | 713 | /* Command status colors */ 714 | .command-success { 715 | color: #50fa7b; 716 | /* Dracula green */ 717 | font-weight: bold; 718 | } 719 | 720 | .command-error { 721 | color: #ff5555; 722 | /* Dracula red */ 723 | font-weight: bold; 724 | } 725 | 726 | .command-running { 727 | color: #f1fa8c; 728 | /* Dracula yellow */ 729 | font-weight: bold; 730 | animation: pulse 1.5s infinite; 731 | } 732 | 733 | @keyframes pulse { 734 | 0% { 735 | opacity: 0.7; 736 | } 737 | 738 | 50% { 739 | opacity: 1; 740 | } 741 | 742 | 100% { 743 | opacity: 0.7; 744 | } 745 | } 746 | 747 | /* Terminal panel full width when AI panel is hidden */ 748 | .terminal-panel.full-width { 749 | width: calc(100% - 16px) !important; 750 | /* Adjust for margins */ 751 | max-width: 100%; 752 | } 753 | 754 | /* Terminal header styling */ 755 | .terminal-header, 756 | .ai-header { 757 | display: flex; 758 | justify-content: space-between; 759 | align-items: center; 760 | padding: 8px 16px; 761 | background-color: #282a36; 762 | border-radius: 8px 8px 0 0; 763 | height: 40px; 764 | box-sizing: border-box; 765 | } 766 | 767 | /* Terminal Tabs Styling */ 768 | .terminal-tabs { 769 | background-color: #21222c; 770 | border-bottom: 1px solid #44475a; 771 | padding: 0; 772 | margin: 0; 773 | } 774 | 775 | .tabs-container { 776 | display: flex; 777 | align-items: center; 778 | padding: 0 8px; 779 | height: 36px; 780 | overflow-x: auto; 781 | overflow-y: hidden; 782 | scrollbar-width: thin; 783 | scrollbar-color: #44475a transparent; 784 | } 785 | 786 | .tabs-container::-webkit-scrollbar { 787 | height: 3px; 788 | } 789 | 790 | .tabs-container::-webkit-scrollbar-track { 791 | background: transparent; 792 | } 793 | 794 | .tabs-container::-webkit-scrollbar-thumb { 795 | background: #44475a; 796 | border-radius: 2px; 797 | } 798 | 799 | .tab { 800 | display: flex; 801 | align-items: center; 802 | padding: 6px 12px; 803 | margin-right: 2px; 804 | background-color: #282a36; 805 | border: 1px solid #44475a; 806 | border-bottom: none; 
807 | border-radius: 6px 6px 0 0; 808 | cursor: pointer; 809 | transition: all 0.2s ease; 810 | min-width: 100px; 811 | max-width: 200px; 812 | position: relative; 813 | font-size: 12px; 814 | height: 28px; 815 | box-sizing: border-box; 816 | } 817 | 818 | .tab:hover { 819 | background-color: #44475a; 820 | border-color: rgba(189, 147, 249, 0.3); 821 | } 822 | 823 | .tab.active { 824 | background-color: #21222c; 825 | border-color: rgba(80, 250, 123, 0.5); 826 | color: #50fa7b; 827 | font-weight: 600; 828 | box-shadow: 0 2px 8px rgba(80, 250, 123, 0.1); 829 | } 830 | 831 | .tab.active:hover { 832 | background-color: #21222c; 833 | border-color: rgba(80, 250, 123, 0.7); 834 | } 835 | 836 | .tab-name { 837 | flex: 1; 838 | white-space: nowrap; 839 | overflow: hidden; 840 | text-overflow: ellipsis; 841 | color: #f8f8f2; 842 | font-size: 12px; 843 | line-height: 1; 844 | outline: none; 845 | border: none; 846 | background: transparent; 847 | cursor: inherit; 848 | } 849 | 850 | .tab.active .tab-name { 851 | color: #50fa7b; 852 | } 853 | 854 | .tab-name[contenteditable="true"] { 855 | background-color: rgba(189, 147, 249, 0.1); 856 | border: 1px solid rgba(189, 147, 249, 0.3); 857 | border-radius: 3px; 858 | padding: 2px 4px; 859 | margin: -2px -4px; 860 | cursor: text; 861 | } 862 | 863 | .close-tab { 864 | background: none; 865 | border: none; 866 | color: #6272a4; 867 | cursor: pointer; 868 | padding: 2px 4px; 869 | margin-left: 6px; 870 | border-radius: 3px; 871 | font-size: 14px; 872 | line-height: 1; 873 | width: 16px; 874 | height: 16px; 875 | display: flex; 876 | align-items: center; 877 | justify-content: center; 878 | transition: all 0.2s ease; 879 | font-weight: bold; 880 | } 881 | 882 | .close-tab:hover { 883 | color: #ff5555; 884 | background-color: rgba(255, 85, 85, 0.1); 885 | } 886 | 887 | .new-tab { 888 | background: none; 889 | border: 1px solid #44475a; 890 | color: #6272a4; 891 | cursor: pointer; 892 | padding: 6px 12px; 893 | margin-left: 8px; 894 | border-radius: 6px; 895 | font-size: 16px; 896 | line-height: 1; 897 | width: 36px; 898 | height: 28px; 899 | display: flex; 900 | align-items: center; 901 | justify-content: center; 902 | transition: all 0.2s ease; 903 | font-weight: bold; 904 | flex-shrink: 0; 905 | } 906 | 907 | .new-tab:hover { 908 | color: #50fa7b; 909 | background-color: rgba(80, 250, 123, 0.1); 910 | border-color: rgba(80, 250, 123, 0.3); 911 | } 912 | 913 | /* Panel titles styling */ 914 | .panel-title { 915 | font-weight: bold; 916 | font-size: 14px; 917 | line-height: 24px; 918 | color: rgba(139, 233, 253, 0.8); 919 | } 920 | 921 | .ai-title { 922 | font-weight: bold; 923 | font-size: 14px; 924 | line-height: 24px; 925 | color: rgba(189, 147, 249, 0.8); 926 | } 927 | 928 | /* Toggle AI button styling */ 929 | .toggle-ai-button { 930 | background-color: rgba(189, 147, 249, 0.1); 931 | color: rgba(189, 147, 249, 0.9); 932 | border: 1px solid rgba(189, 147, 249, 0.3); 933 | padding: 6px 12px; 934 | border-radius: 6px; 935 | cursor: pointer; 936 | font-size: 12px; 937 | height: 28px; 938 | line-height: 1; 939 | display: flex; 940 | align-items: center; 941 | justify-content: center; 942 | transition: all 0.2s ease; 943 | font-weight: 500; 944 | letter-spacing: 0.3px; 945 | position: relative; 946 | overflow: hidden; 947 | } 948 | 949 | .toggle-ai-button::before { 950 | content: ''; 951 | position: absolute; 952 | top: 0; 953 | left: 0; 954 | width: 100%; 955 | height: 100%; 956 | background: linear-gradient(45deg, rgba(189, 147, 249, 0.1), rgba(255, 121, 
198, 0.1)); 957 | opacity: 0; 958 | transition: opacity 0.2s ease; 959 | } 960 | 961 | .toggle-ai-button:hover { 962 | background-color: rgba(189, 147, 249, 0.15); 963 | border-color: rgba(189, 147, 249, 0.5); 964 | box-shadow: 0 4px 12px rgba(189, 147, 249, 0.2); 965 | transform: none; 966 | } 967 | 968 | .toggle-ai-button:hover::before { 969 | opacity: 1; 970 | } 971 | 972 | .toggle-ai-button:active { 973 | transform: translateY(0); 974 | box-shadow: 0 2px 6px rgba(189, 147, 249, 0.15); 975 | } 976 | 977 | /* Add a subtle glow effect when the AI panel is visible */ 978 | .ai-panel .toggle-ai-button { 979 | background-color: rgba(255, 121, 198, 0.1); 980 | color: rgba(255, 121, 198, 0.9); 981 | border-color: rgba(255, 121, 198, 0.3); 982 | } 983 | 984 | .ai-panel .toggle-ai-button:hover { 985 | background-color: rgba(255, 121, 198, 0.15); 986 | border-color: rgba(255, 121, 198, 0.5); 987 | box-shadow: 0 4px 12px rgba(255, 121, 198, 0.2); 988 | } 989 | 990 | .ai-panel .toggle-ai-button::before { 991 | background: linear-gradient(45deg, rgba(255, 121, 198, 0.1), rgba(189, 147, 249, 0.1)); 992 | } 993 | 994 | /* Adjust current directory display in header */ 995 | .terminal-header .current-directory { 996 | margin-bottom: 0; 997 | } 998 | 999 | /* Add styles for command messages and responses */ 1000 | .message-command { 1001 | color: #ffc107; 1002 | font-weight: bold; 1003 | } 1004 | 1005 | .chat-command-output { 1006 | background-color: rgba(0, 0, 0, 0.2); 1007 | padding: 8px; 1008 | border-left: 2px solid #ffc107; 1009 | white-space: pre-wrap; 1010 | font-family: 'Courier New', monospace; 1011 | } 1012 | 1013 | /* Code Block Styling */ 1014 | .code-block-container { 1015 | position: relative; 1016 | margin: 8px 0; 1017 | border-radius: 6px; 1018 | background-color: rgba(80, 250, 123, 0.1); 1019 | border: 1px solid rgba(80, 250, 123, 0.2); 1020 | max-width: 100%; 1021 | overflow: hidden; 1022 | box-sizing: border-box; 1023 | display: inline-flex; 1024 | align-items: center; 1025 | transition: all 0.2s ease; 1026 | } 1027 | 1028 | .code-block-container:hover { 1029 | transform: none; 1030 | border-color: rgba(80, 250, 123, 0.2); 1031 | box-shadow: none; 1032 | } 1033 | 1034 | .code-block { 1035 | position: relative; 1036 | padding: 8px 12px; 1037 | margin: 0; 1038 | background-color: transparent; 1039 | color: #6272a4; 1040 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 1041 | font-size: 12px; 1042 | line-height: 1.5; 1043 | overflow-x: auto; 1044 | white-space: pre-wrap; 1045 | word-wrap: break-word; 1046 | box-sizing: border-box; 1047 | display: flex; 1048 | align-items: center; 1049 | transition: all 0.2s ease; 1050 | border: 1px solid transparent; 1051 | } 1052 | 1053 | .code-block:hover { 1054 | transform: none; 1055 | border: 1px solid transparent; 1056 | box-shadow: none; 1057 | z-index: auto; 1058 | } 1059 | 1060 | .code-block code { 1061 | display: block; 1062 | white-space: pre-wrap; 1063 | word-wrap: break-word; 1064 | max-width: 100%; 1065 | box-sizing: border-box; 1066 | font-family: inherit; 1067 | } 1068 | 1069 | .chat-output { 1070 | margin-top: 8px; 1071 | color: #f8f8f2; 1072 | font-size: 12px; 1073 | line-height: 1.5; 1074 | white-space: pre-wrap; 1075 | word-wrap: break-word; 1076 | max-width: 100%; 1077 | box-sizing: border-box; 1078 | display: flex; 1079 | flex-wrap: wrap; 1080 | align-items: center; 1081 | gap: 4px; 1082 | } 1083 | 1084 | .chat-output>div { 1085 | display: inline-flex; 1086 | align-items: center; 1087 | flex-wrap: wrap; 1088 | gap: 4px; 
1089 | } 1090 | 1091 | /* Single line code blocks (no newlines) */ 1092 | .code-block-container.single-line { 1093 | background-color: rgba(80, 250, 123, 0.1); 1094 | display: inline-flex; 1095 | margin: 4px 0; 1096 | align-items: center; 1097 | border-radius: 4px; 1098 | border: 1px solid rgba(80, 250, 123, 0.2); 1099 | width: fit-content; 1100 | max-width: 100%; 1101 | padding: 4px 8px; 1102 | } 1103 | 1104 | .code-block-container.single-line .code-block { 1105 | padding: 0; 1106 | max-height: none; 1107 | display: flex; 1108 | align-items: center; 1109 | white-space: nowrap; 1110 | overflow-x: auto; 1111 | width: fit-content; 1112 | background-color: transparent; 1113 | color: #6272a4; 1114 | } 1115 | 1116 | .code-block-container.single-line .code-block-header { 1117 | border: none; 1118 | padding: 0 8px; 1119 | background-color: transparent; 1120 | } 1121 | 1122 | .code-block-header { 1123 | display: flex; 1124 | justify-content: space-between; 1125 | align-items: center; 1126 | padding: 4px 8px; 1127 | background: none; 1128 | border: none; 1129 | } 1130 | 1131 | .code-language { 1132 | font-size: 12px; 1133 | color: #6272a4; 1134 | font-weight: normal; 1135 | text-transform: uppercase; 1136 | } 1137 | 1138 | .copy-code-button { 1139 | background: none; 1140 | border: none; 1141 | color: transparent; 1142 | cursor: pointer; 1143 | padding: 2px; 1144 | display: flex; 1145 | align-items: center; 1146 | justify-content: center; 1147 | transition: all 0.2s ease; 1148 | } 1149 | 1150 | .copy-code-button:hover { 1151 | color: transparent; 1152 | background: none; 1153 | } 1154 | 1155 | .copy-code-button svg { 1156 | width: 14px; 1157 | height: 14px; 1158 | stroke: #fefefe; 1159 | stroke-width: 1.5; 1160 | fill: transparent; 1161 | } 1162 | 1163 | /* Code block actions container */ 1164 | .command-actions { 1165 | display: flex; 1166 | gap: 8px; 1167 | margin-left: 8px; 1168 | } 1169 | 1170 | .command-action-button { 1171 | background: none; 1172 | border: none; 1173 | padding: 4px; 1174 | color: transparent; 1175 | cursor: pointer; 1176 | transition: all 0.2s ease; 1177 | display: flex; 1178 | align-items: center; 1179 | justify-content: center; 1180 | position: relative; 1181 | } 1182 | 1183 | .command-action-button:hover { 1184 | color: transparent; 1185 | background: none; 1186 | } 1187 | 1188 | .command-action-button svg { 1189 | width: 14px; 1190 | height: 14px; 1191 | stroke: #ffffff; 1192 | stroke-width: 1.5; 1193 | fill: transparent; 1194 | transition: all 0.2s ease; 1195 | } 1196 | 1197 | .command-action-button:hover svg { 1198 | stroke: #ffffff; 1199 | width: 18px; 1200 | height: 18px; 1201 | } 1202 | 1203 | /* Add tooltip styles */ 1204 | .command-action-button::after { 1205 | content: attr(data-tooltip); 1206 | position: absolute; 1207 | bottom: 100%; 1208 | left: 50%; 1209 | transform: translateX(-50%) translateY(10px); 1210 | padding: 4px 8px; 1211 | background-color: #282a36; 1212 | color: #f8f8f2; 1213 | font-size: 12px; 1214 | border-radius: 4px; 1215 | white-space: nowrap; 1216 | opacity: 0; 1217 | visibility: hidden; 1218 | transition: all 0.3s ease; 1219 | pointer-events: none; 1220 | border: 1px solid rgba(189, 147, 249, 0.4); 1221 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3); 1222 | z-index: 1000; 1223 | margin-bottom: 5px; 1224 | } 1225 | 1226 | .command-action-button:hover::after { 1227 | opacity: 1; 1228 | visibility: visible; 1229 | transform: translateX(-50%) translateY(0); 1230 | transition-delay: 0.3s; 1231 | } 1232 | 1233 | /* Add a small arrow to the tooltip */ 
1234 | .command-action-button::before { 1235 | content: ''; 1236 | position: absolute; 1237 | bottom: 100%; 1238 | left: 50%; 1239 | transform: translateX(-50%) translateY(10px); 1240 | border: 5px solid transparent; 1241 | border-top-color: rgba(189, 147, 249, 0.4); 1242 | opacity: 0; 1243 | visibility: hidden; 1244 | transition: all 0.3s ease; 1245 | z-index: 1000; 1246 | margin-bottom: -2px; 1247 | } 1248 | 1249 | .command-action-button:hover::before { 1250 | opacity: 1; 1251 | visibility: visible; 1252 | transform: translateX(-50%) translateY(0); 1253 | transition-delay: 0.3s; 1254 | } 1255 | 1256 | /* Copy notification styling */ 1257 | .copy-notification { 1258 | position: fixed; 1259 | bottom: 30px; 1260 | right: 30px; 1261 | background-color: rgba(80, 250, 123, 0.9); 1262 | color: #282a36; 1263 | padding: 8px 16px; 1264 | border-radius: 4px; 1265 | font-weight: bold; 1266 | box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3); 1267 | opacity: 0; 1268 | transform: translateY(20px); 1269 | transition: all 0.3s ease; 1270 | z-index: 1000; 1271 | } 1272 | 1273 | .copy-notification.show { 1274 | opacity: 1; 1275 | transform: translateY(0); 1276 | } 1277 | 1278 | /* Additional styling for terminal command-style code blocks */ 1279 | .code-block-container.command-block { 1280 | background-color: #282a36; 1281 | border-color: rgba(139, 233, 253, 0.2); 1282 | } 1283 | 1284 | .code-block-container.command-block .code-block { 1285 | background-color: #282a36; 1286 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 1287 | color: #50fa7b; 1288 | } 1289 | 1290 | .code-block-container.command-block .code-block-header { 1291 | background-color: #282a36; 1292 | border-bottom-color: rgba(139, 233, 253, 0.2); 1293 | } 1294 | 1295 | /* Better styling for the inline single-line code blocks */ 1296 | .code-block-container.single-line { 1297 | background-color: rgba(80, 250, 123, 0.1); 1298 | display: inline-flex; 1299 | margin: 4px 0; 1300 | max-width: 100%; 1301 | align-items: center; 1302 | border-radius: 4px; 1303 | border: 1px solid rgba(80, 250, 123, 0.2); 1304 | max-width: 100%; 1305 | overflow: hidden; 1306 | } 1307 | 1308 | .command-history, 1309 | .chat-history { 1310 | user-select: text; 1311 | /* Allow selection in command and chat history */ 1312 | } 1313 | 1314 | /* Add a class for disabling selection during resize */ 1315 | .resizing-active * { 1316 | user-select: none !important; 1317 | cursor: col-resize !important; 1318 | } 1319 | 1320 | /* Command action buttons styling */ 1321 | .command-actions { 1322 | position: absolute; 1323 | right: 8px; 1324 | top: 50%; 1325 | transform: translateY(-50%); 1326 | display: flex; 1327 | gap: 4px; 1328 | padding: 2px 4px; 1329 | background: none; 1330 | z-index: 1; 1331 | } 1332 | 1333 | .command-action-button { 1334 | background: none; 1335 | border: none; 1336 | padding: 4px; 1337 | color: transparent; 1338 | cursor: pointer; 1339 | transition: all 0.2s ease; 1340 | display: flex; 1341 | align-items: center; 1342 | justify-content: center; 1343 | } 1344 | 1345 | .command-action-button:hover { 1346 | color: transparent; 1347 | background: none; 1348 | } 1349 | 1350 | .command-text { 1351 | color: transparent; 1352 | cursor: pointer; 1353 | white-space: nowrap; 1354 | font-weight: 500; 1355 | margin-right: 4px; 1356 | flex-shrink: 0; 1357 | } 1358 | 1359 | /* Code block actions container */ 1360 | .code-block-actions { 1361 | display: flex; 1362 | align-items: center; 1363 | gap: 4px; 1364 | background: none; 1365 | } 1366 | 1367 | .code-block-actions 
.copy-code-button { 1368 | margin-left: 0; 1369 | opacity: 1; 1370 | } 1371 | 1372 | .code-block-actions .copy-code-button:hover { 1373 | opacity: 1; 1374 | } 1375 | 1376 | /* Simple command styling */ 1377 | .simple-command { 1378 | display: inline-flex; 1379 | align-items: center; 1380 | padding: 6px 12px; 1381 | margin: 0; 1382 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 1383 | background-color: rgba(35, 214, 81, 0.08); 1384 | /* More transparent background */ 1385 | border: 1px solid rgba(80, 250, 123, 0.1); 1386 | border-radius: 6px; 1387 | transition: all 0.2s ease; 1388 | width: fit-content; 1389 | min-width: 100px; 1390 | position: relative; 1391 | padding-right: 84px; 1392 | box-sizing: border-box; 1393 | flex-shrink: 0; 1394 | padding-right: 84px; 1395 | /* Space for icons */ 1396 | max-width: 100%; 1397 | overflow: hidden; 1398 | } 1399 | 1400 | .command-output { 1401 | position: relative; 1402 | padding: 8px; 1403 | margin: 4px 0; 1404 | border-radius: 4px; 1405 | background-color: rgba(40, 42, 54, 0.3); 1406 | padding-bottom: 40px; 1407 | /* Add padding to make room for the buttons */ 1408 | max-width: 100%; 1409 | overflow-x: hidden; 1410 | /* Explicitly prevent horizontal scrolling */ 1411 | } 1412 | 1413 | .command-output div { 1414 | white-space: pre-wrap !important; 1415 | word-wrap: break-word !important; 1416 | word-break: break-word !important; 1417 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 1418 | line-height: 1.4; 1419 | user-select: text; 1420 | display: block; 1421 | max-width: 100%; 1422 | box-sizing: border-box; 1423 | overflow-x: hidden; 1424 | } 1425 | 1426 | /* Ensure command output line wrapping in all environments */ 1427 | .command-output div:not(.processing-indicator) { 1428 | white-space: pre-wrap !important; 1429 | overflow-wrap: break-word !important; 1430 | word-break: break-word !important; 1431 | display: block; 1432 | max-width: 100%; 1433 | box-sizing: border-box; 1434 | margin: 2px 0; 1435 | } 1436 | 1437 | /* Special styling for file listing outputs (like ls command) */ 1438 | .command-output div.file-list-output { 1439 | display: inline-block; 1440 | vertical-align: top; 1441 | margin: 2px 12px 2px 0; 1442 | padding: 0 4px; 1443 | } 1444 | 1445 | .ai-message { 1446 | position: relative; 1447 | padding: 8px; 1448 | margin: 4px 0; 1449 | border-radius: 4px; 1450 | background-color: rgba(40, 42, 54, 0.3); 1451 | transition: all 0.2s ease; 1452 | border: 1px solid transparent; 1453 | } 1454 | 1455 | .ai-message:hover { 1456 | transform: none; 1457 | border: 1px solid transparent; 1458 | box-shadow: none; 1459 | z-index: auto; 1460 | } 1461 | 1462 | .ai-message .copy-icon { 1463 | background-color: rgba(189, 147, 249, 0.1); 1464 | } 1465 | 1466 | .ai-message .copy-icon:hover { 1467 | background-color: rgba(189, 147, 249, 0.2); 1468 | } 1469 | 1470 | .command-line { 1471 | display: flex; 1472 | align-items: center; 1473 | gap: 8px; 1474 | padding: 2px 0; 1475 | margin: 2px 0; 1476 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 1477 | border-bottom: 1px solid rgba(139, 233, 253, 0.2); 1478 | padding-bottom: 8px; 1479 | margin-bottom: 8px; 1480 | } 1481 | 1482 | .chat-question { 1483 | display: flex; 1484 | align-items: center; 1485 | padding: 2px 0; 1486 | margin: 2px 0; 1487 | font-family: 'Menlo', 'Monaco', 'Courier New', monospace; 1488 | border-bottom: 1px solid rgba(139, 233, 253, 0.2); 1489 | padding-bottom: 8px; 1490 | margin-bottom: 8px; 1491 | color: rgba(189, 147, 249, 0.85); 1492 | margin-left: 
8px; 1493 | position: relative; 1494 | } 1495 | 1496 | .chat-question .prompt { 1497 | color: rgba(189, 147, 249, 0.85); 1498 | margin-right: 8px; 1499 | font-weight: bold; 1500 | } 1501 | 1502 | .chat-question .message { 1503 | color: rgba(189, 147, 249, 0.85); 1504 | font-weight: normal; 1505 | margin-right: 4px; 1506 | } 1507 | 1508 | .refresh-button { 1509 | background: none; 1510 | border: none; 1511 | padding: 2px; 1512 | color: rgba(189, 147, 249, 0.6); 1513 | cursor: pointer; 1514 | transition: all 0.2s ease; 1515 | display: flex; 1516 | align-items: center; 1517 | justify-content: center; 1518 | border-radius: 4px; 1519 | opacity: 1; 1520 | margin-left: 2px; 1521 | } 1522 | 1523 | .chat-question:hover .refresh-button { 1524 | color: rgba(189, 147, 249, 0.8); 1525 | } 1526 | 1527 | .refresh-button:hover { 1528 | color: rgba(189, 147, 249, 1); 1529 | background-color: rgba(189, 147, 249, 0.1); 1530 | transform: none; 1531 | } 1532 | 1533 | .refresh-button svg { 1534 | width: 12px; 1535 | height: 12px; 1536 | } 1537 | 1538 | /* Command block styling */ 1539 | .command-block { 1540 | margin: 8px 0; 1541 | width: 100%; 1542 | } 1543 | 1544 | .command-content { 1545 | display: flex; 1546 | align-items: center; 1547 | gap: 12px; 1548 | width: 100%; 1549 | } 1550 | 1551 | .command-text { 1552 | color: #50fa7b; 1553 | white-space: pre-wrap; 1554 | word-break: break-word; 1555 | font-weight: 500; 1556 | margin-right: 4px; 1557 | overflow-x: auto; 1558 | } 1559 | 1560 | .command-actions { 1561 | position: absolute; 1562 | right: 8px; 1563 | top: 50%; 1564 | transform: translateY(-50%); 1565 | display: flex; 1566 | gap: 4px; 1567 | padding: 2px 4px; 1568 | background-color: rgba(35, 214, 81, 0.00); 1569 | border-radius: 4px; 1570 | z-index: 1; 1571 | } 1572 | 1573 | .command-explanation { 1574 | color: #6272a4; 1575 | font-size: 12px; 1576 | padding-left: 12px; 1577 | border-left: 2px solid rgba(80, 250, 123, 0.2); 1578 | white-space: pre-wrap; 1579 | word-break: break-word; 1580 | flex: 1; 1581 | min-width: 0; 1582 | } 1583 | 1584 | /* Responsive adjustments */ 1585 | @media (max-width: 768px) { 1586 | .command-content { 1587 | flex-direction: column; 1588 | align-items: flex-start; 1589 | gap: 8px; 1590 | } 1591 | 1592 | .simple-command { 1593 | width: 100%; 1594 | padding-right: 92px; 1595 | font-size: 11px; 1596 | min-width: 80px; 1597 | } 1598 | 1599 | .command-text { 1600 | font-size: 11px; 1601 | } 1602 | 1603 | .command-explanation { 1604 | font-size: 11px; 1605 | margin-left: 4px; 1606 | padding-left: 8px; 1607 | width: 100%; 1608 | } 1609 | } 1610 | 1611 | /* Improve command action buttons visibility */ 1612 | .command-action-button { 1613 | background: none; 1614 | border: none; 1615 | padding: 4px; 1616 | color: transparent; 1617 | cursor: pointer; 1618 | transition: all 0.2s ease; 1619 | display: flex; 1620 | align-items: center; 1621 | justify-content: center; 1622 | } 1623 | 1624 | .command-action-button:hover { 1625 | color: transparent; 1626 | background: none; 1627 | } 1628 | 1629 | .command-action-button svg { 1630 | width: 14px; 1631 | height: 14px; 1632 | stroke: #f5f5f5; 1633 | stroke-width: 1.5; 1634 | fill: transparent; 1635 | transition: all 0.2s ease; 1636 | } 1637 | 1638 | .command-action-button:hover svg { 1639 | stroke: #ffffff; 1640 | width: 18px; 1641 | height: 18px; 1642 | } 1643 | 1644 | .code-block-container.single-line .code-block-header { 1645 | border-bottom: none; 1646 | border-right: none; 1647 | padding: 2px 4px; 1648 | background-color: #282a36; 1649 | 
} 1650 | 1651 | /* Scroll to top button styling */ 1652 | .scroll-to-top-button { 1653 | position: absolute; 1654 | bottom: 8px; 1655 | right: 40px; 1656 | background: none; 1657 | border: 1px solid rgba(98, 114, 164, 0.3); 1658 | padding: 4px; 1659 | color: transparent; 1660 | cursor: pointer; 1661 | transition: all 0.2s ease; 1662 | display: flex; 1663 | align-items: center; 1664 | justify-content: center; 1665 | border-radius: 4px; 1666 | opacity: 0.6; 1667 | z-index: 1; 1668 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); 1669 | } 1670 | 1671 | .scroll-to-top-button:hover { 1672 | opacity: 1; 1673 | background-color: rgba(98, 114, 164, 0.1); 1674 | transform: none; 1675 | box-shadow: none; 1676 | border-color: rgba(98, 114, 164, 0.8); 1677 | } 1678 | 1679 | .scroll-to-top-button svg { 1680 | width: 14px; 1681 | height: 14px; 1682 | stroke: #ffffff; 1683 | stroke-width: 1.5; 1684 | fill: transparent; 1685 | transition: all 0.2s ease; 1686 | } 1687 | 1688 | .scroll-to-top-button:hover svg { 1689 | stroke: #ffffff; 1690 | width: 16px; 1691 | height: 16px; 1692 | } 1693 | 1694 | /* Add tooltip styles for scroll to top button */ 1695 | .scroll-to-top-button::after { 1696 | content: attr(data-tooltip); 1697 | position: absolute; 1698 | bottom: 100%; 1699 | left: 50%; 1700 | transform: translateX(-50%); 1701 | padding: 4px 8px; 1702 | background-color: #282a3600; 1703 | color: #f8f8f2; 1704 | font-size: 12px; 1705 | border-radius: 4px; 1706 | white-space: nowrap; 1707 | opacity: 0; 1708 | visibility: hidden; 1709 | transition: all 0.2s ease; 1710 | pointer-events: none; 1711 | border: 1px solid rgba(98, 115, 164, 0); 1712 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2); 1713 | z-index: 1000; 1714 | } 1715 | 1716 | .scroll-to-top-button:hover::after { 1717 | opacity: 1; 1718 | visibility: visible; 1719 | transform: translateX(-50%) translateY(0); 1720 | } 1721 | 1722 | /* Add a small arrow to the tooltip */ 1723 | .scroll-to-top-button::before { 1724 | content: ''; 1725 | position: absolute; 1726 | bottom: 100%; 1727 | left: 50%; 1728 | transform: translateX(-50%); 1729 | border: 4px solid transparent; 1730 | border-top-color: rgba(98, 114, 164, 0.2); 1731 | opacity: 0; 1732 | visibility: hidden; 1733 | transition: all 0.2s ease; 1734 | } 1735 | 1736 | .scroll-to-top-button:hover::before { 1737 | opacity: 1; 1738 | visibility: visible; 1739 | transform: translateX(-50%) translateY(0); 1740 | } 1741 | 1742 | /* Password input styling */ 1743 | .password-input { 1744 | font-family: monospace; 1745 | letter-spacing: 2px; 1746 | color: #ffcc00 !important; 1747 | /* More visible password dots */ 1748 | } 1749 | 1750 | /* History search mode styles */ 1751 | .prompt.search-mode { 1752 | color: #4a9eff; 1753 | } 1754 | 1755 | .history-search { 1756 | color: #ccc; 1757 | font-family: 'Cascadia Code', 'Fira Code', monospace; 1758 | } 1759 | 1760 | .history-search-highlight { 1761 | background-color: rgba(74, 158, 255, 0.2); 1762 | border-radius: 2px; 1763 | padding: 0 2px; 1764 | } 1765 | 1766 | /* Autocomplete container */ -------------------------------------------------------------------------------- /ai-terminal/src/app/app.component.html: -------------------------------------------------------------------------------- 1 |
2 | 3 |
4 |
5 |
6 | Terminal v{{version}} 7 | 8 | 11 |
12 | 13 | 14 |
15 |
16 |
18 | 20 | {{ session.name }} 21 | 22 | 26 |
27 | 30 |
31 |
32 | 33 |
34 |
35 |
37 |
38 | $ 39 | 44 | {{ entry.command }} 45 | 46 |
47 |
48 | 49 | 50 | 51 |
{{ line }}
54 |
55 | 56 | 57 | 59 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 73 |
74 |
75 |
76 |
77 |
78 | {{ currentWorkingDirectory }} 79 | git({{ gitBranch }}) 80 |
81 |
82 | 83 |
88 |
89 |
92 | {{ suggestion }} 93 |
94 |
95 |
96 |
97 | {{ isHistorySearchActive ? '🔍' : '$' 98 | }} 99 | 103 |
104 |
105 |
106 |
107 | 108 | 109 |
111 |
112 | 113 | 114 |
115 |
116 |
117 | AI Assistant 118 | 119 | 122 |
123 |
124 |
125 |
126 | 127 |
128 | > 129 | {{ entry.message }} 130 | 139 |
140 |
141 | 142 | 143 | 144 | 145 | 146 | 147 |
148 |
149 | 150 | 151 | {{ transformCodeForDisplay(entry.codeBlocks[getCodeBlockIndex(segment)].code) }} 152 | 153 | 154 | 162 | 170 | 177 | 178 | 179 | 181 | {{ getCommandExplanation(entry.codeBlocks[getCodeBlockIndex(segment)].code) }} 182 | 183 |
184 |
185 |
186 |
187 | 188 | 189 |
190 |
191 | 192 | 193 | 195 | 197 | 198 | 199 | 200 | 201 |
202 |
203 |
204 |
205 |
206 |
207 | > 208 | 210 |
211 |
212 |
213 |
214 |
-------------------------------------------------------------------------------- /ai-terminal/src/app/app.config.ts: -------------------------------------------------------------------------------- 1 | import { ApplicationConfig } from "@angular/core"; 2 | import { provideRouter } from "@angular/router"; 3 | 4 | import { routes } from "./app.routes"; 5 | 6 | export const appConfig: ApplicationConfig = { 7 | providers: [provideRouter(routes)], 8 | }; 9 | -------------------------------------------------------------------------------- /ai-terminal/src/app/app.routes.ts: -------------------------------------------------------------------------------- 1 | import { Routes } from "@angular/router"; 2 | 3 | export const routes: Routes = []; 4 | -------------------------------------------------------------------------------- /ai-terminal/src/assets/angular.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 10 | 11 | 12 | 13 | 15 | 16 | -------------------------------------------------------------------------------- /ai-terminal/src/assets/tauri.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /ai-terminal/src/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Tauri + Angular 6 | 7 | 8 | 9 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /ai-terminal/src/main.ts: -------------------------------------------------------------------------------- 1 | import { bootstrapApplication } from "@angular/platform-browser"; 2 | import { appConfig } from "./app/app.config"; 3 | import { AppComponent } from "./app/app.component"; 4 | 5 | bootstrapApplication(AppComponent, appConfig).catch((err) => 6 | console.error(err), 7 | ); 8 | -------------------------------------------------------------------------------- /ai-terminal/src/styles.css: -------------------------------------------------------------------------------- 1 | /* Global styles */ 2 | html, body { 3 | margin: 0; 4 | padding: 0; 5 | height: 100%; 6 | width: 100%; 7 | overflow: hidden; 8 | } 9 | 10 | /* Make sure app-root doesn't overflow */ 11 | app-root { 12 | display: block; 13 | height: 100%; 14 | width: 100%; 15 | overflow: hidden; 16 | } 17 | 18 | /* Prevent horizontal scrollbars in all panels */ 19 | .panel, .panel-content, .output-area, .command-history, .command-entry, .command-output { 20 | max-width: 100%; 21 | overflow-x: hidden; 22 | } 23 | 24 | /* Ensure all pre-wrapped text properly wraps */ 25 | pre, code, .command-text, .command-output div { 26 | white-space: pre-wrap !important; 27 | word-wrap: break-word !important; 28 | word-break: break-word !important; 29 | max-width: 100%; 30 | } 31 | 32 | /* Import component styles */ 33 | @import './app/app.component.css'; -------------------------------------------------------------------------------- /ai-terminal/src/types/tauri.d.ts: -------------------------------------------------------------------------------- 1 | declare module '@tauri-apps/api' { 2 | export function invoke(cmd: string, args?: Record): Promise; 3 | } -------------------------------------------------------------------------------- /ai-terminal/tsconfig.app.json: -------------------------------------------------------------------------------- 1 | /* To learn more about this file see: 
https://angular.io/config/tsconfig. */ 2 | { 3 | "extends": "./tsconfig.json", 4 | "compilerOptions": { 5 | "outDir": "./out-tsc/app", 6 | "types": [] 7 | }, 8 | "files": ["src/main.ts"], 9 | "include": ["src/**/*.d.ts"] 10 | } 11 | -------------------------------------------------------------------------------- /ai-terminal/tsconfig.json: -------------------------------------------------------------------------------- 1 | /* To learn more about this file see: https://angular.io/config/tsconfig. */ 2 | { 3 | "compileOnSave": false, 4 | "compilerOptions": { 5 | "outDir": "./dist/out-tsc", 6 | "forceConsistentCasingInFileNames": true, 7 | "strict": true, 8 | "noImplicitOverride": true, 9 | "noPropertyAccessFromIndexSignature": true, 10 | "noImplicitReturns": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "skipLibCheck": true, 13 | "esModuleInterop": true, 14 | "sourceMap": true, 15 | "declaration": false, 16 | "experimentalDecorators": true, 17 | "moduleResolution": "bundler", 18 | "importHelpers": true, 19 | "target": "ES2022", 20 | "module": "ES2022", 21 | "useDefineForClassFields": false, 22 | "lib": ["ES2022", "dom"], 23 | "typeRoots": ["./node_modules/@types", "./src/types"] 24 | }, 25 | "angularCompilerOptions": { 26 | "enableI18nLegacyMessageIdFormat": false, 27 | "strictInjectionParameters": true, 28 | "strictInputAccessModifiers": true, 29 | "strictTemplates": true 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AiTerminalFoundation/ai-terminal/544a37eda83a48f149e15847b8fc26a5c2646a32/demo.gif -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=2.0.0 2 | transformers>=4.30.0 3 | datasets>=2.12.0 4 | peft>=0.4.0 5 | accelerate>=0.20.0 6 | tensorboard>=2.12.0 7 | huggingface-hub>=0.16.0 8 | bitsandbytes>=0.40.0 -------------------------------------------------------------------------------- /test_session_isolation.md: -------------------------------------------------------------------------------- 1 | # Terminal Session Isolation Test 2 | 3 | ## Overview 4 | This document outlines how to test the multiple terminal sessions feature with independent working directories. 5 | 6 | ## Test Steps 7 | 8 | 1. **Launch the Application** 9 | - The application should start with one terminal tab named "Terminal 1" 10 | - The current working directory should be displayed in the prompt 11 | 12 | 2. **Create Multiple Sessions** 13 | - Click the "+" button to create a new terminal tab 14 | - You should now have "Terminal 1" and "Terminal 2" 15 | 16 | 3. **Test Directory Independence** 17 | - In Terminal 1: 18 | ```bash 19 | cd /tmp 20 | pwd 21 | ``` 22 | This should show `/tmp` 23 | 24 | - Switch to Terminal 2 (click on the tab) 25 | - In Terminal 2: 26 | ```bash 27 | pwd 28 | ``` 29 | This should show the original directory (likely your home directory), NOT `/tmp` 30 | 31 | - In Terminal 2: 32 | ```bash 33 | cd /var 34 | pwd 35 | ``` 36 | This should show `/var` 37 | 38 | - Switch back to Terminal 1 39 | - In Terminal 1: 40 | ```bash 41 | pwd 42 | ``` 43 | This should still show `/tmp`, proving the sessions are isolated 44 | 45 | 4. 
**Test Session Persistence** 46 | - Create multiple directories in different sessions 47 | - Switch between tabs multiple times 48 | - Each session should maintain its own working directory 49 | 50 | ## Expected Results 51 | - Each terminal tab maintains its own independent working directory 52 | - Changing directory in one tab does not affect other tabs 53 | - Session state is preserved when switching between tabs 54 | - Git branch information is session-specific 55 | - SSH sessions are isolated per tab 56 | 57 | ## Success Criteria 58 | ✅ Multiple terminal tabs can be created 59 | ✅ Each tab has an independent working directory 60 | ✅ Directory changes in one tab don't affect others 61 | ✅ Session switching preserves state 62 | ✅ UI properly shows active tab and allows tab management 63 | --------------------------------------------------------------------------------
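
## Scripted Spot-Check (Sketch)

The manual steps in `test_session_isolation.md` can also be spot-checked from the frontend by calling the session-scoped Tauri commands registered in the Rust `invoke_handler` (`execute_command`, `get_working_directory`, `get_git_branch`). The sketch below is illustrative only: the command names come from the handler list, but the argument shape of `execute_command` and `get_working_directory` is assumed here (Tauri exposes a Rust `session_id` parameter as `sessionId` on the JavaScript side by default), and the import path follows the project's local `tauri.d.ts` shim. Adjust to the real signatures before use.

```typescript
// Illustrative session-isolation check. Argument names for execute_command and
// get_working_directory are assumptions, not taken from the source.
import { invoke } from "@tauri-apps/api";

async function checkSessionIsolation(): Promise<void> {
  // Change directory only in session "1".
  await invoke("execute_command", { command: "cd /tmp", sessionId: "1" });

  // Each session should report its own working directory.
  const dirOne = String(await invoke("get_working_directory", { sessionId: "1" }));
  const dirTwo = String(await invoke("get_working_directory", { sessionId: "2" }));

  console.assert(dirOne === "/tmp", "session 1 should now be in /tmp");
  console.assert(dirTwo !== "/tmp", "session 2 must keep its own directory");

  // Git branch lookup is also per session (see get_git_branch in the Rust source).
  const branch = String(await invoke("get_git_branch", { sessionId: "2" }));
  console.log("session 2 branch:", branch || "(not a git repository)");
}

checkSessionIsolation().catch(console.error);
```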