├── .gitignore
├── Cargo.toml
├── LICENSE
├── src
│   ├── prompts.rs
│   └── main.rs
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
 1 | # Generated by Cargo
 2 | # will have compiled files and executables
 3 | debug/
 4 | target/
 5 | 
 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
 8 | Cargo.lock
 9 | 
10 | # These are backup files generated by rustfmt
11 | **/*.rs.bk
12 | 
13 | # MSVC Windows builds of rustc generate these, which store debugging information
14 | *.pdb
15 | 
16 | # executable for testing
17 | orphic
18 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
 1 | [package]
 2 | name = "orphic"
 3 | version = "0.1.2"
 4 | edition = "2021"
 5 | authors = ["Will Savage "]
 6 | description = "A natural language interface for *nix systems.\n(Powered by ChatGPT)"
 7 | license = "MIT"
 8 | readme = "README.md"
 9 | homepage = "https://github.com/ws-kj/Orphic"
10 | repository = "https://github.com/ws-kj/Orphic"
11 | keywords = ["cli", "ChatGPT", "LLM", "GPT"]
12 | categories = ["command-line-utilities"]
13 | 
14 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
15 | 
16 | [dependencies]
17 | async-openai = "0.9.2"
18 | tokio = { version = "1.25.0", features = ["full"] }
19 | substring = "1.4.5"
20 | serde_json = "1.0"
21 | lazy_static = "1.4.0"
22 | execute = "0.2.11"
23 | clap = { version = "4.1.8", features = ["cargo"] }
24 | 
25 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2023 Will Savage
 4 | 
 5 | Permission is hereby granted, free of charge, to any person 
obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/prompts.rs: -------------------------------------------------------------------------------- 1 | use serde_json::{Value, json}; 2 | use lazy_static::lazy_static; 3 | 4 | lazy_static! { 5 | pub static ref PROMPTS: Value = json!({ 6 | "assistant_system": " 7 | you are a machine that executes tasks on the user's computer through the 8 | terminal. 9 | the user will give you a task, and you will return a series of 10 | terminal commands to execute the command. format the commands like 11 | this: `{\"command\": \"}`. 12 | do not explain your commands. do not return any text or 13 | information other than the commands to be executed. The only information 14 | you will return is the command to be executed. The operating system is 15 | ", 16 | "assistant_user": " (Answer only with the unix command formatted as a json 17 | object `{\"command\": \"\". 
Do not explain anything)", 18 | "assistant_examples": { 19 | "user": "what is the largest file on the desktop", 20 | "assistant": "{\"command\": \"du -ah ~/desktop | sort -rh | head -n 1\"}", 21 | "user": "create a new blank file in the home folder", 22 | "assistant": "{\"command\": \"touch ~/new_blank_file\"}", 23 | "user": "what is my operating system?", 24 | "assistant": "{\"command\": \"uname -a\"}", 25 | "user": "create a new rust project on the desktop", 26 | "assistant": "{\"command\": \"cd ~/desktop && cargo new my_project\"}" 27 | }, 28 | "json_verify_system": " 29 | you are a machine which verifies that json objects of linux commands are 30 | valid json. commands are formatted like this: 31 | `{\"command\": \"`. sometimes the json objects 32 | will be invalid- it may be missing a closing brace or quotation. 33 | if a json object is invalid, fix it valid and then return it. otherwise, 34 | return only the original input. 35 | ", 36 | "command_verify_system": " 37 | you are a machine which verifies linux commands. if the command given to 38 | you is invalid or has any issues that will prevent it from running 39 | correctly, fix it and return it. otherwise, return only the original input. 40 | ", 41 | "json_verify_user": " 42 | if this json object is invalid, fix it and return only the fixed version. 43 | otherwise, return only the original input. don't explain anything, 44 | just return the fixed version. Look for missing braces especially. 45 | ", 46 | "command_verify_user": " 47 | if this is not a valid linux command, fix it and return only the fixed 48 | version. Otherwise, return only the original input. 49 | ", 50 | "interpreter_system": " 51 | You are a machine that translates the output of linux commands into 52 | understandable but concise language. 53 | ", 54 | "interpreter_user": " 55 | This output was the result of the command. Translate the data in the 56 | output into understandable language. 
Be extremely concise but include 57 | all data from the output. Don't mention the command, just translate 58 | the data. 59 | " 60 | }); 61 | } 62 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Orphic 2 | *A natural language shell interface for \*nix systems.* 3 | 4 | --- 5 | ### Overview 6 | Orphic is a CLI tool that uses GPT to translate complex tasks into shell commands to be executed on the system. 7 | It excels at tasks that are relatively straightforward but require complex and obscure commands to accomplish. 8 | 9 | ``` 10 | $ orphic delete all installers or tars from ~/Downloads that are more than a week old 11 | find ~/Downloads -name '*.tar.*' -mtime +7 -delete; find ~/Downloads -name '*.dmg' -mtime +7 -delete 12 | Execute? [Y/n] Y 13 | ``` 14 | 15 | *Note: Orphic defaults to safe mode, and will not automatically execute commands without confirmation unless unsafe mode is specified.* 16 | 17 | ### Installation 18 | * Make sure your system has rust and cargo. 19 | * `cargo install orphic` 20 | * Orphic requires the `OPENAI_API_KEY` environment variable to be set. You can generate one [here](https://openai.com/). 21 | 22 | ### Usage 23 | Orphic is designed to be used like you would use any other CLI tool. 24 | 25 | `$ orphic sort ~/Downloads into folders based on media type` 26 | 27 | `$ orphic how strong is my network connection` 28 | 29 | `$ orphic what version kernel am i running` 30 | 31 | `$ orphic show me the name and size of all files larger than 8MB in ~/Downloads/` 32 | 33 | `$ orphic ` 34 | 35 | `-u` or `--unsafe` will execute commands without user verification. 36 | 37 | `-4` or `--gpt4` will attempt to use GPT-4 instead of GPT-3.5-Turbo. Note that this will only work if your OpenAI account has access to the model. 
38 | 39 | `-i` or `--interpret` will describe the output of the task in natural language (note that this is generally very slow). 40 | ``` 41 | $ orphic -u -i how much disk space is available 42 | You have 16GB available out of a total of 113GB on your main hard 43 | drive, which is mounted on the root directory. 44 | Other partitions and file systems are also listed with their 45 | respective usage percentages and mount points. 46 | ``` 47 | 48 | `-d` or `--debug` will display the raw GPT text along with the regular output, even in unsafe mode. 49 | ``` 50 | $ orphic -u -d count the lines of rust code in this directory excluding /target/. 51 | {"command": "find . -name target -prune -o -name '*.rs' -type f -print0 | xargs -0 wc -l"} 52 | 61 ./src/prompts.rs 53 | 219 ./src/main.rs 54 | 280 total 55 | ``` 56 | 57 | `-r` or `--repl` will start Orphic in a REPL environment. 58 | ``` 59 | $ orphic -u -r 60 | orphic> when did i last login 61 | wtmp begins Sat Mar 18 14:55 62 | orphic> quit 63 | $ 64 | ``` 65 | ### Usage tips and observations 66 | Sometimes Orphic works. Sometimes it doesn't. GPT is inconsistent, and the prompts that I'm using leave a lot to be desired. Results seem to be better if you format your task as a command instead of a question ("list the currently open ports" instead of "what ports are currently open"). An error that often arises is that GPT will try to use commands or packages for a different OS/distribution, or will try to use tools that you don't currently have installed. A quick fix is to specify your OS if you think the task will require OS-specific tools, but I'm working on making Orphic more aware of which commands are at its disposal and which are not. 67 | 68 | ### Contributing 69 | Pull requests welcome. If you use Orphic and get a good/interesting output, please send it to me. Likewise, if you get a really bad output, please also send it to me or open an issue. 
This system is very experimental and I'm still trying to figure out what works and what doesn't when it comes to prompts and configurations. 70 | 71 | ### License 72 | [MIT](https://choosealicense.com/licenses/mit/) 73 | 74 | Copyright (c) Will Savage, 2023 75 | 76 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #[allow(unused, dead_code)] 2 | 3 | use async_openai::{ 4 | types::{ 5 | CreateChatCompletionRequestArgs, 6 | ChatCompletionRequestMessage, 7 | Role 8 | }, Client 9 | }; 10 | use serde_json::Value; 11 | use substring::Substring; 12 | use execute::{Execute, shell}; 13 | use clap::{command, Arg, ArgAction}; 14 | use serde_json::json; 15 | 16 | use std::error::Error; 17 | use std::process::Stdio; 18 | use std::io::{self, Write}; 19 | use std::fmt; 20 | 21 | pub mod prompts; 22 | 23 | const GPT_35_TURBO: &'static str = "gpt-3.5-turbo"; 24 | const GPT_4: &'static str = "gpt-4"; 25 | 26 | #[derive(Debug)] 27 | struct UserAbort(); 28 | 29 | #[derive(Debug, Copy, Clone)] 30 | struct Flags { 31 | repl: bool, 32 | interpret: bool, 33 | debug: bool, 34 | unsafe_mode: bool, 35 | model: &'static str 36 | } 37 | 38 | impl fmt::Display for UserAbort { 39 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 40 | write!(f, "User aborted command") 41 | } 42 | } 43 | impl Error for UserAbort {} 44 | 45 | fn get_prompt(key: &'static str) -> &str { 46 | assert!(prompts::PROMPTS[key].is_string()); 47 | prompts::PROMPTS[key].as_str().unwrap() 48 | } 49 | 50 | fn try_extract(body: &String) -> Option { 51 | if body.find('{') == None || body.find('}') == None { 52 | return None; 53 | } 54 | 55 | let data = body.substring(body.find('{').unwrap(),body.rfind('}').unwrap()+1); 56 | 57 | match serde_json::from_str(&data) { 58 | Ok(commands) => Some(commands), 59 | Err(e) => { println!("{}", e); None } 60 | } 61 | } 62 | 63 | async fn 
parse_command(client: &Client, body: &String) -> Result, Box> { 64 | match try_extract(body) { 65 | Some(commands) => Ok(Some(commands)), 66 | None => { 67 | match verify_json(client, body).await? { 68 | Some(body) => Ok(try_extract(&body)), 69 | None => Ok(None) 70 | } 71 | } 72 | } 73 | } 74 | 75 | async fn verify_json(client: &Client, input: &String) -> Result, Box> { 76 | let history = vec![ 77 | ChatCompletionRequestMessage { 78 | role: Role::System, 79 | content: String::from(get_prompt("json_verify_system")), 80 | name: None 81 | }, 82 | ChatCompletionRequestMessage { 83 | role: Role::User, 84 | content: String::from(get_prompt("json_verify_user")) + input, 85 | name: None 86 | } 87 | ]; 88 | 89 | let request = CreateChatCompletionRequestArgs::default() 90 | .max_tokens(512u16) 91 | .model("gpt-3.5-turbo") 92 | .messages(history) 93 | .build()?; 94 | 95 | let response = client.chat().create(request).await?; 96 | let body = (response.choices[0]).message.content.to_owned(); 97 | 98 | return match body.trim() { 99 | "" => Ok(None), 100 | _ => Ok(Some(body)) 101 | } 102 | } 103 | 104 | async fn interpret(client: &Client, task: &String, output: &String, flags: Flags) -> Result> { 105 | let request = CreateChatCompletionRequestArgs::default() 106 | .max_tokens(512u16) 107 | .model(flags.model) 108 | .messages(vec![ 109 | ChatCompletionRequestMessage { 110 | role: Role::System, 111 | content: String::from(get_prompt("interpreter_system")), 112 | name: None 113 | }, 114 | ChatCompletionRequestMessage { 115 | role: Role::User, 116 | content: String::from(json!({"task": task, "output": output}).to_string()) + get_prompt("interpreter_user"), 117 | name: None 118 | }, 119 | ]) 120 | .build()?; 121 | 122 | let response = client.chat().create(request).await?; 123 | Ok((response.choices[0]).message.content.to_owned()) 124 | } 125 | 126 | async fn try_command(client: &Client, input: String, history: &mut Vec, flags: Flags) -> Result> { 127 | 
history.push(ChatCompletionRequestMessage { 128 | role: Role::User, 129 | content: input + get_prompt("assistant_user"), 130 | name: None 131 | }); 132 | 133 | let request = CreateChatCompletionRequestArgs::default() 134 | .max_tokens(512u16) 135 | .model(flags.model) 136 | .messages((*history).clone()) 137 | .build()?; 138 | 139 | let response = client.chat().create(request).await?; 140 | let body = (response.choices[0]).message.content.to_owned(); 141 | 142 | if flags.debug && flags.unsafe_mode { println!("{}", body); } 143 | 144 | return match parse_command(client, &body).await? { 145 | Some(commands) => { 146 | match commands["command"].as_str() { 147 | Some(command) => { 148 | if !flags.unsafe_mode { 149 | let mut input = String::new(); 150 | println!("{}", command); 151 | print!("Execute? [Y/n] "); 152 | io::stdout().flush()?; 153 | io::stdin().read_line(&mut input)?; 154 | 155 | match input.trim().to_lowercase().as_str() { 156 | "" | "y" | "yes" => {}, 157 | _ => return Err(Box::new(UserAbort())) 158 | }; 159 | } 160 | 161 | let mut shell = shell(command); 162 | shell.stdout(if !flags.interpret {Stdio::inherit()} else {Stdio::piped()}); 163 | Ok(String::from_utf8(shell.execute_output()?.stdout)? 
+ "\n") 164 | 165 | }, 166 | None => Ok(body + "\n") 167 | } 168 | }, 169 | None => Ok(body + "\n") 170 | } 171 | } 172 | 173 | async fn repl(client: &Client, flags: Flags) -> Result<(), Box> { 174 | let mut history: Vec = Vec::new(); 175 | 176 | loop { 177 | let mut input = String::new(); 178 | print!("orphic> "); 179 | io::stdout().flush()?; 180 | io::stdin().read_line(&mut input)?; 181 | match input.as_str().trim() { 182 | "quit" => break, 183 | task => { 184 | let res_maybe = try_command(client, String::from(task), &mut history, flags).await; 185 | match res_maybe { 186 | Ok(res) => { 187 | history.push(ChatCompletionRequestMessage { 188 | role: Role::Assistant, 189 | content: res.clone(), 190 | name: None 191 | }); 192 | 193 | if flags.interpret { 194 | println!("{}", interpret(&client, &(String::from(task.trim())), &res, flags).await?); 195 | } else { 196 | print!("{}", res.trim()); 197 | } 198 | }, 199 | Err(error) => { 200 | if error.is::() { 201 | continue; 202 | } else { 203 | return Err(error); 204 | } 205 | } 206 | } 207 | 208 | } 209 | } 210 | } 211 | Ok(()) 212 | } 213 | 214 | #[tokio::main] 215 | async fn main() -> Result<(), Box> { 216 | let matches = command!() 217 | .arg(Arg::new("task").action(ArgAction::Append)) 218 | .arg( 219 | Arg::new("repl") 220 | .short('r') 221 | .long("repl") 222 | .action(ArgAction::SetTrue) 223 | .help("Start a REPL environment for orphic commands") 224 | ) 225 | .arg( 226 | Arg::new("interpret") 227 | .short('i') 228 | .long("interpret") 229 | .action(ArgAction::SetTrue) 230 | .help("Interpret output into natural language") 231 | ) 232 | .arg( 233 | Arg::new("debug") 234 | .short('d') 235 | .long("verbose") 236 | .action(ArgAction::SetTrue) 237 | .help("Display raw GPT output") 238 | ) 239 | .arg( 240 | Arg::new("unsafe") 241 | .short('u') 242 | .long("unsafe") 243 | .action(ArgAction::SetTrue) 244 | .help("Execute commands without confirmation") 245 | ) 246 | .arg( 247 | Arg::new("gpt4") 248 | .short('4') 249 | 
.long("gpt4") 250 | .action(ArgAction::SetTrue) 251 | .help("Use GPT-4 instead of GPT-3.5") 252 | ) 253 | .get_matches(); 254 | 255 | let flags = Flags { 256 | repl: matches.get_flag("repl"), 257 | interpret: matches.get_flag("interpret"), 258 | debug: matches.get_flag("debug"), 259 | unsafe_mode: matches.get_flag("unsafe"), 260 | model: if matches.get_flag("gpt4") { GPT_4 } else { GPT_35_TURBO } 261 | }; 262 | 263 | let client = Client::new(); 264 | 265 | if flags.repl { 266 | repl(&client, flags).await?; 267 | return Ok(()); 268 | } 269 | 270 | let task = matches 271 | .get_many::("task") 272 | .unwrap_or_default() 273 | .map(|v| v.as_str()) 274 | .collect::>(); 275 | 276 | let mut history: Vec = Vec::new(); 277 | 278 | let res = try_command(&client, task.join(" "), &mut history, flags).await?; 279 | 280 | if flags.interpret { 281 | println!("{}", interpret(&client, &(task.join(" ")), &res, flags).await?); 282 | } else { 283 | print!("{}", res.trim()); 284 | } 285 | 286 | Ok(()) 287 | } 288 | --------------------------------------------------------------------------------