├── .gitignore ├── LICENSE ├── README.md ├── create_completion.py ├── services └── services.py └── zsh_codex.plugin.zsh /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Tom Dörr 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

⌨️ 🦾 Zsh Codex

2 | 3 |

4 | AI in the command line. 5 |

6 | 7 |

8 | Repository's starts 13 | Issues 18 | License
23 |
Latest commit 28 | GitHub repository size 33 |

34 | 35 |

36 | 37 |

38 | You just need to write a comment or variable name and the AI will write the corresponding code. 39 |

40 |

41 | 42 | ## What is it? 43 | 44 | This is a ZSH plugin that enables you to use AI powered code completion in the command line. It now supports both OpenAI's Codex and Google's Generative AI (Gemini). OpenAI Codex is the AI that also powers GitHub Copilot, while Gemini is Google's advanced language model. 45 | 46 | ## How do I install it? 47 | 48 | ### Manual Installation 49 | 50 | 1. Install the OpenAI package, the Google package, or boto3. 51 | 52 | ```bash 53 | pip3 install openai 54 | ``` 55 | 56 | or 57 | 58 | ```bash 59 | pip3 install google-generativeai 60 | ``` 61 | 62 | or 63 | 64 | ```bash 65 | pip3 install boto3 66 | ``` 67 | 68 | 2. Download the ZSH plugin. 69 | 70 | ```bash 71 | git clone https://github.com/tom-doerr/zsh_codex.git ~/.oh-my-zsh/custom/plugins/zsh_codex 72 | ``` 73 | 74 | 3. Add the following to your `.zshrc` file. 75 | 76 | Using oh-my-zsh: 77 | 78 | ```bash 79 | plugins=(zsh_codex) 80 | bindkey '^X' create_completion 81 | ``` 82 | 83 | Without oh-my-zsh: 84 | 85 | ```bash 86 | # in your/custom/path you need to have a "plugins" folder and in there you clone the repository as zsh_codex 87 | export ZSH_CUSTOM="your/custom/path" 88 | source "$ZSH_CUSTOM/plugins/zsh_codex/zsh_codex.plugin.zsh" 89 | bindkey '^X' create_completion 90 | ``` 91 | 92 | 4. Create a file called `zsh_codex.ini` in `~/.config`. 93 | Example: 94 | 95 | ```ini 96 | ; Primary service configuration 97 | ; Set 'service' to match one of the defined sections below. 98 | [service] 99 | service = groq_service 100 | 101 | ; Example configuration for a self-hosted Ollama service. 102 | [my_ollama] 103 | api_type = openai 104 | api_key = dummy_key 105 | model = llama3.1 106 | base_url = http://localhost:11434/v1 107 | 108 | ; OpenAI service configuration 109 | ; Provide the 'api_key' and specify a 'model' if needed. 110 | [openai_service] 111 | api_type = openai 112 | api_key = 113 | 114 | ; Groq service configuration 115 | ; Provide the 'api_key'. 116 | [groq_service] 117 | api_type = groq 118 | api_key = 119 | model = gemma2-9b-it 120 | 121 | ; Mistral service configuration 122 | ; Provide the 'api_key'. 123 | [mistral_service] 124 | api_type = mistral 125 | api_key = 126 | model = mistral-small-latest 127 | ``` 128 | 129 | In this configuration file, you can define multiple services with their own configurations. The required and optional parameters of the `api_type` are specified in `services/sevices.py`. Choose which service to use in the `[service]` section. 130 | 131 | 6. Run `zsh`, start typing and complete it using `^X`! 132 | 7. If you use virtual environments you can set `ZSH_CODEX_PYTHON` to python executable where `openai` or `google-generativeai` is installed. 133 | e.g. for `miniconda` you can use: 134 | 135 | ```bash 136 | export ZSH_CODEX_PYTHON="$HOME/miniconda3/bin/python" 137 | ``` 138 | 139 | ### Fig Installation 140 | 141 | 142 | 143 | ## Troubleshooting 144 | 145 | ### Unhandled ZLE widget 'create_completion' 146 | 147 | ``` 148 | zsh-syntax-highlighting: unhandled ZLE widget 'create_completion' 149 | zsh-syntax-highlighting: (This is sometimes caused by doing `bindkey create_completion` without creating the 'create_completion' widget with `zle -N` or `zle -C`.) 150 | ``` 151 | 152 | Add the line 153 | 154 | ``` 155 | zle -N create_completion 156 | ``` 157 | 158 | before you call `bindkey` but after loading the plugin (`plugins=(zsh_codex)`). 159 | 160 | ### Already exists and is not an empty directory 161 | 162 | ``` 163 | fatal: destination path '~.oh-my-zsh/custom/plugins' 164 | ``` 165 | 166 | Try to download the ZSH plugin again. 167 | 168 | ``` 169 | git clone https://github.com/tom-doerr/zsh_codex.git ~/.oh-my-zsh/custom/plugins/zsh_codex 170 | ``` 171 | 172 | --- 173 | 174 |

175 | Buy Me A Coffee 176 |

177 | 178 | ## Passing in context 179 | 180 | Since the current filesystem is not passed into the ai you will need to either 181 | 1. Pass in all context in your descriptive command 182 | 2. Use a command to collect the context 183 | 184 | In order for option 2 to work you will need to first add `export ZSH_CODEX_PREEXECUTE_COMMENT="true"` to your .zshrc file to enable the feature. 185 | 186 | > [!WARNING] 187 | > This will run your prompt using zsh each time before using it, which could potentially modify your system when you hit ^X. 188 | 189 | Once you've done that and restarted your shell you can do things like this: 190 | 191 | `# git add all files. Also commit the current changeset with a descriptive message based on $(git diff). Then git push` 192 | 193 | ## More usage examples 194 | 195 |

196 | 197 |

198 |

199 |

200 | 201 | --- 202 | 203 | [Fish Version](https://github.com/tom-doerr/codex.fish) 204 | 205 | [Traffic Statistics](https://tom-doerr.github.io/github_repo_stats_data/tom-doerr/zsh_codex/latest-report/report.html) 206 | -------------------------------------------------------------------------------- /create_completion.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import sys 5 | 6 | from services.services import ClientFactory 7 | 8 | 9 | def main(): 10 | parser = argparse.ArgumentParser( 11 | description="Generate command completions using AI." 12 | ) 13 | parser.add_argument( 14 | "cursor_position", type=int, help="Cursor position in the input buffer" 15 | ) 16 | args = parser.parse_args() 17 | 18 | client = ClientFactory.create() 19 | 20 | # Read the input prompt from stdin. 21 | buffer = sys.stdin.read() 22 | zsh_prefix = "#!/bin/zsh\n\n" 23 | buffer_prefix = buffer[: args.cursor_position] 24 | buffer_suffix = buffer[args.cursor_position :] 25 | full_command = zsh_prefix + buffer_prefix + buffer_suffix 26 | 27 | completion = client.get_completion(full_command) 28 | 29 | if completion.startswith(zsh_prefix): 30 | completion = completion[len(zsh_prefix) :] 31 | 32 | line_prefix = buffer_prefix.rsplit("\n", 1)[-1] 33 | # Handle all the different ways the command can be returned 34 | for prefix in [buffer_prefix, line_prefix]: 35 | if completion.startswith(prefix): 36 | completion = completion[len(prefix) :] 37 | break 38 | 39 | if buffer_suffix and completion.endswith(buffer_suffix): 40 | completion = completion[: -len(buffer_suffix)] 41 | 42 | completion = completion.strip("\n") 43 | if line_prefix.strip().startswith("#"): 44 | completion = "\n" + completion 45 | 46 | sys.stdout.write(completion) 47 | 48 | 49 | if __name__ == "__main__": 50 | main() 51 | -------------------------------------------------------------------------------- /services/services.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from abc import ABC, abstractmethod 4 | from configparser import ConfigParser 5 | 6 | CONFIG_DIR = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")) 7 | CONFIG_PATH = os.path.join(CONFIG_DIR, "zsh_codex.ini") 8 | 9 | 10 | class BaseClient(ABC): 11 | """Base class for all clients""" 12 | 13 | api_type: str = None 14 | system_prompt = "You are a zsh shell expert, please help me complete the following command, you should only output the completed command, no need to include any other explanation. Do not put completed command in a code block." 15 | 16 | @abstractmethod 17 | def get_completion(self, full_command: str) -> str: 18 | pass 19 | 20 | 21 | class OpenAIClient(BaseClient): 22 | """ 23 | config keys: 24 | - api_type="openai" 25 | - api_key (required) 26 | - base_url (optional): defaults to "https://api.openai.com/v1". 27 | - organization (optional): defaults to None 28 | - model (optional): defaults to "gpt-4o-mini" 29 | - temperature (optional): defaults to 1.0. 30 | """ 31 | 32 | api_type = "openai" 33 | default_model = os.getenv("OPENAI_DEFAULT_MODEL", "gpt-4o-mini") 34 | 35 | def __init__(self, config: dict): 36 | try: 37 | from openai import OpenAI 38 | except ImportError: 39 | print( 40 | "OpenAI library is not installed. Please install it using 'pip install openai'" 41 | ) 42 | sys.exit(1) 43 | 44 | self.config = config 45 | self.config["model"] = self.config.get("model", self.default_model) 46 | self.client = OpenAI( 47 | api_key=self.config["api_key"], 48 | base_url=self.config.get("base_url", "https://api.openai.com/v1"), 49 | organization=self.config.get("organization"), 50 | ) 51 | 52 | def get_completion(self, full_command: str) -> str: 53 | response = self.client.chat.completions.create( 54 | model=self.config["model"], 55 | messages=[ 56 | {"role": "system", "content": self.system_prompt}, 57 | {"role": "user", "content": full_command}, 58 | ], 59 | temperature=float(self.config.get("temperature", 1.0)), 60 | ) 61 | return response.choices[0].message.content 62 | 63 | 64 | class GoogleGenAIClient(BaseClient): 65 | """ 66 | config keys: 67 | - api_type="gemeni" 68 | - api_key (required) 69 | - model (optional): defaults to "gemini-1.5-pro-latest" 70 | """ 71 | 72 | api_type = "gemeni" 73 | default_model = os.getenv("GOOGLE_GENAI_DEFAULT_MODEL", "gemini-1.5-pro-latest") 74 | 75 | def __init__(self, config: dict): 76 | try: 77 | import google.generativeai as genai 78 | except ImportError: 79 | print( 80 | "Google Generative AI library is not installed. Please install it using 'pip install google-generativeai'" 81 | ) 82 | sys.exit(1) 83 | 84 | self.config = config 85 | genai.configure(api_key=self.config["api_key"]) 86 | self.config["model"] = config.get("model", self.default_model) 87 | self.model = genai.GenerativeModel(self.config["model"]) 88 | 89 | def get_completion(self, full_command: str) -> str: 90 | chat = self.model.start_chat(history=[]) 91 | prompt = f"{self.system_prompt}\n\n{full_command}" 92 | response = chat.send_message(prompt) 93 | return response.text 94 | 95 | 96 | class GroqClient(BaseClient): 97 | """ 98 | config keys: 99 | - api_type="groq" 100 | - api_key (required) 101 | - model (optional): defaults to "llama-3.2-11b-text-preview" 102 | - temperature (optional): defaults to 1.0. 103 | """ 104 | 105 | api_type = "groq" 106 | default_model = os.getenv("GROQ_DEFAULT_MODEL", "llama-3.2-11b-text-preview") 107 | 108 | def __init__(self, config: dict): 109 | try: 110 | from groq import Groq 111 | except ImportError: 112 | print( 113 | "Groq library is not installed. Please install it using 'pip install groq'" 114 | ) 115 | sys.exit(1) 116 | 117 | self.config = config 118 | self.config["model"] = self.config.get("model", self.default_model) 119 | self.client = Groq( 120 | api_key=self.config["api_key"], 121 | ) 122 | 123 | def get_completion(self, full_command: str) -> str: 124 | response = self.client.chat.completions.create( 125 | model=self.config["model"], 126 | messages=[ 127 | {"role": "system", "content": self.system_prompt}, 128 | {"role": "user", "content": full_command}, 129 | ], 130 | temperature=float(self.config.get("temperature", 1.0)), 131 | ) 132 | return response.choices[0].message.content 133 | 134 | 135 | class MistralClient(BaseClient): 136 | """ 137 | config keys: 138 | - api_type="mistral" 139 | - api_key (required) 140 | - model (optional): defaults to "codestral-latest" 141 | - temperature (optional): defaults to 1.0. 142 | """ 143 | 144 | api_type = "mistral" 145 | default_model = os.getenv("MISTRAL_DEFAULT_MODEL", "codestral-latest") 146 | 147 | def __init__(self, config: dict): 148 | try: 149 | from mistralai import Mistral 150 | except ImportError: 151 | print( 152 | "Mistral library is not installed. Please install it using 'pip install mistralai'" 153 | ) 154 | sys.exit(1) 155 | 156 | self.config = config 157 | self.config["model"] = self.config.get("model", self.default_model) 158 | self.client = Mistral( 159 | api_key=self.config["api_key"], 160 | ) 161 | 162 | def get_completion(self, full_command: str) -> str: 163 | response = self.client.chat.complete( 164 | model=self.config["model"], 165 | messages=[ 166 | {"role": "system", "content": self.system_prompt}, 167 | {"role": "user", "content": full_command}, 168 | ], 169 | temperature=float(self.config.get("temperature", 1.0)), 170 | ) 171 | return response.choices[0].message.content 172 | 173 | class AmazonBedrock(BaseClient): 174 | """ 175 | config keys: 176 | - api_type="bedrock" 177 | - aws_region (optional): defaults to environment variable AWS_REGION 178 | - aws_access_key_id (optional): defaults to environment variable AWS_ACCESS_KEY_ID 179 | - aws_secret_access_key (optional): defaults to environment variable AWS_SECRET_ACCESS_KEY 180 | - aws_session_token (optional): defaults to environment variable AWS_SESSION_TOKEN 181 | - model (optional): defaults to "anthropic.claude-3-5-sonnet-20240620-v1:0" or environment variable BEDROCK_DEFAULT_MODEL 182 | - temperature (optional): defaults to 1.0. 183 | """ 184 | 185 | api_type = "bedrock" 186 | default_model = os.getenv("BEDROCK_DEFAULT_MODEL", "anthropic.claude-3-5-sonnet-20240620-v1:0") 187 | 188 | def __init__(self, config: dict): 189 | try: 190 | import boto3 191 | except ImportError: 192 | print( 193 | "Boto3 library is not installed. Please install it using 'pip install boto3'" 194 | ) 195 | sys.exit(1) 196 | 197 | self.config = config 198 | self.config["model"] = self.config.get("model", self.default_model) 199 | 200 | session_kwargs = {} 201 | if "aws_region" in config: 202 | session_kwargs["region_name"] = config["aws_region"] 203 | if "aws_access_key_id" in config: 204 | session_kwargs["aws_access_key_id"] = config["aws_access_key_id"] 205 | if "aws_secret_access_key" in config: 206 | session_kwargs["aws_secret_access_key"] = config["aws_secret_access_key"] 207 | if "aws_session_token" in config: 208 | session_kwargs["aws_session_token"] = config["aws_session_token"] 209 | 210 | self.client = boto3.client("bedrock-runtime", **session_kwargs) 211 | 212 | def get_completion(self, full_command: str) -> str: 213 | import json 214 | 215 | messages = [ 216 | {"role": "user", "content": full_command} 217 | ] 218 | 219 | # Format request body based on model type 220 | if "claude" in self.config["model"].lower(): 221 | body = { 222 | "anthropic_version": "bedrock-2023-05-31", 223 | "max_tokens": 1000, 224 | "system": self.system_prompt, 225 | "messages": messages, 226 | "temperature": float(self.config.get("temperature", 1.0)) 227 | } 228 | else: 229 | raise ValueError(f"Unsupported model: {self.config['model']}") 230 | 231 | response = self.client.invoke_model( 232 | modelId=self.config["model"], 233 | body=json.dumps(body) 234 | ) 235 | 236 | response_body = json.loads(response['body'].read()) 237 | return response_body["content"][0]["text"] 238 | 239 | 240 | 241 | class ClientFactory: 242 | api_types = [OpenAIClient.api_type, GoogleGenAIClient.api_type, GroqClient.api_type, MistralClient.api_type, AmazonBedrock.api_type] 243 | 244 | @classmethod 245 | def create(cls): 246 | config_parser = ConfigParser() 247 | config_parser.read(CONFIG_PATH) 248 | service = config_parser["service"]["service"] 249 | try: 250 | config = {k: v for k, v in config_parser[service].items()} 251 | except KeyError: 252 | raise KeyError(f"Config for service {service} is not defined") 253 | 254 | api_type = config["api_type"] 255 | match api_type: 256 | case OpenAIClient.api_type: 257 | return OpenAIClient(config) 258 | case GoogleGenAIClient.api_type: 259 | return GoogleGenAIClient(config) 260 | case GroqClient.api_type: 261 | return GroqClient(config) 262 | case MistralClient.api_type: 263 | return MistralClient(config) 264 | case AmazonBedrock.api_type: 265 | return AmazonBedrock(config) 266 | case _: 267 | raise KeyError( 268 | f"Specified API type {api_type} is not one of the supported services {cls.api_types}" 269 | ) 270 | -------------------------------------------------------------------------------- /zsh_codex.plugin.zsh: -------------------------------------------------------------------------------- 1 | #!/bin/zsh 2 | 3 | # This ZSH plugin reads the text from the current buffer 4 | # and uses a Python script to complete the text. 5 | api="openai" 6 | 7 | _ZSH_CODEX_REPO=$(dirname $0) 8 | 9 | create_completion() { 10 | # Get the text typed until now. 11 | local text=$BUFFER 12 | if [[ "$ZSH_CODEX_PREEXECUTE_COMMENT" == "true" ]]; then 13 | text="$(echo -n "echo \"$text\"" | zsh)" 14 | fi 15 | local ZSH_CODEX_PYTHON="${ZSH_CODEX_PYTHON:-python3}" 16 | local completion=$(echo -n "$text" | $ZSH_CODEX_PYTHON $_ZSH_CODEX_REPO/create_completion.py $CURSOR) 17 | local text_before_cursor=${BUFFER:0:$CURSOR} 18 | local text_after_cursor=${BUFFER:$CURSOR} 19 | 20 | # Add completion to the current buffer. 21 | BUFFER="${text_before_cursor}${completion}${text_after_cursor}" 22 | 23 | # Put the cursor at the end of the completion 24 | CURSOR=$((CURSOR + ${#completion})) 25 | } 26 | 27 | # Bind the create_completion function to a key. 28 | zle -N create_completion 29 | # You may want to add a key binding here, e.g.: 30 | # bindkey '^X^E' create_completion 31 | --------------------------------------------------------------------------------