├── .gitignore ├── LICENSE ├── README.md ├── assets └── generate-commit-message.webp ├── git_gpt ├── __init__.py ├── ai_client.py ├── ask_command.py ├── changelog_command.py ├── commit_command.py ├── config_command.py ├── git_diff.py ├── issue_command.py ├── main.py └── quality_command.py └── pyproject.toml /.gitignore: -------------------------------------------------------------------------------- 1 | output/ 2 | lib/ 3 | .vscode/ 4 | 5 | venv/ 6 | # Python related files 7 | *.pyc 8 | __pycache__/ 9 | .pytest_cache 10 | *.pyo 11 | *.egg-info/ 12 | dist/ 13 | build/ 14 | 15 | test/config.json 16 | 17 | *.sqlite 18 | 19 | # Created by .ignore support plugin (hsz.mobi) 20 | ### Linux template 21 | *~ 22 | 23 | # temporary files which can be created if a process still has a handle open of a deleted file 24 | .fuse_hidden* 25 | 26 | # KDE directory preferences 27 | .directory 28 | 29 | # Linux trash folder which might appear on any partition or disk 30 | .Trash-* 31 | 32 | # .nfs files are created when an open file is removed but is still being accessed 33 | .nfs* 34 | 35 | ### Windows template 36 | # Windows thumbnail cache files 37 | Thumbs.db 38 | Thumbs.db:encryptable 39 | ehthumbs.db 40 | ehthumbs_vista.db 41 | 42 | # Dump file 43 | *.stackdump 44 | 45 | # Folder config file 46 | [Dd]esktop.ini 47 | 48 | # Recycle Bin used on file shares 49 | $RECYCLE.BIN/ 50 | 51 | # Windows Installer files 52 | *.cab 53 | *.msi 54 | *.msix 55 | *.msm 56 | *.msp 57 | 58 | # Windows shortcuts 59 | *.lnk 60 | 61 | package-lock.json 62 | 63 | ### Node template 64 | # Logs 65 | logs 66 | *.log 67 | npm-debug.log* 68 | yarn-debug.log* 69 | yarn-error.log* 70 | lerna-debug.log* 71 | 72 | # Diagnostic reports (https://nodejs.org/api/report.html) 73 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 74 | 75 | # Runtime data 76 | pids 77 | *.pid 78 | *.seed 79 | *.pid.lock 80 | 81 | # Directory for instrumented libs generated by jscoverage/JSCover 82 | lib-cov 83 | 84 | # Coverage directory used by tools like istanbul 85 | coverage 86 | *.lcov 87 | 88 | # nyc test coverage 89 | .nyc_output 90 | 91 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 92 | .grunt 93 | 94 | # Bower dependency directory (https://bower.io/) 95 | bower_components 96 | 97 | # node-waf configuration 98 | .lock-wscript 99 | 100 | # Compiled binary addons (https://nodejs.org/api/addons.html) 101 | build/Release 102 | 103 | # Dependency directories 104 | node_modules/ 105 | jspm_packages/ 106 | 107 | # Snowpack dependency directory (https://snowpack.dev/) 108 | web_modules/ 109 | 110 | # TypeScript cache 111 | *.tsbuildinfo 112 | 113 | # Optional npm cache directory 114 | .npm 115 | 116 | # Optional eslint cache 117 | .eslintcache 118 | 119 | # Microbundle cache 120 | .rpt2_cache/ 121 | .rts2_cache_cjs/ 122 | .rts2_cache_es/ 123 | .rts2_cache_umd/ 124 | 125 | # Optional REPL history 126 | .node_repl_history 127 | 128 | # Output of 'npm pack' 129 | *.tgz 130 | 131 | # Yarn Integrity file 132 | .yarn-integrity 133 | 134 | # dotenv environment variables file 135 | .env 136 | .env.test 137 | 138 | # parcel-bundler cache (https://parceljs.org/) 139 | .cache 140 | .parcel-cache 141 | 142 | # Next.js build output 143 | .next 144 | out 145 | 146 | # Nuxt.js build / generate output 147 | .nuxt 148 | dist 149 | 150 | # Gatsby files 151 | .cache/ 152 | # Comment in the public line in if your project uses Gatsby and not Next.js 153 | # https://nextjs.org/blog/next-9-1#public-directory-support 154 | # public 155 | 156 | # 
vuepress build output 157 | .vuepress/dist 158 | 159 | # Serverless directories 160 | .serverless/ 161 | 162 | # FuseBox cache 163 | .fusebox/ 164 | 165 | # DynamoDB Local files 166 | .dynamodb/ 167 | 168 | # TernJS port file 169 | .tern-port 170 | 171 | # Stores VSCode versions used for testing VSCode extensions 172 | .vscode-test 173 | 174 | # yarn v2 175 | .yarn/cache 176 | .yarn/unplugged 177 | .yarn/build-state.yml 178 | .yarn/install-state.gz 179 | .pnp.* 180 | 181 | ### Example user template template 182 | ### Example user template 183 | 184 | # IntelliJ project files 185 | .idea 186 | *.iml 187 | out 188 | gen 189 | ### macOS template 190 | # General 191 | .DS_Store 192 | .AppleDouble 193 | .LSOverride 194 | 195 | # Icon must end with two \r 196 | Icon 197 | 198 | # Thumbnails 199 | ._* 200 | 201 | # Files that might appear in the root of a volume 202 | .DocumentRevisions-V100 203 | .fseventsd 204 | .Spotlight-V100 205 | .TemporaryItems 206 | .Trashes 207 | .VolumeIcon.icns 208 | .com.apple.timemachine.donotpresent 209 | 210 | # Directories potentially created on remote AFP share 211 | .AppleDB 212 | .AppleDesktop 213 | Network Trash Folder 214 | Temporary Items 215 | .apdisk 216 | 217 | ### MicrosoftOffice template 218 | *.tmp 219 | 220 | # Word temporary 221 | ~$*.doc* 222 | 223 | # Word Auto Backup File 224 | Backup of *.doc* 225 | 226 | # Excel temporary 227 | ~$*.xls* 228 | 229 | # Excel Backup File 230 | *.xlk 231 | 232 | # PowerPoint temporary 233 | ~$*.ppt* 234 | 235 | # Visio autosave temporary files 236 | *.~vsd* 237 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 ShinChven 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Git-GPT 2 | 3 | Git-GPT is a versatile CLI tool designed to auto-generate git commit messages, issues, and perform various code quality checks using multiple AI providers. It supports OpenAI, Azure OpenAI, Ollama, Claude, and Google Generative AI, giving you the flexibility to choose the best model for your needs. 
4 | 
5 | ![generate-commit-message](/assets/generate-commit-message.webp)
6 | 
7 | ## Installation
8 | 
9 | Install `git-gpt` via pip:
10 | 
11 | ```bash
12 | pip install git+https://github.com/ShinChven/git-gpt.git
13 | ```
14 | 
15 | Upgrade:
16 | 
17 | ```bash
18 | pip install --upgrade git+https://github.com/ShinChven/git-gpt.git
19 | ```
20 | 
21 | ## Development
22 | 
23 | To set up the development environment:
24 | 
25 | ```bash
26 | git clone https://github.com/ShinChven/git-gpt.git
27 | cd git-gpt
28 | python -m venv venv
29 | source venv/bin/activate
30 | pip install -e .
31 | ```
32 | 
33 | ### Project Structure
34 | 
35 | The project is organized as follows:
36 | 
37 | - `git_gpt/main.py`: The main entry point of the CLI application.
38 | - `git_gpt/__init__.py`: Initializes the package and defines the version.
39 | - `git_gpt/config_command.py`: Handles the configuration command.
40 | - `git_gpt/commit_command.py`: Implements the commit message generation.
41 | - `git_gpt/issue_command.py`: Manages the issue creation functionality.
42 | - `git_gpt/quality_command.py`: Performs quality checks on code changes.
43 | - `git_gpt/changelog_command.py`: Generates changelogs based on commits.
44 | - `git_gpt/ask_command.py`: Allows asking custom questions about code diffs.
45 | - `git_gpt/ai_client.py`: Handles API requests to multiple AI providers.
46 | 
47 | Each command is implemented in its own file for better organization and maintainability; the shared git-diff helper lives in `git_gpt/git_diff.py`.
48 | 
49 | ## Configuration
50 | 
51 | Before using `git-gpt`, you'll need to configure it with your API settings. For a step-by-step guided configuration, use the following command:
52 | 
53 | ```bash
54 | git-gpt config
55 | ```
56 | 
57 | This command will prompt you for each configuration setting, making setup straightforward.
58 | 
59 | To configure a new model or update an existing one with a single command:
60 | 
61 | ```bash
62 | git-gpt config --alias MODEL_ALIAS --model_name MODEL_NAME --provider PROVIDER --key API_KEY --api_base API_BASE
63 | ```
64 | 
65 | - `--alias`: A unique name for the model configuration
66 | - `--model_name`: The name of the model (e.g., "gpt-4" for OpenAI, "claude-3-sonnet-20240229" for Claude, "gemini-1.5-pro" for Google Generative AI)
67 | - `--provider`: The provider of the model (e.g., "openai", "azure-openai", "ollama", "claude", or "google-generativeai")
68 | - `--key`: The API key (optional, depending on the provider)
69 | - `--api_base`: The API base URL (optional, defaults to https://api.anthropic.com for Claude)
70 | 
71 | If you don't provide all options using the single command method, you'll be prompted to enter the missing information.
72 | 
73 | ### Setting the Default Model
74 | 
75 | To set the default model:
76 | 
77 | ```bash
78 | git-gpt set-default MODEL_ALIAS
79 | ```
80 | 
81 | ### Deleting a Model Configuration
82 | 
83 | To delete a model configuration:
84 | 
85 | ```bash
86 | git-gpt delete-model MODEL_ALIAS
87 | ```
88 | 
89 | ### Showing All Configured Models
90 | 
91 | To display all configured models with their provider and masked API key:
92 | 
93 | ```bash
94 | git-gpt show-models
95 | ```
96 | 
97 | ## Supported AI Providers
98 | 
99 | Git-GPT supports multiple AI providers, allowing you to choose the one that best fits your needs. The supported providers are:
100 | 
101 | 1. **OpenAI**: Use models like GPT-3 and GPT-4.
102 | 2. **Azure OpenAI**: Access OpenAI models through Microsoft's Azure platform.
103 | 3. 
**Ollama**: Run open-source models locally through the Ollama runtime.
104 | 4. **Claude (Anthropic)**: Use models like Claude-3.
105 | 5. **Google Generative AI**: Access models like Gemini.
106 | 
107 | Each provider may have specific requirements for model names and API configurations. When configuring a new model, make sure to use the correct provider name and follow any provider-specific instructions.
108 | 
109 | ## Ollama Support
110 | 
111 | Git-GPT supports Ollama, an open-source tool for running language models locally. This allows you to use Git-GPT without relying on external API services.
112 | 
113 | ### Setting up Ollama:
114 | 
115 | 1. Install and set up Ollama on your local machine (visit [Ollama's website](https://ollama.ai/) for instructions).
116 | 2. Pull the desired model(s) using Ollama's CLI (e.g., `ollama pull gemma2`).
117 | 
118 | ### Configuring Git-GPT for Ollama:
119 | 
120 | To use Ollama with Git-GPT, add a model configuration that points at your local Ollama server:
121 | 
122 | ```bash
123 | git-gpt config --alias ollama-local --model_name gemma2 --provider ollama --api_base http://localhost:11434
124 | ```
125 | 
126 | Replace `gemma2` with the model you've pulled in Ollama (e.g., llama2, codellama, mistral, etc.) and pick any alias you like.
127 | 
128 | ### Default Model:
129 | 
130 | There is no separate Ollama default model; Git-GPT uses whichever alias is set as the default (`git-gpt set-default`). You can switch models by configuring another alias or by passing `--model` when running commands.
131 | 
132 | ### Using Ollama:
133 | 
134 | Once configured, you can use Git-GPT with Ollama just like you would with OpenAI. All commands (commit, issue, quality, changelog, ask) will automatically use your Ollama configuration.
135 | 
136 | Note: When using Ollama, you don't need to provide an API key.
137 | 
138 | ## Usage
139 | 
140 | ### Generating Commit Messages
141 | 
142 | Stage all changes and generate a commit message:
143 | 
144 | ```bash
145 | git-gpt commit [--lang <language>] [--model <model_alias>] [--run-dry]
146 | ```
147 | 
148 | Options:
149 | 
150 | - `--lang`: Target language for the generated message (defaults to English).
151 | - `--model`: The model to use for generating messages (default is set in config).
152 | - `--run-dry`: Print the generated message without committing.
153 | 
154 | ### Creating Issues
155 | 
156 | To create an issue based on the diffs of the latest commit(s), run:
157 | 
158 | ```bash
159 | git-gpt issue [--lang <language>] [--model <model_alias>] [--max-tokens <number>] [--commit-range <number>]
160 | ```
161 | 
162 | Options:
163 | 
164 | - `--lang`: Target language for the generated message (defaults to English).
165 | - `--model`: The model to use for generating messages (default is set in config).
166 | - `--max-tokens`: The maximum number of tokens to use for the issue prompt (overrides the configured value).
167 | - `--commit-range`: The number of commits to include in the diff.
168 | 
169 | ### Performing a Quality Check
170 | 
171 | To perform a quality check on the diffs of the latest commit(s), run:
172 | 
173 | ```bash
174 | git-gpt quality [--lang <language>] [--model <model_alias>] [--max-tokens <number>] [--commit-range <number>]
175 | ```
176 | 
177 | Options:
178 | 
179 | - `--lang`: Target language for the generated message (defaults to English).
180 | - `--model`: The model to use for generating messages (default is set in config).
181 | - `--max-tokens`: The maximum number of tokens to use for the quality check prompt (overrides the configured value).
182 | - `--commit-range`: The number of commits to include in the diff.
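For example, to run a quality check over the last three commits and print the report in English (the `my-model` alias below is only a placeholder for whatever alias you configured):

```bash
git-gpt quality --lang English --commit-range 3 --model my-model
```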
183 | 
184 | ### Generating a Changelog
185 | 
186 | To generate a changelog based on the diffs of the latest commit(s), run:
187 | 
188 | ```bash
189 | git-gpt changelog [--lang <language>] [--model <model_alias>] [--max-tokens <number>] [--commit-range <number>]
190 | ```
191 | 
192 | Options:
193 | 
194 | - `--lang`: Target language for the generated changelog (defaults to English).
195 | - `--model`: The model to use for generating the changelog (default is set in config).
196 | - `--max-tokens`: The maximum number of tokens to use for the changelog prompt (overrides the configured value).
197 | - `--commit-range`: The number of commits to include in the diff.
198 | 
199 | ### Asking a Custom Question
200 | 
201 | To ask a custom question about the code diffs, run:
202 | 
203 | ```bash
204 | git-gpt ask --question "<your question>" [--model <model_alias>] [--commit-range <number>]
205 | ```
206 | 
207 | Options:
208 | 
209 | - `--question`: The question to ask about the code diffs.
210 | - `--model`: The model to use for generating the response (default is set in config).
211 | - `--commit-range`: The number of commits to include in the diff.
212 | 
213 | ## Troubleshooting
214 | 
215 | ### aiohttp
216 | 
217 | If you encounter issues building `aiohttp`, try downgrading Python to 3.11; this problem has been reported with Python 3.12:
218 | 
219 | ```log
220 | ERROR: Could not build wheels for aiohttp, which is required to install pyproject.toml-based projects
221 | ```
222 | 
223 | ## Disclaimer
224 | 
225 | - Content generated by this tool is not guaranteed to be correct. You should always review and edit the generated content before committing.
226 | 
227 | ## Contributing
228 | 
229 | Feel free to fork the repository, create a feature branch, and open a Pull Request.
230 | 
231 | ## License
232 | 
233 | [MIT License](LICENSE)
234 | 
--------------------------------------------------------------------------------
/assets/generate-commit-message.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShinChven/git-gpt/eb8e430ff7d3967aca1121c7014c43b8a7541949/assets/generate-commit-message.webp
--------------------------------------------------------------------------------
/git_gpt/__init__.py:
--------------------------------------------------------------------------------
1 | from .config_command import config
2 | from .commit_command import commit
3 | from .issue_command import issue
4 | from .quality_command import quality
5 | from .changelog_command import changelog
6 | from .ask_command import ask
7 | 
8 | __version__ = "0.13.0"
9 | 
10 | __all__ = ['config', 'commit', 'issue', 'quality', 'changelog', 'ask', '__version__']
11 | 
--------------------------------------------------------------------------------
/git_gpt/ai_client.py:
--------------------------------------------------------------------------------
1 | import json
2 | from openai import OpenAI, AzureOpenAI
3 | import requests
4 | import anthropic
5 | from google import genai
6 | from google.genai import types
7 | 
8 | class AIClient:
9 |     def __init__(self, config):
10 |         self.config = config
11 | 
12 |     def request(self, messages, model_alias=None, max_tokens=None):
13 |         if not model_alias:
14 |             model_alias = self.config.get('default_model')
15 |         if not model_alias:
16 |             raise ValueError("No default model specified in configuration. 
Please run git-gpt set-default to set default model or run git-gpt config to add model configuration.") 17 | 18 | if model_alias not in self.config.get('models', {}): 19 | raise ValueError(f"Model alias '{model_alias}' not found in configuration") 20 | 21 | model_config = self.config['models'][model_alias] 22 | provider = model_config.get('provider') 23 | 24 | if not provider: 25 | raise ValueError(f"Provider not specified for model alias '{model_alias}'") 26 | 27 | print(f"Requesting content from model '{model_alias}' using provider '{provider}'") 28 | 29 | if provider == 'openai': 30 | return self._openai_request(messages, model_config, max_tokens) 31 | elif provider == 'azure-openai': 32 | return self._azure_openai_request(messages, model_config, max_tokens) 33 | elif provider == 'ollama': 34 | return self._ollama_request(messages, model_config, max_tokens) 35 | elif provider == 'claude': 36 | return self._claude_request(messages, model_config, max_tokens) 37 | elif provider == 'google-generativeai': 38 | return self._google_generativeai_request(messages, model_config, max_tokens) 39 | else: 40 | raise ValueError(f"Unsupported provider: {provider}") 41 | 42 | def _openai_request(self, messages, model_config, max_tokens): 43 | if 'key' not in model_config or not model_config['key']: 44 | raise ValueError("API key not provided for OpenAI") 45 | 46 | api_base = model_config.get('api_base') 47 | if not api_base: 48 | api_base = 'https://api.openai.com/v1' 49 | 50 | openAIClient = OpenAI(api_key=model_config['key'], base_url=api_base) 51 | response = openAIClient.chat.completions.create( 52 | model=model_config['model_name'], 53 | messages=messages, 54 | stream=False, 55 | max_tokens=max_tokens) 56 | return response.choices[0].message.content 57 | 58 | def _azure_openai_request(self, messages, model_config, max_tokens): 59 | if 'key' not in model_config or not model_config['key']: 60 | raise ValueError("API key not provided for Azure OpenAI") 61 | 62 | if 'api_base' not in model_config or not model_config['api_base']: 63 | raise ValueError("API base URL not provided for Azure OpenAI") 64 | 65 | if 'model_name' not in model_config or not model_config['model_name']: 66 | raise ValueError("Azure deployment name not provided for Azure OpenAI, please set it as 'model_name' in the configuration") 67 | 68 | azureOpenAIClient = AzureOpenAI( 69 | api_key=model_config['key'], 70 | api_version="2023-07-01-preview", 71 | azure_endpoint=model_config['api_base'] 72 | ) 73 | 74 | response = azureOpenAIClient.chat.completions.create( 75 | model=model_config['model_name'], 76 | messages=messages, 77 | stream=False, 78 | max_tokens=max_tokens 79 | ) 80 | return response.choices[0].message.content 81 | 82 | def _ollama_request(self, messages, model_config, max_tokens): 83 | api_base = model_config.get('api_base', 'http://localhost:11434') 84 | if not api_base: 85 | api_base = 'http://localhost:11434' 86 | api_base += '/api/chat' 87 | 88 | request_data = { 89 | "model": model_config['model_name'], 90 | "messages": messages, 91 | } 92 | if max_tokens: 93 | request_data["options"] = {"num_predict": max_tokens} 94 | 95 | try: 96 | response = requests.post(api_base, json=request_data) 97 | response.raise_for_status() 98 | content = response.content.decode('utf-8') 99 | full_response = "" 100 | for line in content.strip().split('\n'): 101 | try: 102 | json_obj = json.loads(line) 103 | if 'message' in json_obj and 'content' in json_obj['message']: 104 | full_response += json_obj['message']['content'] 105 | if 
json_obj.get('done', False):
106 |                         break
107 |                 except json.JSONDecodeError:
108 |                     print(f"Error decoding JSON line: {line}")
109 |                     continue
110 |             return full_response.strip()
111 | 
112 |         except requests.exceptions.RequestException as e:
113 |             print(f"Error in Ollama API request: {e}")
114 |             if response is not None:
115 |                 print(f"Response content: {response.content}")
116 |             raise
117 | 
118 |     def _claude_request(self, messages, model_config, max_tokens):
119 |         if 'key' not in model_config or not model_config['key']:
120 |             raise ValueError("API key not provided for Claude")
121 | 
122 |         client = anthropic.Anthropic(api_key=model_config['key'])
123 | 
124 |         # Convert messages to Anthropic's format
125 |         anthropic_messages = [{"role": msg["role"], "content": msg["content"]} for msg in messages]
126 | 
127 |         response = client.messages.create(
128 |             model=model_config['model_name'],
129 |             max_tokens=max_tokens or model_config.get('max_tokens', 1024),
130 |             messages=anthropic_messages
131 |         )
132 |         return response.content[0].text  # content is a list of blocks; return the first block's text
133 | 
134 |     def _google_generativeai_request(self, messages, model_config, max_tokens):
135 |         if 'key' not in model_config or not model_config['key']:
136 |             raise ValueError("API key not provided for Google Generative AI")
137 | 
138 |         # Create the Google GenAI client
139 |         client = genai.Client(api_key=model_config['key'])
140 |         model_name = model_config['model_name']
141 | 
142 |         system_instruction = None
143 |         chat_messages_parts = []
144 |         for msg in messages:
145 |             if msg['role'] == 'system':
146 |                 # Extract system instruction
147 |                 system_instruction = msg['content']
148 |             else:
149 |                 # Format other messages for the prompt string
150 |                 chat_messages_parts.append(f"{msg['role']}: {msg['content']}")
151 | 
152 |         # Combine non-system messages into a single prompt string
153 |         prompt = "\n".join(chat_messages_parts)
154 | 
155 |         # Prepare configuration dictionary
156 |         config_dict = {
157 |             'max_output_tokens': max_tokens or None,
158 |             'system_instruction': system_instruction or None,
159 |             # Add other config parameters from model_config if needed, e.g.:
160 |             # 'top_k': model_config.get('top_k'),
161 |             # 'top_p': model_config.get('top_p'),
162 |             # 'temperature': model_config.get('temperature'),
163 |         }
164 | 
165 |         # Remove None values from config_dict as GenerateContentConfig might not accept them
166 |         config_dict = {k: v for k, v in config_dict.items() if v is not None}
167 | 
168 |         # Create GenerateContentConfig object
169 |         generation_config = types.GenerateContentConfig(**config_dict)
170 | 
171 |         # Use the generate_content call with the config object
172 |         response = client.models.generate_content(
173 |             model=model_name,
174 |             contents=prompt,
175 |             config=generation_config
176 |         )
177 |         # Return the generated text
178 |         return response.text
179 | 
--------------------------------------------------------------------------------
/git_gpt/ask_command.py:
--------------------------------------------------------------------------------
1 | import click
2 | import git
3 | from .config_command import get_config
4 | import os
5 | from .ai_client import AIClient
6 | from .git_diff import get_git_diff_by_commit_range
7 | 
8 | ask_prompt = """
9 | ```diff
10 | [insert_diff]
11 | ```
12 | [insert_question]
13 | """
14 | 
15 | @click.command()
16 | @click.option('--model', '-m', default=None, help='The model to use for generating the answer.')
17 | @click.option('--commit-range', '-r', type=int, help='The number of commits to include in the 
diff.') 18 | @click.option('--question', '-q', help='The question to ask.', required=True) 19 | def ask(model, commit_range, question): 20 | config = get_config() 21 | model = model or config.get('default_model') 22 | 23 | if not model: 24 | raise ValueError("No default model specified in configuration. Please run git-gpt set-default to set default model or run git-gpt config to add model configuration.") 25 | 26 | diff = get_git_diff_by_commit_range(commit_range) 27 | 28 | ai_client = AIClient(config) 29 | 30 | try: 31 | click.echo(f"Generating answer using {model}...") 32 | 33 | prompt = ask_prompt.replace('[insert_diff]', diff).replace('[insert_question]', question) 34 | 35 | messages = [ 36 | {"role": "system", "content": "You are a helpful code assistant, you will help users with their code, you will reply users in their language."}, 37 | {"role": "user", "content": prompt} 38 | ] 39 | 40 | response = ai_client.request(messages=messages, model_alias=model) 41 | ask_result = response 42 | click.echo(f"Answer generated successfully:\n\n{ask_result}") 43 | except ValueError as e: 44 | click.echo(f"Error: {str(e)}") 45 | click.echo("Please make sure you have set the API key using `git-gpt config --api-key `") 46 | except Exception as e: 47 | click.echo(f"Error generating answer: {str(e)}") 48 | click.echo("Please check the ai_client.py file for more details on the error.") 49 | -------------------------------------------------------------------------------- /git_gpt/changelog_command.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import click 3 | import git 4 | from .config_command import get_config 5 | import os 6 | from .ai_client import AIClient 7 | from .git_diff import get_git_diff_by_commit_range 8 | 9 | system_instruction = "You are going to work as a text generator, **you don't talk at all**, you will print your response in plain text without code block." 10 | 11 | changelog_prompt = """ 12 | I have a `git diff` output from my recent code changes, and I need help with a changelog written in [insert_language]. 13 | 14 | ## Changes 15 | ```diff 16 | [insert_diff] 17 | ``` 18 | 19 | All notable changes to this project will be documented in this log. 20 | 21 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 22 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 23 | 24 | ## Template: 25 | ```md 26 | # Changelog 27 | 28 | ## [Version x.x.x] - [insert_date] 29 | 30 | [write a detailed overview here.] 31 | 32 | ### Added(If applicable) 33 | - [List new features that have been added.] 34 | - [Include details about new modules, UI enhancements, etc.] 35 | 36 | ### Changed(If applicable) 37 | - [Describe any changes to existing functionality.] 38 | - [Note improvements, restructurings, or changes in behavior.] 39 | 40 | ### Deprecated(If applicable) 41 | - [Document any features that are still available but are not recommended for use and will be removed in future versions.] 42 | 43 | ### Removed(If applicable) 44 | - [List features or components that have been removed from this version.] 45 | 46 | ### Fixed(If applicable) 47 | - [Highlight fixed bugs or issues.] 48 | - [Include references to any tickets or bug report IDs if applicable.] 49 | 50 | ### Security(If applicable) 51 | - [Mention any security improvements or vulnerabilities addressed in this version.] 
52 | ```md 53 | """ 54 | 55 | @click.command() 56 | @click.option('--lang', '-l', default=None, help='Target language for the generated changelog.') 57 | @click.option('--model', '-m', default=None, help='The model to use for generating the changelog.') 58 | @click.option('--max-tokens', '-t', type=int, help='The maximum number of tokens to use for the changelog.') 59 | @click.option('--commit-range', '-r', type=int, help='The number of commits to include in the diff.') 60 | def changelog(lang, model, max_tokens, commit_range): 61 | config = get_config() 62 | 63 | lang = lang or config.get('lang', 'English') 64 | model = model or config.get('default_model') 65 | 66 | if not model: 67 | raise ValueError("No default model specified in configuration. Please run git-gpt set-default to set default model or run git-gpt config to add model configuration.") 68 | 69 | diff = get_git_diff_by_commit_range(commit_range) 70 | 71 | max_tokens = max_tokens or config.get('changelog_max_tokens') or None 72 | 73 | ai_client = AIClient(config) 74 | 75 | try: 76 | click.echo(f"Generating changelog using {model} in {lang}...") 77 | 78 | prompt = changelog_prompt.replace('[insert_diff]', diff).replace('[insert_language]', lang).replace('[insert_date]', datetime.now().strftime('%Y-%m-%d')) 79 | 80 | messages = [ 81 | {"role": "system", "content": system_instruction}, 82 | {"role": "user", "content": prompt} 83 | ] 84 | 85 | response = ai_client.request(messages=messages, model_alias=model, max_tokens=max_tokens) 86 | changelog_result = response 87 | click.echo(f"Changelog generated successfully:\n\n{changelog_result}") 88 | except ValueError as e: 89 | click.echo(f"Error: {str(e)}") 90 | click.echo("Please make sure you have set the API key using `git-gpt config --api-key `") 91 | except Exception as e: 92 | click.echo(f"Error generating changelog: {str(e)}") 93 | click.echo("Please check the ai_client.py file for more details on the error.") 94 | -------------------------------------------------------------------------------- /git_gpt/commit_command.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import tempfile 3 | import click 4 | import git 5 | from .config_command import get_config 6 | from .ai_client import AIClient 7 | import os 8 | 9 | system_instruction = "You are going to work as a text generator, **you don't talk at all**, you will print your response in plain text without code block." 10 | 11 | commit_message_prompt = """You are going to work as commit message generator, you will print the message without code block, and **You don't talk**. 12 | 13 | Please analyze staged diffs: 14 | ```diff 15 | [insert_diff] 16 | ``` 17 | Then, craft a conventional commit message a title under 50 characters and a list of details about changes under 70 characters to describe the commit in [insert_language]. 18 | Use appropriate type (e.g., 'feat:', 'fix:', 'docs:', 'style:', 'refactor:', 'test:', 'chore:', etc.). 19 | 20 | Here's the required format of the commit message 21 | 22 | ```txt 23 | [optional scope]: 24 | 25 | Added(If applicable): 26 | - [List new features that have been added.] 27 | - [Include details about new modules, UI enhancements, etc.] 28 | 29 | Changed(If applicable): 30 | - [Describe any changes to existing functionality.] 31 | - [Note improvements, restructurings, or changes in behavior.] 
32 | 33 | Deprecated(If applicable): 34 | - [Document any features that are still available but are not recommended for use and will be removed in future versions.] 35 | 36 | Removed(If applicable): 37 | - [List features or components that have been removed from this version.] 38 | 39 | Fixed(If applicable): 40 | - [Highlight fixed bugs or issues.] 41 | - [Include references to any tickets or bug report IDs if applicable.] 42 | 43 | Security(If applicable): 44 | - [Mention any security improvements or vulnerabilities addressed in this version.] 45 | 46 | ``` 47 | """ 48 | 49 | @click.command() 50 | @click.option('--lang', '-l', default=None, help='Target language for the generated message.') 51 | @click.option('--model', '-m', default=None, help='The model to use for generating the commit message.') 52 | @click.option('--run-dry', '-d', is_flag=True, help='Run the command to print the commit message without actually committing.') 53 | def commit(lang, model, run_dry): 54 | config = get_config() 55 | 56 | # If arguments are not provided via command line, try to get them from the config file 57 | lang = lang or config.get('lang', 'English') 58 | model = model or config.get('default_model') 59 | 60 | if not model: 61 | raise ValueError("No default model specified in configuration. Please run git-gpt set-default to set default model or run git-gpt config to add model configuration.") 62 | 63 | repo = git.Repo(os.getcwd()) 64 | # add all changes to staged 65 | repo.git.add('--all') 66 | diff = repo.git.diff('--staged') # Get textual representation of staged diffs 67 | click.echo('Run Command: git diff --staged') 68 | 69 | ai_client = AIClient(config) 70 | 71 | try: 72 | click.echo(f"Generating commit message with {model} in {lang}...") 73 | 74 | prompt = commit_message_prompt.replace('[insert_diff]', diff).replace('[insert_language]', lang) 75 | 76 | messages = [ 77 | {"role": "system", "content": system_instruction}, 78 | {"role": "user", "content": prompt} 79 | ] 80 | 81 | response = ai_client.request(messages=messages, model_alias=model) 82 | commit_message = response 83 | 84 | if run_dry: 85 | click.echo(f"Commit message generated successfully:\n\n{commit_message}") 86 | return 87 | 88 | # Create a temporary file to hold the commit message 89 | with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp_file: 90 | temp_file.write(f"# Generated by git-gpt\n\n{commit_message}") 91 | temp_file_name = temp_file.name 92 | 93 | # Use git to open the commit message editing dialog 94 | try: 95 | subprocess.run(['git', 'commit', '-e', '-F', temp_file_name], check=True) 96 | click.echo("Commit created successfully.") 97 | click.echo("Please run `git commit --amend` to edit the commit message if needed.") 98 | except subprocess.CalledProcessError: 99 | click.echo("Failed to create commit. 
Aborting.") 100 | finally: 101 | # Clean up the temporary file 102 | os.remove(temp_file_name) 103 | 104 | except ValueError as e: 105 | click.echo(f"Error: {str(e)}") 106 | click.echo("Please make sure you have set the API key using `git-gpt config --api-key <API_KEY>`") 107 | except Exception as e: 108 | click.echo(f"Error generating commit message: {str(e)}") 109 | click.echo("Please check the ai_client.py file for more details on the error.") -------------------------------------------------------------------------------- /git_gpt/config_command.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import click 4 | from pathlib import Path 5 | from prompt_toolkit import prompt 6 | from prompt_toolkit.completion import WordCompleter 7 | from prompt_toolkit.shortcuts import PromptSession 8 | from prompt_toolkit.application import Application 9 | from prompt_toolkit.key_binding import KeyBindings 10 | from prompt_toolkit.layout.containers import Window 11 | from prompt_toolkit.layout.controls import FormattedTextControl 12 | from prompt_toolkit.layout.layout import Layout 13 | 14 | CONFIG_PATH = os.path.expanduser('~/.config/git-gpt/config.json') 15 | 16 | def load_config(): 17 | if os.path.exists(CONFIG_PATH): 18 | with open(CONFIG_PATH, 'r') as config_file: 19 | config = json.load(config_file) 20 | else: 21 | config = {"default_model": "", "models": {}} 22 | 23 | # Ensure 'default_model' and 'models' keys are always present 24 | if 'default_model' not in config: 25 | config['default_model'] = "" 26 | if 'models' not in config: 27 | config['models'] = {} 28 | 29 | return config 30 | 31 | def save_config(config_data): 32 | os.makedirs(os.path.dirname(CONFIG_PATH), exist_ok=True) 33 | with open(CONFIG_PATH, 'w') as config_file: 34 | json.dump(config_data, config_file, indent=4, sort_keys=True) 35 | 36 | def get_config(): 37 | return load_config() 38 | 39 | def select_from_list(options, default_index=0): 40 | selected_index = [default_index] 41 | 42 | def get_formatted_options(): 43 | return [ 44 | ("", f"{' > ' if i == selected_index[0] else ' '}{option}\n") 45 | for i, option in enumerate(options) 46 | ] 47 | 48 | kb = KeyBindings() 49 | 50 | @kb.add('up') 51 | def _(event): 52 | selected_index[0] = (selected_index[0] - 1) % len(options) 53 | 54 | @kb.add('down') 55 | def _(event): 56 | selected_index[0] = (selected_index[0] + 1) % len(options) 57 | 58 | @kb.add('enter') 59 | def _(event): 60 | event.app.exit(result=options[selected_index[0]]) 61 | 62 | text_control = FormattedTextControl(get_formatted_options) 63 | window = Window(content=text_control) 64 | layout = Layout(window) 65 | 66 | application = Application( 67 | layout=layout, 68 | key_bindings=kb, 69 | mouse_support=True, 70 | full_screen=False, 71 | ) 72 | 73 | result = application.run() 74 | return result 75 | 76 | def update_config(alias, model_name, provider, key, api_base): 77 | config = get_config() 78 | 79 | if not alias: 80 | alias = prompt("Enter model alias: ") 81 | 82 | if not provider: 83 | provider_options = ['openai', 'azure-openai', 'ollama', 'claude', 'google-generativeai'] 84 | print("Select provider (use up/down arrows and press Enter to select):") 85 | provider = select_from_list(provider_options) 86 | print(f"Selected provider: {provider}") 87 | 88 | if not model_name: 89 | if provider == 'azure-openai': 90 | model_name = prompt("Enter model name with Azure deployment: ") 91 | elif provider == 'claude': 92 | model_name = prompt("Enter Claude 
model name (e.g., claude-3-sonnet-20240229): ") 93 | elif provider == 'google-generativeai': 94 | model_name = prompt("Enter Google Generative AI model name (e.g., gemini-1.5-pro): ") 95 | else: 96 | model_name = prompt("Enter model name: ") 97 | 98 | if key is None: 99 | key = prompt("Enter API key (optional, press Enter to skip): ", default="") 100 | 101 | if api_base is None: 102 | if provider == 'claude': 103 | api_base = prompt("Enter API base URL (optional, default is https://api.anthropic.com): ", default="https://api.anthropic.com") 104 | elif provider == 'google-generativeai': 105 | api_base = prompt("Enter API base URL (optional, press Enter to skip): ", default="") 106 | else: 107 | api_base = prompt("Enter API base URL (optional, press Enter to skip): ", default="") 108 | 109 | if alias in config['models']: 110 | overwrite = prompt(f"Model alias '{alias}' already exists. Do you want to overwrite it? (y/n): ").lower() 111 | if overwrite != 'y': 112 | print("Configuration update cancelled.") 113 | return 114 | 115 | config['models'][alias] = { 116 | "model_name": model_name, 117 | "provider": provider, 118 | "key": key, 119 | "api_base": api_base 120 | } 121 | 122 | if not config['default_model']: 123 | config['default_model'] = alias 124 | 125 | save_config(config) 126 | 127 | set_default = prompt("Do you want to set this model as default? (y/N): ").lower() 128 | if set_default == 'y': 129 | set_default_model(alias) 130 | 131 | print(f"Configuration updated for model alias '{alias}'") 132 | 133 | def set_default_model(alias): 134 | config = get_config() 135 | if not alias: 136 | available_aliases = list(config['models'].keys()) 137 | if not available_aliases: 138 | raise click.ClickException("No models available to set as default.") 139 | default_index = available_aliases.index(config['default_model']) if config['default_model'] in available_aliases else 0 140 | print("Select a model alias to set as default (use up/down arrows and press Enter to select):") 141 | alias = select_from_list(available_aliases, default_index=default_index) 142 | 143 | if alias not in config['models']: 144 | raise click.ClickException(f"Error: Model alias '{alias}' not found in configuration.") 145 | 146 | config['default_model'] = alias 147 | save_config(config) 148 | click.echo(f"Default model set to '{alias}'") 149 | 150 | def delete_config_command(alias): 151 | """Delete a model configuration by alias.""" 152 | config = get_config() 153 | 154 | if not alias: 155 | available_aliases = list(config['models'].keys()) 156 | if not available_aliases: 157 | click.echo("No models available to delete.", err=True) 158 | return 159 | print("Select a model alias to delete (use up/down arrows and press Enter to select):") 160 | alias = select_from_list(available_aliases) 161 | 162 | if alias in config['models']: 163 | model_config = config['models'][alias] 164 | 165 | # Mask the API key 166 | masked_key = model_config['key'][:2] + "*" * (len(model_config['key']) - 4) + model_config['key'][-2:] 167 | 168 | click.echo(f"Configuration for alias '{alias}':") 169 | click.echo(f" Model Name: {model_config['model_name']}") 170 | click.echo(f" Provider: {model_config['provider']}") 171 | click.echo(f" API Key: {masked_key}") 172 | click.echo(f" API Base: {model_config['api_base']}") 173 | 174 | if click.confirm("Are you sure you want to delete this configuration?"): 175 | del config['models'][alias] 176 | save_config(config) 177 | click.echo(f"Configuration for alias '{alias}' has been deleted.") 178 | else: 179 | 
click.echo("Deletion cancelled.") 180 | else: 181 | click.echo(f"Error: No configuration found for alias '{alias}'", err=True) 182 | 183 | def show_models_command(): 184 | """Show all models with their provider and masked key.""" 185 | config = get_config() 186 | if not config['models']: 187 | click.echo("No models configured.", err=True) 188 | return 189 | 190 | click.echo("Configured models:\n") 191 | for alias, model_config in config['models'].items(): 192 | masked_key = model_config['key'][:2] + "*" * (len(model_config['key']) - 4) + model_config['key'][-2:] 193 | click.echo(f"- Alias: {alias}") 194 | click.echo(f" Model Name: {model_config['model_name']}") 195 | click.echo(f" Provider: {model_config['provider']}") 196 | click.echo(f" API Key: {masked_key}") 197 | click.echo(f" API Base: {model_config['api_base']}") 198 | click.echo("") # Blank line for better readability 199 | 200 | @click.command(help="Configure a new model or update an existing one.") 201 | @click.option('-a', '--alias', help='Model alias') 202 | @click.option('-m', '--model_name', help='Model name') 203 | @click.option('-p', '--provider', help='Provider name') 204 | @click.option('-k', '--key', help='API key') 205 | @click.option('-b', '--api_base', help='API base URL') 206 | def config(alias, model_name, provider, key, api_base): 207 | try: 208 | update_config(alias, model_name, provider, key, api_base) 209 | except click.Abort: 210 | click.echo("Configuration cancelled.") 211 | except Exception as e: 212 | click.echo(f"Error updating configuration: {str(e)}", err=True) -------------------------------------------------------------------------------- /git_gpt/git_diff.py: -------------------------------------------------------------------------------- 1 | import git 2 | import os 3 | import click 4 | 5 | def get_git_diff_by_commit_range(commit_range: int | None = None) -> str: 6 | """ 7 | Retrieves the git diff for the specified commit range. 8 | 9 | Args: 10 | commit_range: The number of commits to include in the diff. Defaults to 1. 11 | 12 | Returns: 13 | The git diff as a string. 14 | """ 15 | repo = git.Repo(os.getcwd()) 16 | effective_commit_range = commit_range or 1 17 | diff_command = f'git diff HEAD~{effective_commit_range}..HEAD' 18 | click.echo(f"Running git command: {diff_command}") 19 | diff = repo.git.diff(f'HEAD~{effective_commit_range}..HEAD') 20 | return diff 21 | -------------------------------------------------------------------------------- /git_gpt/issue_command.py: -------------------------------------------------------------------------------- 1 | import click 2 | import git 3 | from .config_command import get_config 4 | import os 5 | from .ai_client import AIClient 6 | from .git_diff import get_git_diff_by_commit_range 7 | 8 | system_instruction = "You are going to work as a text generator, **you don't talk at all**, you will print your response in plain text without code block." 9 | 10 | issue_prompt = """You will work as a GitHub issue generator, and **you don't talk**, you will print the content in plain text without code block. 11 | Please generate a development issue according in [insert_language] to the changes below: 12 | ```diff 13 | [insert_diff] 14 | ``` 15 | 16 | Issue template: 17 | 18 | ## [Brief description of the feature or bug] 19 | 20 | ## Description 21 | [Provide a detailed description of the feature to be implemented or the bug to be fixed] 22 | 23 | ## Requirements 24 | - [ ] Requirement 1 25 | - [ ] Requirement 2 26 | - [ ] Requirement 3 27 | - [ ] ... 
28 | 29 | ## Acceptance Criteria 30 | - [ ] Criteria 1 31 | - [ ] Criteria 2 32 | - [ ] Criteria 3 33 | - [ ] ... 34 | 35 | ## Technical Details 36 | [Provide any technical specifications, API endpoints, data models, etc.] 37 | 38 | ## Dependencies 39 | - [List any dependencies or related issues] 40 | 41 | ## Mockups/Screenshots 42 | [If applicable, include mockups or screenshots] 43 | 44 | ## Additional Information 45 | [Any other relevant information, context, or resources] 46 | 47 | ## Estimated Effort 48 | [Provide an estimate of the expected effort, e.g., story points or time] 49 | 50 | ## Priority 51 | [Set the priority level: Low/Medium/High/Critical] 52 | 53 | ## Assigned To 54 | [Name of the person assigned to this task] 55 | 56 | ## Labels 57 | [Add relevant labels, e.g., "feature", "bug", "enhancement"] 58 | 59 | ## Milestone 60 | [If applicable, link to the relevant milestone] 61 | 62 | """ 63 | 64 | @click.command() 65 | @click.option('--lang', '-l', default=None, help='Target language for the generated message.') 66 | @click.option('--model', '-m', default=None, help='The model to use for generating the commit message.') 67 | @click.option('--max-tokens', '-t', type=int, help='The maximum number of tokens to use for the issue prompt.') 68 | @click.option('--commit-range', '-r', type=int, help='The number of commits to include in the diff.') 69 | def issue(lang, model, max_tokens, commit_range): 70 | config = get_config() 71 | 72 | lang = lang or config.get('lang', 'English') 73 | model = model or config.get('default_model') 74 | 75 | if not model: 76 | raise ValueError("No default model specified in configuration. Please run git-gpt set-default to set default model or run git-gpt config to add model configuration.") 77 | 78 | diff = get_git_diff_by_commit_range(commit_range) 79 | 80 | max_tokens = max_tokens or config.get('issue_max_tokens') or None 81 | 82 | ai_client = AIClient(config) 83 | 84 | try: 85 | click.echo(f"Generating issue using {model} in {lang}...") 86 | 87 | prompt = issue_prompt.replace('[insert_diff]', diff).replace('[insert_language]', lang) 88 | 89 | messages = [ 90 | {"role": "system", "content": system_instruction}, 91 | {"role": "user", "content": prompt} 92 | ] 93 | 94 | response = ai_client.request(messages=messages, model_alias=model, max_tokens=max_tokens) 95 | issue_content = response 96 | click.echo(f"Issue generated successfully:\n\n{issue_content}") 97 | except ValueError as e: 98 | click.echo(f"Error: {str(e)}") 99 | click.echo("Please make sure you have set the API key using `git-gpt config --api-key <API_KEY>`") 100 | except Exception as e: 101 | click.echo(f"Error generating issue: {str(e)}") 102 | click.echo("Please check the ai_client.py file for more details on the error.") 103 | -------------------------------------------------------------------------------- /git_gpt/main.py: -------------------------------------------------------------------------------- 1 | import click 2 | from git_gpt import __version__ 3 | from git_gpt.config_command import config, set_default_model, delete_config_command, show_models_command 4 | from git_gpt.commit_command import commit 5 | from git_gpt.issue_command import issue 6 | from git_gpt.quality_command import quality 7 | from git_gpt.changelog_command import changelog 8 | from git_gpt.ask_command import ask 9 | 10 | default_model = 'gpt-4o-mini' 11 | 12 | @click.group() 13 | @click.version_option(version=__version__, prog_name='git-gpt') 14 | def cli(): 15 | pass 16 | 17 | cli.add_command(config) 18 | 
cli.add_command(commit) 19 | cli.add_command(issue) 20 | cli.add_command(quality) 21 | cli.add_command(changelog) 22 | cli.add_command(ask) 23 | 24 | @cli.command() 25 | @click.option('-a', '--alias', help='Model alias to set as default') 26 | def set_default(alias): 27 | """Set the default model.""" 28 | try: 29 | set_default_model(alias) 30 | except click.ClickException as e: 31 | click.echo(str(e), err=True) 32 | 33 | @cli.command() 34 | @click.option('-a', '--alias', required=False, help='Model alias to delete') 35 | def delete_model(alias): 36 | """Delete a model configuration by alias.""" 37 | delete_config_command(alias) 38 | 39 | @cli.command() 40 | def show_models(): 41 | """Show all models with their provider and masked key.""" 42 | show_models_command() 43 | 44 | if __name__ == '__main__': 45 | cli() 46 | -------------------------------------------------------------------------------- /git_gpt/quality_command.py: -------------------------------------------------------------------------------- 1 | import click 2 | import git 3 | from .config_command import get_config 4 | import os 5 | from .ai_client import AIClient 6 | from .git_diff import get_git_diff_by_commit_range 7 | 8 | system_instruction = "You are going to work as a text generator, **you don't talk at all**, you will print your response in plain text without code block." 9 | 10 | quality_prompt = """I have a `git diff` output from my recent code changes, and I need help with a quality check report written in [insert_language]. 11 | 12 | ## Changes 13 | ```diff 14 | [insert_diff] 15 | ``` 16 | 17 | ## Requirements: 18 | 1. Code Consistency: Please analyze if the changes are consistent with the existing coding style and standards in the project. 19 | 2. Potential Bugs: Highlight any lines in the diff that might introduce bugs or logical errors. 20 | 3. Best Practices: Suggest any improvements or best practices that could be applied to the changes. 21 | 4. Documentation and Comments: Check if the new code is adequately commented and if any documentation needs to be updated. 22 | 5. Performance Implications: Evaluate if there are any changes that might adversely affect the performance of the code. 23 | 6. Security Check: Examine the code for potential security vulnerabilities, such as SQL injection, cross-site scripting, data leaks, or any other security risks. 24 | 7. Test Coverage: Assess if the changes are covered by existing tests or if new tests need to be added. 25 | 8. Use `- [ ]` to define the tasks that need to be done. 26 | 9. Use `-` to define list, don't use `*` to define list. 27 | 10. Use `#` to define the sections of the report, don't use `** **` to define section title. 28 | """ 29 | 30 | @click.command() 31 | @click.option('--lang', '-l', default=None, help='Target language for the generated message.') 32 | @click.option('--model', '-m', default=None, help='The model to use for generating the quality check.') 33 | @click.option('--max-tokens', '-t', type=int, help='The maximum number of tokens to use for the quality check.') 34 | @click.option('--commit-range', '-r', type=int, help='The number of commits to include in the diff.') 35 | def quality(lang, model, max_tokens, commit_range): 36 | config = get_config() 37 | 38 | lang = lang or config.get('lang', 'English') 39 | model = model or config.get('default_model') 40 | 41 | if not model: 42 | raise ValueError("No default model specified in configuration. 
Please run git-gpt set-default to set default model or run git-gpt config to add model configuration.") 43 | 44 | diff = get_git_diff_by_commit_range(commit_range) 45 | 46 | max_tokens = max_tokens or config.get('quality_check_max_tokens') or None 47 | 48 | ai_client = AIClient(config) 49 | 50 | try: 51 | click.echo(f"Performing quality check using {model} in {lang}...") 52 | 53 | prompt = quality_prompt.replace('[insert_diff]', diff).replace('[insert_language]', lang) 54 | 55 | messages = [ 56 | {"role": "system", "content": system_instruction}, 57 | {"role": "user", "content": prompt} 58 | ] 59 | 60 | response = ai_client.request(messages=messages, model_alias=model, max_tokens=max_tokens) 61 | quality_check_result = response 62 | click.echo(f"Quality check performed successfully:\n\n{quality_check_result}") 63 | except ValueError as e: 64 | click.echo(f"Error: {str(e)}") 65 | click.echo("Please make sure you have set the API key using `git-gpt config --api-key <API_KEY>`") 66 | except Exception as e: 67 | click.echo(f"Error performing quality check: {str(e)}") 68 | click.echo("Please check the ai_client.py file for more details on the error.") 69 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=45", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "git_gpt" 7 | version = "0.14.0" 8 | authors = [ 9 | {name = "ShinChven", email = "shinchven@gmail.com"}, 10 | ] 11 | description = "A CLI tool to generate commit messages and issues based on staged Git diffs using OpenAI GPT models or Ollama" 12 | readme = "README.md" 13 | requires-python = ">=3.7" 14 | license = {text = "MIT"} 15 | classifiers = [ 16 | "Programming Language :: Python :: 3", 17 | "License :: OSI Approved :: MIT License", 18 | "Operating System :: OS Independent", 19 | ] 20 | dependencies = [ 21 | "click", 22 | "openai", 23 | "gitpython", 24 | "tomli", 25 | "requests", 26 | "prompt_toolkit>=3.0.0", 27 | "click>=8.0.0", 28 | "anthropic", 29 | "google-genai", 30 | ] 31 | 32 | [project.urls] 33 | "Homepage" = "https://github.com/ShinChven/git-gpt.git" 34 | "Bug Tracker" = "https://github.com/ShinChven/git-gpt/issues" 35 | 36 | [project.scripts] 37 | git-gpt = "git_gpt.main:cli" 38 | 39 | [tool.setuptools] 40 | packages = ["git_gpt"] 41 | 42 | [tool.setuptools.package-data] 43 | git_gpt = ["*"] 44 | 45 | [tool.setuptools.exclude-package-data] 46 | "*" = ["assets"] 47 | --------------------------------------------------------------------------------