├── src ├── __init__.py ├── clients │ ├── __init__.py │ ├── openai_client.py │ └── github_client.py ├── utils │ ├── __init__.py │ └── helpers.py └── main.py ├── tests ├── __init__.py ├── test_openai_client.py ├── test_helpers.py ├── test_github_client.py └── test_main.py ├── requirements.txt ├── img ├── genai_code_review.png └── chatgpt-comment-on-pr.png ├── Dockerfile ├── .github └── workflows │ └── genai_code_review.yml ├── LICENSE ├── action.yml ├── .gitignore └── README.md /src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/clients/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | openai==1.30.1 2 | requests==2.31.0 3 | PyGithub==2.3.0 4 | -------------------------------------------------------------------------------- /img/genai_code_review.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cirolini/genai-code-review/HEAD/img/genai_code_review.png -------------------------------------------------------------------------------- /img/chatgpt-comment-on-pr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cirolini/genai-code-review/HEAD/img/chatgpt-comment-on-pr.png 
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the official Python base image 2 | FROM python:3.12 3 | 4 | # Set the working directory inside the container 5 | WORKDIR /app 6 | 7 | # Copy the requirements file to the container 8 | COPY requirements.txt . 9 | 10 | # Install the Python dependencies 11 | RUN pip install --no-cache-dir -r requirements.txt 12 | 13 | # Copy all the source code to the working directory 14 | COPY . . 15 | 16 | # Add the src directory to the PYTHONPATH 17 | ENV PYTHONPATH="${PYTHONPATH}:/app/src" 18 | 19 | # Set the command to execute the main Python script 20 | CMD ["python", "src/main.py"] -------------------------------------------------------------------------------- /.github/workflows/genai_code_review.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | types: [opened, synchronize] 4 | 5 | jobs: 6 | genai_code_review: 7 | runs-on: ubuntu-latest 8 | name: GenAI Code Review 9 | steps: 10 | - name: Checkout code 11 | uses: actions/checkout@v2 12 | - name: GenAI Code Review 13 | uses: cirolini/genai-code-review@v2 14 | with: 15 | openai_api_key: ${{ secrets.OPENAI_API_KEY }} 16 | github_token: ${{ secrets.GITHUB_TOKEN }} 17 | github_pr_id: ${{ github.event.pull_request.number }} 18 | openai_model: "gpt-3.5-turbo" 19 | openai_temperature: 0.5 20 | openai_max_tokens: 2048 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. -------------------------------------------------------------------------------- /src/utils/helpers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Este módulo contém funções auxiliares para o projeto. 3 | Este módulo fornece uma função para recuperar variáveis de ambiente e garantir que elas não 4 | estejam vazias, se necessário. 5 | """ 6 | 7 | import os 8 | import logging 9 | 10 | # Configure logging 11 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 12 | 13 | def get_env_variable(key, required=True): 14 | """ 15 | Retrieve an environment variable and ensure it is not empty if required. 16 | 17 | Args: 18 | key (str): The key of the environment variable. 19 | required (bool): Whether the environment variable is required. 20 | 21 | Returns: 22 | str: The value of the environment variable or None if not required and missing. 23 | 24 | Raises: 25 | ValueError: If the environment variable is required and missing or empty. 
26 | """ 27 | logging.info("Retrieving environment variable: %s", key) 28 | value = os.getenv(key) 29 | if required and not value: 30 | logging.error("Missing required environment variable: %s", key) 31 | raise ValueError(f"Missing required environment variable: {key}") 32 | logging.info("Successfully retrieved environment variable: %s", key) 33 | return value 34 | -------------------------------------------------------------------------------- /tests/test_openai_client.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import patch, MagicMock 3 | from clients.openai_client import OpenAIClient # Substitua 'your_module' pelo nome do seu módulo. 4 | 5 | class TestOpenAIClient(unittest.TestCase): 6 | 7 | @patch('clients.openai_client.OpenAI') 8 | def setUp(self, MockOpenAI): 9 | self.mock_openai = MockOpenAI.return_value 10 | self.mock_openai.chat.completions.create.return_value = MagicMock(choices=[MagicMock(message=MagicMock(content="Test response"))]) 11 | self.client = OpenAIClient(model="gpt-3.5-turbo", temperature=0.7, max_tokens=150) 12 | 13 | def test_initialization(self): 14 | self.assertEqual(self.client.model, "gpt-3.5-turbo") 15 | self.assertEqual(self.client.temperature, 0.7) 16 | self.assertEqual(self.client.max_tokens, 150) 17 | 18 | def test_generate_response_success(self): 19 | prompt = "Hello, how are you?" 
20 | response = self.client.generate_response(prompt) 21 | self.assertEqual(response, "Test response") 22 | self.mock_openai.chat.completions.create.assert_called_once_with( 23 | model="gpt-3.5-turbo", 24 | messages=[ 25 | {"role": "system", "content": "You are an expert Developer."}, 26 | {"role": "user", "content": prompt} 27 | ], 28 | temperature=0.7, 29 | max_tokens=150 30 | ) 31 | 32 | @patch('clients.openai_client.logging') 33 | def test_generate_response_exception(self, mock_logging): 34 | self.mock_openai.chat.completions.create.side_effect = Exception("API error") 35 | with self.assertRaises(Exception) as context: 36 | self.client.generate_response("This will fail.") 37 | self.assertTrue("API error" in str(context.exception)) 38 | 39 | if __name__ == '__main__': 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /tests/test_helpers.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import patch 3 | import logging 4 | 5 | from utils.helpers import get_env_variable 6 | 7 | class TestHelpers(unittest.TestCase): 8 | 9 | @patch('utils.helpers.os.getenv') 10 | def test_get_env_variable_success(self, mock_getenv): 11 | mock_getenv.return_value = 'some_value' 12 | result = get_env_variable('TEST_VAR') 13 | self.assertEqual(result, 'some_value') 14 | mock_getenv.assert_called_once_with('TEST_VAR') 15 | 16 | @patch('utils.helpers.os.getenv') 17 | def test_get_env_variable_missing_required(self, mock_getenv): 18 | mock_getenv.return_value = None 19 | with self.assertRaises(ValueError) as context: 20 | get_env_variable('MISSING_VAR') 21 | self.assertEqual(str(context.exception), 'Missing required environment variable: MISSING_VAR') 22 | mock_getenv.assert_called_once_with('MISSING_VAR') 23 | 24 | @patch('utils.helpers.os.getenv') 25 | def test_get_env_variable_not_required(self, mock_getenv): 26 | mock_getenv.return_value = None 27 | 
result = get_env_variable('OPTIONAL_VAR', required=False) 28 | self.assertIsNone(result) 29 | mock_getenv.assert_called_once_with('OPTIONAL_VAR') 30 | 31 | @patch('utils.helpers.os.getenv') 32 | def test_get_env_variable_empty_required(self, mock_getenv): 33 | mock_getenv.return_value = '' 34 | with self.assertRaises(ValueError) as context: 35 | get_env_variable('EMPTY_VAR') 36 | self.assertEqual(str(context.exception), 'Missing required environment variable: EMPTY_VAR') 37 | mock_getenv.assert_called_once_with('EMPTY_VAR') 38 | 39 | @patch('utils.helpers.os.getenv') 40 | def test_get_env_variable_empty_not_required(self, mock_getenv): 41 | mock_getenv.return_value = '' 42 | result = get_env_variable('EMPTY_VAR', required=False) 43 | self.assertEqual(result, '') 44 | mock_getenv.assert_called_once_with('EMPTY_VAR') 45 | 46 | if __name__ == '__main__': 47 | unittest.main() 48 | -------------------------------------------------------------------------------- /action.yml: -------------------------------------------------------------------------------- 1 | name: "ChatGPT GitHub Actions" 2 | description: "This GitHub Action automates code review for pull requests using OpenAI's GPT models. By integrating this action into your CI/CD pipeline, it provides detailed, AI-generated reviews of your code directly within the PR comments. This tool helps maintain code quality, ensures adherence to best practices, and highlights potential improvements and bugs." 3 | inputs: 4 | openai_api_key: # id of input 5 | description: 'OpenAI API Key' 6 | required: true 7 | default: '' 8 | github_token: # id of input 9 | description: 'Github API Key' 10 | required: true 11 | default: '' 12 | github_pr_id: # id of input 13 | description: 'Github PR ID' 14 | required: true 15 | default: '' 16 | openai_model: 17 | description: "The OpenAI model to use for generating responses. 
Examples: 'gpt-3.5-turbo', 'gpt-4'" 18 | required: false 19 | default: "gpt-3.5-turbo" 20 | openai_temperature: 21 | description: "The temperature for OpenAI responses. Controls the creativity of the responses. Higher values (e.g., 0.9) make output more random, while lower values (e.g., 0.1) make it more focused and deterministic." 22 | required: false 23 | default: "0.5" 24 | openai_max_tokens: 25 | description: "The maximum number of tokens to generate in the OpenAI response. Example: 1000" 26 | required: false 27 | default: "2048" 28 | mode: 29 | description: "The mode of operation for the action. Options: 'files' to review file changes, 'patch' to review the patch changes." 30 | required: false 31 | default: "files" 32 | language: 33 | description: "The language for the code review" 34 | required: false 35 | default: "en" # English as the default language 36 | custom_prompt: 37 | description: "Custom prompt to use for generating responses. If not provided, a default prompt will be used based on the mode." 38 | required: false 39 | default: "" 40 | runs: 41 | using: "docker" 42 | image: "Dockerfile" 43 | env: 44 | OPENAI_API_KEY: ${{ inputs.openai_api_key }} 45 | GITHUB_TOKEN: ${{ inputs.github_token }} 46 | GITHUB_PR_ID: ${{ inputs.github_pr_id }} 47 | OPENAI_MODEL: ${{ inputs.openai_model }} 48 | OPENAI_TEMPERATURE: ${{ inputs.openai_temperature }} 49 | OPENAI_MAX_TOKENS: ${{ inputs.openai_max_tokens }} 50 | MODE: ${{ inputs.mode }} 51 | LANGUAGE: ${{ inputs.language }} 52 | CUSTOM_PROMPT: ${{ inputs.custom_prompt }} 53 | -------------------------------------------------------------------------------- /src/clients/openai_client.py: -------------------------------------------------------------------------------- 1 | """ 2 | Este módulo contém a classe OpenAIClient, que é usada para interagir com a API do OpenAI. 3 | A classe OpenAIClient pode ser usada para gerar respostas de um modelo especificado do OpenAI. 
4 | """ 5 | 6 | import logging 7 | from openai import OpenAI 8 | 9 | # Configure logging 10 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 11 | 12 | class OpenAIClient: 13 | """ 14 | A client for interacting with the OpenAI API to generate responses using a specified model. 15 | """ 16 | 17 | def __init__(self, model, temperature, max_tokens): 18 | """ 19 | Initialize the OpenAIClient with API key, model, temperature, and max tokens. 20 | 21 | Args: 22 | api_key (str): The OpenAI API key. 23 | model (str): The OpenAI model to use. 24 | temperature (float): The sampling temperature. 25 | max_tokens (int): The maximum number of tokens to generate. 26 | """ 27 | try: 28 | self.client = OpenAI() 29 | self.model = model 30 | self.temperature = temperature 31 | self.max_tokens = max_tokens 32 | logging.info( 33 | "OpenAI client initialized successfully, " 34 | "Model: %s, temperature: %s, max tokens: %s", 35 | self.model, 36 | self.temperature, 37 | self.max_tokens 38 | ) 39 | except Exception as e: 40 | logging.error("Error initializing OpenAI client: %s", e) 41 | raise 42 | 43 | def generate_response(self, prompt): 44 | """ 45 | Generate a response from the OpenAI model based on the given prompt. 46 | 47 | Args: 48 | prompt (str): The prompt to send to the OpenAI API. 49 | 50 | Returns: 51 | str: The generated response from the OpenAI model. 52 | 53 | Raises: 54 | Exception: If there is an error generating the response. 
55 | """ 56 | try: 57 | logging.info("Generating response from OpenAI model.") 58 | response = self.client.chat.completions.create( 59 | model=self.model, 60 | messages=[ 61 | {"role": "system", "content": "You are an expert Developer."}, 62 | {"role": "user", "content": prompt} 63 | ], 64 | temperature=self.temperature, 65 | max_tokens=self.max_tokens 66 | ) 67 | logging.info("Response generated successfully.") 68 | return response.choices[0].message.content 69 | except Exception as e: 70 | logging.error("Error generating response from OpenAI model: %s", e) 71 | raise 72 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | .idea/ 161 | 162 | .DS_Store -------------------------------------------------------------------------------- /tests/test_github_client.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import patch, MagicMock 3 | from clients.github_client import GithubClient 4 | import os 5 | 6 | class TestGithubClient(unittest.TestCase): 7 | 8 | def setUp(self): 9 | self.token = "fake_github_token" 10 | self.repo_name = "fake_repo" 11 | self.pr_id = 1 12 | self.commit_sha = "fake_commit_sha" 13 | self.filename = "fake_file.py" 14 | 15 | os.environ['GITHUB_REPOSITORY'] = self.repo_name 16 | os.environ['GITHUB_TOKEN'] = self.token 17 | 18 | with patch('clients.github_client.Github') as MockGithub: 19 | self.mock_github = MockGithub.return_value 20 | self.mock_repo = self.mock_github.get_repo.return_value 21 | self.github_client = GithubClient(self.token) 22 | 23 | def tearDown(self): 24 | del os.environ['GITHUB_REPOSITORY'] 25 | del os.environ['GITHUB_TOKEN'] 26 | 27 | def test_init(self): 28 | self.mock_github.get_repo.assert_called_with(self.repo_name) 29 | self.assertEqual(self.github_client.repo_name, self.repo_name) 30 | self.assertEqual(self.github_client.repo, self.mock_repo) 31 | 32 | def test_get_pr(self): 33 | mock_pr = MagicMock() 34 | self.mock_repo.get_pull.return_value = mock_pr 35 | pr = self.github_client.get_pr(self.pr_id) 36 | self.mock_repo.get_pull.assert_called_with(self.pr_id) 37 | self.assertEqual(pr, mock_pr) 38 | 39 | def test_get_pr_comments(self): 40 | mock_pr = MagicMock() 41 | mock_comments = MagicMock() 42 | self.mock_repo.get_pull.return_value = mock_pr 43 | mock_pr.get_issue_comments.return_value = mock_comments 44 | 45 | comments = self.github_client.get_pr_comments(self.pr_id) 46 | self.mock_repo.get_pull.assert_called_with(self.pr_id) 47 | mock_pr.get_issue_comments.assert_called_once() 48 | self.assertEqual(comments, mock_comments) 49 | 50 | def 
test_post_comment(self): 51 | mock_pr = MagicMock() 52 | mock_comment = MagicMock() 53 | self.mock_repo.get_pull.return_value = mock_pr 54 | mock_pr.create_issue_comment.return_value = mock_comment 55 | 56 | body = "Test comment" 57 | comment = self.github_client.post_comment(self.pr_id, body) 58 | self.mock_repo.get_pull.assert_called_with(self.pr_id) 59 | mock_pr.create_issue_comment.assert_called_with(body) 60 | self.assertEqual(comment, mock_comment) 61 | 62 | def test_get_commit_files(self): 63 | mock_commit = MagicMock() 64 | mock_commit.files = ["file1.py", "file2.py"] 65 | files = self.github_client.get_commit_files(mock_commit) 66 | self.assertEqual(files, ["file1.py", "file2.py"]) 67 | 68 | def test_get_file_content(self): 69 | mock_content = MagicMock() 70 | mock_content.decoded_content.decode.return_value = "file content" 71 | self.mock_repo.get_contents.return_value = mock_content 72 | 73 | content = self.github_client.get_file_content(self.commit_sha, self.filename) 74 | self.mock_repo.get_contents.assert_called_with(self.filename, ref=self.commit_sha) 75 | self.assertEqual(content, "file content") 76 | 77 | @patch('clients.github_client.requests.get') 78 | def test_get_pr_patch(self, mock_get): 79 | mock_response = MagicMock() 80 | mock_response.text = "patch content" 81 | mock_response.raise_for_status = MagicMock() 82 | mock_get.return_value = mock_response 83 | 84 | patch_content = self.github_client.get_pr_patch(self.pr_id) 85 | expected_url = f"https://api.github.com/repos/{self.repo_name}/pulls/{self.pr_id}" 86 | mock_get.assert_called_with(expected_url, headers={ 87 | 'Authorization': f"token {self.token}", 88 | 'Accept': 'application/vnd.github.v3.diff' 89 | }, timeout=60) 90 | self.assertEqual(patch_content, "patch content") 91 | 92 | if __name__ == '__main__': 93 | unittest.main() 94 | -------------------------------------------------------------------------------- /tests/test_main.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import patch, MagicMock 3 | import logging 4 | from main import main, get_env_vars, process_files, process_patch, analyze_commit_files, analyze_patch, create_review_prompt 5 | 6 | class TestMainModule(unittest.TestCase): 7 | 8 | @patch('main.get_env_vars') 9 | @patch('main.GithubClient') 10 | @patch('main.OpenAIClient') 11 | def test_main_files_mode(self, MockOpenAIClient, MockGithubClient, mock_get_env_vars): 12 | mock_get_env_vars.return_value = { 13 | 'GITHUB_TOKEN': 'fake_github_token', 14 | 'OPENAI_API_KEY': 'fake_openai_api_key', 15 | 'OPENAI_MODEL': 'gpt-3.5-turbo', 16 | 'OPENAI_TEMPERATURE': 0.5, 17 | 'OPENAI_MAX_TOKENS': 1000, 18 | 'MODE': 'files', 19 | 'GITHUB_PR_ID': 1, 20 | 'LANGUAGE': 'en', 21 | 'CUSTOM_PROMPT': None 22 | } 23 | 24 | with patch('main.process_files') as mock_process_files: 25 | main() 26 | mock_process_files.assert_called_once() 27 | 28 | @patch('main.get_env_vars') 29 | @patch('main.GithubClient') 30 | @patch('main.OpenAIClient') 31 | def test_main_patch_mode(self, MockOpenAIClient, MockGithubClient, mock_get_env_vars): 32 | mock_get_env_vars.return_value = { 33 | 'GITHUB_TOKEN': 'fake_github_token', 34 | 'OPENAI_API_KEY': 'fake_openai_api_key', 35 | 'OPENAI_MODEL': 'gpt-3.5-turbo', 36 | 'OPENAI_TEMPERATURE': 0.5, 37 | 'OPENAI_MAX_TOKENS': 1000, 38 | 'MODE': 'patch', 39 | 'GITHUB_PR_ID': 1, 40 | 'LANGUAGE': 'en', 41 | 'CUSTOM_PROMPT': None 42 | } 43 | 44 | with patch('main.process_patch') as mock_process_patch: 45 | main() 46 | mock_process_patch.assert_called_once() 47 | 48 | @patch('main.get_env_variable') 49 | def test_get_env_vars(self, mock_get_env_variable): 50 | mock_get_env_variable.side_effect = lambda var, required: { 51 | 'OPENAI_API_KEY': 'fake_openai_api_key', 52 | 'GITHUB_TOKEN': 'fake_github_token', 53 | 'GITHUB_PR_ID': '1', 54 | 'OPENAI_MODEL': 'gpt-3.5-turbo', 55 | 'OPENAI_TEMPERATURE': '0.5', 56 
| 'OPENAI_MAX_TOKENS': '1000', 57 | 'MODE': 'files', 58 | 'LANGUAGE': 'en', 59 | 'CUSTOM_PROMPT': None 60 | }.get(var, None) 61 | 62 | env_vars = get_env_vars() 63 | self.assertEqual(env_vars['OPENAI_API_KEY'], 'fake_openai_api_key') 64 | self.assertEqual(env_vars['GITHUB_TOKEN'], 'fake_github_token') 65 | self.assertEqual(env_vars['GITHUB_PR_ID'], 1) 66 | self.assertEqual(env_vars['OPENAI_TEMPERATURE'], 0.5) 67 | 68 | def test_create_review_prompt(self): 69 | content = "def foo(): pass" 70 | language = "en" 71 | custom_prompt = None 72 | 73 | prompt = create_review_prompt(content, language, custom_prompt) 74 | self.assertIn("Please review the following code", prompt) 75 | 76 | @patch('main.GithubClient') 77 | @patch('main.OpenAIClient') 78 | def test_process_files(self, MockGithubClient, MockOpenAIClient): 79 | github_client = MockGithubClient() 80 | openai_client = MockOpenAIClient() 81 | github_client.get_pr.return_value.get_commits.return_value = [MagicMock(sha='abc123')] 82 | 83 | process_files(github_client, openai_client, 1, 'en', None) 84 | github_client.get_pr.assert_called_with(1) 85 | openai_client.generate_response.assert_called() 86 | 87 | @patch('main.GithubClient') 88 | @patch('main.OpenAIClient') 89 | def test_process_patch(self, MockGithubClient, MockOpenAIClient): 90 | github_client = MockGithubClient() 91 | openai_client = MockOpenAIClient() 92 | github_client.get_pr_patch.return_value = 'diff --git a/file b/file' 93 | 94 | process_patch(github_client, openai_client, 1, 'en', None) 95 | github_client.get_pr_patch.assert_called_with(1) 96 | openai_client.generate_response.assert_called() 97 | 98 | if __name__ == '__main__': 99 | unittest.main() -------------------------------------------------------------------------------- /src/clients/github_client.py: -------------------------------------------------------------------------------- 1 | """ 2 | Este módulo contém a classe GithubClient, que é usada para interagir com a API do Github. 
3 | A classe GithubClient pode ser usada para recuperar informações sobre commits, conteúdo de 4 | arquivos e patches de pull requests. 5 | """ 6 | 7 | import os 8 | import logging 9 | import requests 10 | from github import Github 11 | 12 | 13 | # Configure logging 14 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 15 | 16 | class GithubClient: 17 | """ 18 | A client for interacting with the GitHub API to manage pull requests and repository content. 19 | """ 20 | 21 | def __init__(self, token): 22 | """ 23 | Initialize the GithubClient with a GitHub token. 24 | 25 | Args: 26 | token (str): The GitHub token for authentication. 27 | """ 28 | try: 29 | self.client = Github(token) 30 | self.repo_name = os.getenv('GITHUB_REPOSITORY') 31 | self.repo = self.client.get_repo(self.repo_name) 32 | logging.info("Initialized GitHub client for repository: %s", self.repo_name) 33 | except Exception as e: 34 | logging.error("Error initializing GitHub client: %s", e) 35 | raise 36 | 37 | def get_pr(self, pr_id): 38 | """ 39 | Retrieve a pull request by its ID. 40 | 41 | Args: 42 | pr_id (int): The pull request ID. 43 | 44 | Returns: 45 | PullRequest: The pull request object. 46 | """ 47 | try: 48 | pr = self.repo.get_pull(pr_id) 49 | logging.info("Retrieved PR ID: %s", pr_id) 50 | return pr 51 | except Exception as e: 52 | logging.error("Error retrieving PR ID %s: %s", pr_id, e) 53 | raise 54 | 55 | def get_pr_comments(self, pr_id): 56 | """ 57 | Retrieve comments from a pull request. 58 | 59 | Args: 60 | pr_id (int): The pull request ID. 61 | 62 | Returns: 63 | PaginatedList: The list of comments. 
64 | """ 65 | try: 66 | pr = self.get_pr(pr_id) 67 | comments = pr.get_issue_comments() 68 | logging.info("Retrieved comments for PR ID: %s", pr_id) 69 | return comments 70 | except Exception as e: 71 | logging.error("Error retrieving comments for PR ID %s: %s", pr_id, e) 72 | raise 73 | 74 | def post_comment(self, pr_id, body): 75 | """ 76 | Post a comment to a pull request. 77 | 78 | Args: 79 | pr_id (int): The pull request ID. 80 | body (str): The comment body. 81 | 82 | Returns: 83 | IssueComment: The created comment. 84 | """ 85 | try: 86 | pr = self.get_pr(pr_id) 87 | comment = pr.create_issue_comment(body) 88 | logging.info("Posted comment to PR ID: %s", pr_id) 89 | return comment 90 | except Exception as e: 91 | logging.error("Error posting comment to PR ID %s: %s", pr_id, e) 92 | raise 93 | 94 | def get_commit_files(self, commit): 95 | """ 96 | Retrieve the files modified in a commit. 97 | 98 | Args: 99 | commit (Commit): The commit object. 100 | 101 | Returns: 102 | list: The list of files modified in the commit. 103 | """ 104 | try: 105 | files = commit.files 106 | logging.info("Retrieved files for commit: %s", commit.sha) 107 | return files 108 | except Exception as e: 109 | logging.error("Error retrieving files for commit %s: %s", commit.sha, e) 110 | raise 111 | 112 | def get_file_content(self, commit_sha, filename): 113 | """ 114 | Retrieve the content of a file at a specific commit. 115 | 116 | Args: 117 | commit_sha (str): The commit SHA. 118 | filename (str): The name of the file. 119 | 120 | Returns: 121 | str: The content of the file. 
122 | """ 123 | try: 124 | content = self.repo.get_contents(filename, ref=commit_sha).decoded_content.decode() 125 | logging.info("Retrieved content for file: %s at commit: %s", filename, commit_sha) 126 | return content 127 | except Exception as e: 128 | logging.error( 129 | "Error retrieving content for file %s at commit %s: %s", 130 | filename, 131 | commit_sha, 132 | e 133 | ) 134 | raise 135 | 136 | def get_pr_patch(self, pr_id): 137 | """ 138 | Retrieve the patch content of a pull request. 139 | 140 | Args: 141 | pr_id (int): The pull request ID. 142 | 143 | Returns: 144 | str: The patch content of the pull request. 145 | """ 146 | try: 147 | url = f"https://api.github.com/repos/{self.repo_name}/pulls/{pr_id}" 148 | headers = { 149 | 'Authorization': f"token {os.getenv('GITHUB_TOKEN')}", 150 | 'Accept': 'application/vnd.github.v3.diff' 151 | } 152 | response = requests.get(url, headers=headers, timeout=60) 153 | response.raise_for_status() 154 | logging.info("Retrieved patch for PR ID: %s", pr_id) 155 | return response.text 156 | except requests.RequestException as e: 157 | logging.error("Error retrieving patch for PR ID %s: %s", pr_id, e) 158 | raise 159 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GenAI Code Review 2 | 3 | This project aims to automate code review using the GPT language model. It integrates with Github Actions and, upon receiving a Pull Request, automatically submits each code change to GPT for review. 4 | 5 | # Setup 6 | 7 | The following steps will guide you in setting up the code review automation with GPT. 8 | 9 | ## Prerequisites 10 | Before you begin, you need to have the following: 11 | 12 | - An OpenAI API Key. You will need a personal API key from OpenAI which you can get here: https://openai.com/api/. 
To get an OpenAI API key, sign up for an account on the OpenAI website https://openai.com/signup/. Once you have signed up, you can create a new API key from your account settings.
- A Github account and a Github repository where you want to use the code review automation.

### Step 1: Create a Secret for your OpenAI API Key

Create a secret for your OpenAI API Key in your Github repository or organization with the name `openai_api_key`. This secret will be used to authenticate with the OpenAI API.

You can do this by going to your repository/organization's settings, navigating to Secrets, creating a new secret with the name `openai_api_key`, and pasting your OpenAI API key as the value.

### Step 2: Adjust Permissions

Then you need to set up your project's permissions so that the Github Actions can write comments on Pull Requests. You can read more about this here: [automatic-token-authentication](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token)

### Step 3: Create a new Github Actions workflow in your repository in `.github/workflows/chatgpt-review.yaml`.
A sample workflow is given below:

```
on:
  pull_request:
    types: [opened, synchronize]

jobs:
  code_review_job:
    runs-on: ubuntu-latest
    name: ChatGPT Code Review
    steps:
      - name: GenAI Code Review
        uses: cirolini/genai-code-review@v2
        with:
          openai_api_key: ${{ secrets.openai_api_key }}
          github_token: ${{ secrets.GITHUB_TOKEN }}
          github_pr_id: ${{ github.event.number }}
          openai_model: "gpt-3.5-turbo" # optional
          openai_temperature: 0.5 # optional
          openai_max_tokens: 2048 # optional
          mode: files # files or patch
          language: en # optional, default is 'en'
          custom_prompt: "" # optional
```

In the above workflow, the pull_request event triggers the workflow whenever a pull request is opened or synchronized. The workflow runs on the ubuntu-latest runner and uses the cirolini/genai-code-review@v2 action.

The openai_api_key is passed from the secrets context, and the github_token is also passed from the secrets context. The github_pr_id is passed from the github.event.number context. The other three input parameters, openai_model, openai_temperature, and openai_max_tokens, are optional and have default values.

## Configuration Parameters

### `openai_model`
- **Description**: The OpenAI model to use for generating responses.
- **Default**: `"gpt-3.5-turbo"`
- **Options**: Models like `gpt-4o`, `gpt-4-turbo`, etc.

### `openai_temperature`
- **Description**: Controls the creativity of the AI's responses. Higher values make the output more random, while lower values make it more focused and deterministic.
- **Default**: `0.5`
- **Range**: `0.0` to `1.0`

### `openai_max_tokens`
- **Description**: The maximum number of tokens to generate in the completion.
- **Default**: `2048`
- **Range**: Up to the model's maximum context length.
71 | 72 | ### `mode` 73 | - **Description**: Determines the method of analysis for the pull request. 74 | - **Options**: 75 | - `files`: Analyzes the files changed in the last commit. 76 | - `patch`: Analyzes the patch content. 77 | 78 | ### `language` 79 | - **Description**: The language in which the review comments will be written. 80 | - **Default**: `en` (English) 81 | - **Options**: Any valid language code, e.g., `pt-br` for Brazilian Portuguese. 82 | 83 | ### `custom_prompt` 84 | - **Description**: Custom instructions for the AI to follow when generating the review. 85 | - **Default**: `""` (empty) 86 | - **Usage**: Provide specific guidelines or focus areas for the AI's code review. 87 | 88 | 89 | ## How it works 90 | 91 | ### files 92 | This action is triggered when a pull request is opened or updated. The action authenticates with the OpenAI API using the provided API key, and with the Github API using the provided token. It then selects the repository using the provided repository name, and the pull request ID. 93 | For each commit in the pull request, it gets the modified files, gets the file name and content, sends the code to ChatGPT for an explanation, and adds a comment to the pull request with ChatGPT's response. 94 | 95 | ### patch 96 | Every PR has a file called patch which is where the difference between 2 files, the original and the one that was changed, is, this strategy consists of reading this file and asking the AI to summarize the changes made to it. 97 | 98 | Comments will appear like this: 99 | 100 | ![genaicodereview](img/genai_code_review.png "GenAI Code Review") 101 | 102 | ## Custom Prompt 103 | 104 | ### Overview 105 | 106 | The `custom_prompt` parameter allows users to tailor the AI's review to specific needs. By providing custom instructions, users can focus the review on particular aspects or request additional information. This flexibility enhances the usefulness of the AI-generated review comments. 

### How to Use

To use a custom prompt, simply provide a string with your instructions. For example, to ask the AI to rate the code on a scale of 1 to 10, set the `custom_prompt` parameter as follows:

```yaml
custom_prompt: "Give a rating from 1 to 10 for this code:"
```

### Potential
Using a custom prompt can direct the AI to focus on specific areas, such as:

* Code quality and readability
* Security vulnerabilities
* Performance optimizations
* Adherence to coding standards
* Specific concerns or questions about the code

## Implementation in Code
The custom_prompt is integrated into the review generation as shown:

```
if custom_prompt:
    logging.info(f"Using custom prompt: {custom_prompt}")
    return f"{custom_prompt}\n### Code\n```{content}```\n\nWrite this code review in the following {language}:\n\n"
return (f"Please review the following code for clarity, efficiency, and adherence to best practices. "
        f"Identify any ar...
```

This feature allows you to harness the power of AI in a way that best suits your specific code review requirements.

## Security and Privacy

When sending code to the ChatGPT language model, it is important to consider the security and privacy of the code: user data may be collected and used to train and improve the model, so proper caution and privacy policies should be in place. OpenAI takes security seriously and implements measures to protect customer data, such as encryption of data in transit and at rest, and regular security audits and penetration testing. However, it is still recommended to take appropriate precautions when sending sensitive or confidential code, such as removing or obscuring any sensitive information before sending it to the model.
Additionally, it is a good practice to use a unique API key for each project and to keep the API key secret, for example by storing it in a Github secret. This way, if the API key is ever compromised, it can be easily revoked, limiting the potential impact on the user's projects. 141 | 142 | # Built With 143 | - [OpenAI](https://openai.com/) - The AI platform used 144 | - [Github Actions](https://github.com/features/actions) - Automation platform 145 | 146 | ## Authors 147 | - **CiroLini** - [cirolini](https://github.com/cirolini) 148 | 149 | ## Contributors 150 | - **Glauber Borges** - [glauberborges](https://github.com/glauberborges) 151 | 152 | # License 153 | This project is licensed under the MIT License - see the LICENSE file for details. 154 | -------------------------------------------------------------------------------- /src/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Main module for handling the code review process using ChatGPT and GitHub API. 3 | """ 4 | 5 | import logging 6 | from clients.github_client import GithubClient 7 | from clients.openai_client import OpenAIClient 8 | from utils.helpers import get_env_variable 9 | 10 | # Configure logging 11 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 12 | 13 | def main(): 14 | """ 15 | Main function to handle the code review process based on the mode specified. 
16 | """ 17 | try: 18 | env_vars = get_env_vars() 19 | except ValueError as e: 20 | logging.error("Environment variable error: %s", e) 21 | return 22 | 23 | github_client = GithubClient(env_vars['GITHUB_TOKEN']) 24 | openai_client = OpenAIClient(env_vars['OPENAI_MODEL'], 25 | env_vars['OPENAI_TEMPERATURE'], 26 | env_vars['OPENAI_MAX_TOKENS']) 27 | 28 | language = env_vars.get('LANGUAGE', 'en') 29 | custom_prompt = env_vars.get('CUSTOM_PROMPT') 30 | 31 | if env_vars['MODE'] == "files": 32 | process_files(github_client, 33 | openai_client, 34 | env_vars['GITHUB_PR_ID'], 35 | language, 36 | custom_prompt) 37 | elif env_vars['MODE'] == "patch": 38 | process_patch(github_client, 39 | openai_client, 40 | env_vars['GITHUB_PR_ID'], 41 | language, 42 | custom_prompt) 43 | else: 44 | logging.error("Invalid mode. Choose either 'files' or 'patch'.") 45 | raise ValueError("Invalid mode. Choose either 'files' or 'patch'.") 46 | 47 | def get_env_vars(): 48 | """ 49 | Retrieve required and optional environment variables and ensure they are not empty. 50 | Convert specific variables to their appropriate types. 51 | 52 | Returns: 53 | dict: A dictionary of environment variables. 54 | 55 | Raises: 56 | ValueError: If any required environment variable is missing, empty, or has an invalid type. 
57 | """ 58 | variables = { 59 | 'OPENAI_API_KEY': (str, True), 60 | 'GITHUB_TOKEN': (str, True), 61 | 'GITHUB_PR_ID': (int, True), 62 | 'OPENAI_MODEL': (str, True), 63 | 'OPENAI_TEMPERATURE': (float, True), 64 | 'OPENAI_MAX_TOKENS': (int, True), 65 | 'MODE': (str, True), 66 | 'LANGUAGE': (str, True), 67 | 'CUSTOM_PROMPT': (str, False) 68 | } 69 | 70 | env_vars = {} 71 | for var, (var_type, required) in variables.items(): 72 | value = get_env_variable(var, required) 73 | if value: 74 | try: 75 | env_vars[var] = var_type(value) 76 | logging.info( 77 | "%s (%s) retrieved and converted successfully.", 78 | var, 79 | var_type.__name__ 80 | ) 81 | except ValueError as e: 82 | logging.error("%s must be of type %s. Error: %s", var, var_type.__name__, e) 83 | raise ValueError(f"{var} must be of type {var_type.__name__}.") from e 84 | else: 85 | env_vars[var] = None 86 | 87 | return env_vars 88 | 89 | def process_files(github_client, openai_client, pr_id, language, custom_prompt): 90 | """ 91 | Process the files changed in the last commit of the pull request. 92 | 93 | Args: 94 | github_client (GithubClient): The GitHub client instance. 95 | openai_client (OpenAIClient): The OpenAI client instance. 96 | pr_id (int): The pull request ID. 97 | language (str): The language for the review. 98 | custom_prompt (str, optional): Custom prompt for the code review. 99 | """ 100 | logging.info("Processing files for PR ID: %s", pr_id) 101 | pull_request = github_client.get_pr(pr_id) 102 | commits = list(pull_request.get_commits()) 103 | 104 | if not commits: 105 | logging.info("No commits found.") 106 | return 107 | 108 | last_commit = commits[-1] 109 | analyze_commit_files(github_client, openai_client, pr_id, last_commit, language, custom_prompt) 110 | 111 | def process_patch(github_client, openai_client, pr_id, language, custom_prompt): 112 | """ 113 | Process the patch content of a pull request. 114 | 115 | Args: 116 | github_client (GithubClient): The GitHub client instance. 
117 | openai_client (OpenAIClient): The OpenAI client instance. 118 | pr_id (int): The pull request ID. 119 | language (str): The language for the review. 120 | custom_prompt (str, optional): Custom prompt for the code review. 121 | """ 122 | logging.info("Processing patch for PR ID: %s", pr_id) 123 | patch_content = github_client.get_pr_patch(pr_id) 124 | if not patch_content: 125 | logging.info("Patch file does not contain any changes.") 126 | github_client.post_comment(pr_id, "Patch file does not contain any changes") 127 | return 128 | analyze_patch(github_client, openai_client, pr_id, patch_content, language, custom_prompt) 129 | 130 | def analyze_commit_files(github_client, openai_client, pr_id, commit, language, custom_prompt): 131 | """ 132 | Analyze all files in a given commit together and post a single comment. 133 | 134 | Args: 135 | github_client (GithubClient): The GitHub client instance. 136 | openai_client (OpenAIClient): The OpenAI client instance. 137 | pr_id (int): The pull request ID. 138 | commit (Commit): The commit object. 139 | language (str): The language for the review. 140 | custom_prompt (str, optional): Custom prompt for the code review. 141 | """ 142 | logging.info("Analyzing files in commit: %s", commit.sha) 143 | files = github_client.get_commit_files(commit) 144 | 145 | combined_content = "" 146 | for file in files: 147 | logging.info("Processing file: %s", file.filename) 148 | content = github_client.get_file_content(commit.sha, file.filename) 149 | combined_content += f"\n### File: {file.filename}\n```{content}```\n" 150 | 151 | review = openai_client.generate_response(create_review_prompt(combined_content, 152 | language, 153 | custom_prompt)) 154 | github_client.post_comment(pr_id, f"ChatGPT's code review:\n {review}") 155 | 156 | def analyze_patch(github_client, openai_client, pr_id, patch_content, language, custom_prompt): 157 | """ 158 | Analyze the patch content of a pull request and post a single comment. 
159 | 160 | Args: 161 | github_client (GithubClient): The GitHub client instance. 162 | openai_client (OpenAIClient): The OpenAI client instance. 163 | pr_id (int): The pull request ID. 164 | patch_content (str): The patch content. 165 | language (str): The language for the review. 166 | custom_prompt (str, optional): Custom prompt for the code review. 167 | """ 168 | logging.info("Analyzing patch content for PR ID: %s", pr_id) 169 | 170 | combined_diff = "" 171 | for diff_text in patch_content.split("diff"): 172 | if diff_text: 173 | try: 174 | file_name = diff_text.split("b/")[1].splitlines()[0] 175 | logging.info("Processing diff for file: %s", file_name) 176 | combined_diff += f"\n### File: {file_name}\n```diff\n{diff_text}```\n" 177 | except (TypeError, ValueError) as e: 178 | logging.error("Error processing diff for file: %s: %s", file_name, str(e)) 179 | github_client.post_comment( 180 | pr_id, 181 | f"ChatGPT was unable to process the response about {file_name}: {str(e)}" 182 | ) 183 | 184 | review_prompt = create_review_prompt(combined_diff, language, custom_prompt) 185 | summary = openai_client.generate_response(review_prompt) 186 | github_client.post_comment(pr_id, f"ChatGPT's code review:\n {summary}") 187 | 188 | def create_review_prompt(content, language, custom_prompt=None): 189 | """ 190 | Create a review prompt for the OpenAI API. 191 | 192 | Args: 193 | content (str): The content of the code to be reviewed. 194 | language (str): The language for the review. 195 | custom_prompt (str, optional): Custom prompt for the code review. 196 | 197 | Returns: 198 | str: The review prompt. 199 | """ 200 | if custom_prompt: 201 | logging.info("Using custom prompt: %s", custom_prompt) 202 | return ( 203 | f"{custom_prompt}\n" 204 | "### Code\n" 205 | f"```{content}```\n\n" 206 | f"Write this code review in the following {language}:\n\n" 207 | ) 208 | return ( 209 | f"Please review the following code for clarity, efficiency, and adherence to best practices." 
210 | f"Identify any areas for improvement, suggest specific optimizations, and note potential bugs or security vulnerabilities. " 211 | f"Additionally, provide suggestions for how to address the identified issues, with a focus on maintainability and scalability. " 212 | f"Include examples of code where relevant. Use markdown formatting for your response:\n\n" 213 | f"Write this code review in the following {language}:\n\n" 214 | f"Do not write the code or guidelines in the review. Only write the review itself.\n\n" 215 | f"### Code\n```{content}```\n\n" 216 | f"### Review Guidelines\n" 217 | f"1. **Clarity**: Is the code easy to understand?\n" 218 | f"2. **Efficiency**: Are there any performance improvements?\n" 219 | f"3. **Best Practices**: Does the code follow standard coding conventions?\n" 220 | f"4. **Bugs/Security**: Are there any potential bugs or security vulnerabilities?\n" 221 | f"5. **Maintainability**: Is the code easy to maintain and scale?\n\n" 222 | f"### Review Example\n" 223 | f"1. **Issue**: The variable names are not descriptive.\n" 224 | f" **Suggestion**: Use more descriptive variable names that reflect their purpose. For example:\n" 225 | f" ```python\n" 226 | f" # Instead of this:\n" 227 | f" x = 5\n" 228 | f" # Use this:\n" 229 | f" item_count = 5\n" 230 | f" ```\n" 231 | f"2. **Issue**: There is a potential SQL injection vulnerability.\n" 232 | f" **Suggestion**: Use parameterized queries to prevent SQL injection. For example:\n" 233 | f" ```python\n" 234 | f" # Instead of this:\n" 235 | f" cursor.execute(f'SELECT * FROM users WHERE username = (username)')\n" 236 | f" # Use this:\n" 237 | f" cursor.execute('SELECT * FROM users WHERE username = %s', (username,))\n" 238 | f" ```" 239 | ) 240 | 241 | 242 | if __name__ == "__main__": 243 | main() 244 | --------------------------------------------------------------------------------