├── tests
│   ├── __init__.py
│   ├── test_files
│   │   ├── cc_resp.txt
│   │   └── cc_resp_fail.txt
│   ├── conftest.py
│   └── test_wolverine.py
├── .gitignore
├── .env.sample
├── wolverine
│   ├── __init__.py
│   ├── __main__.py
│   └── wolverine.py
├── requirements.txt
├── examples
│   ├── buggy_script_2.py
│   ├── buggy_script.js
│   └── buggy_script.py
├── .github
│   └── workflows
│       └── build.yml
├── LICENSE
├── prompt.txt
└── README.md

/tests/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | venv
2 | .venv
3 | .env
4 | env/
5 | .vscode/
--------------------------------------------------------------------------------
/.env.sample:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=your_api_key
2 | #DEFAULT_MODEL=gpt-3.5-turbo
--------------------------------------------------------------------------------
/wolverine/__init__.py:
--------------------------------------------------------------------------------
1 | from .wolverine import apply_changes, json_validated_response  # noqa
2 | 
--------------------------------------------------------------------------------
/wolverine/__main__.py:
--------------------------------------------------------------------------------
1 | import fire
2 | 
3 | from .wolverine import main
4 | 
5 | if __name__ == "__main__":
6 |     fire.Fire(main)
7 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp==3.8.4
2 | aiosignal==1.3.1
3 | async-timeout==4.0.2
4 | attrs==23.1.0
5 | certifi==2022.12.7
6 | charset-normalizer==3.1.0
7 | exceptiongroup==1.1.1
8 | fire==0.5.0
9 | frozenlist==1.3.3
10 | idna==3.4
11 | iniconfig==2.0.0
12 | multidict==6.0.4
13 | openai==0.27.4
14 | packaging==23.1
15 | pluggy==1.0.0
16 | pytest==7.3.1
17 | pytest-mock==3.10.0
18 | python-dotenv==1.0.0
19 | requests==2.29.0
20 | ruff==0.0.263
21 | six==1.16.0
22 | termcolor==2.3.0
23 | tomli==2.0.1
24 | tqdm==4.65.0
25 | urllib3==1.26.15
26 | yarl==1.9.2
--------------------------------------------------------------------------------
/examples/buggy_script_2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import fire
3 | 
4 | """
5 | Run With: `python -m wolverine examples/buggy_script_2.py`
6 | Purpose: Fix singleton code bug in Python
7 | """
8 | 
9 | class SingletonClass(object):
10 |     def __new__(cls):
11 |         cls.instance = super(SingletonClass, cls).__new__(cls)
12 |         return cls.instance
13 | 
14 | def check_singleton_works():
15 |     """
16 |     check that singleton pattern is working
17 |     """
18 |     singleton = SingletonClass()
19 |     new_singleton = SingletonClass()
20 |     singleton.a = 1
21 |     new_singleton.a = 2
22 |     should_be_4 = (singleton.a + new_singleton.a)
23 |     assert should_be_4 == 4
24 | 
25 | if __name__ == "__main__":
26 |     fire.Fire(check_singleton_works)
27 | 
28 | 
--------------------------------------------------------------------------------
/examples/buggy_script.js:
--------------------------------------------------------------------------------
1 | const subtractNumbers = (a, b) => {
2 |   return a - b;
3 | };
4 | 
5 | const multiplyNumbers = (a, b) => {
6 |   return a * b;
7 | };
8 | 
9 | const divideNumbers = (a, b) => {
10 |   return a / b;
11 | };
12 | 
13 | function calculate(operation, num1, num2) {
14 |   let result = '';
15 |   if (operation == 'add') {
16 |     result = addNumbers(num1, num2);
17 |   } else if (operation == 'subtract') {
18 |     result = subtractNumbers(num1, num2);
19 |   } else if (operation == 'multiply') {
20 |     result = multiplyNumbers(num1, num2);
21 |   } else if (operation == 'divide') {
22 |     result = divideNumbers(num1, num2);
23 |   } else {
24 |     console.log('Invalid operation');
25 |   }
26 | 
27 |   return res;
28 | }
29 | 
30 | const [, , operation, num1, num2] = process.argv;
31 | calculate(operation, num1, num2);
32 | 
--------------------------------------------------------------------------------
/tests/test_files/cc_resp.txt:
--------------------------------------------------------------------------------
1 | Explanation: The function `subtract_numbers` is never defined in the script, causing a `NameError` when it is called in the `calculate` function.
2 | 
3 | [
4 | {"explanation": "The 'subtract_numbers' function is never defined in the script."},
5 | {"operation": "InsertAfter", "line": 12, "content": "\n# Define subtract_numbers function\ndef subtract_numbers(a, b):\n    return a - b\n"},
6 | {"operation": "Replace", "line": 18, "content": "    if operation == \"add\":\n        result = add_numbers(num1, num2)\n    elif operation == \"subtract\":\n        result = subtract_numbers(num1, num2)\n    elif operation == \"multiply\":\n        result = multiply_numbers(num1, num2)\n    elif operation == \"divide\":\n        result = divide_numbers(num1, num2)\n    else:\n        print(\"Invalid operation\")\n"},
7 | {"operation": "Replace", "line": 30, "content": "    return result\n"}
8 | ]
--------------------------------------------------------------------------------
/tests/test_files/cc_resp_fail.txt:
--------------------------------------------------------------------------------
1 | Explanation: The function `subtract_numbers` is never defined in the script, causing a `NameError` when it is called in the `calculate` function.
2 | 
3 | [
4 | {"explanation": "The 'subtract_numbers' function is never defined in the script."},
5 | {"operation": "InsertAfter", "line": 12, "content": "\n# Define subtract_numbers function\ndef subtract_numbers(a, b):\n    return a - b\n"},
6 | {"operation": "Replace", "line": 18, "content": "    if operation == \"add\":\n        result = add_numbers(num1, num2)\n    elif operation == \"subtract\":\n        result = subtract_numbers(num1, num2)\n    elif operation == \"multiply\":\n        result = multiply_numbers(num1, num2)\n    elif operation == \"divide\":\n        result = divide_numbers(num1, num2)\n    else:\n        print(\"Invalid operation\")\n"},
7 | {"operation": "Replace", "line": 30, "content": "    return result\n"}
8 | 
--------------------------------------------------------------------------------
/examples/buggy_script.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import fire
3 | """
4 | Run With: `python -m wolverine examples/buggy_script.py "subtract" 20 3`
5 | Purpose: Show self-regenerating fixing of subtraction operator
6 | """
7 | 
8 | def add_numbers(a, b):
9 |     return a + b
10 | 
11 | def multiply_numbers(a, b):
12 |     return a * b
13 | 
14 | def divide_numbers(a, b):
15 |     return a / b
16 | 
17 | 
18 | def calculate(operation, num1, num2):
19 |     if operation == "add":
20 |         result = add_numbers(num1, num2)
21 |     elif operation == "subtract":
22 |         result = subtract_numbers(num1, num2)
23 |     elif operation == "multiply":
24 |         result = multiply_numbers(num1, num2)
25 |     elif operation == "divide":
26 |         result = divide_numbers(num1, num2)
27 |     else:
28 |         print("Invalid operation")
29 | 
30 |     return res
31 | 
32 | 
33 | if __name__ == "__main__":
34 |     fire.Fire(calculate)
35 | 
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """
2 | Conftest
3 | """
4 | import os
5 | import pytest
6 | import tempfile
7 | 
8 | 
9 | TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
10 | 
11 | 
12 | @pytest.fixture(scope='function')
13 | def temp_file():
14 |     # Create a temporary file
15 |     with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
16 |         f.write("first line\nsecond line\nthird line")
17 |         file_path = f.name
18 |     yield file_path
19 |     # Clean up the temporary file
20 |     os.remove(file_path)
21 | 
22 | 
23 | def mock_open_ai_response_object(mocker, content: str):
24 |     """
25 |     Mocks the response object from the OpenAI API.
26 |     """
27 |     mock_generator_object = mocker.MagicMock()
28 |     mock_message_object = mocker.MagicMock()
29 |     mock_message_object.configure_mock(**{"message.content": content})
30 |     mock_generator_object.configure_mock(**{"choices": [mock_message_object]})
31 |     return mock_generator_object
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Python package
2 | 
3 | on: [push]
4 | 
5 | jobs:
6 |   build:
7 |     runs-on: ubuntu-latest
8 |     strategy:
9 |       matrix:
10 |         python-version: ["3.10", "3.11"]
11 | 
12 |     steps:
13 |       - uses: actions/checkout@v3
14 |       - name: Set up Python ${{ matrix.python-version }}
15 |         uses: actions/setup-python@v4
16 |         with:
17 |           python-version: ${{ matrix.python-version }}
18 |       - name: Install dependencies
19 |         run: |
20 |           python -m pip install --upgrade pip
21 |           pip install -r requirements.txt
22 |       - name: Lint with ruff
23 |         run: |
24 |           # stop the build if there are Python syntax errors or undefined names
25 |           ruff --format=github --select=E9,F63,F7,F82 --target-version=py37 --exclude examples/ .
26 |           # default set of ruff rules with GitHub Annotations
27 |           ruff --format=github --target-version=py37 --exclude examples/ .
28 |       - name: Test with pytest
29 |         run: |
30 |           pytest
31 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) [2023] [BioBootloader]
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/prompt.txt:
--------------------------------------------------------------------------------
1 | You are part of an elite automated software fixing team. You will be given a script followed by the arguments it was provided and the stacktrace of the error it produced. Your job is to figure out what went wrong and suggest changes to the code.
2 | 
3 | Because you are part of an automated system, the format you respond in is very strict. You must provide changes in JSON format, using one of 3 actions: 'Replace', 'Delete', or 'InsertAfter'. 'Delete' will remove that line from the code. 'Replace' will replace the existing line with the content you provide. 'InsertAfter' will insert the new lines you provide after the code already at the specified line number. For multi-line insertions or replacements, provide the content as a single string with '\n' as the newline character. The first line in each file is given line number 1. Edits will be applied in reverse line order so that line numbers won't be impacted by other edits.
4 | 
5 | In addition to the changes, please also provide short explanations of what went wrong. A single explanation is required, but if you think it's helpful, feel free to provide more explanations for groups of more complicated changes. Be careful to use proper indentation and spacing in your changes. An example response could be:
6 | 
7 | Be ABSOLUTELY SURE to include the CORRECT INDENTATION when making replacements.
8 | 
9 | example response:
10 | [
11 | {"explanation": "this is just an example, this would usually be a brief explanation of what went wrong"},
12 | {"operation": "InsertAfter", "line": 10, "content": "x = 1\ny = 2\nz = x * y"},
13 | {"operation": "Delete", "line": 15, "content": ""},
14 | {"operation": "Replace", "line": 18, "content": "    x += 1"},
15 | {"operation": "Delete", "line": 20, "content": ""}
16 | ]
17 | 
18 | From now on, your response must be only the JSON object, no talking, no comments.
19 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DEPRECATED: Try Mentat instead! https://github.com/AbanteAI/mentat
2 | 
3 | 
4 | # Wolverine
5 | 
6 | ## About
7 | 
8 | Give your Python scripts regenerative healing abilities!
9 | 
10 | Run your scripts with Wolverine and, when they crash, GPT-4 edits them and explains what went wrong. Even if you have many bugs, it will rerun the script repeatedly until everything is fixed.
11 | 
12 | For a quick demonstration see my [demo video on Twitter](https://twitter.com/bio_bootloader/status/1636880208304431104).
13 | 
14 | ## Setup
15 | 
16 |     python3 -m venv venv
17 |     source venv/bin/activate
18 |     pip install -r requirements.txt
19 |     cp .env.sample .env
20 | 
21 | Add your OpenAI API key to `.env`
22 | 
23 | _warning!_ By default wolverine uses GPT-4 and may make many repeated calls to the API.
24 | 
25 | ## Example Usage
26 | 
27 | To run with gpt-4 (the default, tested option):
28 | 
29 |     python -m wolverine examples/buggy_script.py "subtract" 20 3
30 | 
31 | You can also run with other models, but be warned they may not adhere to the edit format as well:
32 | 
33 |     python -m wolverine --model=gpt-3.5-turbo examples/buggy_script.py "subtract" 20 3
34 | 
35 | If you want to use GPT-3.5 by default instead of GPT-4, uncomment the default model line in `.env`:
36 | 
37 |     DEFAULT_MODEL=gpt-3.5-turbo
38 | 
39 | You can also pass the `--confirm=True` flag, which asks for a `y`/`n` confirmation before changes are written to the file. Without the flag, changes are applied automatically:
40 | 
41 |     python -m wolverine examples/buggy_script.py "subtract" 20 3 --confirm=True
42 | 
43 | ## Environment variables
44 | 
45 | | env name            | description                                                            | default value |
46 | | ------------------- | ---------------------------------------------------------------------- | ------------- |
47 | | OPENAI_API_KEY      | OpenAI API key                                                         | None          |
48 | | DEFAULT_MODEL       | GPT model to use                                                       | "gpt-4"       |
49 | | VALIDATE_JSON_RETRY | Number of retries when requesting the OpenAI API (-1 means unlimited)  | -1            |
50 | 
51 | ## Future Plans
52 | 
53 | This is just a quick prototype I threw together in a few hours. There are many possible extensions, and contributions are welcome:
54 | 
55 | - add flags to customize usage, such as asking for user confirmation before running changed code
56 | - further iterations on the edit format that GPT responds in. Currently it struggles a bit with indentation, but I'm sure that can be improved
57 | - a suite of example buggy files that we can test prompts on to ensure reliability and measure improvement
58 | - multiple files / codebases: send GPT everything that appears in the stacktrace
59 | - graceful handling of large files - should we just send GPT relevant classes / functions?
60 | - extension to languages other than Python
61 | 
62 | ## Star History
63 | 
64 | [![Star History Chart](https://api.star-history.com/svg?repos=biobootloader/wolverine&type=Date)](https://star-history.com/#biobootloader/wolverine)
--------------------------------------------------------------------------------
/tests/test_wolverine.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | from wolverine import apply_changes, json_validated_response
4 | 
5 | from .conftest import (
6 |     mock_open_ai_response_object,
7 |     TEST_FILES_DIR
8 | )
9 | 
10 | 
11 | def test_apply_changes_replace(temp_file):
12 |     # Make a "replace" change to the second line
13 |     changes = [
14 |         {"operation": "Replace", "line": 2, "content": "new second line"}
15 |     ]
16 |     apply_changes(temp_file, changes)
17 | 
18 |     # Check that the file was updated correctly
19 |     with open(temp_file) as f:
20 |         content = f.read()
21 |         assert content == "first line\nnew second line\nthird line"
22 | 
23 | 
24 | def test_apply_changes_delete(temp_file):
25 |     # Make a "delete" change to the third line
26 |     changes = [
27 |         {"operation": "Delete", "line": 3, "content": ""},
28 |     ]
29 |     apply_changes(temp_file, changes)
30 | 
31 |     # Check that the file was updated correctly
32 |     with open(temp_file) as f:
33 |         content = f.read()
34 |         assert content == "first line\nsecond line\n"
35 | 
36 | 
37 | def test_apply_changes_insert(temp_file):
38 |     # Make an "insert" change after the second line
39 |     changes = [
40 |         {"operation": "InsertAfter", "line": 2, "content": "inserted line"},
41 |     ]
42 |     apply_changes(temp_file, changes)
43 | 
44 |     # Check that the file was updated correctly
45 |     with open(temp_file) as f:
46 |         content = f.read()
47 |         assert content == 'first line\nsecond line\ninserted line\nthird line'
48 | 
49 | 
50 | @pytest.mark.parametrize("chat_completion_response, nb_retry, fail", [
51 |     (os.path.join(TEST_FILES_DIR, "cc_resp.txt"), 3, False),
52 |     (os.path.join(TEST_FILES_DIR, "cc_resp_fail.txt"), 3, True),
53 |     (os.path.join(TEST_FILES_DIR, "cc_resp_fail.txt"), 10, True),
54 | ])
55 | def test_json_validated_response(mocker, chat_completion_response, nb_retry, fail):
56 |     # Open the test file
57 |     with open(chat_completion_response, 'r') as file:
58 |         response = file.read()
59 |     # Mock the OpenAI chat completion API call
60 |     mocker.patch(
61 |         "openai.ChatCompletion.create",
62 |         return_value=mock_open_ai_response_object(mocker=mocker, content=response))
63 |     # ChatCompletion returned an invalid response
64 |     if fail:
65 |         with pytest.raises(Exception) as err:
66 |             json_response = json_validated_response("gpt-4", [
67 |                 {
68 |                     "role": "user",
69 |                     "content": "prompt"
70 |                 }
71 |             ],
72 |                 nb_retry=nb_retry
73 |             )
74 |         # Check that the exception was raised once nb_retry was exhausted
75 |         assert "No valid json response found" in str(err.value)
76 |     else:
77 |         json_response = json_validated_response("gpt-4", [
78 |             {
79 |                 "role": "user",
80 |                 "content": "prompt"
81 |             }
82 |         ],
83 |             nb_retry=nb_retry
84 |         )
85 |         assert json_response
86 | 
--------------------------------------------------------------------------------
/wolverine/wolverine.py:
--------------------------------------------------------------------------------
1 | import difflib
2 | import json
3 | import os
4 | import shutil
5 | import subprocess
6 | import sys
7 | 
8 | import openai
9 | 
10 | from typing import List, Dict, Tuple
11 | from termcolor import cprint
12 | from dotenv import load_dotenv
13 | 
14 | # Set up the OpenAI API
15 | load_dotenv()
16 | openai.api_key = os.getenv("OPENAI_API_KEY")
17 | 
18 | # Default model is GPT-4
19 | DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
20 | 
21 | # Number of retries for json_validated_response; the default of -1 retries indefinitely
22 | VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))
23 | 
24 | # Read the system prompt
25 | with open(os.path.join(os.path.dirname(__file__), "..", "prompt.txt"), "r") as f:
26 |     SYSTEM_PROMPT = f.read()
27 | 
28 | 
29 | def run_script(script_name: str, script_args: List) -> Tuple[str, int]:
30 |     """
31 |     Run the script with python if script_name ends with ".py",
32 |     otherwise run it with node. Returns (output, returncode).
33 |     """
34 |     script_args = [str(arg) for arg in script_args]
35 |     subprocess_args = (
36 |         [sys.executable, script_name, *script_args]
37 |         if script_name.endswith(".py")
38 |         else ["node", script_name, *script_args]
39 |     )
40 | 
41 |     try:
42 |         result = subprocess.check_output(subprocess_args, stderr=subprocess.STDOUT)
43 |     except subprocess.CalledProcessError as error:
44 |         return error.output.decode("utf-8"), error.returncode
45 |     return result.decode("utf-8"), 0
46 | 
47 | 
48 | def json_validated_response(
49 |     model: str, messages: List[Dict], nb_retry: int = VALIDATE_JSON_RETRY
50 | ) -> Dict:
51 |     """
52 |     This function is needed because the API can return a non-JSON response.
53 |     This will run recursively VALIDATE_JSON_RETRY times.
54 |     If VALIDATE_JSON_RETRY is -1, it will run recursively until a valid json
55 |     response is returned.
56 |     """
57 |     json_response = {}
58 |     if nb_retry != 0:
59 |         response = openai.ChatCompletion.create(
60 |             model=model,
61 |             messages=messages,
62 |             temperature=0.5,
63 |         )
64 |         messages.append(response.choices[0].message)
65 |         content = response.choices[0].message.content
66 |         # see if json can be parsed
67 |         try:
68 |             json_start_index = content.index(
69 |                 "["
70 |             )  # find the starting position of the JSON data
71 |             json_data = content[
72 |                 json_start_index:
73 |             ]  # extract the JSON data from the response string
74 |             json_response = json.loads(json_data)
75 |             return json_response
76 |         except (json.decoder.JSONDecodeError, ValueError) as e:
77 |             cprint(f"{e}. Re-running the query.", "red")
78 |             # debug
79 |             cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
80 |             # append a user message that says the json is invalid
81 |             messages.append(
82 |                 {
83 |                     "role": "user",
84 |                     "content": (
85 |                         "Your response could not be parsed by json.loads. "
86 |                         "Please restate your last message as pure JSON."
87 |                     ),
88 |                 }
89 |             )
90 |             # decrement nb_retry
91 |             nb_retry -= 1
92 |             # rerun the API call
93 |             return json_validated_response(model, messages, nb_retry)
94 |         except Exception as e:
95 |             cprint(f"Unknown error: {e}", "red")
96 |             cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
97 |             raise e
98 |     raise Exception(
99 |         f"No valid json response found after {VALIDATE_JSON_RETRY} tries. Exiting."
100 |     )
101 | 
102 | 
103 | def send_error_to_gpt(
104 |     file_path: str, args: List, error_message: str, model: str = DEFAULT_MODEL
105 | ) -> Dict:
106 |     with open(file_path, "r") as f:
107 |         file_lines = f.readlines()
108 | 
109 |     file_with_lines = []
110 |     for i, line in enumerate(file_lines):
111 |         file_with_lines.append(str(i + 1) + ": " + line)
112 |     file_with_lines = "".join(file_with_lines)
113 | 
114 |     prompt = (
115 |         "Here is the script that needs fixing:\n\n"
116 |         f"{file_with_lines}\n\n"
117 |         "Here are the arguments it was provided:\n\n"
118 |         f"{args}\n\n"
119 |         "Here is the error message:\n\n"
120 |         f"{error_message}\n"
121 |         "Please provide your suggested changes, and remember to stick to the "
122 |         "exact format as described above."
123 |     )
124 | 
125 |     # print(prompt)
126 |     messages = [
127 |         {
128 |             "role": "system",
129 |             "content": SYSTEM_PROMPT,
130 |         },
131 |         {
132 |             "role": "user",
133 |             "content": prompt,
134 |         },
135 |     ]
136 | 
137 |     return json_validated_response(model, messages)
138 | 
139 | 
140 | def apply_changes(file_path: str, changes: List, confirm: bool = False):
141 |     """
142 |     Pass changes as loaded json (list of dicts)
143 |     """
144 |     with open(file_path) as f:
145 |         original_file_lines = f.readlines()
146 | 
147 |     # Filter out explanation elements
148 |     operation_changes = [change for change in changes if "operation" in change]
149 |     explanations = [
150 |         change["explanation"] for change in changes if "explanation" in change
151 |     ]
152 | 
153 |     # Sort the changes in reverse line order
154 |     operation_changes.sort(key=lambda x: x["line"], reverse=True)
155 | 
156 |     file_lines = original_file_lines.copy()
157 |     for change in operation_changes:
158 |         operation = change["operation"]
159 |         line = change["line"]
160 |         content = change["content"]
161 | 
162 |         if operation == "Replace":
163 |             file_lines[line - 1] = content + "\n"
164 |         elif operation == "Delete":
165 |             del file_lines[line - 1]
166 |         elif operation == "InsertAfter":
167 |             file_lines.insert(line, content + "\n")
168 | 
169 |     # Print explanations
170 |     cprint("Explanations:", "blue")
171 |     for explanation in explanations:
172 |         cprint(f"- {explanation}", "blue")
173 | 
174 |     # Display changes diff
175 |     print("\nChanges to be made:")
176 |     diff = difflib.unified_diff(original_file_lines, file_lines, lineterm="")
177 |     for line in diff:
178 |         if line.startswith("+"):
179 |             cprint(line, "green", end="")
180 |         elif line.startswith("-"):
181 |             cprint(line, "red", end="")
182 |         else:
183 |             print(line, end="")
184 | 
185 |     if confirm:
186 |         # check if user wants to apply changes or exit
187 |         confirmation = input("Do you want to apply these changes? (y/n): ")
188 |         if confirmation.lower() != "y":
189 |             print("Changes not applied")
190 |             sys.exit(0)
191 | 
192 |     with open(file_path, "w") as f:
193 |         f.writelines(file_lines)
194 |     print("Changes applied.")
195 | 
196 | 
197 | def check_model_availability(model):
198 |     available_models = [x["id"] for x in openai.Model.list()["data"]]
199 |     if model not in available_models:
200 |         print(
201 |             f"Model {model} is not available. Perhaps try running with "
202 |             "`--model=gpt-3.5-turbo` instead? You can also configure a "
203 |             "default model in the .env"
204 |         )
205 |         exit()
206 | 
207 | 
208 | def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):
209 |     if revert:
210 |         backup_file = script_name + ".bak"
211 |         if os.path.exists(backup_file):
212 |             shutil.copy(backup_file, script_name)
213 |             print(f"Reverted changes to {script_name}")
214 |             sys.exit(0)
215 |         else:
216 |             print(f"No backup file found for {script_name}")
217 |             sys.exit(1)
218 | 
219 |     # check if model is available
220 |     check_model_availability(model)
221 | 
222 |     # Make a backup of the original script
223 |     shutil.copy(script_name, script_name + ".bak")
224 | 
225 |     while True:
226 |         output, returncode = run_script(script_name, script_args)
227 | 
228 |         if returncode == 0:
229 |             cprint("Script ran successfully.", "blue")
230 |             print("Output:", output)
231 |             break
232 | 
233 |         else:
234 |             cprint("Script crashed. Trying to fix...", "blue")
235 |             print("Output:", output)
236 |             json_response = send_error_to_gpt(
237 |                 file_path=script_name,
238 |                 args=script_args,
239 |                 error_message=output,
240 |                 model=model,
241 |             )
242 | 
243 |             apply_changes(script_name, json_response, confirm=confirm)
244 |             cprint("Changes applied. Rerunning...", "blue")
245 | 
--------------------------------------------------------------------------------
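Usage sketch (not a file in the repo): since `wolverine/__init__.py` exports `apply_changes` and `json_validated_response`, the edit-application step can also be driven programmatically. A minimal sketch follows; the target path `some_script.py` and the edit list are hypothetical examples that follow the JSON edit format described in `prompt.txt`:

    # Minimal sketch: using wolverine as a library rather than via the CLI.
    # Assumes the wolverine package is importable and that some_script.py
    # (a hypothetical target file) exists on disk.
    from wolverine import apply_changes

    # A hypothetical edit list in the format described in prompt.txt.
    changes = [
        {"explanation": "Example: fix the second line of the target script."},
        {"operation": "Replace", "line": 2, "content": "print('fixed line')"},
        {"operation": "InsertAfter", "line": 3, "content": "print('appended line')"},
    ]

    # apply_changes filters out the explanation entries, applies the operations
    # in reverse line order, prints the explanations and a colored diff, and
    # writes the result back; confirm=True asks y/n before writing.
    apply_changes("some_script.py", changes, confirm=False)

Applying edits in reverse line order is what lets one response contain several edits: later operations never shift the line numbers that earlier-listed operations refer to.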