# File: AiderModify/ModifyCodeAider.py
#
# Drives an aider Coder session that rewrites the files in `testdir`
# according to a natural-language instruction.
import AiderModify.ModifyCodeAiderPrompts as prompts
import os
import time
import shutil
import git  # noqa: F401 -- used by the commented-out commit-hash snippet in run_aider()
import openai
from pathlib import Path
from aider.io import InputOutput
from aider import models
from aider.coders import Coder


def modify_code_aider(user_prompts, edit_purpose, testdir, model_name, edit_format,
                      tries, no_unit_tests, no_aider, verbose, commit_hash):
    """Run aider against every non-test, non-hidden file in `testdir`.

    Parameters:
        user_prompts: free-form instruction text from the user.
        edit_purpose: "design" or "code" -- selects the prompt template.
        testdir: directory whose files are edited in place.
        model_name: OpenAI model identifier, e.g. "gpt-3.5-turbo-0613".
        edit_format: aider edit format ("whole", "diff", ...) or a falsy
            value to use the model's default.
        tries: maximum number of aider passes.
        no_unit_tests: when True, stop after the first pass.
        no_aider: when True, skip the actual aider run (dry run).
        verbose: forwarded to the Coder.
        commit_hash: recorded by the caller; not used here.

    Side effects: copies the original files into a timestamped backup
    directory next to `testdir` before any edit is made.
    """
    testdir = Path(testdir)

    history_fname = os.path.join(testdir, ".aider.chat.history.md")

    # Back up the originals to "<testdir>_<timestamp>" so a run that was
    # interrupted after saving changes can be restored.
    timestamp = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
    original_dname = str(testdir) + "_" + timestamp
    print(original_dname)
    os.makedirs(original_dname, exist_ok=True)

    fnames = []
    for fname in testdir.glob("*"):
        # Skip test files, directories and dotfiles.
        if "test" not in fname.name and fname.is_file() and fname.name[0] != ".":
            fnames.append(fname)

            # restore point, in case we interrupted a prev run after it
            # had saved changes
            original_fname = os.path.join(original_dname, fname.name)
            print(fname, original_fname)
            shutil.copy(fname, original_fname)

    file_list = " ".join(fname.name for fname in fnames)

    instructions = user_prompts
    if edit_purpose == "design":
        instructions += prompts.design_instructions
        instructions += prompts.instructions_addendum.format(file_list=file_list)
    elif edit_purpose == "code":
        instructions += prompts.code_instructions
        instructions += prompts.instructions_addendum.format(file_list=file_list)

    io = InputOutput(
        pretty=True,
        yes=False,
        chat_history_file=history_fname,
    )

    main_model = models.Model(model_name)
    edit_format = edit_format or main_model.edit_format

    show_fnames = ",".join(map(str, fnames))
    print("fnames:", show_fnames)  # files that will be edited

    coder = Coder.create(
        main_model=main_model,
        edit_format=edit_format,
        io=io,
        fnames=fnames,
        use_git=False,
        stream=False,
        pretty=False,
        verbose=verbose,
    )

    dur = 0
    for _attempt in range(tries):
        start = time.time()
        if not no_aider:
            coder.run(with_message=instructions)
        dur += time.time() - start

        if coder.num_control_c:
            raise KeyboardInterrupt

        if no_unit_tests:
            break

        # BUG FIX: the original referenced an undefined local `errors`
        # here (the unit-test runner that produced it was removed),
        # raising NameError whenever no_unit_tests was False.  With no
        # test output to feed back into the next pass, retrying is
        # pointless, so stop.
        break


def run_aider():
    """Quick manual smoke test: run one design edit over static/html."""
    testdir = "../static/html"
    model_name = "gpt-3.5-turbo-0613"
    edit_format = "whole"
    tries = 2
    no_unit_tests = True
    no_aider = False
    verbose = False
    commit_hash = "e3aa9db-dirty"
    edit_purpose = "design"  # or "code"
    # repo = git.Repo(search_parent_directories=True)
    # commit_hash = repo.head.object.hexsha[:7]
    # if repo.is_dirty():
    #     commit_hash += "-dirty"

    modify_code_aider("", edit_purpose, testdir, model_name, edit_format,
                      tries, no_unit_tests, no_aider, verbose, commit_hash)


if __name__ == "__main__":
    # For quick manual testing only -- replace with a real key.
    os.environ["openai_api_key"] = "YOUR API KEY"
    openai_api_base = "https://api.openai.com/v1"

    openai.api_key = os.environ["openai_api_key"]
    openai.api_base = openai_api_base

    run_aider()
6 | No more than three colors per page, don't use extremely saturated colors 7 | The font size should be appropriate. 8 | The design needs to be a flat style design. 9 | Add an image background. 10 | Adjust the layout to look better. 11 | 12 | Follow these material design principles: 13 | Utilize bold and graphic colors purposefully to highlight important information. 14 | Apply shadows and depth effects sparingly to distinguish UI levels. 15 | Incorporate responsive animations and transitions for user feedback and continuity. 16 | Maintain a unified theme with a unique color palette and typography. 17 | Ensure the design complements the content, following the "Content is king" principle. 18 | Use space, color, and fonts deliberately to guide user attention and interaction. 19 | Ensure consistent behavior of components within their environment. 20 | 21 | Consider accessibility colors: 22 | Ensure sufficient contrast between background and foreground colors. 23 | Use color as a means of communication, but not the sole method. 24 | Avoid colors that may cause issues for colorblind users (e.g., red/green). 25 | Select a color palette with high contrast among its elements. 26 | Remember that the usability and user experience of a website are crucial. 27 | The use of beautiful colors and adherence to material design principles should enhance, rather than detract from, the overall user experience. 28 | 29 | """ 30 | 31 | code_instructions = """ 32 | #### 33 | 34 | Please follow the instructions to regenerate the code. 35 | """ 36 | 37 | instructions_addendum = """ 38 | #### 39 | 40 | Use the above instructions to modify the supplied files: {file_list} 41 | """ 42 | 43 | 44 | test_failures = """ 45 | #### 46 | 47 | See the testing errors above. 48 | The tests are correct. 49 | Fix the code in {file_list} to resolve the errors. 
50 | """ -------------------------------------------------------------------------------- /AiderModify/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.8.3-dev" 2 | -------------------------------------------------------------------------------- /AiderModify/__pycache__/ModifyCodeAider.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/AiderModify/__pycache__/ModifyCodeAider.cpython-39.pyc -------------------------------------------------------------------------------- /AiderModify/__pycache__/ModifyCodeAiderPrompts.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/AiderModify/__pycache__/ModifyCodeAiderPrompts.cpython-39.pyc -------------------------------------------------------------------------------- /AiderModify/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/AiderModify/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /AiderModify/aider/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.8.3-dev" 2 | -------------------------------------------------------------------------------- /AiderModify/aider/coders/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_coder import Coder 2 | from .editblock_coder import EditBlockCoder 3 | from .editblock_func_coder import EditBlockFunctionCoder 4 | from .single_wholefile_func_coder import SingleWholeFileFunctionCoder 5 | from 
# File: AiderModify/aider/coders/__init__.py
from .base_coder import Coder
from .editblock_coder import EditBlockCoder
from .editblock_func_coder import EditBlockFunctionCoder
from .single_wholefile_func_coder import SingleWholeFileFunctionCoder
from .wholefile_coder import WholeFileCoder
from .wholefile_func_coder import WholeFileFunctionCoder

# BUG FIX: __all__ must contain *names* (strings), not the class objects
# themselves; `from aider.coders import *` raises TypeError otherwise.
__all__ = [
    "Coder",
    "EditBlockCoder",
    "WholeFileCoder",
    "WholeFileFunctionCoder",
    "EditBlockFunctionCoder",
    "SingleWholeFileFunctionCoder",
]

# File: AiderModify/aider/coders/base_prompts.py
class CoderPrompts:
    """Base container for canned reply strings shared by all coders."""

    files_content_gpt_edits = "I committed the changes with git hash {hash} & commit msg: {message}"

    files_content_gpt_edits_no_repo = "I updated the files."

    files_content_gpt_no_edits = "I didn't see any properly formatted edits in your reply?!"

    files_content_local_edits = "I edited the files myself."

# File: AiderModify/aider/coders/editblock_coder.py (class)
import math
import re
from difflib import SequenceMatcher
from pathlib import Path

from .base_coder import Coder
from .editblock_prompts import EditBlockPrompts


class EditBlockCoder(Coder):
    """Coder that applies ORIGINAL/UPDATED *edit blocks* to files."""

    def __init__(self, *args, **kwargs):
        self.gpt_prompts = EditBlockPrompts()
        super().__init__(*args, **kwargs)

    def update_cur_messages(self, content, edited):
        self.cur_messages += [dict(role="assistant", content=content)]

    def update_files(self):
        """Parse edit blocks out of the GPT reply and apply them.

        Returns the set of relative paths that were actually edited.
        """
        content = self.partial_response_content

        # might raise ValueError for malformed ORIG/UPD blocks
        edits = list(find_original_update_blocks(content))

        edited = set()
        for path, original, updated in edits:
            full_path = self.allowed_to_edit(path)
            if not full_path:
                continue
            content = self.io.read_text(full_path)
            content = do_replace(full_path, content, original, updated)
            if content:
                self.io.write_text(full_path, content)
                edited.add(path)
                continue
            self.io.tool_error(f"Failed to apply edit to {path}")

        return edited
def try_dotdotdots(whole, part, replace):
    """
    See if the edit block has ... lines.
    If not, return None.

    If yes, try and do a perfect edit with the ... chunks.
    If there's a mismatch or otherwise imperfect edit, raise ValueError.

    If perfect edit succeeds, return the updated whole.
    """

    dots_re = re.compile(r"(^\s*\.\.\.\n)", re.MULTILINE | re.DOTALL)

    part_pieces = re.split(dots_re, part)
    replace_pieces = re.split(dots_re, replace)

    if len(part_pieces) != len(replace_pieces):
        raise ValueError("Unpaired ... in edit block")

    if len(part_pieces) == 1:
        # no dots in this edit block, just return None
        return

    # The odd-indexed pieces are the "..." separators themselves; they
    # must match pairwise between part and replace.
    all_dots_match = all(part_pieces[i] == replace_pieces[i] for i in range(1, len(part_pieces), 2))

    if not all_dots_match:
        raise ValueError("Unmatched ... in edit block")

    part_pieces = [part_pieces[i] for i in range(0, len(part_pieces), 2)]
    replace_pieces = [replace_pieces[i] for i in range(0, len(replace_pieces), 2)]

    pairs = zip(part_pieces, replace_pieces)
    for part, replace in pairs:
        if not part and not replace:
            continue

        if not part and replace:
            # pure insertion at the end
            if not whole.endswith("\n"):
                whole += "\n"
            whole += replace
            continue

        # each chunk must occur exactly once so the edit is unambiguous
        if whole.count(part) != 1:
            raise ValueError(
                "No perfect matching chunk in edit block with ... or part appears more than once"
            )

        whole = whole.replace(part, replace, 1)

    return whole


def replace_part_with_missing_leading_whitespace(whole, part, replace):
    """Re-indent an outdented part/replace pair and apply it to whole.

    GPT often strips the file's leading whitespace from both the part
    and replace blocks; detect the indentation from `whole` and re-add
    it.  Returns the new whole, or None if this strategy doesn't apply.
    """
    whole_lines = whole.splitlines()
    part_lines = part.splitlines()
    replace_lines = replace.splitlines()

    # If all lines in the part start with whitespace, then honor it.
    # But GPT often outdents the part and replace blocks completely,
    # thereby discarding the actual leading whitespace in the file.
    if all((not pline or pline[0].isspace()) for pline in part_lines):
        return

    # BUG FIX: an empty first part line would make part_lines[0][0]
    # below raise IndexError; this strategy can't anchor on it anyway.
    if not part_lines or not part_lines[0]:
        return

    for i in range(len(whole_lines) - len(part_lines) + 1):
        # Infer the candidate indentation from where the first part
        # line's first character appears in this whole line.
        leading_whitespace = ""
        for j, c in enumerate(whole_lines[i]):
            if c == part_lines[0][0]:
                leading_whitespace = whole_lines[i][:j]
                break

        if not leading_whitespace or not all(c.isspace() for c in leading_whitespace):
            continue

        matched = all(
            whole_lines[i + k].startswith(leading_whitespace + part_lines[k])
            for k in range(len(part_lines))
        )

        if matched:
            replace_lines = [
                leading_whitespace + rline if rline else rline for rline in replace_lines
            ]
            whole_lines = whole_lines[:i] + replace_lines + whole_lines[i + len(part_lines) :]
            return "\n".join(whole_lines) + "\n"

    return None


def replace_most_similar_chunk(whole, part, replace):
    """Replace `part` with `replace` inside `whole`.

    Tries, in order: re-indenting an outdented block, an exact match,
    a "..."-elision match, and finally a fuzzy match against the most
    similar chunk.  Returns None when nothing matched well enough.
    """
    res = replace_part_with_missing_leading_whitespace(whole, part, replace)
    if res:
        return res

    if part in whole:
        return whole.replace(part, replace)

    try:
        res = try_dotdotdots(whole, part, replace)
    except ValueError:
        return

    if res:
        return res

    # Fuzzy fallback: scan chunks of whole within +/-10% of the part's
    # length and take the most similar one.
    similarity_thresh = 0.8

    max_similarity = 0
    most_similar_chunk_start = -1
    most_similar_chunk_end = -1

    whole_lines = whole.splitlines()
    part_lines = part.splitlines()

    scale = 0.1
    min_len = math.floor(len(part_lines) * (1 - scale))
    max_len = math.ceil(len(part_lines) * (1 + scale))

    for length in range(min_len, max_len):
        for i in range(len(whole_lines) - length + 1):
            chunk = whole_lines[i : i + length]
            chunk = "\n".join(chunk)

            similarity = SequenceMatcher(None, chunk, part).ratio()

            if similarity > max_similarity:
                max_similarity = similarity
                most_similar_chunk_start = i
                most_similar_chunk_end = i + length

    if max_similarity < similarity_thresh:
        return

    replace_lines = replace.splitlines()

    modified_whole = (
        whole_lines[:most_similar_chunk_start]
        + replace_lines
        + whole_lines[most_similar_chunk_end:]
    )
    modified_whole = "\n".join(modified_whole)

    if whole.endswith("\n"):
        modified_whole += "\n"

    return modified_whole
def strip_quoted_wrapping(res, fname=None):
    """
    Given an input string which may have extra "wrapping" around it, remove the wrapping.
    For example:

    filename.ext
    ```
    We just want this content
    Not the filename and triple quotes
    ```
    """
    if not res:
        return res

    res = res.splitlines()

    # Drop a leading filename line, if present.
    if fname and res[0].strip().endswith(Path(fname).name):
        res = res[1:]

    # BUG FIX: guard against `res` being empty after the filename strip;
    # res[0] raised IndexError when the input was only a filename line.
    if res and res[0].startswith("```") and res[-1].startswith("```"):
        res = res[1:-1]

    res = "\n".join(res)
    if res and res[-1] != "\n":
        res += "\n"

    return res


def do_replace(fname, content, before_text, after_text):
    """Apply one ORIGINAL->UPDATED edit to `content` of file `fname`.

    An empty `before_text` means "append" (or create a new file).
    Returns the new content, or None when the edit could not be applied.
    Side effect: touches `fname` into existence for the new-file case.
    """
    before_text = strip_quoted_wrapping(before_text, fname)
    after_text = strip_quoted_wrapping(after_text, fname)
    fname = Path(fname)

    # does it want to make a new file?
    if not fname.exists() and not before_text.strip():
        fname.touch()
        content = ""

    if content is None:
        return

    if not before_text.strip():
        # append to existing file, or start a new file
        new_content = content + after_text
    else:
        new_content = replace_most_similar_chunk(content, before_text, after_text)

    return new_content
223 | if not fname.exists() and not before_text.strip(): 224 | fname.touch() 225 | content = "" 226 | 227 | if content is None: 228 | return 229 | 230 | if not before_text.strip(): 231 | # append to existing file, or start a new file 232 | new_content = content + after_text 233 | else: 234 | new_content = replace_most_similar_chunk(content, before_text, after_text) 235 | 236 | return new_content 237 | 238 | 239 | ORIGINAL = "<<<<<<< ORIGINAL" 240 | DIVIDER = "=======" 241 | UPDATED = ">>>>>>> UPDATED" 242 | 243 | separators = "|".join([ORIGINAL, DIVIDER, UPDATED]) 244 | 245 | split_re = re.compile(r"^((?:" + separators + r")[ ]*\n)", re.MULTILINE | re.DOTALL) 246 | 247 | 248 | def find_original_update_blocks(content): 249 | # make sure we end with a newline, otherwise the regex will miss <>>>>>> UPDATED 334 | ``` 335 | 336 | Hope you like it! 337 | """ 338 | print(list(find_original_update_blocks(edit))) 339 | -------------------------------------------------------------------------------- /AiderModify/aider/coders/editblock_func_coder.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ..dump import dump # noqa: F401 4 | from .base_coder import Coder 5 | from .editblock_coder import do_replace 6 | from .editblock_func_prompts import EditBlockFunctionPrompts 7 | 8 | 9 | class EditBlockFunctionCoder(Coder): 10 | functions = [ 11 | dict( 12 | name="replace_lines", 13 | description="create or update one or more files", 14 | parameters=dict( 15 | type="object", 16 | required=["explanation", "edits"], 17 | properties=dict( 18 | explanation=dict( 19 | type="string", 20 | description=( 21 | "Step by step plan for the changes to be made to the code (future" 22 | " tense, markdown format)" 23 | ), 24 | ), 25 | edits=dict( 26 | type="array", 27 | items=dict( 28 | type="object", 29 | required=["path", "original_lines", "updated_lines"], 30 | properties=dict( 31 | path=dict( 32 | type="string", 33 | 
# File: AiderModify/aider/coders/editblock_func_coder.py
import copy
import json

from ..dump import dump  # noqa: F401
from .base_coder import Coder
from .editblock_coder import do_replace
from .editblock_func_prompts import EditBlockFunctionPrompts


class EditBlockFunctionCoder(Coder):
    """Coder that receives edits via the `replace_lines` function call."""

    # JSON schema advertised to the model for the function call.
    functions = [
        dict(
            name="replace_lines",
            description="create or update one or more files",
            parameters=dict(
                type="object",
                required=["explanation", "edits"],
                properties=dict(
                    explanation=dict(
                        type="string",
                        description=(
                            "Step by step plan for the changes to be made to the code (future"
                            " tense, markdown format)"
                        ),
                    ),
                    edits=dict(
                        type="array",
                        items=dict(
                            type="object",
                            required=["path", "original_lines", "updated_lines"],
                            properties=dict(
                                path=dict(
                                    type="string",
                                    description="Path of file to edit",
                                ),
                                original_lines=dict(
                                    type="array",
                                    items=dict(
                                        type="string",
                                    ),
                                    description=(
                                        "A unique stretch of lines from the original file,"
                                        " including all whitespace, without skipping any lines"
                                    ),
                                ),
                                updated_lines=dict(
                                    type="array",
                                    items=dict(
                                        type="string",
                                    ),
                                    description="New content to replace the `original_lines` with",
                                ),
                            ),
                        ),
                    ),
                ),
            ),
        ),
    ]

    def __init__(self, code_format, *args, **kwargs):
        """`code_format` is "list" or "string": the shape the model should
        use for original_lines/updated_lines."""
        self.code_format = code_format

        if code_format == "string":
            original_lines = dict(
                type="string",
                description=(
                    "A unique stretch of lines from the original file, including all"
                    " whitespace and newlines, without skipping any lines"
                ),
            )
            updated_lines = dict(
                type="string",
                description="New content to replace the `original_lines` with",
            )

            # BUG FIX: the original mutated the *class-level* `functions`
            # schema in place, so constructing one "string" coder silently
            # changed the schema for every other instance.  Mutate a
            # per-instance deep copy instead.
            self.functions = copy.deepcopy(EditBlockFunctionCoder.functions)
            edit_props = self.functions[0]["parameters"]["properties"]["edits"]["items"][
                "properties"
            ]
            edit_props["original_lines"] = original_lines
            edit_props["updated_lines"] = updated_lines

        self.gpt_prompts = EditBlockFunctionPrompts()
        super().__init__(*args, **kwargs)

    def update_cur_messages(self, content, edited):
        if self.partial_response_content:
            self.cur_messages += [dict(role="assistant", content=self.partial_response_content)]
        if self.partial_response_function_call:
            self.cur_messages += [
                dict(
                    role="assistant",
                    content=None,
                    function_call=self.partial_response_function_call,
                )
            ]

    def render_incremental_response(self, final=False):
        if self.partial_response_content:
            return self.partial_response_content

        args = self.parse_partial_args()
        res = json.dumps(args, indent=4)
        return res

    def update_files(self):
        """Apply the `replace_lines` function-call edits.

        Returns the set of relative paths that were actually edited.
        """
        name = self.partial_response_function_call.get("name")

        if name and name != "replace_lines":
            raise ValueError(f'Unknown function_call name="{name}", use name="replace_lines"')

        args = self.parse_partial_args()
        if not args:
            return

        edits = args.get("edits", [])

        edited = set()
        for edit in edits:
            path = get_arg(edit, "path")
            original = get_arg(edit, "original_lines")
            updated = get_arg(edit, "updated_lines")

            # gpt-3.5 returns lists even when instructed to return a string!
            if self.code_format == "list" or isinstance(original, list):
                original = "\n".join(original)
            if self.code_format == "list" or isinstance(updated, list):
                updated = "\n".join(updated)

            # Normalize to newline-terminated chunks for do_replace().
            if original and not original.endswith("\n"):
                original += "\n"
            if updated and not updated.endswith("\n"):
                updated += "\n"

            full_path = self.allowed_to_edit(path)
            if not full_path:
                continue
            content = self.io.read_text(full_path)
            content = do_replace(full_path, content, original, updated)
            if content:
                self.io.write_text(full_path, content)
                edited.add(path)
                continue
            self.io.tool_error(f"Failed to apply edit to {path}")

        return edited


def get_arg(edit, arg):
    """Fetch a required key from a function-call edit dict, or raise."""
    if arg not in edit:
        raise ValueError(f"Missing `{arg}` parameter: {edit}")
    return edit[arg]
# File: AiderModify/aider/coders/editblock_func_prompts.py
# flake8: noqa: E501

from .base_prompts import CoderPrompts


class EditBlockFunctionPrompts(CoderPrompts):
    """Prompts for the function-calling edit-block coder."""

    main_system = """Act as an expert software developer.
Take requests for changes to the supplied code.
If the request is ambiguous, ask questions.

Once you understand the request you MUST use the `replace_lines` function to edit the files to make the needed changes.
"""

    system_reminder = """
ONLY return code using the `replace_lines` function.
NEVER return code outside the `replace_lines` function.
"""

    files_content_prefix = "Here is the current content of the files:\n"
    files_no_full_files = "I am not sharing any files yet."

    redacted_edit_message = "No changes are needed."

    repo_content_prefix = (
        "Below here are summaries of other files! Do not propose changes to these *read-only*"
        " files without asking me first.\n"
    )

# File: AiderModify/aider/coders/editblock_prompts.py
# flake8: noqa: E501

from .base_prompts import CoderPrompts


class EditBlockPrompts(CoderPrompts):
    """Prompts for the plain-text ORIGINAL/UPDATED edit-block coder."""

    main_system = """Act as an expert software developer.
Be concise!

Take requests for changes to the supplied code.
If the request is ambiguous, ask questions.

Once you understand the request you MUST:
1. List the files you need to modify. *NEVER* suggest changes to *read-only* files. You *MUST* ask the user to make them *read-write* using the file's full path name. End your reply and wait for their approval.
2. Think step-by-step and explain the needed changes.
3. Describe each change with an *edit block* per the example below.
"""

    system_reminder = """You MUST format EVERY code change with an *edit block* like this:

```python
some/dir/example.py
<<<<<<< ORIGINAL
    # some comment
    # Func to multiply
    def mul(a,b)
=======
    # updated comment
    # Function to add
    def add(a,b):
>>>>>>> UPDATED
```

Every *edit block* must be fenced w/triple backticks with the correct code language.
Every *edit block* must start with the full path! *NEVER* propose edit blocks for *read-only* files.
The ORIGINAL section must be an *exact* set of lines from the file:
- NEVER SKIP LINES!
- Include all original leading spaces and indentation!

Edits to different parts of a file each need their own *edit block*.

If you want to put code in a new file, use an edit block with:
- A new file path, including dir name if needed
- An empty ORIGINAL section
- The new file's contents in the UPDATED section

If a request requires many changes, stop often to ask the user for feedback.
"""

    files_content_prefix = "These are the *read-write* files:\n"

    files_no_full_files = "I am not sharing any *read-write* files yet."

    repo_content_prefix = (
        "Below here are summaries of other files! Do not propose changes to these *read-only*"
        " files without asking me first.\n"
    )
# File: AiderModify/aider/coders/single_wholefile_func_coder.py
from aider import diffs

from ..dump import dump  # noqa: F401
from .base_coder import Coder
from .single_wholefile_func_prompts import SingleWholeFileFunctionPrompts


class SingleWholeFileFunctionCoder(Coder):
    """Coder that edits exactly one file via the `write_file` function call."""

    # JSON schema advertised to the model for the function call.
    functions = [
        dict(
            name="write_file",
            description="write new content into the file",
            parameters=dict(
                type="object",
                required=["explanation", "content"],
                properties=dict(
                    explanation=dict(
                        type="string",
                        description=(
                            "Step by step plan for the changes to be made to the code (future"
                            " tense, markdown format)"
                        ),
                    ),
                    content=dict(
                        type="string",
                        description="Content to write to the file",
                    ),
                ),
            ),
        ),
    ]

    def __init__(self, *args, **kwargs):
        self.gpt_prompts = SingleWholeFileFunctionPrompts()
        super().__init__(*args, **kwargs)

    def update_cur_messages(self, content, edited):
        if edited:
            self.cur_messages += [
                dict(role="assistant", content=self.gpt_prompts.redacted_edit_message)
            ]
        else:
            self.cur_messages += [dict(role="assistant", content=content)]

    def get_context_from_history(self, history):
        # Concatenate the prior *user* messages into a context header.
        context = ""
        if history:
            context += "# Context:\n"
            for msg in history:
                if msg["role"] == "user":
                    context += msg["role"].upper() + ": " + msg["content"] + "\n"
        return context

    def render_incremental_response(self, final=False):
        if self.partial_response_content:
            return self.partial_response_content

        args = self.parse_partial_args()
        # BUG FIX: the original had ~20 lines of multi-file diff rendering
        # *after* this return -- unreachable dead code -- which has been
        # removed.  The rendered behavior is unchanged.
        return str(args)

    def live_diffs(self, fname, content, final):
        """Render a partial diff between the file on disk and `content`."""
        lines = content.splitlines(keepends=True)

        full_path = self.abs_root_path(fname)

        content = self.io.read_text(full_path)
        if content is None:
            orig_lines = []
        else:
            orig_lines = content.splitlines()

        show_diff = diffs.diff_partial_update(
            orig_lines,
            lines,
            final,
            fname=fname,
        ).splitlines()

        return "\n".join(show_diff)

    def update_files(self):
        """Write the `write_file` function-call content into the single
        in-chat file.  Returns the set of edited paths."""
        name = self.partial_response_function_call.get("name")
        if name and name != "write_file":
            raise ValueError(f'Unknown function_call name="{name}", use name="write_file"')

        args = self.parse_partial_args()
        if not args:
            return

        content = args["content"]
        path = self.get_inchat_relative_files()[0]
        if self.allowed_to_edit(path, content):
            return set([path])

        return set()


# File: AiderModify/aider/coders/single_wholefile_func_prompts.py
# flake8: noqa: E501

from .base_prompts import CoderPrompts


class SingleWholeFileFunctionPrompts(CoderPrompts):
    """Prompts for the single-file `write_file` function coder."""

    main_system = """Act as an expert software developer.
Take requests for changes to the supplied code.
If the request is ambiguous, ask questions.

Once you understand the request you MUST use the `write_file` function to update the file to make the changes.
"""

    system_reminder = """
ONLY return code using the `write_file` function.
NEVER return code outside the `write_file` function.
"""

    files_content_prefix = "Here is the current content of the file:\n"
    files_no_full_files = "I am not sharing any files yet."

    redacted_edit_message = "No changes are needed."

    # TODO: should this be present for using this with gpt-4?
    repo_content_prefix = None

    # TODO: fix the chat history, except we can't keep the whole file
    def update_files(self, mode="update"):
        """Parse the model's whole-file *file listing* response.

        mode="update": apply the edits via allowed_to_edit() and return the
        set of file names actually edited.
        mode="diff": do not touch any files; return a live-diff preview
        string instead (used by render_incremental_response).

        Raises ValueError when a fenced block has no identifiable filename.
        """
        content = self.partial_response_content

        chat_files = self.get_inchat_relative_files()

        # output collects non-code lines (diff mode preview text)
        output = []
        lines = content.splitlines(keepends=True)

        # edits collects (fname, fname_source, new_lines) tuples to apply later
        edits = []

        # saw_fname: a `quoted` chat filename mentioned in prose, used as a
        # fallback when a fence opens with no filename line above it.
        saw_fname = None
        fname = None
        fname_source = None
        new_lines = []
        for i, line in enumerate(lines):
            if line.startswith(self.fence[0]) or line.startswith(self.fence[1]):
                if fname is not None:
                    # ending an existing block
                    saw_fname = None

                    full_path = (Path(self.root) / fname).absolute()

                    if mode == "diff":
                        output += self.do_live_diff(full_path, new_lines, True)
                    else:
                        edits.append((fname, fname_source, new_lines))

                    fname = None
                    fname_source = None
                    new_lines = []
                    continue

                # fname==None ... starting a new block
                if i > 0:
                    fname_source = "block"
                    # the filename is expected on the line just above the fence
                    fname = lines[i - 1].strip()
                    # Did gpt prepend a bogus dir? It especially likes to
                    # include the path/to prefix from the one-shot example in
                    # the prompt.
                    if fname and fname not in chat_files and Path(fname).name in chat_files:
                        fname = Path(fname).name
                if not fname:  # blank line? or ``` was on first line i==0
                    # fall back: prose-mentioned name, then the sole chat file
                    if saw_fname:
                        fname = saw_fname
                        fname_source = "saw"
                    elif len(chat_files) == 1:
                        fname = chat_files[0]
                        fname_source = "chat"
                    else:
                        # TODO: sense which file it is by diff size
                        raise ValueError(
                            f"No filename provided before {self.fence[0]} in file listing"
                        )

            elif fname is not None:
                # inside a fenced block: accumulate the file's new content
                new_lines.append(line)
            else:
                # outside any block: scan prose for `quoted` chat filenames
                for word in line.strip().split():
                    word = word.rstrip(".:,;!")
                    for chat_file in chat_files:
                        quoted_chat_file = f"`{chat_file}`"
                        if word == quoted_chat_file:
                            saw_fname = chat_file

                output.append(line)

        if mode == "diff":
            if fname is not None:
                # ending an existing block (response may still be streaming,
                # so the final fence can be missing: final=False)
                full_path = (Path(self.root) / fname).absolute()
                output += self.do_live_diff(full_path, new_lines, False)
            return "\n".join(output)

        if fname:
            # unclosed trailing block
            edits.append((fname, fname_source, new_lines))

        edited = set()
        # process from most reliable filename, to least reliable
        for source in ("block", "saw", "chat"):
            for fname, fname_source, new_lines in edits:
                if fname_source != source:
                    continue
                # if a higher priority source already edited the file, skip
                if fname in edited:
                    continue

                # we have a winner
                new_lines = "".join(new_lines)
                if self.allowed_to_edit(fname, new_lines):
                    edited.add(fname)

        return edited
/AiderModify/aider/coders/wholefile_func_coder.py: -------------------------------------------------------------------------------- 1 | from aider import diffs 2 | 3 | from ..dump import dump # noqa: F401 4 | from .base_coder import Coder 5 | from .wholefile_func_prompts import WholeFileFunctionPrompts 6 | 7 | 8 | class WholeFileFunctionCoder(Coder): 9 | functions = [ 10 | dict( 11 | name="write_file", 12 | description="create or update one or more files", 13 | parameters=dict( 14 | type="object", 15 | required=["explanation", "files"], 16 | properties=dict( 17 | explanation=dict( 18 | type="string", 19 | description=( 20 | "Step by step plan for the changes to be made to the code (future" 21 | " tense, markdown format)" 22 | ), 23 | ), 24 | files=dict( 25 | type="array", 26 | items=dict( 27 | type="object", 28 | required=["path", "content"], 29 | properties=dict( 30 | path=dict( 31 | type="string", 32 | description="Path of file to write", 33 | ), 34 | content=dict( 35 | type="string", 36 | description="Content to write to the file", 37 | ), 38 | ), 39 | ), 40 | ), 41 | ), 42 | ), 43 | ), 44 | ] 45 | 46 | def __init__(self, *args, **kwargs): 47 | self.gpt_prompts = WholeFileFunctionPrompts() 48 | super().__init__(*args, **kwargs) 49 | 50 | def update_cur_messages(self, content, edited): 51 | if edited: 52 | self.cur_messages += [ 53 | dict(role="assistant", content=self.gpt_prompts.redacted_edit_message) 54 | ] 55 | else: 56 | self.cur_messages += [dict(role="assistant", content=content)] 57 | 58 | def get_context_from_history(self, history): 59 | context = "" 60 | if history: 61 | context += "# Context:\n" 62 | for msg in history: 63 | if msg["role"] == "user": 64 | context += msg["role"].upper() + ": " + msg["content"] + "\n" 65 | return context 66 | 67 | def render_incremental_response(self, final=False): 68 | if self.partial_response_content: 69 | return self.partial_response_content 70 | 71 | args = self.parse_partial_args() 72 | 73 | if not args: 74 | return 
75 | 76 | explanation = args.get("explanation") 77 | files = args.get("files", []) 78 | 79 | res = "" 80 | if explanation: 81 | res += f"{explanation}\n\n" 82 | 83 | for i, file_upd in enumerate(files): 84 | path = file_upd.get("path") 85 | if not path: 86 | continue 87 | content = file_upd.get("content") 88 | if not content: 89 | continue 90 | 91 | this_final = (i < len(files) - 1) or final 92 | res += self.live_diffs(path, content, this_final) 93 | 94 | return res 95 | 96 | def live_diffs(self, fname, content, final): 97 | lines = content.splitlines(keepends=True) 98 | 99 | # ending an existing block 100 | full_path = self.abs_root_path(fname) 101 | 102 | content = self.io.read_text(full_path) 103 | if content is None: 104 | orig_lines = [] 105 | else: 106 | orig_lines = content.splitlines() 107 | 108 | show_diff = diffs.diff_partial_update( 109 | orig_lines, 110 | lines, 111 | final, 112 | fname=fname, 113 | ).splitlines() 114 | 115 | return "\n".join(show_diff) 116 | 117 | def update_files(self): 118 | name = self.partial_response_function_call.get("name") 119 | if name and name != "write_file": 120 | raise ValueError(f'Unknown function_call name="{name}", use name="write_file"') 121 | 122 | args = self.parse_partial_args() 123 | if not args: 124 | return 125 | 126 | files = args.get("files", []) 127 | 128 | edited = set() 129 | for file_upd in files: 130 | path = file_upd.get("path") 131 | if not path: 132 | raise ValueError(f"Missing path parameter: {file_upd}") 133 | 134 | content = file_upd.get("content") 135 | if not content: 136 | raise ValueError(f"Missing content parameter: {file_upd}") 137 | 138 | if self.allowed_to_edit(path, content): 139 | edited.add(path) 140 | 141 | return edited 142 | -------------------------------------------------------------------------------- /AiderModify/aider/coders/wholefile_func_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .base_prompts 
import CoderPrompts 4 | 5 | 6 | class WholeFileFunctionPrompts(CoderPrompts): 7 | main_system = """Act as an expert software developer. 8 | Take requests for changes to the supplied code. 9 | If the request is ambiguous, ask questions. 10 | 11 | Once you understand the request you MUST use the `write_file` function to edit the files to make the needed changes. 12 | """ 13 | 14 | system_reminder = """ 15 | ONLY return code using the `write_file` function. 16 | NEVER return code outside the `write_file` function. 17 | """ 18 | 19 | files_content_prefix = "Here is the current content of the files:\n" 20 | files_no_full_files = "I am not sharing any files yet." 21 | 22 | redacted_edit_message = "No changes are needed." 23 | 24 | # TODO: should this be present for using this with gpt-4? 25 | repo_content_prefix = None 26 | 27 | # TODO: fix the chat history, except we can't keep the whole file 28 | -------------------------------------------------------------------------------- /AiderModify/aider/coders/wholefile_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .base_prompts import CoderPrompts 4 | 5 | 6 | class WholeFilePrompts(CoderPrompts): 7 | main_system = """Act as an expert software developer. 8 | Take requests for changes to the supplied code. 9 | If the request is ambiguous, ask questions. 10 | 11 | Once you understand the request you MUST: 12 | 1. Determine if any code changes are needed. 13 | 2. Explain any needed changes. 14 | 3. If changes are needed, output a copy of each file that needs changes. 15 | """ 16 | 17 | system_reminder = """To suggest changes to a file you MUST return the entire content of the updated file. 18 | You MUST use this *file listing* format: 19 | 20 | path/to/filename.js 21 | {fence[0]} 22 | // entire file content ... 23 | // ... 
goes in between 24 | {fence[1]} 25 | 26 | Every *file listing* MUST use this format: 27 | - First line: the filename with any originally provided path 28 | - Second line: opening {fence[0]} 29 | - ... entire content of the file ... 30 | - Final line: closing {fence[1]} 31 | 32 | To suggest changes to a file you MUST return a *file listing* that contains the entire content of the file. 33 | Create a new file you MUST return a *file listing* which includes an appropriate filename, including any appropriate path. 34 | """ 35 | 36 | files_content_prefix = "Here is the current content of the files:\n" 37 | files_no_full_files = "I am not sharing any files yet." 38 | 39 | redacted_edit_message = "No changes are needed." 40 | 41 | # this coder is not able to handle repo content 42 | repo_content_prefix = None 43 | -------------------------------------------------------------------------------- /AiderModify/aider/commands.py: -------------------------------------------------------------------------------- 1 | import json 2 | import shlex 3 | import subprocess 4 | import sys 5 | from pathlib import Path 6 | 7 | import git 8 | import tiktoken 9 | from prompt_toolkit.completion import Completion 10 | 11 | from aider import prompts 12 | 13 | from .dump import dump # noqa: F401 14 | 15 | 16 | class Commands: 17 | def __init__(self, io, coder): 18 | self.io = io 19 | self.coder = coder 20 | self.tokenizer = tiktoken.encoding_for_model(coder.main_model.name) 21 | 22 | def is_command(self, inp): 23 | if inp[0] == "/": 24 | return True 25 | 26 | def get_commands(self): 27 | commands = [] 28 | for attr in dir(self): 29 | if attr.startswith("cmd_"): 30 | commands.append("/" + attr[4:]) 31 | 32 | return commands 33 | 34 | def get_command_completions(self, cmd_name, partial): 35 | cmd_completions_method_name = f"completions_{cmd_name}" 36 | cmd_completions_method = getattr(self, cmd_completions_method_name, None) 37 | if cmd_completions_method: 38 | for completion in 
cmd_completions_method(partial): 39 | yield completion 40 | 41 | def do_run(self, cmd_name, args): 42 | cmd_method_name = f"cmd_{cmd_name}" 43 | cmd_method = getattr(self, cmd_method_name, None) 44 | if cmd_method: 45 | return cmd_method(args) 46 | else: 47 | self.io.tool_output(f"Error: Command {cmd_name} not found.") 48 | 49 | def matching_commands(self, inp): 50 | words = inp.strip().split() 51 | if not words: 52 | return 53 | 54 | first_word = words[0] 55 | rest_inp = inp[len(words[0]) :] 56 | 57 | all_commands = self.get_commands() 58 | matching_commands = [cmd for cmd in all_commands if cmd.startswith(first_word)] 59 | return matching_commands, first_word, rest_inp 60 | 61 | def run(self, inp): 62 | res = self.matching_commands(inp) 63 | if res is None: 64 | return 65 | matching_commands, first_word, rest_inp = res 66 | if len(matching_commands) == 1: 67 | return self.do_run(matching_commands[0][1:], rest_inp) 68 | elif len(matching_commands) > 1: 69 | self.io.tool_error(f"Ambiguous command: {', '.join(matching_commands)}") 70 | else: 71 | self.io.tool_error(f"Invalid command: {first_word}") 72 | 73 | # any method called cmd_xxx becomes a command automatically. 74 | # each one must take an args param. 
    def cmd_tokens(self, args):
        "Report on the number of tokens used by the current chat context"
        # NOTE: the one-line docstring above doubles as the /help description
        # printed by cmd_help — keep it a single short line.

        # res accumulates (token_count, label, tip) rows for the report
        res = []

        # system messages
        msgs = [
            dict(role="system", content=self.coder.gpt_prompts.main_system),
            dict(role="system", content=self.coder.gpt_prompts.system_reminder),
        ]
        # counts are approximate: messages are tokenized as their JSON dump,
        # not exactly as the API would serialize them
        tokens = len(self.tokenizer.encode(json.dumps(msgs)))
        res.append((tokens, "system messages", ""))

        # chat history
        msgs = self.coder.done_messages + self.coder.cur_messages
        if msgs:
            msgs = [dict(role="dummy", content=msg) for msg in msgs]
            msgs = json.dumps(msgs)
            tokens = len(self.tokenizer.encode(msgs))
            res.append((tokens, "chat history", "use /clear to clear"))

        # repo map
        other_files = set(self.coder.get_all_abs_files()) - set(self.coder.abs_fnames)
        if self.coder.repo_map:
            repo_content = self.coder.repo_map.get_repo_map(self.coder.abs_fnames, other_files)
            if repo_content:
                tokens = len(self.tokenizer.encode(repo_content))
                res.append((tokens, "repository map", "use --map-tokens to resize"))

        # files
        for fname in self.coder.abs_fnames:
            relative_fname = self.coder.get_rel_fname(fname)
            content = self.io.read_text(fname)
            # approximate
            content = f"{relative_fname}\n```\n" + content + "```\n"
            tokens = len(self.tokenizer.encode(content))
            res.append((tokens, f"{relative_fname}", "use /drop to drop from chat"))

        self.io.tool_output("Approximate context window usage, in tokens:")
        self.io.tool_output()

        # column width for the right-justified token counts
        width = 8

        def fmt(v):
            # thousands-separated, right-justified token count
            return format(int(v), ",").rjust(width)

        col_width = max(len(row[1]) for row in res)

        total = 0
        for tk, msg, tip in res:
            total += tk
            msg = msg.ljust(col_width)
            self.io.tool_output(f"{fmt(tk)} {msg} {tip}")

        self.io.tool_output("=" * width)
        self.io.tool_output(f"{fmt(total)} tokens total")

        limit = self.coder.main_model.max_context_tokens
        remaining = limit - total
        if remaining > 0:
            self.io.tool_output(f"{fmt(remaining)} tokens remaining in context window")
        else:
            self.io.tool_error(f"{fmt(remaining)} tokens remaining, window exhausted!")
        self.io.tool_output(f"{fmt(limit)} tokens max context window size")
187 | ) 188 | return 189 | 190 | last_commit = self.coder.repo.head.commit 191 | if ( 192 | not last_commit.message.startswith("aider:") 193 | or last_commit.hexsha[:7] != self.coder.last_aider_commit_hash 194 | ): 195 | self.io.tool_error("The last commit was not made by aider in this chat session.") 196 | return 197 | self.coder.repo.git.reset("--hard", "HEAD~1") 198 | self.io.tool_output( 199 | f"{last_commit.message.strip()}\n" 200 | f"The above commit {self.coder.last_aider_commit_hash} " 201 | "was reset and removed from git.\n" 202 | ) 203 | 204 | if self.coder.main_model.send_undo_reply: 205 | return prompts.undo_command_reply 206 | 207 | def cmd_diff(self, args): 208 | "Display the diff of the last aider commit" 209 | if not self.coder.repo: 210 | self.io.tool_error("No git repository found.") 211 | return 212 | 213 | if not self.coder.last_aider_commit_hash: 214 | self.io.tool_error("No previous aider commit found.") 215 | return 216 | 217 | commits = f"{self.coder.last_aider_commit_hash}~1" 218 | diff = self.coder.get_diffs(commits, self.coder.last_aider_commit_hash) 219 | 220 | # don't use io.tool_output() because we don't want to log or further colorize 221 | print(diff) 222 | 223 | def completions_add(self, partial): 224 | files = set(self.coder.get_all_relative_files()) 225 | files = files - set(self.coder.get_inchat_relative_files()) 226 | for fname in files: 227 | if partial.lower() in fname.lower(): 228 | yield Completion(fname, start_position=-len(partial)) 229 | 230 | def glob_filtered_to_repo(self, pattern): 231 | raw_matched_files = list(Path(self.coder.root).glob(pattern)) 232 | 233 | matched_files = [] 234 | for fn in raw_matched_files: 235 | matched_files += expand_subdir(fn.relative_to(self.coder.root)) 236 | 237 | # if repo, filter against it 238 | if self.coder.repo: 239 | git_files = self.coder.get_tracked_files() 240 | matched_files = [fn for fn in matched_files if str(fn) in git_files] 241 | 242 | res = list(map(str, matched_files)) 
243 | return res 244 | 245 | def cmd_add(self, args): 246 | "Add matching files to the chat session using glob patterns" 247 | 248 | added_fnames = [] 249 | git_added = [] 250 | git_files = self.coder.get_tracked_files() 251 | 252 | all_matched_files = set() 253 | for word in args.split(): 254 | matched_files = self.glob_filtered_to_repo(word) 255 | 256 | if not matched_files: 257 | if any(char in word for char in "*?[]"): 258 | self.io.tool_error(f"No files to add matching pattern: {word}") 259 | else: 260 | if Path(word).exists(): 261 | if Path(word).is_file(): 262 | matched_files = [word] 263 | else: 264 | self.io.tool_error(f"Unable to add: {word}") 265 | elif self.io.confirm_ask( 266 | f"No files matched '{word}'. Do you want to create the file?" 267 | ): 268 | (Path(self.coder.root) / word).touch() 269 | matched_files = [word] 270 | 271 | all_matched_files.update(matched_files) 272 | 273 | for matched_file in all_matched_files: 274 | abs_file_path = self.coder.abs_root_path(matched_file) 275 | 276 | if self.coder.repo and matched_file not in git_files: 277 | self.coder.repo.git.add(abs_file_path) 278 | git_added.append(matched_file) 279 | 280 | if abs_file_path in self.coder.abs_fnames: 281 | self.io.tool_error(f"{matched_file} is already in the chat") 282 | else: 283 | content = self.io.read_text(abs_file_path) 284 | if content is None: 285 | self.io.tool_error(f"Unable to read {matched_file}") 286 | else: 287 | self.coder.abs_fnames.add(abs_file_path) 288 | self.io.tool_output(f"Added {matched_file} to the chat") 289 | added_fnames.append(matched_file) 290 | 291 | if self.coder.repo and git_added: 292 | git_added = " ".join(git_added) 293 | commit_message = f"aider: Added {git_added}" 294 | self.coder.repo.git.commit("-m", commit_message, "--no-verify") 295 | commit_hash = self.coder.repo.head.commit.hexsha[:7] 296 | self.io.tool_output(f"Commit {commit_hash} {commit_message}") 297 | 298 | if not added_fnames: 299 | return 300 | 301 | # only reply if 
there's been some chatting since the last edit 302 | if not self.coder.cur_messages: 303 | return 304 | 305 | reply = prompts.added_files.format(fnames=", ".join(added_fnames)) 306 | return reply 307 | 308 | def completions_drop(self, partial): 309 | files = self.coder.get_inchat_relative_files() 310 | 311 | for fname in files: 312 | if partial.lower() in fname.lower(): 313 | yield Completion(fname, start_position=-len(partial)) 314 | 315 | def cmd_drop(self, args): 316 | "Remove matching files from the chat session" 317 | 318 | if not args.strip(): 319 | self.io.tool_output("Dropping all files from the chat session.") 320 | self.coder.abs_fnames = set() 321 | 322 | for word in args.split(): 323 | matched_files = self.glob_filtered_to_repo(word) 324 | 325 | if not matched_files: 326 | self.io.tool_error(f"No files matched '{word}'") 327 | 328 | for matched_file in matched_files: 329 | abs_fname = str(Path(matched_file).resolve()) 330 | if abs_fname in self.coder.abs_fnames: 331 | self.coder.abs_fnames.remove(abs_fname) 332 | self.io.tool_output(f"Removed {matched_file} from the chat") 333 | 334 | def cmd_run(self, args): 335 | "Run a shell command and optionally add the output to the chat" 336 | try: 337 | parsed_args = shlex.split(args) 338 | result = subprocess.run( 339 | parsed_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True 340 | ) 341 | combined_output = result.stdout 342 | except Exception as e: 343 | self.io.tool_error(f"Error running command: {e}") 344 | 345 | self.io.tool_output(combined_output) 346 | 347 | if self.io.confirm_ask("Add the output to the chat?", default="y"): 348 | for line in combined_output.splitlines(): 349 | self.io.tool_output(line, log_only=True) 350 | 351 | msg = prompts.run_output.format( 352 | command=args, 353 | output=combined_output, 354 | ) 355 | return msg 356 | 357 | def cmd_exit(self, args): 358 | "Exit the application" 359 | sys.exit() 360 | 361 | def cmd_ls(self, args): 362 | "List all known files and 
def expand_subdir(file_path):
    """Yield every file under *file_path*, each as a str path.

    A plain file yields itself; a directory yields all files beneath it,
    recursively.  BUG FIX: the original yielded a Path object in the
    single-file case but str for directory contents; every caller
    ultimately str()s the results, so yields are now uniformly str.
    """
    file_path = Path(file_path)
    if file_path.is_file():
        yield str(file_path)
        return

    for file in file_path.rglob("*"):
        if file.is_file():
            yield str(file)
def create_progress_bar(percentage):
    """Render *percentage* (0-100) as a 30-character filled/empty bar."""
    block = "█"
    empty = "░"
    total_blocks = 30
    filled_blocks = int(total_blocks * percentage // 100)
    empty_blocks = total_blocks - filled_blocks
    return block * filled_blocks + empty * empty_blocks


def assert_newlines(lines):
    """Sanity check: every line except the last must end with a newline."""
    if not lines:
        return
    for line in lines[:-1]:
        assert line and line[-1] == "\n", line


def find_last_non_deleted(lines_orig, lines_updated):
    """Return the 1-based index (into lines_orig) of the last original line
    kept by the update, or None if no original line survives."""
    diff = list(difflib.ndiff(lines_orig, lines_updated))

    num_orig = 0
    last_non_deleted_orig = None

    for line in diff:
        code = line[0]
        if code == " ":
            # line present in both versions
            num_orig += 1
            last_non_deleted_orig = num_orig
        elif code == "-":
            # line only in orig
            num_orig += 1
        elif code == "+":
            # line only in updated
            pass

    return last_non_deleted_orig


def diff_partial_update(lines_orig, lines_updated, final=False, fname=None):
    """
    Given only the first part of an updated file, show the diff while
    ignoring the block of "deleted" lines that are past the end of the
    partially complete update.

    Returns a fenced ```diff ...``` markdown string ("" if there is
    nothing to show yet).
    """
    assert_newlines(lines_orig)
    # BUG FIX: the original called assert_newlines(lines_orig) twice; the
    # duplicate is dropped.  It may have been intended to validate
    # lines_updated — not enabled here, since in-flight partial updates
    # would then trip the assertion.

    num_orig_lines = len(lines_orig)

    if final:
        last_non_deleted = num_orig_lines
    else:
        last_non_deleted = find_last_non_deleted(lines_orig, lines_updated)

    if last_non_deleted is None:
        return ""

    if num_orig_lines:
        pct = last_non_deleted * 100 / num_orig_lines
    else:
        pct = 50  # arbitrary midpoint when the original file is empty
    bar = create_progress_bar(pct)
    bar = f" {last_non_deleted:3d} / {num_orig_lines:3d} lines [{bar}] {pct:3.0f}%\n"

    # hide the original lines the stream hasn't reached yet
    lines_orig = lines_orig[:last_non_deleted]

    if not final:
        # replace the (possibly incomplete) trailing line with a progress bar
        lines_updated = lines_updated[:-1] + [bar]

    diff = difflib.unified_diff(lines_orig, lines_updated, n=5)

    diff = list(diff)[2:]  # drop the ---/+++ file header lines

    diff = "".join(diff)
    if not diff.endswith("\n"):
        diff += "\n"

    # pick a backtick fence longer than any backtick run inside the diff
    for i in range(3, 10):
        backticks = "`" * i
        if backticks not in diff:
            break

    show = f"{backticks}diff\n"
    if fname:
        show += f"--- {fname} original\n"
        show += f"+++ {fname} updated\n"

    show += diff

    show += f"{backticks}\n\n"

    return show
datetime 4 | from pathlib import Path 5 | 6 | from prompt_toolkit.completion import Completer, Completion 7 | from prompt_toolkit.history import FileHistory 8 | from prompt_toolkit.lexers import PygmentsLexer 9 | from prompt_toolkit.shortcuts import CompleteStyle, PromptSession, prompt 10 | from prompt_toolkit.styles import Style 11 | from pygments.lexers import MarkdownLexer, guess_lexer_for_filename 12 | from pygments.token import Token 13 | from pygments.util import ClassNotFound 14 | from rich.console import Console 15 | from rich.text import Text 16 | 17 | from .dump import dump # noqa: F401 18 | 19 | 20 | class AutoCompleter(Completer): 21 | def __init__(self, root, rel_fnames, addable_rel_fnames, commands, encoding): 22 | self.commands = commands 23 | self.addable_rel_fnames = addable_rel_fnames 24 | self.rel_fnames = rel_fnames 25 | self.encoding = encoding 26 | 27 | fname_to_rel_fnames = defaultdict(list) 28 | for rel_fname in addable_rel_fnames: 29 | fname = os.path.basename(rel_fname) 30 | if fname != rel_fname: 31 | fname_to_rel_fnames[fname].append(rel_fname) 32 | self.fname_to_rel_fnames = fname_to_rel_fnames 33 | 34 | self.words = set() 35 | 36 | for rel_fname in addable_rel_fnames: 37 | self.words.add(rel_fname) 38 | 39 | for rel_fname in rel_fnames: 40 | self.words.add(rel_fname) 41 | 42 | fname = Path(root) / rel_fname 43 | try: 44 | with open(fname, "r", encoding=self.encoding) as f: 45 | content = f.read() 46 | except FileNotFoundError: 47 | continue 48 | try: 49 | lexer = guess_lexer_for_filename(fname, content) 50 | except ClassNotFound: 51 | continue 52 | tokens = list(lexer.get_tokens(content)) 53 | self.words.update(token[1] for token in tokens if token[0] in Token.Name) 54 | 55 | def get_completions(self, document, complete_event): 56 | text = document.text_before_cursor 57 | words = text.split() 58 | if not words: 59 | return 60 | 61 | if text[0] == "/": 62 | if len(words) == 1 and not text[-1].isspace(): 63 | candidates = 
class InputOutput:
    """Central I/O hub for the chat UI.

    Owns three responsibilities:
      * reading/writing files on disk (respecting ``dry_run`` and ``encoding``),
      * prompting the user (single-line and ``{``/``}``-delimited multiline input,
        yes/no confirmations, free-form questions),
      * mirroring everything into a markdown chat transcript
        (``chat_history_file``) and a colorized console (``rich.Console``).

    NOTE(review): the prompt/console machinery assumes ``prompt_toolkit`` and
    ``rich`` are importable at call time (imported at module top, outside this
    view).
    """

    # Counters shared as class-level defaults; incremented per instance use.
    num_error_outputs = 0
    num_user_asks = 0

    def __init__(
        self,
        pretty=True,
        yes=False,
        input_history_file=None,
        chat_history_file=None,
        input=None,
        output=None,
        user_input_color="blue",
        tool_output_color=None,
        tool_error_color="red",
        encoding="utf-8",
        dry_run=False,
    ):
        """Configure console colorization, history files, and dry-run mode.

        yes: True = auto-answer "yes" to confirmations, False = auto-"no",
             None = actually ask the user.
        input/output: alternate streams (used by tests); supplying an output
             stream disables pretty mode.
        dry_run: when True, write_text() becomes a no-op.
        """
        # Honor the NO_COLOR convention (https://no-color.org): any non-empty
        # value disables colorized output.
        no_color = os.environ.get("NO_COLOR")
        if no_color is not None and no_color != "":
            pretty = False

        # Colors only apply in pretty mode.
        self.user_input_color = user_input_color if pretty else None
        self.tool_output_color = tool_output_color if pretty else None
        self.tool_error_color = tool_error_color if pretty else None

        self.input = input
        self.output = output

        self.pretty = pretty
        if self.output:
            # A captured output stream (e.g. in tests) cannot render rich
            # terminal escapes.
            self.pretty = False

        self.yes = yes

        self.input_history_file = input_history_file
        if chat_history_file is not None:
            self.chat_history_file = Path(chat_history_file)
        else:
            self.chat_history_file = None

        self.encoding = encoding
        self.dry_run = dry_run

        if pretty:
            self.console = Console()
        else:
            # Plain console: no terminal control codes, no color.
            self.console = Console(force_terminal=False, no_color=True)

        # Stamp the transcript with a session header.
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.append_chat_history(f"\n# aider chat started at {current_time}\n\n")

    def read_text(self, filename):
        """Read and return the file's text, or None after reporting an error."""
        try:
            with open(str(filename), "r", encoding=self.encoding) as f:
                return f.read()
        except FileNotFoundError:
            # BUGFIX: the original logged the literal "(unknown)" instead of
            # interpolating the filename, hiding which file was missing.
            self.tool_error(f"{filename}: file not found error")
            return
        except UnicodeError as e:
            # BUGFIX: same placeholder problem — include the offending filename.
            self.tool_error(f"{filename}: {e}")
            return

    def write_text(self, filename, content):
        """Write content to filename, unless dry_run is enabled."""
        if self.dry_run:
            return
        with open(str(filename), "w", encoding=self.encoding) as f:
            f.write(content)

    def get_input(self, root, rel_fnames, addable_rel_fnames, commands):
        """Prompt the user for one chat message and return it.

        A leading "{" starts multiline input, collected until a trailing "}".
        The prompt shows the in-chat filenames; completion is provided by
        AutoCompleter (filenames, code words, and /commands).
        """
        if self.pretty:
            style = dict(style=self.user_input_color) if self.user_input_color else dict()
            self.console.rule(**style)
        else:
            print()

        rel_fnames = list(rel_fnames)
        show = " ".join(rel_fnames)
        if len(show) > 10:
            # Long file lists get their own line above the "> " prompt.
            show += "\n"
        show += "> "

        inp = ""
        multiline_input = False

        if self.user_input_color:
            style = Style.from_dict(
                {
                    "": self.user_input_color,
                    "pygments.literal.string": f"bold italic {self.user_input_color}",
                }
            )
        else:
            style = None

        while True:
            # Rebuild the completer each line so newly-added files complete.
            completer_instance = AutoCompleter(
                root, rel_fnames, addable_rel_fnames, commands, self.encoding
            )
            if multiline_input:
                show = ". "

            session_kwargs = {
                "message": show,
                "completer": completer_instance,
                "reserve_space_for_menu": 4,
                "complete_style": CompleteStyle.MULTI_COLUMN,
                "input": self.input,
                "output": self.output,
                "lexer": PygmentsLexer(MarkdownLexer),
            }
            if style:
                session_kwargs["style"] = style

            if self.input_history_file is not None:
                session_kwargs["history"] = FileHistory(self.input_history_file)

            session = PromptSession(**session_kwargs)
            line = session.prompt()

            if line and line[0] == "{" and not multiline_input:
                multiline_input = True
                inp += line[1:] + "\n"
                continue
            elif line and line[-1] == "}" and multiline_input:
                inp += line[:-1] + "\n"
                break
            elif multiline_input:
                inp += line + "\n"
            else:
                inp = line
                break

        print()
        self.user_input(inp)
        return inp

    def user_input(self, inp):
        """Log the user's message to the transcript as "#### "-prefixed lines."""
        prefix = "####"
        if inp:
            hist = inp.splitlines()
        else:
            hist = [""]

        hist = f"  \n{prefix} ".join(hist)

        hist = f"""
{prefix} {hist}"""
        self.append_chat_history(hist, linebreak=True)

    # OUTPUT

    def ai_output(self, content):
        """Log the assistant's reply verbatim to the transcript."""
        hist = "\n" + content.strip() + "\n\n"
        self.append_chat_history(hist)

    def confirm_ask(self, question, default="y"):
        """Ask a yes/no question; honor the forced self.yes/no setting.

        Returns True when the (possibly forced) answer starts with "y",
        None when the user gave an empty answer.
        """
        self.num_user_asks += 1

        if self.yes is True:
            res = "yes"
        elif self.yes is False:
            res = "no"
        else:
            res = prompt(question + " ", default=default)

        hist = f"{question.strip()} {res.strip()}"
        self.append_chat_history(hist, linebreak=True, blockquote=True)
        if self.yes in (True, False):
            # The user never saw the question; echo question+answer for them.
            self.tool_output(hist)

        if not res or not res.strip():
            return
        return res.strip().lower().startswith("y")

    def prompt_ask(self, question, default=None):
        """Ask a free-form question and return the raw answer string."""
        self.num_user_asks += 1

        if self.yes is True:
            res = "yes"
        elif self.yes is False:
            res = "no"
        else:
            res = prompt(question + " ", default=default)

        hist = f"{question.strip()} {res.strip()}"
        self.append_chat_history(hist, linebreak=True, blockquote=True)
        if self.yes in (True, False):
            self.tool_output(hist)

        return res

    def tool_error(self, message):
        """Show an error message in the error color and log it blockquoted."""
        self.num_error_outputs += 1

        if message.strip():
            hist = f"{message.strip()}"
            self.append_chat_history(hist, linebreak=True, blockquote=True)

        message = Text(message)
        style = dict(style=self.tool_error_color) if self.tool_error_color else dict()
        self.console.print(message, **style)

    def tool_output(self, *messages, log_only=False):
        """Show informational output (unless log_only) and log it blockquoted."""
        if messages:
            hist = " ".join(messages)
            hist = f"{hist.strip()}"
            self.append_chat_history(hist, linebreak=True, blockquote=True)

        if not log_only:
            messages = list(map(Text, messages))
            style = dict(style=self.tool_output_color) if self.tool_output_color else dict()
            self.console.print(*messages, **style)

    def append_chat_history(self, text, linebreak=False, blockquote=False):
        """Append text to the markdown transcript file, if one is configured.

        blockquote: prefix with "> "; linebreak: force a markdown hard break.
        """
        if blockquote:
            text = text.strip()
            text = "> " + text
        if linebreak:
            text = text.rstrip()
            text = text + "  \n"
        if not text.endswith("\n"):
            text += "\n"
        if self.chat_history_file is not None:
            with self.chat_history_file.open("a", encoding=self.encoding) as f:
                f.write(text)
def get_git_root():
    """Locate the git working tree that contains the current directory.

    Returns the working-tree root path as a string, or None when the
    current directory is not inside any git repository.
    """
    try:
        repo = git.Repo(search_parent_directories=True)
    except git.InvalidGitRepositoryError:
        return None
    return repo.working_tree_dir
def main(args=None, input=None, output=None):
    """Command-line entry point for aider.

    Builds the CLI (configargparse, so every flag can also come from a
    .aider.conf.yml file or an AIDER_* environment variable), wires up the
    InputOutput console, optionally creates a git repo, configures the
    openai module, and hands control to a Coder chat loop.

    args: argv-style list (defaults to sys.argv[1:]); input/output are
    alternate streams for testing. Returns 1 on missing API key, otherwise
    the implicit None (success) or whatever the early-exit paths return.
    """
    if args is None:
        args = sys.argv[1:]

    git_root = get_git_root()

    conf_fname = Path(".aider.conf.yml")

    # Config-file search order: CWD, then git root, then the home directory.
    default_config_files = [conf_fname.resolve()]  # CWD
    if git_root:
        git_conf = Path(git_root) / conf_fname  # git root
        if git_conf not in default_config_files:
            default_config_files.append(git_conf)
    default_config_files.append(Path.home() / conf_fname)  # homedir
    default_config_files = list(map(str, default_config_files))

    parser = configargparse.ArgumentParser(
        description="aider is GPT powered coding in your terminal",
        add_config_file_help=True,
        default_config_files=default_config_files,
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        auto_env_var_prefix="AIDER_",
    )

    ########## Main: positional files, API key, model selection
    core_group = parser.add_argument_group("Main")
    core_group.add_argument(
        "files",
        metavar="FILE",
        nargs="*",
        help="a list of source code files to edit with GPT (optional)",
    )
    core_group.add_argument(
        "--openai-api-key",
        metavar="OPENAI_API_KEY",
        help="Specify the OpenAI API key",
        env_var="OPENAI_API_KEY",
    )
    core_group.add_argument(
        "--model",
        metavar="MODEL",
        default=models.GPT4.name,
        help=f"Specify the model to use for the main chat (default: {models.GPT4.name})",
    )
    core_group.add_argument(
        "-3",
        action="store_const",
        dest="model",
        const=models.GPT35_16k.name,
        help=f"Use {models.GPT35_16k.name} model for the main chat (gpt-4 is better)",
    )

    ########## Advanced model settings: raw openai.* overrides (Azure etc.)
    model_group = parser.add_argument_group("Advanced Model Settings")
    model_group.add_argument(
        "--openai-api-base",
        metavar="OPENAI_API_BASE",
        help="Specify the openai.api_base (default: https://api.openai.com/v1)",
    )
    model_group.add_argument(
        "--openai-api-type",
        metavar="OPENAI_API_TYPE",
        help="Specify the openai.api_type",
    )
    model_group.add_argument(
        "--openai-api-version",
        metavar="OPENAI_API_VERSION",
        help="Specify the openai.api_version",
    )
    model_group.add_argument(
        "--openai-api-deployment-id",
        metavar="OPENAI_API_DEPLOYMENT_ID",
        help="Specify the deployment_id arg to be passed to openai.ChatCompletion.create()",
    )
    model_group.add_argument(
        "--openai-api-engine",
        metavar="OPENAI_API_ENGINE",
        help="Specify the engine arg to be passed to openai.ChatCompletion.create()",
    )
    model_group.add_argument(
        "--edit-format",
        metavar="EDIT_FORMAT",
        default=None,
        help="Specify what edit format GPT should use (default depends on model)",
    )
    model_group.add_argument(
        "--map-tokens",
        type=int,
        default=1024,
        help="Max number of tokens to use for repo map, use 0 to disable (default: 1024)",
    )

    ########## History files: default next to the git root when available
    history_group = parser.add_argument_group("History Files")
    default_input_history_file = (
        os.path.join(git_root, ".aider.input.history") if git_root else ".aider.input.history"
    )
    default_chat_history_file = (
        os.path.join(git_root, ".aider.chat.history.md") if git_root else ".aider.chat.history.md"
    )
    history_group.add_argument(
        "--input-history-file",
        metavar="INPUT_HISTORY_FILE",
        default=default_input_history_file,
        help=f"Specify the chat input history file (default: {default_input_history_file})",
    )
    history_group.add_argument(
        "--chat-history-file",
        metavar="CHAT_HISTORY_FILE",
        default=default_chat_history_file,
        help=f"Specify the chat history file (default: {default_chat_history_file})",
    )

    ########## Output: colors, themes, streaming, diffs
    output_group = parser.add_argument_group("Output Settings")
    output_group.add_argument(
        "--dark-mode",
        action="store_true",
        help="Use colors suitable for a dark terminal background (default: False)",
        default=False,
    )
    output_group.add_argument(
        "--light-mode",
        action="store_true",
        help="Use colors suitable for a light terminal background (default: False)",
        default=False,
    )
    output_group.add_argument(
        "--pretty",
        action="store_true",
        default=True,
        help="Enable pretty, colorized output (default: True)",
    )
    output_group.add_argument(
        "--no-pretty",
        action="store_false",
        dest="pretty",
        help="Disable pretty, colorized output",
    )
    output_group.add_argument(
        "--no-stream",
        action="store_false",
        dest="stream",
        default=True,
        help="Disable streaming responses",
    )
    output_group.add_argument(
        "--user-input-color",
        default="#00cc00",
        help="Set the color for user input (default: #00cc00)",
    )
    output_group.add_argument(
        "--tool-output-color",
        default=None,
        help="Set the color for tool output (default: None)",
    )
    output_group.add_argument(
        "--tool-error-color",
        default="#FF2222",
        help="Set the color for tool error messages (default: red)",
    )
    output_group.add_argument(
        "--assistant-output-color",
        default="#0088ff",
        help="Set the color for assistant output (default: #0088ff)",
    )
    output_group.add_argument(
        "--code-theme",
        default="default",
        help=(
            "Set the markdown code theme (default: default, other options include monokai,"
            " solarized-dark, solarized-light)"
        ),
    )
    output_group.add_argument(
        "--show-diffs",
        action="store_true",
        help="Show diffs when committing changes (default: False)",
        default=False,
    )

    ########## Git behavior: auto/dirty commits, dry run
    git_group = parser.add_argument_group("Git Settings")
    git_group.add_argument(
        "--no-git",
        action="store_false",
        dest="git",
        default=True,
        help="Do not look for a git repo",
    )
    git_group.add_argument(
        "--auto-commits",
        action="store_true",
        dest="auto_commits",
        default=True,
        help="Enable auto commit of GPT changes (default: True)",
    )
    git_group.add_argument(
        "--no-auto-commits",
        action="store_false",
        dest="auto_commits",
        help="Disable auto commit of GPT changes (implies --no-dirty-commits)",
    )
    git_group.add_argument(
        "--dirty-commits",
        action="store_true",
        dest="dirty_commits",
        help="Enable commits when repo is found dirty",
        default=True,
    )
    git_group.add_argument(
        "--no-dirty-commits",
        action="store_false",
        dest="dirty_commits",
        help="Disable commits when repo is found dirty",
    )
    git_group.add_argument(
        "--dry-run",
        action="store_true",
        help="Perform a dry run without modifying files (default: False)",
        default=False,
    )

    ########## Everything else: version, apply, yes, verbose, one-shot message
    other_group = parser.add_argument_group("Other Settings")
    other_group.add_argument(
        "--version",
        action="version",
        version=f"%(prog)s {__version__}",
        help="Show the version number and exit",
    )
    other_group.add_argument(
        "--apply",
        metavar="FILE",
        help="Apply the changes from the given file instead of running the chat (debug)",
    )
    other_group.add_argument(
        "--yes",
        action="store_true",
        help="Always say yes to every confirmation",
        default=None,
    )
    other_group.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Enable verbose output",
        default=False,
    )
    other_group.add_argument(
        "--message",
        "--msg",
        "-m",
        metavar="COMMAND",
        help="Specify a single message to send GPT, process reply then exit (disables chat mode)",
    )
    other_group.add_argument(
        "-c",
        "--config",
        is_config_file=True,
        metavar="CONFIG_FILE",
        help=(
            "Specify the config file (default: search for .aider.conf.yml in git root, cwd"
            " or home directory)"
        ),
    )

    args = parser.parse_args(args)

    # Theme presets override the individual color flags.
    if args.dark_mode:
        args.user_input_color = "#32FF32"
        args.tool_error_color = "#FF3333"
        args.assistant_output_color = "#00FFFF"
        args.code_theme = "monokai"

    if args.light_mode:
        args.user_input_color = "green"
        args.tool_error_color = "red"
        args.assistant_output_color = "blue"
        args.code_theme = "default"

    io = InputOutput(
        args.pretty,
        args.yes,
        args.input_history_file,
        args.chat_history_file,
        input=input,
        output=output,
        user_input_color=args.user_input_color,
        tool_output_color=args.tool_output_color,
        tool_error_color=args.tool_error_color,
        dry_run=args.dry_run,
    )

    io.tool_output(f"Aider v{__version__}")

    # Offer to create a git repo so GPT's edits are tracked; fill in
    # placeholder identity values when the global git config lacks them.
    if not git_root and args.git:
        if io.confirm_ask("No git repo found, create one to track GPT's changes (recommended)?"):
            repo = git.Repo.init(os.getcwd())
            global_git_config = git.GitConfigParser(
                [str(Path.home() / ".gitconfig")], read_only=True
            )
            with repo.config_writer() as git_config:
                if not global_git_config.has_option("user", "name"):
                    git_config.set_value("user", "name", "Your Name")
                    io.tool_error('Update git name with: git config --global user.name "Your Name"')
                if not global_git_config.has_option("user", "email"):
                    git_config.set_value("user", "email", "you@example.com")
                    io.tool_error(
                        'Update git email with: git config --global user.email "you@example.com"'
                    )
            io.tool_output("Git repository created in the current working directory.")

    if args.verbose:
        show = parser.format_values()
        io.tool_output(show)
        io.tool_output("Option settings:")
        for arg, val in sorted(vars(args).items()):
            io.tool_output(f"  - {arg}: {val}")

    # Always record the exact invocation in the transcript.
    io.tool_output(*sys.argv, log_only=True)

    if not args.openai_api_key:
        if os.name == "nt":
            io.tool_error(
                "No OpenAI API key provided. Use --openai-api-key or setx OPENAI_API_KEY."
            )
        else:
            io.tool_error(
                "No OpenAI API key provided. Use --openai-api-key or export OPENAI_API_KEY."
            )
        return 1

    main_model = models.Model(args.model)

    # Copy any provided --openai-api-* overrides onto the openai module.
    openai.api_key = args.openai_api_key
    for attr in ("base", "type", "version", "deployment_id", "engine"):
        arg_key = f"openai_api_{attr}"
        val = getattr(args, arg_key)
        if val is not None:
            mod_key = f"api_{attr}"
            setattr(openai, mod_key, val)
            io.tool_output(f"Setting openai.{mod_key}={val}")

    coder = Coder.create(
        main_model,
        args.edit_format,
        io,
        # keyword-only configuration follows
        fnames=args.files,
        pretty=args.pretty,
        show_diffs=args.show_diffs,
        auto_commits=args.auto_commits,
        dirty_commits=args.dirty_commits,
        dry_run=args.dry_run,
        map_tokens=args.map_tokens,
        verbose=args.verbose,
        assistant_output_color=args.assistant_output_color,
        code_theme=args.code_theme,
        stream=args.stream,
        use_git=args.git,
    )

    # Offer to commit pre-existing uncommitted changes before chatting.
    if args.dirty_commits:
        coder.commit(ask=True, which="repo_files")

    # Debug path: apply an edit file and exit without starting the chat.
    if args.apply:
        content = io.read_text(args.apply)
        if content is None:
            return
        coder.apply_updates(content)
        return

    io.tool_output("Use /help to see in-chat commands, run with --help to see cmd line args")
    if args.message:
        # One-shot mode: send a single message, process the reply, exit.
        io.tool_output()
        coder.run(with_message=args.message)
    else:
        coder.run()
# Context-window sizes (in units of 1024 tokens) for model names that do not
# embed an explicit "-<N>k" suffix.
known_tokens = {
    "gpt-3.5-turbo": 4,
    "gpt-4": 8,
}


class Model:
    """Static description of an OpenAI chat model.

    Derives the context-window size from the model name (an explicit
    "-<N>k" suffix wins, otherwise a known-prefix lookup) and sets the
    edit format, capability flags, and per-1K-token pricing for the
    gpt-4 and gpt-3.5-turbo families. Unknown names raise ValueError.
    """

    always_available = False
    use_repo_map = False
    send_undo_reply = False

    prompt_price = None
    completion_price = None

    def __init__(self, name):
        self.name = name

        # Prefer an explicit "-<N>k" size suffix; otherwise fall back to the
        # known-prefix table.
        size_match = re.search(r"-([0-9]+)k", name)
        if size_match:
            tokens = int(size_match.group(1))
        else:
            tokens = next(
                (size for prefix, size in known_tokens.items() if name.startswith(prefix)),
                None,
            )

        if tokens is None:
            raise ValueError(f"Unknown context window size for model: {name}")

        self.max_context_tokens = tokens * 1024

        if self.is_gpt4():
            # gpt-4 family: diff edits, repo map, /undo support.
            self.edit_format = "diff"
            self.use_repo_map = True
            self.send_undo_reply = True

            if tokens == 8:
                self.prompt_price = 0.03
                self.completion_price = 0.06
            elif tokens == 32:
                self.prompt_price = 0.06
                self.completion_price = 0.12

            return

        if self.is_gpt35():
            # gpt-3.5 family: whole-file edits, always available.
            self.edit_format = "whole"
            self.always_available = True

            if tokens == 4:
                self.prompt_price = 0.0015
                self.completion_price = 0.002
            elif tokens == 16:
                self.prompt_price = 0.003
                self.completion_price = 0.004

            return

        raise ValueError(f"Unsupported model: {name}")

    def is_gpt4(self):
        """True for any gpt-4 family model name."""
        return self.name.startswith("gpt-4")

    def is_gpt35(self):
        """True for any gpt-3.5-turbo family model name."""
        return self.name.startswith("gpt-3.5-turbo")

    def __str__(self):
        return self.name
def to_tree(tags):
    """Render sorted tag tuples as a tab-indented tree string.

    Each tag is a tuple of path components; consecutive tags share their
    common prefix, which is emitted only once, with deeper components
    indented one extra tab per level. Returns "" for an empty input.
    """
    if not tags:
        return ""

    tags = sorted(tags)

    chunks = []
    tab = "\t"
    prev = [None] * len(tags[0])
    for tag in tags:
        tag = list(tag)

        # Length of the prefix this tag shares with the previous one.
        depth = 0
        while depth < len(prev) and prev[depth] == tag[depth]:
            depth += 1

        pad = tab * depth
        for part in tag[depth:]:
            chunks.append(f"{pad}{part}\n")
            pad += tab
        prev = tag

    return "".join(chunks)
class RepoMap:
    """Builds a token-budgeted "repo map" listing for the chat prompt.

    Uses universal-ctags (when available) to extract definitions, pagerank
    over a definition/reference graph to rank them, and a binary search to
    fit the rendered tree inside ``max_map_tokens``. Falls back to a plain
    filename tree when ctags is unavailable. Identifier and tag data are
    cached on disk via diskcache, keyed by file mtime.
    """

    CACHE_VERSION = 1
    # ctags invocation: JSON output with signatures, no fallback "F" entries.
    ctags_cmd = [
        "ctags",
        "--fields=+S",
        "--extras=-F",
        "--output-format=json",
        "--output-encoding=utf-8",
    ]
    # Cache dirs are versioned so a format change invalidates old caches.
    IDENT_CACHE_DIR = f".aider.ident.cache.v{CACHE_VERSION}"
    TAGS_CACHE_DIR = f".aider.tags.cache.v{CACHE_VERSION}"

    ctags_disabled_reason = "ctags not initialized"

    def __init__(
        self,
        map_tokens=1024,
        root=None,
        main_model=models.GPT4,
        io=None,
        repo_content_prefix=None,
        verbose=False,
    ):
        """Set up caches, probe for ctags, and pick the tokenizer.

        map_tokens: token budget for the map; 0 disables it.
        root: repo root (defaults to CWD); all paths are reported relative
        to it. io: InputOutput used for error/status reporting.
        """
        self.io = io
        self.verbose = verbose

        if not root:
            root = os.getcwd()
        self.root = root

        self.load_ident_cache()
        self.load_tags_cache()

        self.max_map_tokens = map_tokens
        self.has_ctags = self.check_for_ctags()

        # Ranked ctags maps require both a budget and a working ctags binary.
        if map_tokens > 0 and self.has_ctags:
            self.use_ctags = True
        else:
            self.use_ctags = False

        self.tokenizer = tiktoken.encoding_for_model(main_model.name)
        self.repo_content_prefix = repo_content_prefix

    def get_repo_map(self, chat_files, other_files):
        """Return the prompt-ready repo map string, or None if none fits."""
        res = self.choose_files_listing(chat_files, other_files)
        if not res:
            return

        files_listing, ctags_msg = res

        # Wording tweak: files are only "other" relative to in-chat files.
        if chat_files:
            other = "other "
        else:
            other = ""

        if self.repo_content_prefix:
            repo_content = self.repo_content_prefix.format(
                other=other,
                ctags_msg=ctags_msg,
            )
        else:
            repo_content = ""

        repo_content += files_listing

        return repo_content

    def choose_files_listing(self, chat_files, other_files):
        """Pick the best listing: ranked ctags map, else a simple file tree.

        Returns (listing, ctags_msg) or None when disabled/over budget.
        """
        if self.max_map_tokens <= 0:
            return

        if not other_files:
            return

        if self.use_ctags:
            files_listing = self.get_ranked_tags_map(chat_files, other_files)
            if files_listing:
                num_tokens = self.token_count(files_listing)
                if self.verbose:
                    self.io.tool_output(f"ctags map: {num_tokens/1024:.1f} k-tokens")
                ctags_msg = " with selected ctags info"
                return files_listing, ctags_msg

        # Fallback: filename-only tree, used only if it fits the budget.
        files_listing = self.get_simple_files_map(other_files)
        ctags_msg = ""
        num_tokens = self.token_count(files_listing)
        if self.verbose:
            self.io.tool_output(f"simple map: {num_tokens/1024:.1f} k-tokens")
        if num_tokens < self.max_map_tokens:
            return files_listing, ctags_msg

    def get_simple_files_map(self, other_files):
        """Render a plain tree of the given filenames (no ctags info)."""
        fnames = []
        for fname in other_files:
            fname = self.get_rel_fname(fname)
            fname = fname_to_components(fname, False)
            fnames.append(fname)

        return to_tree(fnames)

    def token_count(self, string):
        """Count tokens in string using the main model's tokenizer."""
        return len(self.tokenizer.encode(string))

    def get_rel_fname(self, fname):
        """Path of fname relative to the repo root."""
        return os.path.relpath(fname, self.root)

    def split_path(self, path):
        """Relative path as a single "path:" component list."""
        path = os.path.relpath(path, self.root)
        return [path + ":"]

    def run_ctags(self, filename):
        """Run ctags on filename, returning parsed JSON tag entries.

        Results are cached by filename and invalidated when the file's
        mtime changes. Returns [] for a missing file; JSON parse errors
        for individual lines are reported and skipped.
        """
        # Check if the file is in the cache and if the modification time has not changed
        file_mtime = self.get_mtime(filename)
        if file_mtime is None:
            return []

        cache_key = filename
        if cache_key in self.TAGS_CACHE and self.TAGS_CACHE[cache_key]["mtime"] == file_mtime:
            return self.TAGS_CACHE[cache_key]["data"]

        cmd = self.ctags_cmd + [
            f"--input-encoding={self.io.encoding}",
            filename,
        ]
        output = subprocess.check_output(cmd, stderr=subprocess.PIPE).decode("utf-8")
        output_lines = output.splitlines()

        data = []
        for line in output_lines:
            try:
                data.append(json.loads(line))
            except json.decoder.JSONDecodeError as err:
                self.io.tool_error(f"Error parsing ctags output: {err}")
                self.io.tool_error(repr(line))

        # Update the cache
        self.TAGS_CACHE[cache_key] = {"mtime": file_mtime, "data": data}
        self.save_tags_cache()
        return data

    def check_for_ctags(self):
        """Probe for a usable universal-ctags with +json support.

        Returns True on success; otherwise records why in
        ``ctags_disabled_reason`` and returns None (falsy).
        """
        try:
            executable = self.ctags_cmd[0]
            cmd = [executable, "--version"]
            output = subprocess.check_output(cmd, stderr=subprocess.PIPE).decode("utf-8")
            output = output.lower()

            cmd = " ".join(cmd)

            if "universal ctags" not in output:
                self.ctags_disabled_reason = f"{cmd} does not claim to be universal ctags"
                return
            if "+json" not in output:
                self.ctags_disabled_reason = f"{cmd} does not list +json support"
                return

            # Smoke test: actually tag a tiny file end-to-end.
            with tempfile.TemporaryDirectory() as tempdir:
                hello_py = os.path.join(tempdir, "hello.py")
                with open(hello_py, "w", encoding="utf-8") as f:
                    f.write("def hello():\n    print('Hello, world!')\n")
                self.run_ctags(hello_py)
        except FileNotFoundError:
            self.ctags_disabled_reason = f"{executable} executable not found"
            return
        except Exception as err:
            self.ctags_disabled_reason = f"error running universal-ctags: {err}"
            return

        return True

    def load_tags_cache(self):
        # diskcache persists writes immediately, hence the no-op save below.
        self.TAGS_CACHE = Cache(self.TAGS_CACHE_DIR)

    def save_tags_cache(self):
        pass

    def load_ident_cache(self):
        self.IDENT_CACHE = Cache(self.IDENT_CACHE_DIR)

    def save_ident_cache(self):
        pass

    def get_mtime(self, fname):
        """mtime of fname, or None (after reporting) when it is missing."""
        try:
            return os.path.getmtime(fname)
        except FileNotFoundError:
            self.io.tool_error(f"File not found error: {fname}")

    def get_name_identifiers(self, fname, uniq=True):
        """All Name-token identifiers in fname, mtime-cached on disk."""
        file_mtime = self.get_mtime(fname)
        if file_mtime is None:
            return set()

        cache_key = fname
        if cache_key in self.IDENT_CACHE and self.IDENT_CACHE[cache_key]["mtime"] == file_mtime:
            idents = self.IDENT_CACHE[cache_key]["data"]
        else:
            idents = self.get_name_identifiers_uncached(fname)
            self.IDENT_CACHE[cache_key] = {"mtime": file_mtime, "data": idents}
            self.save_ident_cache()

        if uniq:
            idents = set(idents)
        return idents

    def get_name_identifiers_uncached(self, fname):
        """Lex fname with pygments and return its Name tokens (with dupes)."""
        content = self.io.read_text(fname)
        if content is None:
            return list()

        try:
            lexer = guess_lexer_for_filename(fname, content)
        except ClassNotFound:
            return list()

        # lexer.get_tokens_unprocessed() returns (char position in file, token type, token string)
        tokens = list(lexer.get_tokens_unprocessed(content))
        res = [token[2] for token in tokens if token[1] in Token.Name]
        return res

    def get_ranked_tags(self, chat_fnames, other_fnames):
        """Rank every definition in the repo by pagerank over references.

        Builds a graph with an edge from each referencing file to each
        defining file per shared identifier, personalizes the rank toward
        in-chat files, then returns tag tuples (rel_fname, [scope,] kind,
        name+signature) in descending rank order, followed by bare
        (fname,) entries for files that contributed no tags.
        """
        defines = defaultdict(set)
        references = defaultdict(list)
        definitions = defaultdict(set)

        personalization = dict()

        fnames = set(chat_fnames).union(set(other_fnames))
        chat_rel_fnames = set()

        for fname in sorted(fnames):
            # dump(fname)
            rel_fname = os.path.relpath(fname, self.root)

            # Bias the pagerank toward files already in the chat.
            if fname in chat_fnames:
                personalization[rel_fname] = 1.0
                chat_rel_fnames.add(rel_fname)

            data = self.run_ctags(fname)

            for tag in data:
                ident = tag["name"]
                defines[ident].add(rel_fname)

                scope = tag.get("scope")
                kind = tag.get("kind")
                name = tag.get("name")
                signature = tag.get("signature")

                last = name
                if signature:
                    last += " " + signature

                res = [rel_fname]
                if scope:
                    res.append(scope)
                res += [kind, last]

                key = (rel_fname, ident)
                definitions[key].add(tuple(res))
                # definitions[key].add((rel_fname,))

            idents = self.get_name_identifiers(fname, uniq=False)
            for ident in idents:
                # dump("ref", fname, ident)
                references[ident].append(rel_fname)

        # Only identifiers that are both defined and referenced matter.
        idents = set(defines.keys()).intersection(set(references.keys()))

        G = nx.MultiDiGraph()

        for ident in idents:
            definers = defines[ident]
            for referencer, num_refs in Counter(references[ident]).items():
                for definer in definers:
                    if referencer == definer:
                        continue
                    G.add_edge(referencer, definer, weight=num_refs, ident=ident)

        if personalization:
            pers_args = dict(personalization=personalization, dangling=personalization)
        else:
            pers_args = dict()

        try:
            ranked = nx.pagerank(G, weight="weight", **pers_args)
        except ZeroDivisionError:
            # Empty/degenerate graph: no ranking possible.
            return []

        # distribute the rank from each source node, across all of its out edges
        ranked_definitions = defaultdict(float)
        for src in G.nodes:
            src_rank = ranked[src]
            total_weight = sum(data["weight"] for _src, _dst, data in G.out_edges(src, data=True))
            # dump(src, src_rank, total_weight)
            for _src, dst, data in G.out_edges(src, data=True):
                data["rank"] = src_rank * data["weight"] / total_weight
                ident = data["ident"]
                ranked_definitions[(dst, ident)] += data["rank"]

        ranked_tags = []
        ranked_definitions = sorted(ranked_definitions.items(), reverse=True, key=lambda x: x[1])
        for (fname, ident), rank in ranked_definitions:
            # print(f"{rank:.03f} {fname} {ident}")
            # In-chat files are already fully visible; skip their tags.
            if fname in chat_rel_fnames:
                continue
            ranked_tags += list(definitions.get((fname, ident), []))

        rel_other_fnames_without_tags = set(
            os.path.relpath(fname, self.root) for fname in other_fnames
        )

        fnames_already_included = set(rt[0] for rt in ranked_tags)

        # Append remaining files (highest-ranked first, then the rest) so
        # every file shows up at least by name.
        top_rank = sorted([(rank, node) for (node, rank) in ranked.items()], reverse=True)
        for rank, fname in top_rank:
            if fname in rel_other_fnames_without_tags:
                rel_other_fnames_without_tags.remove(fname)
            if fname not in fnames_already_included:
                ranked_tags.append((fname,))

        for fname in rel_other_fnames_without_tags:
            ranked_tags.append((fname,))

        return ranked_tags

    def get_ranked_tags_map(self, chat_fnames, other_fnames=None):
        """Largest prefix of the ranked tags that renders under the budget.

        Binary-searches the number of tags included so the rendered tree
        stays below max_map_tokens. Returns the tree string or None.
        """
        if not other_fnames:
            other_fnames = list()

        ranked_tags = self.get_ranked_tags(chat_fnames, other_fnames)
        num_tags = len(ranked_tags)

        lower_bound = 0
        upper_bound = num_tags
        best_tree = None

        while lower_bound <= upper_bound:
            middle = (lower_bound + upper_bound) // 2
            tree = to_tree(ranked_tags[:middle])
            num_tokens = self.token_count(tree)
            # dump(middle, num_tokens)

            if num_tokens < self.max_map_tokens:
                best_tree = tree
                lower_bound = middle + 1
            else:
                upper_bound = middle - 1

        return best_tree
def find_py_files(directory):
    """Recursively collect .py files under directory.

    A path that is not a directory is returned as-is in a one-element
    list (so callers can pass individual files through unchanged).
    """
    if not os.path.isdir(directory):
        return [directory]

    return [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(directory)
        for filename in filenames
        if filename.endswith(".py")
    ]


def get_random_color():
    """Return a random fully-saturated, 75%-brightness color as '#rrggbb'."""
    red, green, blue = (
        int(channel * 255) for channel in colorsys.hsv_to_rgb(random.random(), 1, 0.75)
    )
    return f"#{red:02x}{green:02x}{blue:02x}"
dname in sys.argv[1:]: 440 | if ".venv" in dname: 441 | other_fnames += find_py_files(dname) 442 | else: 443 | chat_fnames += find_py_files(dname) 444 | 445 | root = os.path.commonpath(chat_fnames) 446 | 447 | rm = RepoMap(root=root) 448 | repo_map = rm.get_ranked_tags_map(chat_fnames, other_fnames) 449 | 450 | dump(len(repo_map)) 451 | print(repo_map) 452 | -------------------------------------------------------------------------------- /AiderModify/aider/utils.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from .dump import dump # noqa: F401 4 | 5 | 6 | def safe_abs_path(res): 7 | "Gives an abs path, which safely returns a full (not 8.3) windows path" 8 | res = Path(res).resolve() 9 | return str(res) 10 | 11 | 12 | def show_messages(messages, title=None, functions=None): 13 | if title: 14 | print(title.upper(), "*" * 50) 15 | 16 | for msg in messages: 17 | role = msg["role"].upper() 18 | content = msg.get("content") 19 | if content: 20 | for line in content.splitlines(): 21 | print(role, line) 22 | content = msg.get("function_call") 23 | if content: 24 | print(role, content) 25 | 26 | if functions: 27 | dump(functions) 28 | -------------------------------------------------------------------------------- /Icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/Icon.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # AgileGen: Empowering Agile-Based Generative Software Development through Human-AI Teamwork 3 | 4 |

5 | 6 |

7 | 8 | - A generative software development agent for human-AI collaboration initiated from the perspective of end-users. 9 | - Our preprint paper is available at 📚: https://arxiv.org/abs/2407.15568 10 | - Try the Huggingface Link 🤗: https://huggingface.co/spaces/HarrisClover/AgileGen 11 | # Overview 📄 12 | 13 | ![框架图_01](https://github.com/HarrisClover/AgileGen/assets/33628813/e41c642e-50bb-43a9-860c-9203aae0bc46) 14 | 15 | The interaction and collaboration design of AgileGen is divided into two parts. 16 | 17 | (1) End-user Decision-Making is used to collaborate with end-users to collect and clarify end-user decisions. 18 | 19 | (2) AgileGen Agent responds to user decisions by transforming and analyzing them, aiming to guide LLMs in generating software code consistent with user requirements. Scenarios Design and Rapid Prototype Design are two core components of the AgileGen. 20 | 21 | - The Scenarios Design component is primarily used to design different scenarios represented in Gherkin language based on decision-making requirements, submit them to end-users for scenario decisions, and return the decided Gherkin scenarios. 22 | - The Rapid Prototype Design component is responsible for generating software application code based on the decided Gherkin scenarios. It then presents the software application to users for acceptance, receives user feedback and suggestions, and makes necessary modifications to the code. 23 | 24 | --- 25 | 26 | # ❓**What Can AgileGen (Sapper4SE) Do?** 27 | ![example](https://github.com/user-attachments/assets/08b7b260-81be-4b0a-8a34-5ae799730e6b) 28 | 29 | https://github.com/HarrisClover/AgileGen/assets/33628813/8478c0d1-b511-4cae-8037-0c3f11f7074e 30 | 31 | --- 32 | # 👨‍💻‍Human-AI collaboration process: 33 | ![inter_case_1](https://github.com/user-attachments/assets/6c2d46c4-0b41-4cd2-b53d-bb25c5daf2ff) 34 | --- 35 | # ✈️Quick start 36 | 37 | 1. Configure your OpenAI key and VPN in utils/CodeGeneration.py 38 | 2. python ./main.py 39 | 3. 
Running on local URL: http://127.0.0.1:0000 (your VPN) 40 | 41 | **Example input:** 42 | | Project Name | Description Features | 43 | | ------------------------------- | ----------------------------------------------------------------- | 44 | | `NewsMeter` | Evaluate the credibility of news articles by analyzing multiple factors and generating trustworthiness scores with explanations and evidence. | 45 | | `VideoClipper` | VideoClipper is a software application that allows users to easily clip and trim videos. It provides an intuitive interface to select specific sections of the video, and saves the trimmed video as a new file.| 46 | | `SportArena`| Develop a user-friendly software application that allows users to create and customize virtual sports arenas.| 47 | |`SoundBoard`| I'd like to have a website that displays cards with words on them and plays sounds when clicked.| 48 | --- 49 | ![Step](https://github.com/user-attachments/assets/d14a0367-48f8-4ad2-90e7-53ac7af79572) 50 | 51 | 52 | # 🔎Citation 53 | ``` 54 | @article{zhang2024empowering, 55 | title={Empowering Agile-Based Generative Software Development through Human-AI Teamwork}, 56 | author={Zhang, Sai and Xing, Zhenchang and Guo, Ronghui and Xu, Fangzhou and Chen, Lei and Zhang, Zhaoyuan and Zhang, Xiaowang and Feng, Zhiyong and Zhuang, Zhiqiang}, 57 | journal={arXiv preprint arXiv:2407.15568}, 58 | year={2024} 59 | } 60 | ``` 61 | -------------------------------------------------------------------------------- /config/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "logs_dir": "logs/logs.log", 3 | "model": "gpt-4o", 4 | "temperature": 0.4, 5 | "static_dir":"./static", 6 | "static_html_dir": "./static/html", 7 | "prompt_path": "./prompts_templates", 8 | "max_scenarios_number": 10, 9 | "init_visible_scenarios_number": 2, 10 | 11 | "stdout_handler_level": "debug", 12 | "file_handler_level": "debug", 13 | "model_name": "WebCodeGenerator", 14 | 15 | 
import sqlite3
import json


class DB_Tools():
    """Thin persistence layer for feature -> Gherkin-scenario mappings.

    Wraps a single SQLite connection to database/database.sqlite3 and the
    index_feature2scenariostable table (columns: id, feature, scenarios,
    where scenarios is stored as a JSON-encoded list).
    """

    def __init__(self):
        # check_same_thread=False: the Gradio app invokes these methods from
        # worker threads other than the one that opened the connection.
        self.conn = sqlite3.connect('database/database.sqlite3', check_same_thread=False)
        self.cursor = self.conn.cursor()

    def insert(self, feature_name, scenario_data):
        """Store one feature with its scenario list (JSON-encoded) and commit.

        feature_name: str, the user-supplied feature description.
        scenario_data: JSON-serializable list of scenario strings.
        """
        insert_query = "INSERT INTO index_feature2scenariostable (feature, scenarios) VALUES (?, ?)"

        scenarios_json = json.dumps(scenario_data)

        self.cursor.execute(insert_query, (feature_name, scenarios_json))
        self.conn.commit()

    def select_all(self):
        """Return every stored row as [{"feature": ..., "scenarios": [...]}, ...]."""
        self.cursor.execute("SELECT * FROM index_feature2scenariostable")
        rows = self.cursor.fetchall()
        feature2scenarios_list = []
        for row in rows:
            feature2scenarios = {}
            feature2scenarios["feature"] = row[1]
            # Fix: use json.loads instead of eval. eval executed arbitrary
            # expressions read back from the database (an injection hazard)
            # and cannot parse JSON literals such as null/true/false that
            # the paired insert() writes via json.dumps.
            feature2scenarios["scenarios"] = json.loads(row[2])
            feature2scenarios_list.append(feature2scenarios)

        return feature2scenarios_list

    def close(self):
        """Release the cursor and connection (new, optional; existing callers unaffected)."""
        self.cursor.close()
        self.conn.close()
/database/db.sql: -------------------------------------------------------------------------------- 1 | -- database: /home/user/grh/SE/HITL/database/database.sqlite3 2 | 3 | -- Use the ▷ button in the top right corner to run the entire file. 4 | 5 | SELECT * FROM "index_feature2scenariostable"; 6 | 7 | 8 | 9 | DELETE FROM "index_feature2scenariostable" WHERE id=6 -------------------------------------------------------------------------------- /logs/logs.log: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/logs/logs.log -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import os 4 | import gradio as gr 5 | from utils.log import Logger 6 | from pathlib import Path 7 | from utils.CodeGeneration import CodeGeneration 8 | from utils.utils import zip_folder, iframe_generator 9 | from database.DB_Tools import DB_Tools 10 | from dotenv import load_dotenv 11 | from AiderModify.ModifyCodeAider import modify_code_aider 12 | 13 | # ----------log------------- 14 | sys.stdout = Logger("logs/logs.log") 15 | load_dotenv() 16 | 17 | if __name__ == "__main__": 18 | 19 | codegeneration = CodeGeneration() 20 | db_tools = DB_Tools() 21 | 22 | def read_logs(): 23 | sys.stdout.flush() 24 | with open("logs/logs.log", "r") as f: 25 | return f.read() 26 | # ----------log---------------- 27 | 28 | # create a static directory to store the static files 29 | static_dir = Path(codegeneration.args.static_dir) 30 | static_dir.mkdir(parents=True, exist_ok=True) 31 | # 32 | 33 | def fn_scenario_generation(input_feature): 34 | feature2scenarios_list = db_tools.select_all() 35 | similar_Feature2Scenarios = codegeneration.TopN_Feature2Scenarios( 36 | feature2scenarios_list, input_feature) 37 | 
print("\n------------------Gherkin generating-------------------\n") 38 | Gherkin_response, messages = codegeneration.Gherkin_generation(input_feature, similar_Feature2Scenarios) 39 | print(Gherkin_response) 40 | Scenarios_List = codegeneration.Scenario_Parsing(Gherkin_response) 41 | print("\n---------------------Gherkin2NL-----------------------\n") 42 | Gherkin_NL_List = codegeneration.Gherkin2NL(Scenarios_List, messages) 43 | print(Gherkin_NL_List) 44 | 45 | output_dict = {} 46 | for i in range(len(Gherkin_NL_List)): 47 | output_dict[globals()["scenarios_list"][i] 48 | ] = gr.update(visible=True) 49 | output_dict[globals()["scenarios_list"][i].children[0].children[0]] = gr.update( 50 | value=Gherkin_NL_List[i]) 51 | for i in range(codegeneration.args.max_scenarios_number-len(Gherkin_NL_List)): 52 | output_dict[globals()["scenarios_list"] 53 | [i+len(Gherkin_NL_List)]] = gr.update(visible=False) 54 | output_dict[globals()["scenarios_list"][i+len(Gherkin_NL_List) 55 | ].children[0].children[0]] = gr.update(value="") 56 | output_dict[globals()["scenario_add"]] = gr.update(visible=True) 57 | output_dict[globals()["code_output"]] = gr.update(visible=False) 58 | return output_dict 59 | 60 | def fn_scenario_add(*arg): 61 | print("fn_scenario_add") 62 | 63 | input_string = arg[-1] 64 | scenarios_string_list = list(arg[:-1]) 65 | for i in range(codegeneration.args.max_scenarios_number): 66 | if scenarios_string_list[i] == "": 67 | return {globals()["scenarios_list"][i]: gr.update(visible=True), 68 | globals()["scenarios_list"][i].children[0].children[0]: input_string} 69 | 70 | def fn_code_generation(*args): 71 | print("\n------------------fn_code_generation-----------------------\n") 72 | codegeneration.clear_static_html_dir() 73 | 74 | Gherkin_NL_List = [] 75 | for i in range(len(args)-1): 76 | if args[i] != "": 77 | Gherkin_NL_List.append(args[i]) 78 | 79 | input_feature = args[-1] 80 | 81 | db_tools.insert(input_feature, Gherkin_NL_List) 82 | 
print("\n------------------NL2Gherkin-----------------------\n") 83 | Gherkin_result = codegeneration.NL2Gherkin(Gherkin_NL_List, input_feature) 84 | print(Gherkin_result) 85 | time.sleep(15) 86 | print("\n----------------Design_page_template_generation----------------\n") 87 | Design_page_template = codegeneration.Design_page_template_generation(Gherkin_result) 88 | print(Design_page_template) 89 | print("\n----------------Visual_design_template_generation---------------\n") 90 | Visual_design_template = codegeneration.Visual_design_template_generation(Design_page_template) 91 | print(Visual_design_template) 92 | print("\n----------------Code_generation-----------------\n") 93 | Generated_code, loop_number = codegeneration.Code_generation( 94 | Visual_design_template, Design_page_template, input_feature, Gherkin_result) 95 | 96 | file_path = "static/html/index.html"+'?time='+str(time.time()) 97 | file_name = "index.html" 98 | link = f'{file_name}' 99 | 100 | iframe = iframe_generator(file_path) 101 | 102 | output_path = os.path.join(static_dir, "html.zip") 103 | zip_folder(folder_path=codegeneration.args.static_html_dir, 104 | output_path=output_path) 105 | 106 | return link, gr.update(visible=True), output_path, Generated_code, iframe 107 | 108 | def fn_download_file(): 109 | output_path = os.path.join(static_dir, "html.zip") 110 | zip_folder(folder_path=codegeneration.args.static_html_dir, 111 | output_path=output_path) 112 | return output_path 113 | 114 | def fn_code_modification(code_modification_suggestion_string, generated_code): 115 | codegeneration.clear_static_html_dir() 116 | print("Code_Modification") 117 | modified_code, messages, loop_number = codegeneration.Code_Modification( 118 | generated_code, code_modification_suggestion_string) 119 | output_path = os.path.join(static_dir, "html.zip") 120 | zip_folder(folder_path=codegeneration.args.static_html_dir, 121 | output_path=output_path) 122 | 123 | file_path = 
"static/html/index.html"+'?time='+str(time.time()) 124 | file_name = "index.html" 125 | link = f'{file_name}' 126 | iframe = iframe_generator(file_path) 127 | 128 | return link, output_path, modified_code, iframe 129 | 130 | def fn_code_modification_aider(code_modification_suggestion_string, generated_code): 131 | time.sleep(15) 132 | print("\n---------------Code_Modification-------------\n") 133 | testdir = "static/html" 134 | model_name = "gpt-4-turbo-2024-04-09" 135 | # model_name = "gpt-4o" 136 | edit_format = "whole" 137 | tries = 2 138 | no_unit_tests = True 139 | no_aider = False 140 | verbose = False 141 | commit_hash = "e3aa9db-dirty" 142 | edit_purpose = "code" 143 | modify_code_aider(code_modification_suggestion_string, edit_purpose, testdir, 144 | model_name, edit_format, tries, no_unit_tests, no_aider, verbose, commit_hash) 145 | 146 | output_path = os.path.join(static_dir, "html.zip") 147 | zip_folder(folder_path=codegeneration.args.static_html_dir, 148 | output_path=output_path) 149 | 150 | file_path = "static/html/index.html"+'?time='+str(time.time()) 151 | file_name = "index.html" 152 | link = f'{file_name}' 153 | iframe = iframe_generator(file_path) 154 | modified_code = "" 155 | 156 | return link, output_path, modified_code, iframe 157 | 158 | def fn_design_modification(code_modification_suggestion_string, generated_code): 159 | codegeneration.clear_static_html_dir() 160 | print("\n--------------Design_Modification---------------\n") 161 | modified_code, messages, loop_number = codegeneration.Design_Modification( 162 | generated_code, code_modification_suggestion_string) 163 | output_path = os.path.join(static_dir, "html.zip") 164 | zip_folder(folder_path=codegeneration.args.static_html_dir, 165 | output_path=output_path) 166 | 167 | file_path = "static/html/index.html"+'?time='+str(time.time()) 168 | file_name = "index.html" 169 | link = f'{file_name}' 170 | iframe = iframe_generator(file_path) 171 | 172 | return link, output_path, 
modified_code, iframe 173 | 174 | def fn_design_modification_aider(code_modification_suggestion_string, generated_code): 175 | print("\n----------------Design_Modification----------------\n") 176 | 177 | testdir = "static/html" 178 | model_name = "gpt-4-turbo-2024-04-09" 179 | edit_format = "whole" 180 | tries = 2 181 | no_unit_tests = True 182 | no_aider = False 183 | verbose = False 184 | commit_hash = "e3aa9db-dirty" 185 | edit_purpose = "code" 186 | modify_code_aider(code_modification_suggestion_string, edit_purpose, testdir, 187 | model_name, edit_format, tries, no_unit_tests, no_aider, verbose, commit_hash) 188 | 189 | output_path = os.path.join(static_dir, "html.zip") 190 | zip_folder(folder_path=codegeneration.args.static_html_dir, 191 | output_path=output_path) 192 | 193 | file_path = "static/html/index.html"+'?time='+str(time.time()) 194 | file_name = "index.html" 195 | link = f'{file_name}' 196 | iframe = iframe_generator(file_path) 197 | modified_code = "" 198 | 199 | return link, output_path, modified_code, iframe 200 | 201 | 202 | with gr.Blocks(title="AgileGen") as app: 203 | gr.Markdown("# AgileGen") 204 | generated_code_state = gr.State(value="") 205 | 206 | with gr.Row() as Feature_Block: 207 | feature_textbox = gr.Textbox(label="Your Feature", lines=3, placeholder="Please input your feature here...", scale=9) 208 | scenario_generation_btn = gr.Button(value="Scenario Generation", scale=1) 209 | 210 | scenarios_list = [] 211 | scenarios_textbox_list = [] 212 | 213 | with gr.Column() as Scenarios_Block: 214 | with gr.Box(): 215 | for i in range(codegeneration.args.max_scenarios_number): 216 | if i < codegeneration.args.init_visible_scenarios_number: 217 | with gr.Row(visible=True) as globals()["scenario_{i}"]: 218 | globals()["scenario_textbox_{i}"] = gr.Textbox( 219 | interactive=True, label=f"Scenario", lines=2, scale=9) 220 | globals()["del_btn_{i}"] = gr.Button( 221 | value="Del", scale=1) 222 | 223 | def change_vis(): 224 | return 
gr.update(value=""), gr.update(visible=False) 225 | 226 | globals()["del_btn_{i}"].click(fn=change_vis, inputs=None, outputs=[ 227 | globals()["scenario_textbox_{i}"], globals()["scenario_{i}"]]) 228 | else: 229 | with gr.Row(visible=False) as globals()["scenario_{i}"]: 230 | globals()["scenario_textbox_{i}"] = gr.Textbox( 231 | interactive=True, label=f"Scenario", lines=2, scale=9) 232 | globals()["del_btn_{i}"] = gr.Button( 233 | value="Del", scale=1) 234 | 235 | def change_vis(): 236 | return gr.update(value=""), gr.update(visible=False) 237 | 238 | globals()["del_btn_{i}"].click(fn=change_vis, inputs=None, outputs=[ 239 | globals()["scenario_textbox_{i}"], globals()["scenario_{i}"]]) 240 | 241 | scenarios_list.append(globals()["scenario_{i}"]) 242 | scenarios_textbox_list.append( 243 | globals()["scenario_textbox_{i}"]) 244 | 245 | with gr.Column(visible=False) as globals()["scenario_add"]: 246 | with gr.Row(): 247 | globals()["scenario_add_textbox"] = gr.Textbox( 248 | interactive=True, label="Your new scenario:", lines=2, scale=9) 249 | scenario_add_btn = gr.Button(value="Add", scale=1) 250 | code_generation_btn = gr.Button(value="Code Generation") 251 | 252 | html_markdown = gr.Markdown(label="Output HTML") 253 | 254 | with gr.Column(visible=False) as globals()["code_output"]: 255 | with gr.Column(): 256 | gr_download_file = gr.File() 257 | pass 258 | with gr.Row(): 259 | globals()["design_modification_textbox"] = gr.Textbox( 260 | label="Design Modification Suggestions", scale=9) 261 | code_design_modification_btn = gr.Button( 262 | value="Design Modification", scale=1) 263 | with gr.Row(): 264 | globals()["code_modification_textbox"] = gr.Textbox( 265 | label="Code Modification Suggestions", scale=9) 266 | code_modification_btn = gr.Button( 267 | value="Code Modification", scale=1) 268 | 269 | scenario_generation_btn_outputs = [] 270 | scenario_generation_btn_outputs = scenarios_list+scenarios_textbox_list 271 | 
scenario_generation_btn_outputs.append(globals()["scenario_add"]) 272 | scenario_generation_btn_outputs.append(globals()["code_output"]) 273 | scenario_generation_btn.click( 274 | fn=fn_scenario_generation, inputs=feature_textbox, outputs=scenario_generation_btn_outputs) 275 | 276 | scenario_add_btn_inputs = [] 277 | scenario_add_btn_inputs.extend(scenarios_textbox_list) 278 | scenario_add_btn_inputs.append(globals()["scenario_add_textbox"]) 279 | scenario_add_btn_outputs = [] 280 | scenario_add_btn_outputs = scenarios_list+scenarios_textbox_list 281 | scenario_add_btn_outputs.append(globals()["scenario_add"]) 282 | 283 | scenario_add_btn.click( 284 | fn=fn_scenario_add, inputs=scenario_add_btn_inputs, outputs=scenario_add_btn_outputs) 285 | 286 | code_generation_btn_inputs = [] 287 | code_generation_btn_inputs.extend(scenarios_textbox_list) 288 | code_generation_btn_inputs.append(feature_textbox) 289 | 290 | new_logs = gr.Textbox(label="Log", max_lines=20) 291 | app.load(read_logs, None, new_logs, every=3, queue=True, scroll_to_output=True) 292 | 293 | code_generation_btn.click(fn=fn_code_generation, inputs=code_generation_btn_inputs, outputs=[ 294 | html_markdown, globals()["code_output"], gr_download_file, generated_code_state]) 295 | 296 | code_modification_btn.click(fn=fn_code_modification_aider, inputs=[globals()[ 297 | "code_modification_textbox"], generated_code_state], outputs=[html_markdown, gr_download_file, generated_code_state]) 298 | 299 | code_design_modification_btn.click(fn=fn_design_modification_aider, inputs=[globals( 300 | )["design_modification_textbox"], generated_code_state], outputs=[html_markdown, gr_download_file, generated_code_state]) 301 | 302 | app.queue() 303 | app.launch() 304 | -------------------------------------------------------------------------------- /prompts_templates/Code_generation_prompt.txt: -------------------------------------------------------------------------------- 1 | Page templates:[ 2 | {Design_page_template 
Replacement Flag} 3 | ] 4 | Visual Description: [ 5 | {Visual_design_template Replacement Flag} 6 | ] 7 | Instructions: Combine the Scenarios, Page templates, and visual descriptions to generate simple web code (including .html, .css, .js) for the task without any notes. 8 | There is no back-end for this feature, so please use the front-end code to implement the back-end functionality involved. 9 | Gherkin:[ 10 | {Gherkin_result Replacement Flag} 11 | ] 12 | Task:{task Replacement Flag} 13 | Please generate the code for the three files without any notes: 14 | index.html: 15 | ```html 16 | 17 | ``` 18 | end index.html 19 | 20 | style.css: 21 | ```css 22 | 23 | ``` 24 | end style.css 25 | 26 | script.js: 27 | ```javascript 28 | 29 | ``` 30 | end script.js -------------------------------------------------------------------------------- /prompts_templates/Code_modification_based_on_test_cases_prompt.txt: -------------------------------------------------------------------------------- 1 | Code:[ 2 | {Code Replacement Flag} 3 | ] 4 | 5 | Test Cases: [ 6 | {Test Cases Replacement Flag} 7 | ] 8 | 9 | Instructions: Modify the code based on the Test Cases for the three files in the following format: 10 | index.html: 11 | ... 12 | style.css: 13 | ... 14 | script.js: 15 | ... 
16 | 17 | -------------------------------------------------------------------------------- /prompts_templates/Code_modification_prompt.txt: -------------------------------------------------------------------------------- 1 | Code: 2 | {Code Replacement Flag} 3 | Instructions: 4 | {Instructions Replacement Flag} 5 | 6 | Please follow the instructions to modify the code's functionality and format the output without any notes: 7 | index.html: 8 | ```html 9 | 10 | ``` 11 | end index.html 12 | 13 | style.css: 14 | ```css 15 | 16 | ``` 17 | end style.css 18 | 19 | script.js: 20 | ```javascript 21 | 22 | ``` 23 | end script.js -------------------------------------------------------------------------------- /prompts_templates/Design_modification_prompt.txt: -------------------------------------------------------------------------------- 1 | Code: 2 | {Code Replacement Flag} 3 | Instructions: 4 | Use Materialize CSS to generate beautiful interface designs, and give beautiful color design suggestions. {Instructions Replacement Flag} 5 | 6 | Design Modification Instructions: 7 | Assistant is a senior designer. Assistant only writes new code and does not write additional text. 8 | Assistant is designed to assist with front-end development incorporating modern design principles such as responsive design. 9 | Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of code, and can use this knowledge to provide accurate and informative coding updates. 10 | Overall, Assistant is a powerful tool that can help with a wide range of design and development tasks. 11 | 12 | When designing a web page that should be beautiful, adhere to material design principles and meet the accessibility color requirements, here are some points you should consider: 13 | Material Design Principles: 14 | Use bold and graphic colors deliberately to highlight the most important information on your page. 
15 | Use shadows and depth effects sparingly to signify different user interface (UI) levels. 16 | Use responsive animations and transitions to give user feedback or to create a sense of continuity. 17 | Follow a unified theme, like using a unique color palette and typography. 18 | Follow the principle of "Content is king", always ensure that the design serves the content. 19 | Use space, color, and fonts deliberately to guide user attention and interaction. 20 | Ensure components behave in a predictable manner and in relation to their environment. 21 | 22 | Accessibility Colors: 23 | Make sure there is enough contrast between the background and foreground colors. 24 | Use color to communicate information, but don't rely solely on it. 25 | Avoid using colors that are known to cause issues for colorblind users (e.g., red/green). 26 | When choosing a color palette, pick colors that have high contrast against each other. 27 | Always remember, the beauty of a website lies in its usability and user experience. The use of beautiful colors and adherence to material design principles should enhance, not detract from, the overall user experience. 28 | 29 | 30 | Please generate the code for the three files (index.html/style.css/script.js) in the following format: 31 | index.html: 32 | ... 33 | style.css: 34 | ... 35 | script.js: 36 | ... -------------------------------------------------------------------------------- /prompts_templates/Design_page_prompt.txt: -------------------------------------------------------------------------------- 1 | Instruction: Generate concise web system page templates for the scenario description without any code, and describe sitemaps. 2 | Scene description:[ 3 | {Replacement Flag} 4 | ] 5 | Note: Please use only one web page to complete all the Scenes, without a HomePage. 
6 | Page templates: -------------------------------------------------------------------------------- /prompts_templates/Extract_Css.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "system", 4 | "content": "You are an AI asistant that used to extract the CSS code from the given text" 5 | }, 6 | { 7 | "role": "user", 8 | "content": "index.html:\n\n\n\n\n \n \n Drawing Application\n \n\n\n
\n
\n \"Drawing\n
\n \n
\n
\n \n
\n
\n \n \n
\n
\n \n \n
\n \n
\n
\n
\n

© 2021 Drawing Application. All rights reserved.

\n
\n \"Facebook\"\n \"Twitter\"\n \"Instagram\"\n
\n
\n \n\n\n\nend index.html\n\nstyle.css:\n\n* {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n}\n\nbody {\n font-family: Arial, sans-serif;\n font-size: 16px;\n line-height: 1.5;\n background-color: #f2f2f2;\n}\n\nheader {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 20px;\n background-color: #ffffff;\n box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);\n}\n\n.logo img {\n height: 50px;\n}\n\nnav ul {\n display: flex;\n list-style: none;\n}\n\nnav li {\n margin-left: 20px;\n}\n\nnav a {\n text-decoration: none;\n color: #333333;\n font-weight: bold;\n}\n\nnav a:hover {\n color: #ff0000;\n}\n\nmain {\n display: flex;\n flex-direction: column;\n align-items: center;\n margin-top: 50px;\n}\n\ncanvas {\n border: 1px solid #333333;\n background-color: #ffffff;\n}\n\n.toolbar {\n display: flex;\n justify-content: center;\n align-items: center;\n margin-top: 20px;\n}\n\n.brush-size {\n margin-right: 20px;\n}\n\n.color-selection {\n margin-right: 20px;\n}\n\nbutton {\n background-color: #ff0000;\n color: #ffffff;\n border: none;\n padding: 10px 20px;\n border-radius: 5px;\n cursor: pointer;\n}\n\nbutton:hover {\n background-color: #ffffff;\n color: #ff0000;\n border: 1px solid #ff0000;\n}\n\nfooter {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 20px;\n background-color: #ffffff;\n box-shadow: 0 -2px 4px rgba(0, 0, 0, 0.1);\n}\n\nfooter p {\n font-size: 14px;\n color: #333333;\n}\n\n.social-media a {\n margin-left: 10px;\n}\n\n.social-media img {\n height: 20px;\n}\n\nend style.css\n\nscript.js:\n\nconst canvas = document.getElementById('canvas');\nconst context = canvas.getContext('2d');\nlet isDrawing = false;\nlet lastX = 0;\nlet lastY = 0;\nlet hue = 0;\nlet direction = true;\n\ncanvas.width = window.innerWidth - 100;\ncanvas.height = window.innerHeight - 200;\n\nfunction draw(e) {\n if (!isDrawing) return;\n context.strokeStyle = `hsl(${hue}, 100%, 50%)`;\n context.lineWidth = 
document.getElementById('brush-size').value;\n context.lineCap = 'round';\n context.beginPath();\n context.moveTo(lastX, lastY);\n context.lineTo(e.offsetX, e.offsetY);\n context.stroke();\n [lastX, lastY] = [e.offsetX, e.offsetY];\n hue++;\n if (hue >= 360) {\n hue = 0;\n }\n if (context.lineWidth >= 50 || context.lineWidth <= 1) {\n direction = !direction;\n }\n if (direction) {\n context.lineWidth++;\n } else {\n context.lineWidth--;\n }\n}\n\ncanvas.addEventListener('mousedown', (e) => {\n isDrawing = true;\n [lastX, lastY] = [e.offsetX, e.offsetY];\n});\n\ncanvas.addEventListener('mousemove', draw);\n\ncanvas.addEventListener('mouseup', () => isDrawing = false);\ncanvas.addEventListener('mouseout', () => isDrawing = false);\n\ndocument.getElementById('clear-canvas').addEventListener('click', () => {\n context.clearRect(0, 0, canvas.width, canvas.height);\n});\n\ndocument.getElementById('brush-size').addEventListener('input', () => {\n context.lineWidth = document.getElementById('brush-size').value;\n});\n\ndocument.getElementById('color-selection').addEventListener('input', () => {\n context.strokeStyle = document.getElementById('color-selection').value;\n});\n\nend script.js" 9 | }, 10 | { 11 | "role": "assistant", 12 | "content": "* {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n}\n\nbody {\n font-family: Arial, sans-serif;\n font-size: 16px;\n line-height: 1.5;\n background-color: #f2f2f2;\n}\n\nheader {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 20px;\n background-color: #ffffff;\n box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);\n}\n\n.logo img {\n height: 50px;\n}\n\nnav ul {\n display: flex;\n list-style: none;\n}\n\nnav li {\n margin-left: 20px;\n}\n\nnav a {\n text-decoration: none;\n color: #333333;\n font-weight: bold;\n}\n\nnav a:hover {\n color: #ff0000;\n}\n\nmain {\n display: flex;\n flex-direction: column;\n align-items: center;\n margin-top: 50px;\n}\n\ncanvas {\n border: 1px solid #333333;\n 
background-color: #ffffff;\n}\n\n.toolbar {\n display: flex;\n justify-content: center;\n align-items: center;\n margin-top: 20px;\n}\n\n.brush-size {\n margin-right: 20px;\n}\n\n.color-selection {\n margin-right: 20px;\n}\n\nbutton {\n background-color: #ff0000;\n color: #ffffff;\n border: none;\n padding: 10px 20px;\n border-radius: 5px;\n cursor: pointer;\n}\n\nbutton:hover {\n background-color: #ffffff;\n color: #ff0000;\n border: 1px solid #ff0000;\n}\n\nfooter {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 20px;\n background-color: #ffffff;\n box-shadow: 0 -2px 4px rgba(0, 0, 0, 0.1);\n}\n\nfooter p {\n font-size: 14px;\n color: #333333;\n}\n\n.social-media a {\n margin-left: 10px;\n}\n\n.social-media img {\n height: 20px;\n}" 13 | } 14 | ] -------------------------------------------------------------------------------- /prompts_templates/Extract_Javascript.json: -------------------------------------------------------------------------------- 1 | [ 2 | {"role": "system","content": "You are an AI asistant that used to extract the Javascript code from the given text"}, 3 | {"role": "user","content": "index.html:\n\n\n\n\n \n \n Drawing Application\n \n\n\n
\n
\n \"Drawing\n
\n \n
\n
\n \n
\n
\n \n \n
\n
\n \n \n
\n \n
\n
\n
\n

© 2021 Drawing Application. All rights reserved.

\n
\n \"Facebook\"\n \"Twitter\"\n \"Instagram\"\n
\n
\n \n\n\n\nend index.html\n\nstyle.css:\n\n* {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n}\n\nbody {\n font-family: Arial, sans-serif;\n font-size: 16px;\n line-height: 1.5;\n background-color: #f2f2f2;\n}\n\nheader {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 20px;\n background-color: #ffffff;\n box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);\n}\n\n.logo img {\n height: 50px;\n}\n\nnav ul {\n display: flex;\n list-style: none;\n}\n\nnav li {\n margin-left: 20px;\n}\n\nnav a {\n text-decoration: none;\n color: #333333;\n font-weight: bold;\n}\n\nnav a:hover {\n color: #ff0000;\n}\n\nmain {\n display: flex;\n flex-direction: column;\n align-items: center;\n margin-top: 50px;\n}\n\ncanvas {\n border: 1px solid #333333;\n background-color: #ffffff;\n}\n\n.toolbar {\n display: flex;\n justify-content: center;\n align-items: center;\n margin-top: 20px;\n}\n\n.brush-size {\n margin-right: 20px;\n}\n\n.color-selection {\n margin-right: 20px;\n}\n\nbutton {\n background-color: #ff0000;\n color: #ffffff;\n border: none;\n padding: 10px 20px;\n border-radius: 5px;\n cursor: pointer;\n}\n\nbutton:hover {\n background-color: #ffffff;\n color: #ff0000;\n border: 1px solid #ff0000;\n}\n\nfooter {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 20px;\n background-color: #ffffff;\n box-shadow: 0 -2px 4px rgba(0, 0, 0, 0.1);\n}\n\nfooter p {\n font-size: 14px;\n color: #333333;\n}\n\n.social-media a {\n margin-left: 10px;\n}\n\n.social-media img {\n height: 20px;\n}\n\nend style.css\n\nscript.js:\n\nconst canvas = document.getElementById('canvas');\nconst context = canvas.getContext('2d');\nlet isDrawing = false;\nlet lastX = 0;\nlet lastY = 0;\nlet hue = 0;\nlet direction = true;\n\ncanvas.width = window.innerWidth - 100;\ncanvas.height = window.innerHeight - 200;\n\nfunction draw(e) {\n if (!isDrawing) return;\n context.strokeStyle = `hsl(${hue}, 100%, 50%)`;\n context.lineWidth = 
document.getElementById('brush-size').value;\n context.lineCap = 'round';\n context.beginPath();\n context.moveTo(lastX, lastY);\n context.lineTo(e.offsetX, e.offsetY);\n context.stroke();\n [lastX, lastY] = [e.offsetX, e.offsetY];\n hue++;\n if (hue >= 360) {\n hue = 0;\n }\n if (context.lineWidth >= 50 || context.lineWidth <= 1) {\n direction = !direction;\n }\n if (direction) {\n context.lineWidth++;\n } else {\n context.lineWidth--;\n }\n}\n\ncanvas.addEventListener('mousedown', (e) => {\n isDrawing = true;\n [lastX, lastY] = [e.offsetX, e.offsetY];\n});\n\ncanvas.addEventListener('mousemove', draw);\n\ncanvas.addEventListener('mouseup', () => isDrawing = false);\ncanvas.addEventListener('mouseout', () => isDrawing = false);\n\ndocument.getElementById('clear-canvas').addEventListener('click', () => {\n context.clearRect(0, 0, canvas.width, canvas.height);\n});\n\ndocument.getElementById('brush-size').addEventListener('input', () => {\n context.lineWidth = document.getElementById('brush-size').value;\n});\n\ndocument.getElementById('color-selection').addEventListener('input', () => {\n context.strokeStyle = document.getElementById('color-selection').value;\n});\n\nend script.js"}, 4 | {"role": "assistant","content": "const canvas = document.getElementById('canvas');\nconst context = canvas.getContext('2d');\nlet isDrawing = false;\nlet lastX = 0;\nlet lastY = 0;\nlet hue = 0;\nlet direction = true;\n\ncanvas.width = window.innerWidth - 100;\ncanvas.height = window.innerHeight - 200;\n\nfunction draw(e) {\n if (!isDrawing) return;\n context.strokeStyle = `hsl(${hue}, 100%, 50%)`;\n context.lineWidth = document.getElementById('brush-size').value;\n context.lineCap = 'round';\n context.beginPath();\n context.moveTo(lastX, lastY);\n context.lineTo(e.offsetX, e.offsetY);\n context.stroke();\n [lastX, lastY] = [e.offsetX, e.offsetY];\n hue++;\n if (hue >= 360) {\n hue = 0;\n }\n if (context.lineWidth >= 50 || context.lineWidth <= 1) {\n direction = !direction;\n }\n 
if (direction) {\n context.lineWidth++;\n } else {\n context.lineWidth--;\n }\n}\n\ncanvas.addEventListener('mousedown', (e) => {\n isDrawing = true;\n [lastX, lastY] = [e.offsetX, e.offsetY];\n});\n\ncanvas.addEventListener('mousemove', draw);\n\ncanvas.addEventListener('mouseup', () => isDrawing = false);\ncanvas.addEventListener('mouseout', () => isDrawing = false);\n\ndocument.getElementById('clear-canvas').addEventListener('click', () => {\n context.clearRect(0, 0, canvas.width, canvas.height);\n});\n\ndocument.getElementById('brush-size').addEventListener('input', () => {\n context.lineWidth = document.getElementById('brush-size').value;\n});\n\ndocument.getElementById('color-selection').addEventListener('input', () => {\n context.strokeStyle = document.getElementById('color-selection').value;\n});"} 5 | ] -------------------------------------------------------------------------------- /prompts_templates/Extract_html.json: -------------------------------------------------------------------------------- 1 | [ 2 | {"role": "system","content": "You are an AI asistant that used to extract the Html code from the given text"}, 3 | {"role": "user","content": "index.html:\n\n\n\n\n \n \n Drawing Application\n \n\n\n
\n
\n \"Drawing\n
\n \n
\n
\n \n
\n
\n \n \n
\n
\n \n \n
\n \n
\n
\n
\n

© 2021 Drawing Application. All rights reserved.

\n
\n \"Facebook\"\n \"Twitter\"\n \"Instagram\"\n
\n
\n \n\n\n\nend index.html\n\nstyle.css:\n\n* {\n box-sizing: border-box;\n margin: 0;\n padding: 0;\n}\n\nbody {\n font-family: Arial, sans-serif;\n font-size: 16px;\n line-height: 1.5;\n background-color: #f2f2f2;\n}\n\nheader {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 20px;\n background-color: #ffffff;\n box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);\n}\n\n.logo img {\n height: 50px;\n}\n\nnav ul {\n display: flex;\n list-style: none;\n}\n\nnav li {\n margin-left: 20px;\n}\n\nnav a {\n text-decoration: none;\n color: #333333;\n font-weight: bold;\n}\n\nnav a:hover {\n color: #ff0000;\n}\n\nmain {\n display: flex;\n flex-direction: column;\n align-items: center;\n margin-top: 50px;\n}\n\ncanvas {\n border: 1px solid #333333;\n background-color: #ffffff;\n}\n\n.toolbar {\n display: flex;\n justify-content: center;\n align-items: center;\n margin-top: 20px;\n}\n\n.brush-size {\n margin-right: 20px;\n}\n\n.color-selection {\n margin-right: 20px;\n}\n\nbutton {\n background-color: #ff0000;\n color: #ffffff;\n border: none;\n padding: 10px 20px;\n border-radius: 5px;\n cursor: pointer;\n}\n\nbutton:hover {\n background-color: #ffffff;\n color: #ff0000;\n border: 1px solid #ff0000;\n}\n\nfooter {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: 20px;\n background-color: #ffffff;\n box-shadow: 0 -2px 4px rgba(0, 0, 0, 0.1);\n}\n\nfooter p {\n font-size: 14px;\n color: #333333;\n}\n\n.social-media a {\n margin-left: 10px;\n}\n\n.social-media img {\n height: 20px;\n}\n\nend style.css\n\nscript.js:\n\nconst canvas = document.getElementById('canvas');\nconst context = canvas.getContext('2d');\nlet isDrawing = false;\nlet lastX = 0;\nlet lastY = 0;\nlet hue = 0;\nlet direction = true;\n\ncanvas.width = window.innerWidth - 100;\ncanvas.height = window.innerHeight - 200;\n\nfunction draw(e) {\n if (!isDrawing) return;\n context.strokeStyle = `hsl(${hue}, 100%, 50%)`;\n context.lineWidth = 
document.getElementById('brush-size').value;\n context.lineCap = 'round';\n context.beginPath();\n context.moveTo(lastX, lastY);\n context.lineTo(e.offsetX, e.offsetY);\n context.stroke();\n [lastX, lastY] = [e.offsetX, e.offsetY];\n hue++;\n if (hue >= 360) {\n hue = 0;\n }\n if (context.lineWidth >= 50 || context.lineWidth <= 1) {\n direction = !direction;\n }\n if (direction) {\n context.lineWidth++;\n } else {\n context.lineWidth--;\n }\n}\n\ncanvas.addEventListener('mousedown', (e) => {\n isDrawing = true;\n [lastX, lastY] = [e.offsetX, e.offsetY];\n});\n\ncanvas.addEventListener('mousemove', draw);\n\ncanvas.addEventListener('mouseup', () => isDrawing = false);\ncanvas.addEventListener('mouseout', () => isDrawing = false);\n\ndocument.getElementById('clear-canvas').addEventListener('click', () => {\n context.clearRect(0, 0, canvas.width, canvas.height);\n});\n\ndocument.getElementById('brush-size').addEventListener('input', () => {\n context.lineWidth = document.getElementById('brush-size').value;\n});\n\ndocument.getElementById('color-selection').addEventListener('input', () => {\n context.strokeStyle = document.getElementById('color-selection').value;\n});\n\nend script.js"}, 4 | {"role": "assistant","content": "\n\n\n \n \n Drawing Application\n \n\n\n
\n
\n \"Drawing\n
\n \n
\n
\n \n
\n
\n \n \n
\n
\n \n \n
\n \n
\n
\n
\n

© 2021 Drawing Application. All rights reserved.

\n
\n \"Facebook\"\n \"Twitter\"\n \"Instagram\"\n
\n
\n \n\n"} 5 | ] -------------------------------------------------------------------------------- /prompts_templates/Gherkin2NL_prompt.txt: -------------------------------------------------------------------------------- 1 | Scenarios: [ 2 | {Replacement Flag} 3 | ] 4 | 5 | 6 | Instructions: Generate Natural Language that explains the every scenario from the Scenarios List without any note. 7 | Scenario 1: 8 | Scenario 2: 9 | Scenario 3: 10 | Scenario 4: 11 | ... -------------------------------------------------------------------------------- /prompts_templates/Gherkin_merge_prompt.txt: -------------------------------------------------------------------------------- 1 | {Replacement Flag} 2 | 3 | Please generate Gherkin language in one feature: -------------------------------------------------------------------------------- /prompts_templates/Gherkin_prompt.txt: -------------------------------------------------------------------------------- 1 | Feature: [Title (one line describing the feature or story)] 2 | 3 | #1. Description of the feature or narrative of the story 4 | 5 | Narrative: 6 | As a [role] 7 | I want [feature: something, requirement] 8 | So that [benefit: achieve some business goal] 9 | 10 | #2. The background is executed once before each scenario 11 | 12 | Background: 13 | Given [some condition] 14 | And [one more thing] 15 | 16 | #3. Acceptance Criteria: (presented as Scenarios) 17 | 18 | Scenario 1: Title 19 | Given [context] 20 | And [some more context] 21 | When [event] 22 | Then [outcome] 23 | And [another outcome]... 24 | For instance, 25 | Scenario 2: ... 26 | 27 | #4. Templates with placeholders require a table. 
28 | 29 | Scenario Outline: 30 | Given I have 31 | And I also have 32 | Examples: 33 | | something | number | thing | 34 | | … | … | … | 35 | | … | … | … | 36 | 37 | Feature: Adding-Removing-items 38 | As a store owner, 39 | I want to give a discount as customers add more items to their basket 40 | So that I can encourage the customers to buy more items 41 | 42 | Scenario Outline: Add and Removing items from the basket 43 | Given that the discount rate is 44 | And the minimum discountable amount 45 | And a customer logs into the System 46 | When adding and removing items from the basket 47 | | Item | Price| 48 | | Tea Shirt | $100 | 49 | |Sunglasses | $135 | 50 | | Cookies | $126 | 51 | |Sunglasses | $600 | 52 | | Cookies | -$126| 53 | Then the totalshould be 54 | And the discount should be 55 | Examples: 56 | | discount-rate | min-amount | total | discount | 57 | | %10 | $200 | $835 | $83.5 | 58 | 59 | Feature:{Replacement Flag} 60 | As a -------------------------------------------------------------------------------- /prompts_templates/Human_in_the_loop_prompt.txt: -------------------------------------------------------------------------------- 1 | Example of Feature-to-Scenarios Natural Language:[ 2 | {Replacement Flag} 3 | ] 4 | -------------------------------------------------------------------------------- /prompts_templates/NL2Gherkin_prompt.txt: -------------------------------------------------------------------------------- 1 | Instructions: Please generate Gherkin based on the natural language below. 2 | 3 | Natural Language:""" 4 | {NL Replacement Flag} 5 | """ 6 | 7 | Gherkin: 8 | Feature:{Feature Replacement Flag} -------------------------------------------------------------------------------- /prompts_templates/Test_cases_generation_prompt.txt: -------------------------------------------------------------------------------- 1 | Gherkin: [ 2 | {Gherkin Replacement Flag} 3 | ] 4 | 5 | Instructions: Generate test cases based on Gherkin. 
6 | 7 | -------------------------------------------------------------------------------- /prompts_templates/Visual_design_prompt.txt: -------------------------------------------------------------------------------- 1 | Instructions: Generate a visual description for the Page templates, following the principles: 2 | 3 | 1.Unity has to do with all elements on a page visually or conceptually appearing to belong together. 4 | 5 | 2.Gestalt, in visual design, helps users perceive the overall design as opposed to individual elements. 6 | 7 | 3.Space is “defined when something is placed in it”, according to Alex White in his book, The Elements of Graphic Design. Incorporating space into a design helps reduce noise, increase readability, and/or create illusion. White space is an important part of your layout strategy. 8 | 9 | 4.Hierarchy shows the difference in significance between items. Designers often create hierarchies through different font sizes, colors, and placement on the page. Usually, items at the top are perceived as most important. 10 | 11 | 5.Balance creates the perception that there is equal distribution. This does not always imply that there is symmetry. 12 | 13 | 6.Contrast focuses on making items stand out by emphasizing differences in size, color, direction, and other characteristics. 14 | 15 | 7.Scale identifies a range of sizes; it creates interest and depth by demonstrating how each item relates to each other based on size. 16 | 17 | 8.Dominance focuses on having one element as the focal point and others being subordinate. This is often done through scaling and contrasting based on size, color, position, shape, etc. 
18 | 19 | Page templates: [ 20 | {Replacement Flag} 21 | ] 22 | 23 | Visual Description: 24 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/requirements.txt -------------------------------------------------------------------------------- /static/html/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Strategic Alliance Game 8 | 9 | 10 | 11 |
12 | 13 | 21 |
22 |
23 |
24 |

Alliance Management Section

25 |
26 |

Create an Alliance

27 |
28 | 29 | 30 | 31 |
32 |
33 |
34 |
35 |

Invite Players

36 |
37 | 38 | 39 |
40 |
41 |
42 |
43 |
44 |

Territory Management Section

45 |
46 |

Launch an Attack

47 |
48 | 52 | 53 | 54 |
55 |
56 |
57 |
58 |

Defend Territory

59 |
No incoming attacks.
60 |
61 |
62 |
63 |
64 |

Resource Management Section

65 |
66 |

Allocate Resources for an Attack

67 |
68 |
Initial Resources: 5000
69 | 70 | 71 |
72 |
73 |
74 |
75 |
76 | 90 | 95 | 96 | 97 | 98 | -------------------------------------------------------------------------------- /static/html/script.js: -------------------------------------------------------------------------------- 1 | 2 | document.getElementById('create-alliance-form').addEventListener('submit', function(e) { 3 | e.preventDefault(); 4 | const allianceName = document.getElementById('alliance-name').value; 5 | const allianceDescription = document.getElementById('alliance-description').value; 6 | document.getElementById('create-alliance-status').innerText = `Alliance "${allianceName}" created successfully!`; 7 | }); 8 | 9 | document.getElementById('invite-players-form').addEventListener('submit', function(e) { 10 | e.preventDefault(); 11 | const playerUsername = document.getElementById('player-username').value; 12 | document.getElementById('invite-players-status').innerText = `Invitation sent to ${playerUsername}!`; 13 | }); 14 | 15 | document.getElementById('launch-attack-form').addEventListener('submit', function(e) { 16 | e.preventDefault(); 17 | const targetTerritory = document.getElementById('target-territory').value; 18 | const allocateTroops = document.getElementById('allocate-troops').value; 19 | document.getElementById('launch-attack-status').innerText = `Attack launched on ${targetTerritory} with ${allocateTroops} troops!`; 20 | }); 21 | 22 | document.getElementById('allocate-resources-form').addEventListener('submit', function(e) { 23 | e.preventDefault(); 24 | const initialResources = 5000; 25 | const requiredResources = parseInt(document.getElementById('required-resources').value); 26 | const remainingResources = initialResources - requiredResources; 27 | document.getElementById('allocate-resources-status').innerText = `Remaining Resources: ${remainingResources}`; 28 | }); 29 | -------------------------------------------------------------------------------- /static/html/style.css: 
-------------------------------------------------------------------------------- 1 | body { 2 | font-family: Arial, sans-serif; 3 | margin: 0; 4 | padding: 0; 5 | display: flex; 6 | flex-direction: column; 7 | height: 100vh; 8 | background-color: #e8f4f8; /* Light blue background for the body */ 9 | } 10 | 11 | header { 12 | display: flex; 13 | justify-content: space-between; 14 | align-items: center; 15 | background-color: #205375; /* Darker blue for header */ 16 | color: #FFD700; /* Gold text color */ 17 | padding: 10px 20px; 18 | } 19 | 20 | .logo { 21 | font-size: 24px; 22 | } 23 | 24 | .navbar a { 25 | color: #FFD700; /* Gold text color */ 26 | text-decoration: none; 27 | margin: 0 10px; 28 | } 29 | 30 | main { 31 | display: flex; 32 | flex: 1; 33 | padding: 20px; 34 | } 35 | 36 | section { 37 | flex: 2; 38 | margin-right: 20px; 39 | background-color: #f0f8ff; /* Very light blue for sections */ 40 | padding: 10px; 41 | border-radius: 8px; /* Rounded corners for sections */ 42 | } 43 | 44 | aside { 45 | flex: 1; 46 | background-color: #f9f9f9; /* Very light gray for the sidebar */ 47 | padding: 20px; 48 | border-radius: 8px; /* Rounded corners for the sidebar */ 49 | } 50 | 51 | footer { 52 | background-color: #205375; /* Darker blue for footer */ 53 | color: #FFD700; /* Gold text color */ 54 | text-align: center; 55 | padding: 10px 0; 56 | } 57 | 58 | footer a { 59 | color: #FFD700; /* Gold text color */ 60 | text-decoration: none; 61 | margin: 0 10px; 62 | } 63 | 64 | h2, h3 { 65 | color: #333; 66 | } 67 | 68 | form { 69 | display: flex; 70 | flex-direction: column; 71 | } 72 | 73 | form input, form textarea, form select, form button { 74 | margin-bottom: 10px; 75 | padding: 10px; 76 | font-size: 16px; 77 | } 78 | 79 | form button { 80 | background-color: #28a745; /* Green color for buttons */ 81 | color: white; 82 | border: none; 83 | cursor: pointer; 84 | } 85 | 86 | form button:hover { 87 | background-color: #218838; /* Darker green for button hover */ 88 
| } 89 | 90 | #initial-resources { 91 | font-weight: bold; 92 | } 93 | 94 | #allocate-resources-status, #create-alliance-status, #invite-players-status, #launch-attack-status, #battle-status { 95 | margin-top: 10px; 96 | color: green; 97 | } 98 | 99 | #defend-notification { 100 | color: red; 101 | } 102 | -------------------------------------------------------------------------------- /static/img/Icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/static/img/Icon.png -------------------------------------------------------------------------------- /static/img/Placeholder200.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/static/img/Placeholder200.jpg -------------------------------------------------------------------------------- /static/img/Placeholder200.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/static/img/Placeholder200.png -------------------------------------------------------------------------------- /static/img/Placeholder600.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/static/img/Placeholder600.jpg -------------------------------------------------------------------------------- /static/img/Placeholder600.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UGAIForge/AgileGen/5549da67870e1e454b5f505646376ab7aafbb153/static/img/Placeholder600.png -------------------------------------------------------------------------------- /utils/CodeGeneration.py: 
# utils/CodeGeneration.py
import os
import os.path as osp
import json
import re
import time
import shutil
from pathlib import Path
from difflib import SequenceMatcher
from collections import namedtuple

import cv2
import openai
from bs4 import BeautifulSoup
from dotenv import load_dotenv

load_dotenv()

# Matches the start of any top-level Gherkin section, used to split a model
# reply into Feature/Background/Scenario/Examples statements.
_GHERKIN_KEYWORD_RE = re.compile(
    r'^\s*(?:Feature|Background|Scenario(?: Outline)?|Examples)\b')


class CodeGeneration():
    """Drives the feature -> Gherkin -> design -> HTML/CSS/JS pipeline.

    Configuration is read from config/default.json and frozen into a
    namedtuple on ``self.args``; prompt templates are loaded from
    ``self.args.prompt_path`` by :meth:`get_prompt`.
    """

    def __init__(self):
        # Freeze the JSON config into a namedtuple so fields are
        # attribute-accessible (self.args.model, self.args.temperature, ...).
        with open('config/default.json', 'r') as file:
            config_dict = json.load(file)
        Config = namedtuple('Config', config_dict.keys())
        self.args = Config(**config_dict)
        openai.api_key = os.environ.get("OPENAI_API_KEY")
        self.get_prompt()
        self.set_proxy()

    @staticmethod
    def set_proxy():
        """Hook for routing OpenAI traffic through an HTTP proxy (disabled)."""
        # os.environ["https_proxy"] = "http://172.24.48.1:10809"
        pass

    def TopN_Feature2Scenarios(self, feature2scenarios_list, input_feature):
        """Return stored feature->scenarios entries most similar to *input_feature*.

        Entries scoring below ``self.args.similarity_threshold`` are dropped;
        the rest are sorted by score (descending) and capped at
        ``self.args.max_feature_number``.
        """
        similar_Feature2Scenarios = []
        for feature2scenarios in feature2scenarios_list:
            similarity_score = SequenceMatcher(
                None, input_feature, feature2scenarios["feature"]).ratio()
            if similarity_score >= self.args.similarity_threshold:
                similar_Feature2Scenarios.append({
                    'feature': feature2scenarios["feature"],
                    'scenarios': feature2scenarios["scenarios"],
                    'similarity_score': similarity_score,
                })
        similar_Feature2Scenarios.sort(
            key=lambda x: x['similarity_score'], reverse=True)
        return similar_Feature2Scenarios[:self.args.max_feature_number]

    # Template attribute name -> prompt file under self.args.prompt_path.
    _PROMPT_FILES = (
        ("Gherkin_prompt", "Gherkin_prompt.txt"),
        ("Design_page_prompt", "Design_page_prompt.txt"),
        ("Visual_design_prompt", "Visual_design_prompt.txt"),
        ("Code_generation_prompt", "Code_generation_prompt.txt"),
        ("Gherkin2NL_prompt", "Gherkin2NL_prompt.txt"),
        ("NL2Gherkin_prompt", "NL2Gherkin_prompt.txt"),
        ("Gherkin_merge_prompt", "Gherkin_merge_prompt.txt"),
        ("Code_modification_prompt", "Code_modification_prompt.txt"),
        ("Test_cases_generation_prompt", "Test_cases_generation_prompt.txt"),
        ("Code_modification_based_on_test_cases_prompt",
         "Code_modification_based_on_test_cases_prompt.txt"),
        ("Human_in_the_loop_prompt", "Human_in_the_loop_prompt.txt"),
        ("Design_modification_prompt", "Design_modification_prompt.txt"),
    )

    def get_prompt(self):
        """Load every prompt template into an attribute of the same name."""
        for attr, filename in self._PROMPT_FILES:
            with open(osp.join(self.args.prompt_path, filename), "r",
                      encoding="utf-8") as f:
                setattr(self, attr, f.read())

    def ask_chatgpt(self, messages):
        """Call the chat-completion API until it finishes naturally.

        Retries after a 20 s pause on any API error (best-effort loop for rate
        limits / transient failures).  When a reply is truncated
        (``finish_reason != "stop"``) the partial answer plus a user
        "continue" turn are appended to *messages* and the request is re-sent.

        Returns ``(final_response, messages, extra_response_count)`` where
        *extra_response_count* is the number of "continue" rounds performed.
        """
        extra_response_count = 0
        while True:
            try:
                response = openai.ChatCompletion.create(
                    model=self.args.model,
                    messages=messages,
                    temperature=self.args.temperature
                )
            except Exception as e:
                print(e)
                time.sleep(20)
                continue
            if response["choices"][0]["finish_reason"] == "stop":
                break
            messages.append({"role": "assistant",
                             "content": response["choices"][0]["message"]["content"]})
            messages.append({"role": "user", "content": "continue"})
            extra_response_count += 1
        return response, messages, extra_response_count

    def save_chat_messages(self, messages):
        """Persist the chat transcript as JSON at ``self.args.save_chat_path``."""
        with open(self.args.save_chat_path, "w", encoding="utf-8") as f:
            json.dump(messages, f)

    def save_code(self, code):
        """Write the full generated code to ``self.args.all_code_save_dir``."""
        with open(self.args.all_code_save_dir, "w", encoding="utf-8") as f:
            f.write(code)

    @staticmethod
    def _split_gherkin_statements(text):
        """Split *text* into blocks, each starting at a Gherkin keyword line.

        Any preamble before the first keyword becomes its own block, matching
        the original parsing behaviour.
        """
        statements = []
        current_statement = ''
        for line in text.split('\n'):
            if _GHERKIN_KEYWORD_RE.match(line):
                if current_statement:
                    statements.append(current_statement.strip())
                current_statement = ''
            current_statement += line + '\n'
        if current_statement:
            statements.append(current_statement.strip())
        return statements

    def Scenario_Parsing(self, Gherkin_response):
        """Extract only the Scenario / Scenario Outline blocks from a reply."""
        return [s for s in self._split_gherkin_statements(Gherkin_response)
                if s.startswith("Scenario")]

    def Scenario_NL_Parsing(self, Scenario_NL):
        """Split a natural-language scenario reply into per-keyword blocks."""
        return self._split_gherkin_statements(Scenario_NL)

    def _complete(self, messages, prefix=''):
        """Send *messages*, record the assistant reply, assemble the full text.

        Returns ``(text, messages)`` where *text* is *prefix* + any partial
        replies gathered during "continue" rounds + the final reply.
        """
        response, messages, extra_response_count = self.ask_chatgpt(messages)
        messages.append({"role": "assistant",
                         "content": response["choices"][0]["message"]["content"]})
        text = self.handel_extra_response(extra_response_count, messages, prefix)
        return text + response["choices"][0]["message"]["content"], messages

    def Gherkin_generation(self, input_feature, similar_Feature2Scenarios):
        """Generate Gherkin scenarios for *input_feature*.

        When similar historical feature/scenario pairs exist they are folded
        into the human-in-the-loop prompt as few-shot examples.
        Returns ``(Gherkin_response, messages)``.
        """
        if similar_Feature2Scenarios:
            Feature2Scenarios_str = ''
            for i, similar_Feature2Scenario in enumerate(similar_Feature2Scenarios):
                Feature2Scenarios_str += f"Feature {i}:" + similar_Feature2Scenario['feature'] + "\n"
                for scenario in similar_Feature2Scenario['scenarios']:
                    Feature2Scenarios_str += scenario + "\n"
                Feature2Scenarios_str += "\n"
            Human_in_the_loop_prompt = self.Human_in_the_loop_prompt.replace(
                "{Replacement Flag}", Feature2Scenarios_str)
        else:
            Human_in_the_loop_prompt = ''
        Gherkin_prompt = Human_in_the_loop_prompt + self.Gherkin_prompt.replace(
            "{Replacement Flag}", input_feature)
        messages = [{"role": "user", "content": Gherkin_prompt}]
        # The model is prompted to continue from "As a ...", so the document
        # header is stitched onto the front of its reply.
        prefix = "Feature: " + input_feature + "\n" + "As a "
        Gherkin_response, messages = self._complete(messages, prefix)
        return Gherkin_response, messages

    def Gherkin2NL(self, Scenarios_List, messages):
        """Translate Gherkin scenarios into natural-language descriptions.

        Continues the existing *messages* conversation and returns the reply
        split into one block per scenario.
        """
        Gherkin_NL_str = "\n\n".join(Scenarios_List)
        Gherkin2NL_prompt = self.Gherkin2NL_prompt.replace(
            "{Replacement Flag}", Gherkin_NL_str)
        messages.append({"role": "user", "content": Gherkin2NL_prompt})
        Gherkin_NL, messages = self._complete(messages)
        return self.Scenario_NL_Parsing(Gherkin_NL)

    def NL2Gherkin(self, Gherkin_NL_List, Feature):
        """Translate edited natural-language scenarios back into one Gherkin
        document headed by *Feature*."""
        Gherkin_NL_str = ''
        for Gherkin_NL in Gherkin_NL_List:
            Gherkin_NL_str += Gherkin_NL + "\n"
        prompt = self.NL2Gherkin_prompt.replace("{NL Replacement Flag}", Gherkin_NL_str)
        prompt = prompt.replace("{Feature Replacement Flag}", Feature)
        Gherkin, _ = self._complete([{"role": "user", "content": prompt}])
        return "Feature:{Feature}\n".format(Feature=Feature) + Gherkin

    def Gherkin_merge(self, Gherkin_list):
        """Merge several Gherkin documents into a single one via the model."""
        Gherkin_merge_str = ''
        for Gherkin in Gherkin_list:
            Gherkin_merge_str += Gherkin + "\n"
        prompt = self.Gherkin_merge_prompt.replace(
            "{Replacement Flag}", Gherkin_merge_str)
        Gherkin_merge_results, _ = self._complete([{"role": "user", "content": prompt}])
        return Gherkin_merge_results

    @staticmethod
    def handel_extra_response(extra_response_count, messages, response):
        """Prepend the partial replies collected during "continue" rounds.

        After the caller appends the final assistant reply, *messages* ends
        with ``[..., partial_1, "continue", ..., partial_n, "continue",
        final]``, so the i-th partial (oldest first) sits at offset
        ``(i - extra_response_count) * 2 - 1`` from the end.
        """
        if extra_response_count > 0:
            for i in range(extra_response_count):
                # BUGFIX: the previous offset ((i - n) * 2) selected the user
                # "continue" turns instead of the assistant partials.
                response += messages[(i - extra_response_count) * 2 - 1]["content"]
        return response

    def Design_page_template_generation(self, Gherkin_Language):
        """Generate a page-structure design template from the Gherkin text."""
        prompt = self.Design_page_prompt.replace(
            "{Replacement Flag}", Gherkin_Language)
        Design_page_template, _ = self._complete([{"role": "user", "content": prompt}])
        return Design_page_template

    def Visual_design_template_generation(self, Design_page_template):
        """Generate a visual (styling) design from the page template."""
        prompt = self.Visual_design_prompt.replace(
            "{Replacement Flag}", Design_page_template)
        Visual_design_template, _ = self._complete([{"role": "user", "content": prompt}])
        return Visual_design_template

    def Test_Cases_generation(self, Gherkin_result):
        """Generate acceptance test cases from the Gherkin document."""
        prompt = self.Test_cases_generation_prompt.replace(
            "{Replacement Flag}", Gherkin_result)
        Test_Cases, _ = self._complete([{"role": "user", "content": prompt}])
        return Test_Cases

    def Code_modification_based_on_test_cases(self, Code, Test_Cases):
        """Ask the model to revise *Code* so that *Test_Cases* pass."""
        prompt = self.Code_modification_based_on_test_cases_prompt.replace(
            "{Test Cases Replacement Flag}", Test_Cases)
        prompt = prompt.replace("{Code Replacement Flag}", Code)
        Code_modification, _ = self._complete([{"role": "user", "content": prompt}])
        return Code_modification

    def Code_generation(self, Visual_design_template, Design_page_template, task, Gherkin_result):
        """Generate index.html / style.css / script.js from the design inputs.

        Retries with a fresh conversation until the reply parses (see
        :meth:`Code_Parsing`) or ``self.args.max_retry`` is exceeded.
        Returns ``(Generate_code, loop_number)``.
        """
        loop_number = 0
        while True:
            loop_number += 1
            prompt = self.Code_generation_prompt
            prompt = prompt.replace("{Visual_design_template Replacement Flag}", Visual_design_template)
            prompt = prompt.replace("{Design_page_template Replacement Flag}", Design_page_template)
            prompt = prompt.replace("{task Replacement Flag}", task)
            prompt = prompt.replace("{Gherkin_result Replacement Flag}", Gherkin_result)
            Generate_code, _ = self._complete([{"role": "user", "content": prompt}])
            if self.Code_Parsing(Generate_code) or loop_number > self.args.max_retry:
                return Generate_code, loop_number

    def Replace_Images(self):
        """Create placeholder images for every local ``<img src>`` in the
        generated page that does not exist on disk."""
        png_placeholder = osp.join(self.args.static_dir, "img", 'Placeholder200.png')
        jpg_placeholder = osp.join(self.args.static_dir, "img", 'Placeholder200.jpg')

        with open(osp.join(self.args.static_html_dir, 'index.html'),
                  encoding="utf-8") as fp:
            html_soup = BeautifulSoup(fp, "html.parser")
        html_img_tags = html_soup.find_all("img")

        # NOTE(review): the CSS soup is parsed but its tags are never used;
        # kept for parity with the original behaviour.
        with open(osp.join(self.args.static_html_dir, 'style.css'),
                  encoding="utf-8") as fp:
            css_soup = BeautifulSoup(fp, "lxml")
        css_img_tags = css_soup.find_all("img")  # noqa: F841  (unused, see note)

        for img in html_img_tags:
            img_url = img.get("src")
            # Skip tags without a usable local path (missing src, remote URLs,
            # inline data URIs) instead of crashing on them.
            if not img_url or img_url.startswith(("http://", "https://", "data:")):
                continue
            target = osp.join(self.args.static_html_dir, img_url)
            if not os.path.exists(target):
                if img_url.endswith(".jpg"):
                    shutil.copyfile(jpg_placeholder, target)
                elif img_url.endswith(".png"):
                    shutil.copyfile(png_placeholder, target)
                else:
                    # Other extensions: transcode the PNG placeholder.
                    cv2.imwrite(target, cv2.imread(png_placeholder))

    def Code_Parsing(self, code):
        """Extract the three fenced files from a model reply and write them
        into ``self.args.static_html_dir``.

        Returns True on success, False when any section is missing or a write
        fails; callers use the False case to trigger a retry.
        """
        try:
            static_html_dir = Path(self.args.static_html_dir)
            static_html_dir.mkdir(parents=True, exist_ok=True)
            index_pattern = r"index.html:\n```html(.*)```\nend index.html"
            css_pattern = r"style.css:\n```css(.*)```\nend style.css"
            javascript_pattern = r"script.js:\n```javascript(.*)```\nend script.js"
            index_matches = re.findall(index_pattern, code, re.DOTALL)
            css_matches = re.findall(css_pattern, code, re.DOTALL)
            javascript_matches = re.findall(javascript_pattern, code, re.DOTALL)
            with open(osp.join(self.args.static_html_dir, 'index.html'), 'w',
                      encoding="utf-8") as f:
                f.write(index_matches[0])
            with open(osp.join(self.args.static_html_dir, 'style.css'), 'w',
                      encoding="utf-8") as f:
                f.write(css_matches[0])
            with open(osp.join(self.args.static_html_dir, 'script.js'), 'w',
                      encoding="utf-8") as f:
                f.write(javascript_matches[0])
            self.Replace_Images()
        except Exception as e:
            # Deliberate broad catch: any parse/write failure means "retry".
            print(e)
            return False
        return True

    def _modify_loop(self, prompt):
        """Run a modification prompt until the reply parses or retries run out.

        Returns ``(Modified_code, messages, loop_number)``.
        """
        loop_number = 0
        while True:
            loop_number += 1
            Modified_code, messages = self._complete([{"role": "user", "content": prompt}])
            if self.Code_Parsing(Modified_code) or loop_number > self.args.max_retry:
                return Modified_code, messages, loop_number

    def Code_Modification(self, Generated_code, Code_Modification_String):
        """Apply user-requested functional changes to the generated code."""
        prompt = self.Code_modification_prompt.replace(
            "{Code Replacement Flag}", Generated_code)
        prompt = prompt.replace(
            "{Instructions Replacement Flag}", Code_Modification_String)
        return self._modify_loop(prompt)

    def Design_Modification(self, Generated_code, Code_Modification_String):
        """Apply user-requested visual/design changes to the generated code."""
        prompt = self.Design_modification_prompt.replace(
            "{Code Replacement Flag}", Generated_code)
        prompt = prompt.replace(
            "{Instructions Replacement Flag}", Code_Modification_String)
        return self._modify_loop(prompt)

    def clear_static_html_dir(self):
        """Empty the generated-site directory, creating it if needed."""
        static_html_dir = Path(self.args.static_html_dir)
        static_html_dir.mkdir(parents=True, exist_ok=True)
        for entry in os.listdir(self.args.static_html_dir):
            path = osp.join(self.args.static_html_dir, entry)
            # Guard so a stray sub-directory doesn't make os.remove raise.
            if osp.isfile(path):
                os.remove(path)

    def copyfile2static_html_dir(self, origin_dir):
        """Copy every file from *origin_dir* into the generated-site directory."""
        for file in os.listdir(origin_dir):
            shutil.copyfile(osp.join(origin_dir, file),
                            osp.join(self.args.static_html_dir, file))
# utils/log.py
import sys


class Logger:
    """Tee for stdout: mirrors every write to both the real terminal and a
    log file, flushing after each write so the file stays current even if the
    process dies."""

    def __init__(self, filename):
        self.terminal = sys.stdout
        # Held open for the object's lifetime; "w" truncates a previous log.
        # utf-8 keeps the log readable regardless of the platform's locale.
        self.log = open(filename, "w", encoding="utf-8")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
        self.flush()

    def flush(self):
        self.terminal.flush()
        self.log.flush()

    def isatty(self):
        # Report "not a terminal" so libraries skip interactive/ANSI output.
        return False


# utils/utils.py
import os
import zipfile


def zip_folder(folder_path, output_path):
    """Zip the contents of *folder_path* into the archive at *output_path*.

    Entries are stored with paths relative to *folder_path*; an existing
    archive at *output_path* is overwritten.
    """
    with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        for root, dirs, files in os.walk(folder_path):
            for file in files:
                file_path = os.path.join(root, file)
                zip_file.write(file_path, os.path.relpath(file_path, folder_path))


def iframe_generator(file_path):
    """Return an HTML snippet embedding *file_path* in an ``<iframe>``.

    NOTE(review): the original iframe markup was lost when this file was
    extracted (the HTML between the triple quotes was stripped); the template
    below is a reconstruction — verify the exact attributes against the
    original source.
    """
    iframe = """
    <iframe src="{}" width="100%" height="600" frameborder="0"></iframe>
    """.format(file_path)
    return iframe