├── .github └── workflows │ └── ci-cd-pipeline.yml ├── .gitignore ├── LICENSE ├── README.md ├── assets ├── 1.png ├── 2.png └── 3.png ├── devops-cli ├── .github │ └── workflows │ │ └── ci-cd-pipeline.yml ├── cli │ ├── __init__.py │ ├── agentic.py │ ├── ci_cd_pipeline.py │ ├── commands.py │ ├── menu.py │ └── utils.py ├── coder.py ├── main.py ├── modules │ ├── __init__.py │ ├── aws.py │ ├── azure.py │ ├── bash.py │ ├── ci_cd.py │ ├── developer.py │ ├── dockerfile.py │ ├── firebase.py │ ├── gcp.py │ ├── k8s.py │ ├── settings.py │ └── utils.py ├── readme.md └── requirements.txt ├── devops ├── LICENSE.txt ├── MANIFEST.in ├── PKG-INFO ├── __init__.py ├── aider.md ├── aider │ ├── __init__.py │ ├── args.py │ ├── coders │ │ ├── __init__.py │ │ ├── base_coder.py │ │ ├── base_prompts.py │ │ ├── editblock_coder.py │ │ ├── editblock_fenced_coder.py │ │ ├── editblock_fenced_prompts.py │ │ ├── editblock_func_coder.py │ │ ├── editblock_func_prompts.py │ │ ├── editblock_prompts.py │ │ ├── search_replace.py │ │ ├── single_wholefile_func_coder.py │ │ ├── single_wholefile_func_prompts.py │ │ ├── udiff_coder.py │ │ ├── udiff_prompts.py │ │ ├── wholefile_coder.py │ │ ├── wholefile_func_coder.py │ │ ├── wholefile_func_prompts.py │ │ └── wholefile_prompts.py │ ├── commands.py │ ├── diffs.py │ ├── dump.py │ ├── gui.py │ ├── history.py │ ├── io.py │ ├── linter.py │ ├── litellm.py │ ├── main.py │ ├── main_wrapper.py │ ├── mdstream.py │ ├── models.py │ ├── prompts.py │ ├── queries │ │ ├── tree-sitter-c-tags.scm │ │ ├── tree-sitter-c_sharp-tags.scm │ │ ├── tree-sitter-cpp-tags.scm │ │ ├── tree-sitter-elisp-tags.scm │ │ ├── tree-sitter-elixir-tags.scm │ │ ├── tree-sitter-elm-tags.scm │ │ ├── tree-sitter-go-tags.scm │ │ ├── tree-sitter-java-tags.scm │ │ ├── tree-sitter-javascript-tags.scm │ │ ├── tree-sitter-ocaml-tags.scm │ │ ├── tree-sitter-php-tags.scm │ │ ├── tree-sitter-python-tags.scm │ │ ├── tree-sitter-ql-tags.scm │ │ ├── tree-sitter-ruby-tags.scm │ │ ├── tree-sitter-rust-tags.scm │ │ └── tree-sitter-typescript-tags.scm │ ├── repo.py │ ├── repomap.py │ ├── scrape.py │ ├── sendchat.py │ ├── utils.py │ ├── versioncheck.py │ └── voice.py ├── aider_chat │ ├── benchmark │ │ └── __init__.py │ └── tests │ │ └── __init__.py ├── benchmark │ ├── __init__.py │ ├── benchmark.py │ ├── over_time.py │ ├── plots.py │ ├── prompts.py │ ├── refactor_tools.py │ ├── rungrid.py │ ├── swe_bench.py │ └── test_benchmark.py ├── cli │ └── __init__.py ├── hello.py ├── modules │ └── __init__.py ├── requirements.txt ├── setup.cfg ├── setup.py └── tests │ ├── __init__.py │ ├── test_coder.py │ ├── test_commands.py │ ├── test_editblock.py │ ├── test_io.py │ ├── test_main.py │ ├── test_models.py │ ├── test_repo.py │ ├── test_repomap.py │ ├── test_sendchat.py │ ├── test_udiff.py │ └── test_wholefile.py ├── misc ├── cli-null.py ├── coder-v01.py ├── main copy.py └── main-v01.py ├── requirements.txt └── setup.py /.github/workflows/ci-cd-pipeline.yml: -------------------------------------------------------------------------------- 1 | name: CI/CD Pipeline 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v2 18 | 19 | - name: Set up Python 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: '3.8' 23 | 24 | - name: Install dependencies 25 | run: | 26 | python -m pip install --upgrade pip 27 | pip install -r requirements.txt 28 | 29 | - name: Run tests 30 | run: | 31 | 
pytest
32 | 
33 |   deploy:
34 |     runs-on: ubuntu-latest
35 |     needs: build
36 | 
37 |     steps:
38 |     - name: Checkout code
39 |       uses: actions/checkout@v2
40 | 
41 |     - name: Set up Python
42 |       uses: actions/setup-python@v2
43 |       with:
44 |         python-version: '3.8'
45 | 
46 |     - name: Install dependencies
47 |       run: |
48 |         python -m pip install --upgrade pip
49 |         pip install -r requirements.txt
50 | 
51 |     - name: Deploy to production
52 |       run: |
53 |         echo "Deploying to production..."
54 |         # Add your deployment commands here
55 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 | 
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 | 
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 | 
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 | 
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 | 
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 | 
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 | 
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | .aider* 164 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 rUv 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/assets/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/assets/1.png
--------------------------------------------------------------------------------
/assets/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/assets/2.png
--------------------------------------------------------------------------------
/assets/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/assets/3.png
--------------------------------------------------------------------------------
/devops-cli/.github/workflows/ci-cd-pipeline.yml:
--------------------------------------------------------------------------------
1 | name: CI/CD Pipeline
2 | 
3 | on:
4 |   push:
5 |     branches:
6 |       - main
7 |   pull_request:
8 |     branches:
9 |       - main
10 | 
11 | jobs:
12 |   build:
13 |     runs-on: ubuntu-latest
14 | 
15 |     steps:
16 |     - name: Checkout code
17 |       uses: actions/checkout@v2
18 | 
19 |     - name: Set up Python
20 |       uses: actions/setup-python@v2
21 |       with:
22 |         python-version: '3.x'
23 | 
24 |     - name: Install dependencies
25 |       run: |
26 |         python -m pip install --upgrade pip
27 |         pip install -r requirements.txt
28 | 
29 |     - name: Lint with flake8
30 |       run: |
31 |         pip install flake8
32 |         flake8 .
33 | 
34 |     - name: Run tests
35 |       run: |
36 |         pip install pytest
37 |         pytest
38 | 
--------------------------------------------------------------------------------
/devops-cli/cli/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/devops-cli/cli/__init__.py
--------------------------------------------------------------------------------
/devops-cli/cli/agentic.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 | import subprocess
4 | 
5 | import click
6 | from lionagi import Session
7 | 
8 | def verbose_output(message):
9 |     # Simple console logger used by the functions below.
10 |     click.echo(message)
11 | 
12 | async def initialize_lionagi_session():
13 |     system = "You are an assistant designed to help with DevOps tasks."
14 |     return Session([[system]])
15 | 
16 | async def handle_user_input(session, user_input):
17 |     context = {"task": user_input}
18 |     instruction = {"Action": "Interpret the task and generate appropriate responses or actions."}
19 | 
20 |     # Intelligent parsing of directory paths
21 |     match = re.search(r'\.\./|\./|[a-zA-Z0-9_/]+', user_input)
22 |     if match:
23 |         repo_path = match.group(0)
24 |     else:
25 |         repo_path = "."
26 | 
27 |     try:
28 |         # Get directory structure
29 |         verbose_output(f"\n🔍 Analyzing directory structure for {repo_path}...")
30 |         files_and_dirs = subprocess.check_output(f"ls -lR {repo_path}", shell=True, text=True)
31 | 
32 |         # Chunk the directory listing (by characters) so each request fits within the model's context limit
33 |         max_tokens = 28192
34 |         chunks = [files_and_dirs[i:i+max_tokens] for i in range(0, len(files_and_dirs), max_tokens)]
35 | 
36 |         # Review a single chunk with LionAGI
37 |         async def review_chunk(chunk):
38 |             verbose_output("\n🤖 Sending directory information to LionAGI for review...")
39 |             result = await session.chat(instruction=instruction, context={"task": chunk}, model="gpt-4o")
40 |             return result
41 | 
42 |         # Process chunks concurrently; session.chat is a coroutine, so
43 |         # asyncio.gather provides the concurrency without a thread pool.
44 |         async def process_chunks_concurrently(chunks):
45 |             return await asyncio.gather(*(review_chunk(chunk) for chunk in chunks))
46 | 
47 |         review_results = await process_chunks_concurrently(chunks)
48 | 
49 |         verbose_output("\n🦁 LionAGI review:")
50 |         for result in review_results:
51 |             verbose_output(result)
52 | 
53 |         verbose_output("\n🤔 Generating potential actions based on the request...")
54 |         action_prompt = f"User request: {user_input}\nDirectory information:\n{files_and_dirs}"
55 |         potential_actions = await session.chat(instruction=instruction, context={"task": action_prompt}, model="gpt-4o")
56 | 
57 |         verbose_output("\n📝 Potential actions:")
58 |         verbose_output(potential_actions)
59 | 
60 |         execute_action = click.confirm("Do you want to execute any of the suggested actions?")
61 |         if execute_action:
62 |             selected_action = click.prompt("Enter the action you want to execute", type=str)
63 |             verbose_output(f"\n⚙️ Executing: {selected_action}")
64 |             subprocess.run(selected_action, shell=True, check=True)
65 |         else:
66 |             verbose_output("\n⏭️ Skipping action execution.")
67 | 
68 |     except subprocess.CalledProcessError as e:
69 |         verbose_output(f"\n⚠️ Error: {e.output}")
70 |     except Exception as e:
71 |         verbose_output(f"\n⚠️ Error: {e}")
72 | 
--------------------------------------------------------------------------------
/devops-cli/cli/ci_cd_pipeline.py:
--------------------------------------------------------------------------------
1 | import click
2 | from .utils import run_aider_command
3 | 
4 | def create_ci_cd_pipeline():
5 |     click.echo("\n🚀 Creating CI/CD Pipeline Configuration...")
6 |     instructions = click.prompt("Provide CI/CD pipeline creation instructions", default="Create a CI/CD pipeline for a Python project.")
7 |     guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False)
8 |     if guidance:
9 |         instructions = f"{guidance}. {instructions}"
10 |     pipeline_path = "./.github/workflows/ci-cd-pipeline.yml"
11 |     run_aider_command(instructions, [pipeline_path])
12 |     click.echo("✅ CI/CD Pipeline Configuration created successfully.")
13 |     import coder
14 |     coder.coder_menu(coder.main_menu)  # Call the coder menu again
15 | 
--------------------------------------------------------------------------------
/devops-cli/cli/commands.py:
--------------------------------------------------------------------------------
1 | import click
2 | import sys
3 | from cli.utils import (
4 |     list_deployments_cmd,
5 |     setup_deployment_cmd,
6 |     create_deployment_cmd,
7 |     update_deployment_cmd,
8 |     remove_deployment_cmd
9 | )
10 | import coder
11 | 
12 | def main_menu():
13 |     click.echo("\n📋 Main Menu")
14 |     click.echo("1. 
List Deployments") 15 | click.echo("2. Setup Deployment") 16 | click.echo("3. Create Deployment") 17 | click.echo("4. Update Deployment") 18 | click.echo("5. Remove Deployment") 19 | click.echo("6. Load Coder Menu") 20 | click.echo("7. Exit") 21 | 22 | choice = click.prompt("Enter your choice", type=int) 23 | 24 | if choice == 1: 25 | list_deployments_cmd() 26 | elif choice == 2: 27 | setup_deployment_cmd() 28 | elif choice == 3: 29 | create_deployment_cmd() 30 | elif choice == 4: 31 | update_deployment_cmd() 32 | elif choice == 5: 33 | remove_deployment_cmd() 34 | elif choice == 6: 35 | coder.coder_menu() 36 | elif choice == 7: 37 | click.echo("\n👋 Exiting. Goodbye!\n") 38 | sys.exit(0) 39 | else: 40 | click.echo("\n❌ Invalid choice. Please try again.") 41 | main_menu() 42 | 43 | @click.command() 44 | def run(): 45 | """ 46 | Start the interactive menu or chat UI. 47 | """ 48 | welcome() 49 | main_menu() 50 | -------------------------------------------------------------------------------- /devops-cli/cli/menu.py: -------------------------------------------------------------------------------- 1 | import click 2 | import sys 3 | from cli.utils import ( 4 | list_deployments_cmd, 5 | setup_deployment_cmd, 6 | create_deployment_cmd, 7 | update_deployment_cmd, 8 | remove_deployment_cmd 9 | ) 10 | import coder 11 | 12 | def main_menu(): 13 | click.echo("\n📋 Main Menu") 14 | click.echo("1. List Deployments") 15 | click.echo("2. Setup Deployment") 16 | click.echo("3. Create Deployment") 17 | click.echo("4. Update Deployment") 18 | click.echo("5. Remove Deployment") 19 | click.echo("6. Load Coder Menu") # New menu option to load coder menu 20 | click.echo("7. Exit") 21 | 22 | choice = click.prompt("Enter your choice", type=int) 23 | 24 | if choice == 1: 25 | list_deployments_cmd() 26 | elif choice == 2: 27 | setup_deployment_cmd() 28 | elif choice == 3: 29 | create_deployment_cmd() 30 | elif choice == 4: 31 | update_deployment_cmd() 32 | elif choice == 5: 33 | remove_deployment_cmd() 34 | elif choice == 6: 35 | coder.coder_menu() # Call the coder menu function 36 | elif choice == 7: 37 | click.echo("\n👋 Exiting. Goodbye!\n") 38 | sys.exit(0) 39 | else: 40 | click.echo("\n❌ Invalid choice. Please try again.") 41 | main_menu() 42 | -------------------------------------------------------------------------------- /devops-cli/coder.py: -------------------------------------------------------------------------------- 1 | # coder.py is a new file that contains the main_menu and coder_menu functions. The main_menu function displays the main menu options and prompts the user to choose an option. The coder_menu function displays the Agentic DevOps menu options and calls the appropriate function based on the user's choice. The coder_menu function is called recursively to allow the user to navigate back to the main menu. 
2 | import click 3 | import subprocess 4 | import importlib.util 5 | import sys 6 | from modules.dockerfile import create_dockerfile 7 | from modules.bash import create_bash_script 8 | from modules.k8s import create_k8s_config 9 | from modules.ci_cd import create_ci_cd_pipeline 10 | from modules.azure import azure_menu 11 | from modules.aws import aws_menu 12 | from modules.gcp import gcp_menu 13 | from modules.developer import developer_menu 14 | from modules.settings import settings_menu 15 | from modules.firebase import firebase_menu 16 | 17 | def check_and_install_aider(): 18 | aider_installed = importlib.util.find_spec("aider") is not None 19 | 20 | if not aider_installed: 21 | click.echo("📦 'aider' is not installed. Installing...") 22 | try: 23 | subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-e', './aider_chat']) 24 | except subprocess.CalledProcessError as e: 25 | click.echo(f"❌ Failed to install 'aider': {e}") 26 | sys.exit(1) 27 | else: 28 | click.echo("✅ 'aider' installed successfully.") 29 | else: 30 | click.echo("✅ 'aider' is already installed.") 31 | 32 | def start_webui(): 33 | click.echo("\n🚀 Starting Agentic DevOps WebUI...") 34 | try: 35 | subprocess.check_call(["aider", "--gui"]) 36 | except subprocess.CalledProcessError as e: 37 | click.echo(f"❌ Failed to start WebUI: {e}") 38 | import coder 39 | coder.coder_menu(coder.main_menu) # Call the coder menu again 40 | 41 | 42 | def coder_menu(main_menu): 43 | click.echo("\n📋 Agentic DevOps Menu") 44 | click.echo("1. Start Agentic DevOps WebUI") 45 | click.echo("2. Create Dockerfile") 46 | click.echo("3. Create Bash Script") 47 | click.echo("4. Create Kubernetes Configuration") 48 | click.echo("5. Create CI/CD Pipeline") 49 | click.echo("6. Azure Configuration") 50 | click.echo("7. AWS Configuration") 51 | click.echo("8. GCP Configuration") 52 | click.echo("9. Firebase Configuration") 53 | click.echo("10. Developer Configuration") 54 | click.echo("11. Settings") 55 | click.echo("12. Back to Main Menu") 56 | 57 | choice = click.prompt("Enter your choice", type=int) 58 | 59 | if choice == 1: 60 | start_webui() 61 | elif choice == 2: 62 | create_dockerfile() 63 | elif choice == 3: 64 | create_bash_script() 65 | elif choice == 4: 66 | create_k8s_config() 67 | elif choice == 5: 68 | create_ci_cd_pipeline() 69 | elif choice == 6: 70 | azure_menu() 71 | elif choice == 7: 72 | aws_menu() 73 | elif choice == 8: 74 | gcp_menu() 75 | elif choice == 9: 76 | firebase_menu() # Call the Firebase menu 77 | elif choice == 10: 78 | developer_menu() 79 | elif choice == 11: 80 | settings_menu() 81 | elif choice == 12: 82 | main_menu() 83 | else: 84 | click.echo("\n❌ Invalid choice. Please try again.") 85 | coder_menu(main_menu) 86 | 87 | 88 | def main_menu(): 89 | click.echo("\n📋 Main Menu") 90 | click.echo("1. 📄 Agentic DevOps") 91 | click.echo("2. 🚀 Agentic Deployment") 92 | click.echo("3. ❓ Help") 93 | click.echo("4. 🔥 Exit") 94 | 95 | choice = click.prompt("Enter your choice", type=int) 96 | 97 | if choice == 1: 98 | coder_menu(main_menu) 99 | elif choice == 2: 100 | deployment_menu() 101 | elif choice == 3: 102 | click.echo("\nHelp menu...") # Define your help functionality here 103 | main_menu() 104 | elif choice == 4: 105 | click.echo("\n👋 Exiting. Goodbye!\n") 106 | sys.exit(0) 107 | else: 108 | click.echo("\n❌ Invalid choice. 
Please try again.") 109 | main_menu() 110 | 111 | if __name__ == '__main__': 112 | check_and_install_aider() 113 | main_menu() 114 | -------------------------------------------------------------------------------- /devops-cli/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/devops-cli/modules/__init__.py -------------------------------------------------------------------------------- /devops-cli/modules/aws.py: -------------------------------------------------------------------------------- 1 | # modules/aws.py is a new file that contains the create_aws_config function. This function is called when the user selects the "Create AWS Configuration" option from the coder_menu function in coder.py. The create_aws_config function prompts the user to provide instructions for creating an AWS configuration and optional guidance for the AI. It then creates an AWS configuration file based on the user's input. 2 | 3 | import click 4 | from modules.utils import run_aider_command 5 | 6 | def create_aws_config(): 7 | click.echo("\n🌐 Creating AWS Configuration...") 8 | instructions = click.prompt("Provide AWS configuration instructions", default="Create a CloudFormation template.") 9 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 10 | if guidance: 11 | instructions = f"{guidance}. {instructions}" 12 | config_path = "./aws-config.yml" 13 | run_aider_command(instructions, [config_path]) 14 | click.echo("✅ AWS Configuration created successfully.") 15 | import coder 16 | coder.coder_menu(coder.main_menu) # Call the coder menu again 17 | 18 | def aws_menu(): 19 | click.echo("\n🌐 AWS Configuration") 20 | click.echo("1. Create CloudFormation Template") 21 | click.echo("2. Create AWS CLI Script") 22 | click.echo("3. Create IAM Policy") 23 | click.echo("4. Back to Main Menu") 24 | 25 | choice = click.prompt("Enter your choice", type=int) 26 | 27 | if choice == 1: 28 | create_aws_config() 29 | elif choice == 2: 30 | click.echo("\n🚀 Creating AWS CLI Script...") 31 | import coder 32 | coder.create_bash_script() # Adjust this if there's a more specific function for AWS CLI scripts 33 | elif choice == 3: 34 | click.echo("\n🚀 Creating IAM Policy...") 35 | create_aws_config() # You can adjust this to a more specific function if needed 36 | elif choice == 4: 37 | import coder 38 | coder.coder_menu(coder.main_menu) 39 | else: 40 | click.echo("\n❌ Invalid choice. Please try again.") 41 | aws_menu() 42 | -------------------------------------------------------------------------------- /devops-cli/modules/azure.py: -------------------------------------------------------------------------------- 1 | #modules/azure.py is a new file that contains the create_azure_config function. This function is called when the user selects the "Create Azure Configuration" option from the coder_menu function in coder.py. The create_azure_config function prompts the user to provide instructions for creating an Azure configuration and optional guidance for the AI. It then creates an Azure configuration file based on the user's input. 
2 | 3 | import click 4 | from modules.utils import run_aider_command 5 | 6 | def create_azure_config(): 7 | click.echo("\n🌐 Creating Azure Configuration...") 8 | instructions = click.prompt("Provide Azure configuration instructions", default="Create an Azure Resource Manager template.") 9 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 10 | if guidance: 11 | instructions = f"{guidance}. {instructions}" 12 | config_path = "./azure-config.json" 13 | run_aider_command(instructions, [config_path]) 14 | click.echo("✅ Azure Configuration created successfully.") 15 | import coder 16 | coder.coder_menu(coder.main_menu) # Call the coder menu again 17 | 18 | def azure_menu(): 19 | click.echo("\n🌐 Azure Configuration") 20 | click.echo("1. Create ARM Template") 21 | click.echo("2. Create Azure CLI Script") 22 | click.echo("3. Create Azure Policy") 23 | click.echo("4. Back to Main Menu") 24 | 25 | choice = click.prompt("Enter your choice", type=int) 26 | 27 | if choice == 1: 28 | create_azure_config() 29 | elif choice == 2: 30 | click.echo("\n🚀 Creating Azure CLI Script...") 31 | import coder 32 | coder.create_bash_script() # Adjust this if there's a more specific function for Azure CLI scripts 33 | elif choice == 3: 34 | click.echo("\n🚀 Creating Azure Policy...") 35 | create_azure_config() # You can adjust this to a more specific function if needed 36 | elif choice == 4: 37 | import coder 38 | coder.coder_menu(coder.main_menu) 39 | else: 40 | click.echo("\n❌ Invalid choice. Please try again.") 41 | azure_menu() 42 | -------------------------------------------------------------------------------- /devops-cli/modules/bash.py: -------------------------------------------------------------------------------- 1 | # modules/bash.py is a new file that contains the create_bash_script function. This function is called when the user selects the "Create Bash Script" option from the coder_menu function in coder.py. The create_bash_script function prompts the user to provide instructions for creating a Bash script and optional guidance for the AI. It then creates a Bash script file based on the user's input. 2 | import click 3 | import os 4 | from modules.utils import run_aider_command 5 | 6 | def create_bash_script(): 7 | click.echo("\n🚀 Creating Bash Script...") 8 | instructions = click.prompt("Provide Bash script creation instructions", default="Create a basic deployment script.") 9 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 10 | if guidance: 11 | instructions = f"{guidance}. {instructions}" 12 | 13 | # Ensure the output directory exists 14 | output_dir = "./output/bash/" 15 | os.makedirs(output_dir, exist_ok=True) 16 | 17 | script_path = os.path.join(output_dir, "script.sh") 18 | run_aider_command(instructions, [script_path]) 19 | click.echo("✅ Bash Script created successfully.") 20 | 21 | import coder 22 | coder.coder_menu(coder.main_menu) # Call the coder menu again 23 | -------------------------------------------------------------------------------- /devops-cli/modules/ci_cd.py: -------------------------------------------------------------------------------- 1 | # modules/ci_cd.py is a new file that contains the create_ci_cd_pipeline function. This function is called when the user selects the "Create CI/CD Pipeline" option from the coder_menu function in coder.py. 
The create_ci_cd_pipeline function prompts the user to provide instructions for creating a CI/CD pipeline configuration and optional guidance for the AI. It then creates a CI/CD pipeline configuration file based on the user's input. 2 | 3 | import click 4 | from modules.utils import run_aider_command 5 | 6 | def create_ci_cd_pipeline(): 7 | click.echo("\n🚀 Creating CI/CD Pipeline Configuration...") 8 | instructions = click.prompt("Provide CI/CD pipeline creation instructions", default="Create a CI/CD pipeline for a Python project.") 9 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 10 | if guidance: 11 | instructions = f"{guidance}. {instructions}" 12 | pipeline_path = "./.github/workflows/ci-cd-pipeline.yml" 13 | run_aider_command(instructions, [pipeline_path]) 14 | click.echo("✅ CI/CD Pipeline Configuration created successfully.") 15 | import coder 16 | coder.coder_menu(coder.main_menu) # Call the coder menu again 17 | -------------------------------------------------------------------------------- /devops-cli/modules/developer.py: -------------------------------------------------------------------------------- 1 | # modules/developer.py is a new file that contains the developer_menu function. This function displays a menu for developer-related configurations and allows the user to create .nix configuration, virtual environment (venv), and .devcontainer configuration. The user can also go back to the main menu from this menu. 2 | import click 3 | from modules.utils import run_aider_command 4 | 5 | def create_nix_config(): 6 | click.echo("\n🔧 Creating .nix Configuration...") 7 | instructions = click.prompt("Provide .nix configuration instructions", default="Create a basic Nix configuration.") 8 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 9 | if guidance: 10 | instructions = f"{guidance}. {instructions}" 11 | config_path = "./default.nix" 12 | run_aider_command(instructions, [config_path]) 13 | click.echo("✅ .nix Configuration created successfully.") 14 | import coder 15 | coder.coder_menu(coder.main_menu) # Call the coder menu again 16 | 17 | def create_venv(): 18 | click.echo("\n🔧 Creating Virtual Environment (venv)...") 19 | instructions = click.prompt("Provide venv creation instructions", default="Create a Python virtual environment.") 20 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 21 | if guidance: 22 | instructions = f"{guidance}. {instructions}" 23 | venv_path = "./venv" 24 | run_aider_command(instructions, [venv_path]) 25 | click.echo("✅ Virtual Environment created successfully.") 26 | import coder 27 | coder.coder_menu(coder.main_menu) # Call the coder menu again 28 | 29 | def create_devcontainer(): 30 | click.echo("\n🔧 Creating .devcontainer Configuration...") 31 | instructions = click.prompt("Provide .devcontainer configuration instructions", default="Create a basic .devcontainer.json for a VS Code environment.") 32 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 33 | if guidance: 34 | instructions = f"{guidance}. 
{instructions}" 35 | config_path = "./.devcontainer/devcontainer.json" 36 | run_aider_command(instructions, [config_path]) 37 | click.echo("✅ .devcontainer Configuration created successfully.") 38 | import coder 39 | coder.coder_menu(coder.main_menu) # Call the coder menu again 40 | 41 | def developer_menu(): 42 | click.echo("\n🛠️ Developer Configuration") 43 | click.echo("1. Create .nix Configuration") 44 | click.echo("2. Create Virtual Environment (venv)") 45 | click.echo("3. Create .devcontainer Configuration") 46 | click.echo("4. Back to Main Menu") 47 | 48 | choice = click.prompt("Enter your choice", type=int) 49 | 50 | if choice == 1: 51 | create_nix_config() 52 | elif choice == 2: 53 | create_venv() 54 | elif choice == 3: 55 | create_devcontainer() 56 | elif choice == 4: 57 | import coder 58 | coder.coder_menu(coder.main_menu) 59 | else: 60 | click.echo("\n❌ Invalid choice. Please try again.") 61 | developer_menu() 62 | -------------------------------------------------------------------------------- /devops-cli/modules/dockerfile.py: -------------------------------------------------------------------------------- 1 | import click 2 | from modules.utils import run_aider_command 3 | 4 | def create_dockerfile(): 5 | click.echo("\n🚀 Creating Dockerfile...") 6 | instructions = click.prompt("Provide Dockerfile creation instructions", default="Create a Dockerfile for a Python web app.") 7 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 8 | if guidance: 9 | instructions = f"{guidance}. {instructions}" 10 | dockerfile_path = "./Dockerfile" 11 | run_aider_command(instructions, [dockerfile_path]) 12 | click.echo("✅ Dockerfile created successfully.") 13 | import coder 14 | coder.coder_menu(coder.main_menu) # Call the coder menu again 15 | -------------------------------------------------------------------------------- /devops-cli/modules/firebase.py: -------------------------------------------------------------------------------- 1 | import click 2 | import os 3 | from modules.utils import run_aider_command 4 | 5 | def ensure_firebase_folder(): 6 | if not os.path.exists('./firebase'): 7 | os.makedirs('./firebase') 8 | 9 | def create_firebase_config(): 10 | click.echo("\n🌐 Creating Firebase Configuration...") 11 | instructions = click.prompt("Provide Firebase configuration instructions", default="Create a basic Firebase configuration.") 12 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 13 | if guidance: 14 | instructions = f"{guidance}. {instructions}" 15 | ensure_firebase_folder() 16 | config_path = "./firebase/firebase-config.json" 17 | run_aider_command(instructions, [config_path]) 18 | click.echo("✅ Firebase Configuration created successfully.") 19 | import coder 20 | coder.coder_menu(coder.main_menu) # Call the coder menu again 21 | 22 | def create_firestore_rules(): 23 | click.echo("\n🌐 Creating Firestore Rules...") 24 | instructions = click.prompt("Provide Firestore rules instructions", default="Create Firestore security rules.") 25 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 26 | if guidance: 27 | instructions = f"{guidance}. 
{instructions}" 28 | ensure_firebase_folder() 29 | rules_path = "./firebase/firestore.rules" 30 | run_aider_command(instructions, [rules_path]) 31 | click.echo("✅ Firestore Rules created successfully.") 32 | import coder 33 | coder.coder_menu(coder.main_menu) # Call the coder menu again 34 | 35 | def create_firebase_cli_script(): 36 | click.echo("\n🌐 Creating Firebase CLI Script...") 37 | instructions = click.prompt("Provide Firebase CLI script instructions", default="Create a Firebase CLI script for deployment.") 38 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 39 | if guidance: 40 | instructions = f"{guidance}. {instructions}" 41 | ensure_firebase_folder() 42 | script_path = "./firebase/firebase-cli.sh" 43 | run_aider_command(instructions, [script_path]) 44 | click.echo("✅ Firebase CLI Script created successfully.") 45 | import coder 46 | coder.coder_menu(coder.main_menu) # Call the coder menu again 47 | 48 | def firebase_menu(): 49 | click.echo("\n🌐 Firebase Configuration") 50 | click.echo("1. Create Firebase Configuration") 51 | click.echo("2. Create Firestore Rules") 52 | click.echo("3. Create Firebase CLI Script") 53 | click.echo("4. Back to Main Menu") 54 | 55 | choice = click.prompt("Enter your choice", type=int) 56 | 57 | if choice == 1: 58 | create_firebase_config() 59 | elif choice == 2: 60 | create_firestore_rules() 61 | elif choice == 3: 62 | create_firebase_cli_script() 63 | elif choice == 4: 64 | import coder 65 | coder.coder_menu(coder.main_menu) 66 | else: 67 | click.echo("\n❌ Invalid choice. Please try again.") 68 | firebase_menu() 69 | -------------------------------------------------------------------------------- /devops-cli/modules/gcp.py: -------------------------------------------------------------------------------- 1 | # modules/gcp.py is a new file that contains the create_gcp_config function. This function is called when the user selects the "Create GCP Configuration" option from the coder_menu function in coder.py. The create_gcp_config function prompts the user to provide instructions for creating a GCP configuration and optional guidance for the AI. It then creates a GCP configuration file based on the user's input. 2 | import click 3 | from modules.utils import run_aider_command 4 | 5 | def create_gcp_config(): 6 | click.echo("\n🌐 Creating GCP Configuration...") 7 | instructions = click.prompt("Provide GCP configuration instructions", default="Create a Google Cloud Deployment Manager template.") 8 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 9 | if guidance: 10 | instructions = f"{guidance}. {instructions}" 11 | config_path = "./gcp-config.yaml" 12 | run_aider_command(instructions, [config_path]) 13 | click.echo("✅ GCP Configuration created successfully.") 14 | import coder 15 | coder.coder_menu(coder.main_menu) # Call the coder menu again 16 | 17 | def gcp_menu(): 18 | click.echo("\n🌐 GCP Configuration") 19 | click.echo("1. Create Deployment Manager Template") 20 | click.echo("2. Create GCP CLI Script") 21 | click.echo("3. Create GCP IAM Policy") 22 | click.echo("4. 
Back to Main Menu") 23 | 24 | choice = click.prompt("Enter your choice", type=int) 25 | 26 | if choice == 1: 27 | create_gcp_config() 28 | elif choice == 2: 29 | click.echo("\n🚀 Creating GCP CLI Script...") 30 | import coder 31 | coder.create_bash_script() # Adjust this if there's a more specific function for GCP CLI scripts 32 | elif choice == 3: 33 | click.echo("\n🚀 Creating GCP IAM Policy...") 34 | create_gcp_config() # You can adjust this to a more specific function if needed 35 | elif choice == 4: 36 | import coder 37 | coder.coder_menu(coder.main_menu) 38 | else: 39 | click.echo("\n❌ Invalid choice. Please try again.") 40 | gcp_menu() 41 | -------------------------------------------------------------------------------- /devops-cli/modules/k8s.py: -------------------------------------------------------------------------------- 1 | # modules/k8s.py is a new file that contains the create_k8s_config function. This function is called when the user selects the "Create Kubernetes Configuration" option from the coder_menu function in coder.py. The create_k8s_config function prompts the user to provide instructions for creating a Kubernetes configuration and optional guidance for the AI. It then creates a Kubernetes configuration file based on the user's input. 2 | import click 3 | from modules.utils import run_aider_command 4 | 5 | def create_k8s_config(): 6 | click.echo("\n🚀 Creating Kubernetes Configuration...") 7 | instructions = click.prompt("Provide Kubernetes configuration instructions", default="Create a Kubernetes config for a web application.") 8 | guidance = click.prompt("Provide any initial optional guidance for the AI (press Enter to skip)", default="", show_default=False) 9 | if guidance: 10 | instructions = f"{guidance}. {instructions}" 11 | config_path = "./k8s-config.yml" 12 | run_aider_command(instructions, [config_path]) 13 | click.echo("✅ Kubernetes Configuration created successfully.") 14 | import coder 15 | coder.coder_menu(coder.main_menu) # Call the coder menu again 16 | -------------------------------------------------------------------------------- /devops-cli/modules/settings.py: -------------------------------------------------------------------------------- 1 | import click 2 | 3 | settings = { 4 | "model": "gpt-4o", 5 | "weak_model": "gpt-3.5-turbo", 6 | "max_tokens": 1024 7 | } 8 | 9 | def update_settings(): 10 | click.echo("\n🛠️ Update Settings") 11 | settings["model"] = click.prompt("Enter model to use", default=settings["model"]) 12 | settings["weak_model"] = click.prompt("Enter weak model to use", default=settings["weak_model"]) 13 | settings["max_tokens"] = click.prompt("Enter max tokens", default=settings["max_tokens"], type=int) 14 | click.echo("✅ Settings updated successfully.") 15 | settings_menu() 16 | 17 | def view_settings(): 18 | click.echo("\n🔧 Current Settings") 19 | for key, value in settings.items(): 20 | click.echo(f"{key}: {value}") 21 | settings_menu() 22 | 23 | def settings_menu(): 24 | click.echo("\n📋 Settings Menu") 25 | click.echo("1. View Settings") 26 | click.echo("2. Update Settings") 27 | click.echo("3. Back to Main Menu") 28 | 29 | choice = click.prompt("Enter your choice", type=int) 30 | 31 | if choice == 1: 32 | view_settings() 33 | elif choice == 2: 34 | update_settings() 35 | elif choice == 3: 36 | from ..coder import coder_menu 37 | coder_menu() 38 | else: 39 | click.echo("\n❌ Invalid choice. 
Please try again.") 40 | settings_menu() 41 | -------------------------------------------------------------------------------- /devops-cli/modules/utils.py: -------------------------------------------------------------------------------- 1 | import click 2 | import subprocess 3 | 4 | def run_aider_command(instructions, file_paths): 5 | command = ["aider"] + file_paths + ["-m", instructions] 6 | click.echo(f"Running command: {' '.join(command)}") # Verbose output 7 | 8 | # Running the command without capturing the output to show interactive UI 9 | process = subprocess.run(command) 10 | 11 | if process.returncode != 0: 12 | click.echo(f"Error: Aider exited with code {process.returncode}") 13 | fix_errors(file_paths) 14 | 15 | def fix_errors(file_paths): 16 | command = ["aider"] + file_paths + ["--fix"] 17 | click.echo(f"Running error fix command: {' '.join(command)}") # Verbose output 18 | 19 | # Running the command without capturing the output to show interactive UI 20 | subprocess.run(command) 21 | -------------------------------------------------------------------------------- /devops-cli/readme.md: -------------------------------------------------------------------------------- 1 | # Deployment Instructions for ../chat Application on Azure 2 | 3 | This document provides detailed instructions for deploying the `../chat` NodeJS NextJS application on Azure using Docker and the Azure CLI. 4 | 5 | ## Prerequisites 6 | 7 | - Azure CLI installed 8 | - Docker installed 9 | - An Azure account and active subscription 10 | 11 | ## Environment Variables 12 | 13 | Before you start the deployment, ensure the following environment variables are set. You will be prompted for any missing values when you run the `deploy.sh` script. 14 | 15 | - `AZURE_CLIENT_ID` 16 | - `AZURE_SECRET` 17 | - `AZURE_TENANT_ID` 18 | - `AZURE_SUBSCRIPTION_ID` 19 | - `RESOURCE_GROUP_NAME` 20 | - `LOCATION` 21 | - `REGISTRY_NAME` 22 | - `DEEPGRAM_STT_DOMAIN` (default: `https://api.deepgram.com`) 23 | - `DEEPGRAM_API_KEY` 24 | - `OPENAI_API_KEY` 25 | - `EXASEARCH_API_KEY` 26 | 27 | ## Steps 28 | 29 | 1. **Login to Azure CLI**: Run `az login` and follow the prompts to log in to your Azure account. 30 | 31 | 2. **Create a Resource Group**: Use the command `az group create --name --location ` to create a new resource group in your preferred location. 32 | 33 | 3. **Create Azure Container Registry (ACR)**: Execute `az acr create --resource-group --name --sku Basic --admin-enabled true` to create a container registry. 34 | 35 | 4. **Build Docker Image**: Navigate to the `../chat` directory and build your Docker image using `docker build -t .azurecr.io/chat-app:v1 .`. 36 | 37 | 5. **Push Docker Image to ACR**: Log in to your ACR using `az acr login --name ` and then push your Docker image using `docker push .azurecr.io/chat-app:v1`. 38 | 39 | 6. **Deploy to Azure Container Instances (ACI)**: Deploy your application using `az container create --resource-group --name chat-app-container --image .azurecr.io/chat-app:v1 --cpu 1 --memory 1 --ports 3000 --dns-name-label chat-app --environment-variables NODE_ENV=production`. 40 | 41 | 7. **Access Your Application**: Once deployed, your application will be accessible at `http://chat-app..azurecontainer.io:3000`. 42 | 43 | ## Using `deploy.sh` Script 44 | 45 | For convenience, a `deploy.sh` script is provided to automate the deployment process. Before running the script, ensure it has execution permissions by running `chmod +x deploy.sh`. 
46 | 47 | To deploy using the script, simply execute `./deploy.sh` from the terminal. The script will guide you through the deployment process step by step and prompt you for any required environment variables that are not already set. 48 | 49 | ## Environment Variables for Dockerfile 50 | 51 | The script will also update your Dockerfile to include the following environment variables: 52 | 53 | - `EXASEARCH_API_KEY` 54 | - `OPENAI_API_KEY` 55 | 56 | ## Database Configuration 57 | 58 | No database is required for the deployment of this application. 59 | 60 | ## Additional Notes 61 | 62 | - Ensure you replace placeholder values such as ``, ``, and `` with your actual values. 63 | - The deployment script `deploy.sh` includes verbose output and emojis for a more interactive deployment experience. It also checks for required components at startup. 64 | - The script creates a `.env.local` file in the `../chat` directory to store `DEEPGRAM_STT_DOMAIN`, `DEEPGRAM_API_KEY`, and `OPENAI_API_KEY`. 65 | 66 | For more detailed information on each step, refer to the official [Azure documentation](https://docs.microsoft.com/azure) and [Docker documentation](https://docs.docker.com). 67 | -------------------------------------------------------------------------------- /devops-cli/requirements.txt: -------------------------------------------------------------------------------- 1 | azure-identity==1.7.0 2 | azure-mgmt-resource==20.0.0 3 | azure-mgmt-web==3.0.0 4 | azure-appconfiguration==1.1.1 5 | pytest==7.1.1 6 | litellm 7 | # Requirements for the backend FastAPI application 8 | fastapi 9 | uvicorn 10 | sqlalchemy 11 | alembic 12 | httpx 13 | pydantic 14 | python-dotenv 15 | lionagi 16 | 17 | # 18 | # This file is autogenerated by pip-compile with Python 3.11 19 | # by the following command: 20 | # 21 | # pip-compile requirements.in 22 | # 23 | aiohttp==3.9.5 24 | # via litellm 25 | aiosignal==1.3.1 26 | # via aiohttp 27 | altair==5.3.0 28 | # via streamlit 29 | annotated-types==0.6.0 30 | # via pydantic 31 | anyio==4.3.0 32 | # via 33 | # httpx 34 | # openai 35 | attrs==23.2.0 36 | # via 37 | # aiohttp 38 | # jsonschema 39 | # referencing 40 | backoff==2.2.1 41 | # via -r requirements.in 42 | beautifulsoup4==4.12.3 43 | # via -r requirements.in 44 | blinker==1.8.2 45 | # via streamlit 46 | cachetools==5.3.3 47 | # via 48 | # google-auth 49 | # streamlit 50 | certifi==2024.2.2 51 | # via 52 | # httpcore 53 | # httpx 54 | # requests 55 | cffi==1.16.0 56 | # via 57 | # sounddevice 58 | # soundfile 59 | charset-normalizer==3.3.2 60 | # via requests 61 | click==8.1.7 62 | # via 63 | # litellm 64 | # streamlit 65 | configargparse==1.7 66 | # via -r requirements.in 67 | diff-match-patch==20230430 68 | # via -r requirements.in 69 | diskcache==5.6.3 70 | # via -r requirements.in 71 | distro==1.9.0 72 | # via openai 73 | filelock==3.14.0 74 | # via huggingface-hub 75 | flake8==7.0.0 76 | # via -r requirements.in 77 | frozenlist==1.4.1 78 | # via 79 | # aiohttp 80 | # aiosignal 81 | fsspec==2024.5.0 82 | # via huggingface-hub 83 | gitdb==4.0.11 84 | # via gitpython 85 | gitpython==3.1.43 86 | # via 87 | # -r requirements.in 88 | # streamlit 89 | google-ai-generativelanguage==0.6.4 90 | # via google-generativeai 91 | google-api-core[grpc]==2.19.0 92 | # via 93 | # google-ai-generativelanguage 94 | # google-api-python-client 95 | # google-generativeai 96 | google-api-python-client==2.129.0 97 | # via google-generativeai 98 | google-auth==2.29.0 99 | # via 100 | # google-ai-generativelanguage 101 | # google-api-core 
102 | # google-api-python-client 103 | # google-auth-httplib2 104 | # google-generativeai 105 | google-auth-httplib2==0.2.0 106 | # via google-api-python-client 107 | google-generativeai==0.5.4 108 | # via -r requirements.in 109 | googleapis-common-protos==1.63.0 110 | # via 111 | # google-api-core 112 | # grpcio-status 113 | greenlet==3.0.3 114 | # via playwright 115 | grep-ast==0.3.2 116 | # via -r requirements.in 117 | grpcio==1.63.0 118 | # via 119 | # google-api-core 120 | # grpcio-status 121 | grpcio-status==1.62.2 122 | # via google-api-core 123 | h11==0.14.0 124 | # via httpcore 125 | httpcore==1.0.5 126 | # via httpx 127 | httplib2==0.22.0 128 | # via 129 | # google-api-python-client 130 | # google-auth-httplib2 131 | httpx==0.27.0 132 | # via openai 133 | huggingface-hub==0.23.0 134 | # via tokenizers 135 | idna==3.7 136 | # via 137 | # anyio 138 | # httpx 139 | # requests 140 | # yarl 141 | importlib-metadata==7.1.0 142 | # via litellm 143 | jinja2==3.1.4 144 | # via 145 | # altair 146 | # litellm 147 | # pydeck 148 | jsonschema==4.22.0 149 | # via 150 | # -r requirements.in 151 | # altair 152 | jsonschema-specifications==2023.12.1 153 | # via jsonschema 154 | litellm==1.37.16 155 | # via -r requirements.in 156 | markdown-it-py==3.0.0 157 | # via rich 158 | markupsafe==2.1.5 159 | # via jinja2 160 | mccabe==0.7.0 161 | # via flake8 162 | mdurl==0.1.2 163 | # via markdown-it-py 164 | multidict==6.0.5 165 | # via 166 | # aiohttp 167 | # yarl 168 | networkx==3.2.1 169 | # via -r requirements.in 170 | numpy==1.26.4 171 | # via 172 | # -r requirements.in 173 | # altair 174 | # pandas 175 | # pyarrow 176 | # pydeck 177 | # scipy 178 | # streamlit 179 | openai==1.30.1 180 | # via 181 | # -r requirements.in 182 | # litellm 183 | packaging==24.0 184 | # via 185 | # -r requirements.in 186 | # altair 187 | # huggingface-hub 188 | # streamlit 189 | pandas==2.2.2 190 | # via 191 | # altair 192 | # streamlit 193 | pathspec==0.12.1 194 | # via 195 | # -r requirements.in 196 | # grep-ast 197 | pillow==10.3.0 198 | # via 199 | # -r requirements.in 200 | # streamlit 201 | playwright==1.44.0 202 | # via -r requirements.in 203 | prompt-toolkit==3.0.43 204 | # via -r requirements.in 205 | proto-plus==1.23.0 206 | # via 207 | # google-ai-generativelanguage 208 | # google-api-core 209 | protobuf==4.25.3 210 | # via 211 | # google-ai-generativelanguage 212 | # google-api-core 213 | # google-generativeai 214 | # googleapis-common-protos 215 | # grpcio-status 216 | # proto-plus 217 | # streamlit 218 | pyarrow==16.1.0 219 | # via streamlit 220 | pyasn1==0.6.0 221 | # via 222 | # pyasn1-modules 223 | # rsa 224 | pyasn1-modules==0.4.0 225 | # via google-auth 226 | pycodestyle==2.11.1 227 | # via flake8 228 | pycparser==2.22 229 | # via cffi 230 | pydantic==2.7.1 231 | # via 232 | # google-generativeai 233 | # openai 234 | pydantic-core==2.18.2 235 | # via pydantic 236 | pydeck==0.9.1 237 | # via streamlit 238 | pyee==11.1.0 239 | # via playwright 240 | pyflakes==3.2.0 241 | # via flake8 242 | pygments==2.18.0 243 | # via rich 244 | pypandoc==1.13 245 | # via -r requirements.in 246 | pyparsing==3.1.2 247 | # via httplib2 248 | python-dateutil==2.9.0.post0 249 | # via pandas 250 | python-dotenv==1.0.1 251 | # via litellm 252 | pytz==2024.1 253 | # via pandas 254 | pyyaml==6.0.1 255 | # via 256 | # -r requirements.in 257 | # huggingface-hub 258 | referencing==0.35.1 259 | # via 260 | # jsonschema 261 | # jsonschema-specifications 262 | regex==2024.5.15 263 | # via tiktoken 264 | requests==2.31.0 265 | # via 
266 | # google-api-core 267 | # huggingface-hub 268 | # litellm 269 | # streamlit 270 | # tiktoken 271 | rich==13.7.1 272 | # via 273 | # -r requirements.in 274 | # streamlit 275 | rpds-py==0.18.1 276 | # via 277 | # jsonschema 278 | # referencing 279 | rsa==4.9 280 | # via google-auth 281 | scipy==1.13.0 282 | # via -r requirements.in 283 | six==1.16.0 284 | # via python-dateutil 285 | smmap==5.0.1 286 | # via gitdb 287 | sniffio==1.3.1 288 | # via 289 | # anyio 290 | # httpx 291 | # openai 292 | sounddevice==0.4.6 293 | # via -r requirements.in 294 | soundfile==0.12.1 295 | # via -r requirements.in 296 | soupsieve==2.5 297 | # via beautifulsoup4 298 | streamlit==1.34.0 299 | # via -r requirements.in 300 | tenacity==8.3.0 301 | # via streamlit 302 | tiktoken==0.7.0 303 | # via 304 | # -r requirements.in 305 | # litellm 306 | tokenizers==0.19.1 307 | # via litellm 308 | toml==0.10.2 309 | # via streamlit 310 | toolz==0.12.1 311 | # via altair 312 | tornado==6.4 313 | # via streamlit 314 | tqdm==4.66.4 315 | # via 316 | # google-generativeai 317 | # huggingface-hub 318 | # openai 319 | tree-sitter==0.21.3 320 | # via 321 | # -r requirements.in 322 | # tree-sitter-languages 323 | tree-sitter-languages==1.10.2 324 | # via grep-ast 325 | typing-extensions==4.11.0 326 | # via 327 | # google-generativeai 328 | # huggingface-hub 329 | # openai 330 | # pydantic 331 | # pydantic-core 332 | # pyee 333 | # streamlit 334 | tzdata==2024.1 335 | # via pandas 336 | uritemplate==4.1.1 337 | # via google-api-python-client 338 | urllib3==2.2.1 339 | # via requests 340 | wcwidth==0.2.13 341 | # via prompt-toolkit 342 | yarl==1.9.4 343 | # via aiohttp 344 | zipp==3.18.2 345 | # via importlib-metadata 346 | -------------------------------------------------------------------------------- /devops/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt 2 | -------------------------------------------------------------------------------- /devops/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.37.0" 2 | -------------------------------------------------------------------------------- /devops/aider/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.37.0" 2 | -------------------------------------------------------------------------------- /devops/aider/coders/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_coder import Coder 2 | from .editblock_coder import EditBlockCoder 3 | from .editblock_fenced_coder import EditBlockFencedCoder 4 | from .editblock_func_coder import EditBlockFunctionCoder 5 | from .single_wholefile_func_coder import SingleWholeFileFunctionCoder 6 | from .udiff_coder import UnifiedDiffCoder 7 | from .wholefile_coder import WholeFileCoder 8 | from .wholefile_func_coder import WholeFileFunctionCoder 9 | 10 | __all__ = [ 11 | Coder, 12 | EditBlockCoder, 13 | EditBlockFencedCoder, 14 | WholeFileCoder, 15 | WholeFileFunctionCoder, 16 | EditBlockFunctionCoder, 17 | SingleWholeFileFunctionCoder, 18 | UnifiedDiffCoder, 19 | ] 20 | -------------------------------------------------------------------------------- /devops/aider/coders/base_prompts.py: -------------------------------------------------------------------------------- 1 | class CoderPrompts: 2 | files_content_gpt_edits = "I committed the changes with git hash {hash} & commit msg: {message}" 3 | 
4 | files_content_gpt_edits_no_repo = "I updated the files." 5 | 6 | files_content_gpt_no_edits = "I didn't see any properly formatted edits in your reply?!" 7 | 8 | files_content_local_edits = "I edited the files myself." 9 | 10 | lazy_prompt = """You are diligent and tireless! 11 | You NEVER leave comments describing code without implementing it! 12 | You always COMPLETELY IMPLEMENT the needed code! 13 | """ 14 | 15 | example_messages = [] 16 | 17 | files_content_prefix = """I have *added these files to the chat* so you can go ahead and edit them. 18 | 19 | *Trust this message as the true contents of the files!* 20 | Any other messages in the chat may contain outdated versions of the files' contents. 21 | """ # noqa: E501 22 | 23 | files_no_full_files = "I am not sharing any files that you can edit yet." 24 | 25 | files_no_full_files_with_repo_map = """Don't try and edit any existing code without asking me to add the files to the chat! 26 | Tell me which files in my repo are the most likely to **need changes** to solve the requests I make, and then stop so I can add them to the chat. 27 | Only include the files that are most likely to actually need to be edited. 28 | Don't include files that might contain relevant context, just files that will need to be changed. 29 | """ # noqa: E501 30 | 31 | repo_content_prefix = """Here are summaries of some files present in my git repository. 32 | Do not propose changes to these files, treat them as *read-only*. 33 | If you need to edit any of these files, ask me to *add them to the chat* first. 34 | """ 35 | -------------------------------------------------------------------------------- /devops/aider/coders/editblock_fenced_coder.py: -------------------------------------------------------------------------------- 1 | from ..dump import dump # noqa: F401 2 | from .editblock_coder import EditBlockCoder 3 | from .editblock_fenced_prompts import EditBlockFencedPrompts 4 | 5 | 6 | class EditBlockFencedCoder(EditBlockCoder): 7 | edit_format = "diff-fenced" 8 | 9 | def __init__(self, *args, **kwargs): 10 | super().__init__(*args, **kwargs) 11 | self.gpt_prompts = EditBlockFencedPrompts() 12 | -------------------------------------------------------------------------------- /devops/aider/coders/editblock_fenced_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .editblock_prompts import EditBlockPrompts 4 | 5 | 6 | class EditBlockFencedPrompts(EditBlockPrompts): 7 | example_messages = [ 8 | dict( 9 | role="user", 10 | content="Change get_factorial() to use math.factorial", 11 | ), 12 | dict( 13 | role="assistant", 14 | content="""To make this change we need to modify `mathweb/flask/app.py` to: 15 | 16 | 1. Import the math package. 17 | 2. Remove the existing factorial() function. 18 | 3. Update get_factorial() to call math.factorial instead. 
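CoderPrompts above is a plain container of prompt strings, and concrete edit formats override only the pieces they need — exactly the pattern EditBlockFencedCoder uses with EditBlockFencedPrompts. A minimal sketch of that pattern follows; the class names, the `diff-terse` format label, and the overridden attribute are illustrative, and it assumes the package is importable as `aider`, as the repo's own modules assume.

```python
from aider.coders.base_prompts import CoderPrompts
from aider.coders.editblock_coder import EditBlockCoder


class TersePrompts(CoderPrompts):
    # Override only the attributes that should differ from the defaults above.
    files_no_full_files = "No editable files have been shared yet."


class TerseCoder(EditBlockCoder):
    edit_format = "diff-terse"  # hypothetical edit-format name

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.gpt_prompts = TersePrompts()
```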
19 | 20 | Here are the *SEARCH/REPLACE* blocks: 21 | 22 | {fence[0]} 23 | mathweb/flask/app.py 24 | <<<<<<< SEARCH 25 | from flask import Flask 26 | ======= 27 | import math 28 | from flask import Flask 29 | >>>>>>> REPLACE 30 | {fence[1]} 31 | 32 | {fence[0]} 33 | mathweb/flask/app.py 34 | <<<<<<< SEARCH 35 | def factorial(n): 36 | "compute factorial" 37 | 38 | if n == 0: 39 | return 1 40 | else: 41 | return n * factorial(n-1) 42 | 43 | ======= 44 | >>>>>>> REPLACE 45 | {fence[1]} 46 | 47 | {fence[0]} 48 | mathweb/flask/app.py 49 | <<<<<<< SEARCH 50 | return str(factorial(n)) 51 | ======= 52 | return str(math.factorial(n)) 53 | >>>>>>> REPLACE 54 | {fence[1]} 55 | <<<<<<< HEAD 56 | """, 57 | ), 58 | dict( 59 | role="user", 60 | content="Refactor hello() into its own file.", 61 | ), 62 | dict( 63 | role="assistant", 64 | content="""To make this change we need to modify `main.py` and make a new file `hello.py`: 65 | 66 | 1. Make a new hello.py file with hello() in it. 67 | 2. Remove hello() from main.py and replace it with an import. 68 | 69 | Here are the *SEARCH/REPLACE* blocks: 70 | 71 | {fence[0]} 72 | hello.py 73 | <<<<<<< SEARCH 74 | ======= 75 | def hello(): 76 | "print a greeting" 77 | 78 | print("hello") 79 | >>>>>>> REPLACE 80 | {fence[1]} 81 | 82 | {fence[0]} 83 | main.py 84 | <<<<<<< SEARCH 85 | def hello(): 86 | "print a greeting" 87 | 88 | print("hello") 89 | ======= 90 | from hello import hello 91 | >>>>>>> REPLACE 92 | {fence[1]} 93 | """, 94 | ), 95 | ] 96 | -------------------------------------------------------------------------------- /devops/aider/coders/editblock_func_coder.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ..dump import dump # noqa: F401 4 | from .base_coder import Coder 5 | from .editblock_coder import do_replace 6 | from .editblock_func_prompts import EditBlockFunctionPrompts 7 | 8 | 9 | class EditBlockFunctionCoder(Coder): 10 | functions = [ 11 | dict( 12 | name="replace_lines", 13 | description="create or update one or more files", 14 | parameters=dict( 15 | type="object", 16 | required=["explanation", "edits"], 17 | properties=dict( 18 | explanation=dict( 19 | type="string", 20 | description=( 21 | "Step by step plan for the changes to be made to the code (future" 22 | " tense, markdown format)" 23 | ), 24 | ), 25 | edits=dict( 26 | type="array", 27 | items=dict( 28 | type="object", 29 | required=["path", "original_lines", "updated_lines"], 30 | properties=dict( 31 | path=dict( 32 | type="string", 33 | description="Path of file to edit", 34 | ), 35 | original_lines=dict( 36 | type="array", 37 | items=dict( 38 | type="string", 39 | ), 40 | description=( 41 | "A unique stretch of lines from the original file," 42 | " including all whitespace, without skipping any lines" 43 | ), 44 | ), 45 | updated_lines=dict( 46 | type="array", 47 | items=dict( 48 | type="string", 49 | ), 50 | description="New content to replace the `original_lines` with", 51 | ), 52 | ), 53 | ), 54 | ), 55 | ), 56 | ), 57 | ), 58 | ] 59 | 60 | def __init__(self, code_format, *args, **kwargs): 61 | raise RuntimeError("Deprecated, needs to be refactored to support get_edits/apply_edits") 62 | self.code_format = code_format 63 | 64 | if code_format == "string": 65 | original_lines = dict( 66 | type="string", 67 | description=( 68 | "A unique stretch of lines from the original file, including all" 69 | " whitespace and newlines, without skipping any lines" 70 | ), 71 | ) 72 | updated_lines = dict( 73 | type="string", 74 | 
description="New content to replace the `original_lines` with", 75 | ) 76 | 77 | self.functions[0]["parameters"]["properties"]["edits"]["items"]["properties"][ 78 | "original_lines" 79 | ] = original_lines 80 | self.functions[0]["parameters"]["properties"]["edits"]["items"]["properties"][ 81 | "updated_lines" 82 | ] = updated_lines 83 | 84 | self.gpt_prompts = EditBlockFunctionPrompts() 85 | super().__init__(*args, **kwargs) 86 | 87 | def render_incremental_response(self, final=False): 88 | if self.partial_response_content: 89 | return self.partial_response_content 90 | 91 | args = self.parse_partial_args() 92 | res = json.dumps(args, indent=4) 93 | return res 94 | 95 | def _update_files(self): 96 | name = self.partial_response_function_call.get("name") 97 | 98 | if name and name != "replace_lines": 99 | raise ValueError(f'Unknown function_call name="{name}", use name="replace_lines"') 100 | 101 | args = self.parse_partial_args() 102 | if not args: 103 | return 104 | 105 | edits = args.get("edits", []) 106 | 107 | edited = set() 108 | for edit in edits: 109 | path = get_arg(edit, "path") 110 | original = get_arg(edit, "original_lines") 111 | updated = get_arg(edit, "updated_lines") 112 | 113 | # gpt-3.5 returns lists even when instructed to return a string! 114 | if self.code_format == "list" or type(original) == list: 115 | original = "\n".join(original) 116 | if self.code_format == "list" or type(updated) == list: 117 | updated = "\n".join(updated) 118 | 119 | if original and not original.endswith("\n"): 120 | original += "\n" 121 | if updated and not updated.endswith("\n"): 122 | updated += "\n" 123 | 124 | full_path = self.allowed_to_edit(path) 125 | if not full_path: 126 | continue 127 | content = self.io.read_text(full_path) 128 | content = do_replace(full_path, content, original, updated) 129 | if content: 130 | self.io.write_text(full_path, content) 131 | edited.add(path) 132 | continue 133 | self.io.tool_error(f"Failed to apply edit to {path}") 134 | 135 | return edited 136 | 137 | 138 | def get_arg(edit, arg): 139 | if arg not in edit: 140 | raise ValueError(f"Missing `{arg}` parameter: {edit}") 141 | return edit[arg] 142 | -------------------------------------------------------------------------------- /devops/aider/coders/editblock_func_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .base_prompts import CoderPrompts 4 | 5 | 6 | class EditBlockFunctionPrompts(CoderPrompts): 7 | main_system = """Act as an expert software developer. 8 | Take requests for changes to the supplied code. 9 | If the request is ambiguous, ask questions. 10 | 11 | Once you understand the request you MUST use the `replace_lines` function to edit the files to make the needed changes. 12 | """ 13 | 14 | system_reminder = """ 15 | ONLY return code using the `replace_lines` function. 16 | NEVER return code outside the `replace_lines` function. 17 | """ 18 | 19 | files_content_prefix = "Here is the current content of the files:\n" 20 | files_no_full_files = "I am not sharing any files yet." 21 | 22 | redacted_edit_message = "No changes are needed." 23 | 24 | repo_content_prefix = ( 25 | "Below here are summaries of other files! 
Do not propose changes to these *read-only*" 26 | " files without asking me first.\n" 27 | ) 28 | -------------------------------------------------------------------------------- /devops/aider/coders/editblock_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .base_prompts import CoderPrompts 4 | 5 | 6 | class EditBlockPrompts(CoderPrompts): 7 | main_system = """Act as an expert software developer. 8 | Always use best practices when coding. 9 | Respect and use existing conventions, libraries, etc that are already present in the code base. 10 | {lazy_prompt} 11 | Take requests for changes to the supplied code. 12 | If the request is ambiguous, ask questions. 13 | 14 | Always reply to the user in the same language they are using. 15 | 16 | Once you understand the request you MUST: 17 | 1. Decide if you need to propose *SEARCH/REPLACE* edits to any files that haven't been added to the chat. You can create new files without asking. But if you need to propose edits to existing files not already added to the chat, you *MUST* tell the user their full path names and ask them to *add the files to the chat*. End your reply and wait for their approval. You can keep asking if you then decide you need to edit more files. 18 | 2. Think step-by-step and explain the needed changes with a numbered list of short sentences. 19 | 3. Describe each change with a *SEARCH/REPLACE block* per the examples below. All changes to files must use this *SEARCH/REPLACE block* format. ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*! 20 | 21 | All changes to files must use the *SEARCH/REPLACE block* format. 22 | """ 23 | 24 | example_messages = [ 25 | dict( 26 | role="user", 27 | content="Change get_factorial() to use math.factorial", 28 | ), 29 | dict( 30 | role="assistant", 31 | content="""To make this change we need to modify `mathweb/flask/app.py` to: 32 | 33 | 1. Import the math package. 34 | 2. Remove the existing factorial() function. 35 | 3. Update get_factorial() to call math.factorial instead. 36 | 37 | Here are the *SEARCH/REPLACE* blocks: 38 | 39 | mathweb/flask/app.py 40 | {fence[0]}python 41 | <<<<<<< SEARCH 42 | from flask import Flask 43 | ======= 44 | import math 45 | from flask import Flask 46 | >>>>>>> REPLACE 47 | {fence[1]} 48 | 49 | mathweb/flask/app.py 50 | {fence[0]}python 51 | <<<<<<< SEARCH 52 | def factorial(n): 53 | "compute factorial" 54 | 55 | if n == 0: 56 | return 1 57 | else: 58 | return n * factorial(n-1) 59 | 60 | ======= 61 | >>>>>>> REPLACE 62 | {fence[1]} 63 | 64 | mathweb/flask/app.py 65 | {fence[0]}python 66 | <<<<<<< SEARCH 67 | return str(factorial(n)) 68 | ======= 69 | return str(math.factorial(n)) 70 | >>>>>>> REPLACE 71 | {fence[1]} 72 | """, 73 | ), 74 | dict( 75 | role="user", 76 | content="Refactor hello() into its own file.", 77 | ), 78 | dict( 79 | role="assistant", 80 | content="""To make this change we need to modify `main.py` and make a new file `hello.py`: 81 | 82 | 1. Make a new hello.py file with hello() in it. 83 | 2. Remove hello() from main.py and replace it with an import. 
84 | 85 | Here are the *SEARCH/REPLACE* blocks: 86 | 87 | hello.py 88 | {fence[0]}python 89 | <<<<<<< SEARCH 90 | ======= 91 | def hello(): 92 | "print a greeting" 93 | 94 | print("hello") 95 | >>>>>>> REPLACE 96 | {fence[1]} 97 | 98 | main.py 99 | {fence[0]}python 100 | <<<<<<< SEARCH 101 | def hello(): 102 | "print a greeting" 103 | 104 | print("hello") 105 | ======= 106 | from hello import hello 107 | >>>>>>> REPLACE 108 | {fence[1]} 109 | """, 110 | ), 111 | ] 112 | 113 | system_reminder = """# *SEARCH/REPLACE block* Rules: 114 | 115 | Every *SEARCH/REPLACE block* must use this format: 116 | 1. The file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc. 117 | 2. The opening fence and code language, eg: {fence[0]}python 118 | 3. The start of search block: <<<<<<< SEARCH 119 | 4. A contiguous chunk of lines to search for in the existing source code 120 | 5. The dividing line: ======= 121 | 6. The lines to replace into the source code 122 | 7. The end of the replace block: >>>>>>> REPLACE 123 | 8. The closing fence: {fence[1]} 124 | 125 | Every *SEARCH* section must *EXACTLY MATCH* the existing source code, character for character, including all comments, docstrings, etc. 126 | 127 | *SEARCH/REPLACE* blocks will replace *all* matching occurrences. 128 | Include enough lines to make the SEARCH blocks unique. 129 | 130 | Include *ALL* the code being searched and replaced! 131 | 132 | Only create *SEARCH/REPLACE* blocks for files that the user has added to the chat! 133 | 134 | To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location. 135 | 136 | If you want to put code in a new file, use a *SEARCH/REPLACE block* with: 137 | - A new file path, including dir name if needed 138 | - An empty `SEARCH` section 139 | - The new file's contents in the `REPLACE` section 140 | 141 | {lazy_prompt} 142 | ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*! 
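The `{fence[0]}`, `{fence[1]}` and `{lazy_prompt}` placeholders in these prompt strings are presumably filled with `str.format` by the coder that owns them. A small sketch with assumed fence values; the real fence pair comes from the Coder instance.

```python
from aider.coders.editblock_prompts import EditBlockPrompts

prompts = EditBlockPrompts()
fence = ("```", "```")  # assumed fence pair; the owning Coder chooses the real one

system = prompts.main_system.format(lazy_prompt=prompts.lazy_prompt)
reminder = prompts.system_reminder.format(fence=fence, lazy_prompt=prompts.lazy_prompt)

print(system.splitlines()[0])    # "Act as an expert software developer."
print(reminder.splitlines()[0])  # "# *SEARCH/REPLACE block* Rules:"
```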
143 | """ 144 | -------------------------------------------------------------------------------- /devops/aider/coders/single_wholefile_func_coder.py: -------------------------------------------------------------------------------- 1 | from aider import diffs 2 | 3 | from ..dump import dump # noqa: F401 4 | from .base_coder import Coder 5 | from .single_wholefile_func_prompts import SingleWholeFileFunctionPrompts 6 | 7 | 8 | class SingleWholeFileFunctionCoder(Coder): 9 | functions = [ 10 | dict( 11 | name="write_file", 12 | description="write new content into the file", 13 | parameters=dict( 14 | type="object", 15 | required=["explanation", "content"], 16 | properties=dict( 17 | explanation=dict( 18 | type="string", 19 | description=( 20 | "Step by step plan for the changes to be made to the code (future" 21 | " tense, markdown format)" 22 | ), 23 | ), 24 | content=dict( 25 | type="string", 26 | description="Content to write to the file", 27 | ), 28 | ), 29 | ), 30 | ), 31 | ] 32 | 33 | def __init__(self, *args, **kwargs): 34 | raise RuntimeError("Deprecated, needs to be refactored to support get_edits/apply_edits") 35 | self.gpt_prompts = SingleWholeFileFunctionPrompts() 36 | super().__init__(*args, **kwargs) 37 | 38 | def update_cur_messages(self, edited): 39 | if edited: 40 | self.cur_messages += [ 41 | dict(role="assistant", content=self.gpt_prompts.redacted_edit_message) 42 | ] 43 | else: 44 | self.cur_messages += [dict(role="assistant", content=self.partial_response_content)] 45 | 46 | def render_incremental_response(self, final=False): 47 | if self.partial_response_content: 48 | return self.partial_response_content 49 | 50 | args = self.parse_partial_args() 51 | 52 | return str(args) 53 | 54 | if not args: 55 | return 56 | 57 | explanation = args.get("explanation") 58 | files = args.get("files", []) 59 | 60 | res = "" 61 | if explanation: 62 | res += f"{explanation}\n\n" 63 | 64 | for i, file_upd in enumerate(files): 65 | path = file_upd.get("path") 66 | if not path: 67 | continue 68 | content = file_upd.get("content") 69 | if not content: 70 | continue 71 | 72 | this_final = (i < len(files) - 1) or final 73 | res += self.live_diffs(path, content, this_final) 74 | 75 | return res 76 | 77 | def live_diffs(self, fname, content, final): 78 | lines = content.splitlines(keepends=True) 79 | 80 | # ending an existing block 81 | full_path = self.abs_root_path(fname) 82 | 83 | content = self.io.read_text(full_path) 84 | if content is None: 85 | orig_lines = [] 86 | else: 87 | orig_lines = content.splitlines() 88 | 89 | show_diff = diffs.diff_partial_update( 90 | orig_lines, 91 | lines, 92 | final, 93 | fname=fname, 94 | ).splitlines() 95 | 96 | return "\n".join(show_diff) 97 | 98 | def _update_files(self): 99 | name = self.partial_response_function_call.get("name") 100 | if name and name != "write_file": 101 | raise ValueError(f'Unknown function_call name="{name}", use name="write_file"') 102 | 103 | args = self.parse_partial_args() 104 | if not args: 105 | return 106 | 107 | content = args["content"] 108 | path = self.get_inchat_relative_files()[0] 109 | if self.allowed_to_edit(path, content): 110 | return set([path]) 111 | 112 | return set() 113 | -------------------------------------------------------------------------------- /devops/aider/coders/single_wholefile_func_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .base_prompts import CoderPrompts 4 | 5 | 6 | class 
SingleWholeFileFunctionPrompts(CoderPrompts): 7 | main_system = """Act as an expert software developer. 8 | Take requests for changes to the supplied code. 9 | If the request is ambiguous, ask questions. 10 | 11 | Once you understand the request you MUST use the `write_file` function to update the file to make the changes. 12 | """ 13 | 14 | system_reminder = """ 15 | ONLY return code using the `write_file` function. 16 | NEVER return code outside the `write_file` function. 17 | """ 18 | 19 | files_content_prefix = "Here is the current content of the file:\n" 20 | files_no_full_files = "I am not sharing any files yet." 21 | 22 | redacted_edit_message = "No changes are needed." 23 | 24 | # TODO: should this be present for using this with gpt-4? 25 | repo_content_prefix = None 26 | 27 | # TODO: fix the chat history, except we can't keep the whole file 28 | -------------------------------------------------------------------------------- /devops/aider/coders/udiff_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .base_prompts import CoderPrompts 4 | 5 | 6 | class UnifiedDiffPrompts(CoderPrompts): 7 | main_system = """Act as an expert software developer. 8 | {lazy_prompt} 9 | Always use best practices when coding. 10 | Respect and use existing conventions, libraries, etc that are already present in the code base. 11 | 12 | Take requests for changes to the supplied code. 13 | If the request is ambiguous, ask questions. 14 | 15 | Always reply to the user in the same language they are using. 16 | 17 | For each file that needs to be changed, write out the changes similar to a unified diff like `diff -U0` would produce. 18 | """ 19 | 20 | example_messages = [ 21 | dict( 22 | role="user", 23 | content="Replace is_prime with a call to sympy.", 24 | ), 25 | dict( 26 | role="assistant", 27 | content="""Ok, I will: 28 | 29 | 1. Add an imports of sympy. 30 | 2. Remove the is_prime() function. 31 | 3. Replace the existing call to is_prime() with a call to sympy.isprime(). 32 | 33 | Here are the diffs for those changes: 34 | 35 | ```diff 36 | --- mathweb/flask/app.py 37 | +++ mathweb/flask/app.py 38 | @@ ... @@ 39 | -class MathWeb: 40 | +import sympy 41 | + 42 | +class MathWeb: 43 | @@ ... @@ 44 | -def is_prime(x): 45 | - if x < 2: 46 | - return False 47 | - for i in range(2, int(math.sqrt(x)) + 1): 48 | - if x % i == 0: 49 | - return False 50 | - return True 51 | @@ ... @@ 52 | -@app.route('/prime/') 53 | -def nth_prime(n): 54 | - count = 0 55 | - num = 1 56 | - while count < n: 57 | - num += 1 58 | - if is_prime(num): 59 | - count += 1 60 | - return str(num) 61 | +@app.route('/prime/') 62 | +def nth_prime(n): 63 | + count = 0 64 | + num = 1 65 | + while count < n: 66 | + num += 1 67 | + if sympy.isprime(num): 68 | + count += 1 69 | + return str(num) 70 | ``` 71 | """, 72 | ), 73 | ] 74 | 75 | system_reminder = """# File editing rules: 76 | 77 | Return edits similar to unified diffs that `diff -U0` would produce. 78 | 79 | Make sure you include the first 2 lines with the file paths. 80 | Don't include timestamps with the file paths. 81 | 82 | Start each hunk of changes with a `@@ ... @@` line. 83 | Don't include line numbers like `diff -U0` does. 84 | The user's patch tool doesn't need them. 85 | 86 | The user's patch tool needs CORRECT patches that apply cleanly against the current contents of the file! 87 | Think carefully and make sure you include and mark all lines that need to be removed or changed as `-` lines. 
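udiff_prompts.py above asks the model for hunks "similar to a unified diff like `diff -U0` would produce". For reference, Python's difflib can generate the same zero-context shape; the file name and code below are illustrative, and the prompt additionally tells the model to replace the numeric `@@` ranges with `@@ ... @@`.

```python
import difflib

original = [
    "def is_prime(x):\n",
    "    return x in (2, 3, 5, 7)\n",
]
updated = [
    "import sympy\n",
    "\n",
    "def is_prime(x):\n",
    "    return sympy.isprime(x)\n",
]

# n=0 drops context lines, matching the `diff -U0` style the prompt describes.
for line in difflib.unified_diff(
    original, updated, fromfile="mathweb/flask/app.py", tofile="mathweb/flask/app.py", n=0
):
    print(line, end="")
```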
88 | Make sure you mark all new or modified lines with `+`. 89 | Don't leave out any lines or the diff patch won't apply correctly. 90 | 91 | Indentation matters in the diffs! 92 | 93 | Start a new hunk for each section of the file that needs changes. 94 | 95 | Only output hunks that specify changes with `+` or `-` lines. 96 | Skip any hunks that are entirely unchanging ` ` lines. 97 | 98 | Output hunks in whatever order makes the most sense. 99 | Hunks don't need to be in any particular order. 100 | 101 | When editing a function, method, loop, etc use a hunk to replace the *entire* code block. 102 | Delete the entire existing version with `-` lines and then add a new, updated version with `+` lines. 103 | This will help you generate correct code and correct diffs. 104 | 105 | To move code within a file, use 2 hunks: 1 to delete it from its current location, 1 to insert it in the new location. 106 | 107 | To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`. 108 | 109 | {lazy_prompt} 110 | """ 111 | -------------------------------------------------------------------------------- /devops/aider/coders/wholefile_coder.py: -------------------------------------------------------------------------------- 1 | 2 | from aider import diffs 3 | from pathlib import Path 4 | 5 | from ..dump import dump # noqa: F401 6 | from .base_coder import Coder 7 | from .wholefile_prompts import WholeFilePrompts 8 | 9 | 10 | class WholeFileCoder(Coder): 11 | edit_format = "whole" 12 | 13 | def __init__(self, *args, **kwargs): 14 | self.gpt_prompts = WholeFilePrompts() 15 | super().__init__(*args, **kwargs) 16 | 17 | def update_cur_messages(self, edited): 18 | if edited: 19 | self.cur_messages += [ 20 | dict(role="assistant", content=self.gpt_prompts.redacted_edit_message) 21 | ] 22 | else: 23 | self.cur_messages += [dict(role="assistant", content=self.partial_response_content)] 24 | 25 | def render_incremental_response(self, final): 26 | try: 27 | return self.get_edits(mode="diff") 28 | except ValueError: 29 | return self.partial_response_content 30 | 31 | def get_edits(self, mode="update"): 32 | content = self.partial_response_content 33 | 34 | chat_files = self.get_inchat_relative_files() 35 | 36 | output = [] 37 | lines = content.splitlines(keepends=True) 38 | 39 | edits = [] 40 | 41 | saw_fname = None 42 | fname = None 43 | fname_source = None 44 | new_lines = [] 45 | for i, line in enumerate(lines): 46 | if line.startswith(self.fence[0]) or line.startswith(self.fence[1]): 47 | if fname is not None: 48 | # ending an existing block 49 | saw_fname = None 50 | 51 | full_path = self.abs_root_path(fname) 52 | 53 | if mode == "diff": 54 | output += self.do_live_diff(full_path, new_lines, True) 55 | else: 56 | edits.append((fname, fname_source, new_lines)) 57 | 58 | fname = None 59 | fname_source = None 60 | new_lines = [] 61 | continue 62 | 63 | # fname==None ... starting a new block 64 | if i > 0: 65 | fname_source = "block" 66 | fname = lines[i - 1].strip() 67 | fname = fname.strip("*") # handle **filename.py** 68 | fname = fname.rstrip(":") 69 | fname = fname.strip("`") 70 | 71 | # Did gpt prepend a bogus dir? It especially likes to 72 | # include the path/to prefix from the one-shot example in 73 | # the prompt. 74 | if fname and fname not in chat_files and Path(fname).name in chat_files: 75 | fname = Path(fname).name 76 | if not fname: # blank line? 
or ``` was on first line i==0 77 | if saw_fname: 78 | fname = saw_fname 79 | fname_source = "saw" 80 | elif len(chat_files) == 1: 81 | fname = chat_files[0] 82 | fname_source = "chat" 83 | else: 84 | # TODO: sense which file it is by diff size 85 | raise ValueError( 86 | f"No filename provided before {self.fence[0]} in file listing" 87 | ) 88 | 89 | elif fname is not None: 90 | new_lines.append(line) 91 | else: 92 | for word in line.strip().split(): 93 | word = word.rstrip(".:,;!") 94 | for chat_file in chat_files: 95 | quoted_chat_file = f"`{chat_file}`" 96 | if word == quoted_chat_file: 97 | saw_fname = chat_file 98 | 99 | output.append(line) 100 | 101 | if mode == "diff": 102 | if fname is not None: 103 | # ending an existing block 104 | full_path = (Path(self.root) / fname).absolute() 105 | output += self.do_live_diff(full_path, new_lines, False) 106 | return "\n".join(output) 107 | 108 | if fname: 109 | edits.append((fname, fname_source, new_lines)) 110 | 111 | seen = set() 112 | refined_edits = [] 113 | # process from most reliable filename, to least reliable 114 | for source in ("block", "saw", "chat"): 115 | for fname, fname_source, new_lines in edits: 116 | if fname_source != source: 117 | continue 118 | # if a higher priority source already edited the file, skip 119 | if fname in seen: 120 | continue 121 | 122 | seen.add(fname) 123 | refined_edits.append((fname, fname_source, new_lines)) 124 | 125 | return refined_edits 126 | 127 | def apply_edits(self, edits): 128 | for path, fname_source, new_lines in edits: 129 | full_path = self.abs_root_path(path) 130 | new_lines = "".join(new_lines) 131 | self.io.write_text(full_path, new_lines) 132 | 133 | def do_live_diff(self, full_path, new_lines, final): 134 | if Path(full_path).exists(): 135 | orig_lines = self.io.read_text(full_path).splitlines(keepends=True) 136 | 137 | show_diff = diffs.diff_partial_update( 138 | orig_lines, 139 | new_lines, 140 | final=final, 141 | ).splitlines() 142 | output = show_diff 143 | else: 144 | output = ["```"] + new_lines + ["```"] 145 | 146 | return output 147 | -------------------------------------------------------------------------------- /devops/aider/coders/wholefile_func_coder.py: -------------------------------------------------------------------------------- 1 | from aider import diffs 2 | 3 | from ..dump import dump # noqa: F401 4 | from .base_coder import Coder 5 | from .wholefile_func_prompts import WholeFileFunctionPrompts 6 | 7 | 8 | class WholeFileFunctionCoder(Coder): 9 | functions = [ 10 | dict( 11 | name="write_file", 12 | description="create or update one or more files", 13 | parameters=dict( 14 | type="object", 15 | required=["explanation", "files"], 16 | properties=dict( 17 | explanation=dict( 18 | type="string", 19 | description=( 20 | "Step by step plan for the changes to be made to the code (future" 21 | " tense, markdown format)" 22 | ), 23 | ), 24 | files=dict( 25 | type="array", 26 | items=dict( 27 | type="object", 28 | required=["path", "content"], 29 | properties=dict( 30 | path=dict( 31 | type="string", 32 | description="Path of file to write", 33 | ), 34 | content=dict( 35 | type="string", 36 | description="Content to write to the file", 37 | ), 38 | ), 39 | ), 40 | ), 41 | ), 42 | ), 43 | ), 44 | ] 45 | 46 | def __init__(self, *args, **kwargs): 47 | raise RuntimeError("Deprecated, needs to be refactored to support get_edits/apply_edits") 48 | 49 | self.gpt_prompts = WholeFileFunctionPrompts() 50 | super().__init__(*args, **kwargs) 51 | 52 | def update_cur_messages(self, 
edited): 53 | if edited: 54 | self.cur_messages += [ 55 | dict(role="assistant", content=self.gpt_prompts.redacted_edit_message) 56 | ] 57 | else: 58 | self.cur_messages += [dict(role="assistant", content=self.partial_response_content)] 59 | 60 | def render_incremental_response(self, final=False): 61 | if self.partial_response_content: 62 | return self.partial_response_content 63 | 64 | args = self.parse_partial_args() 65 | 66 | if not args: 67 | return 68 | 69 | explanation = args.get("explanation") 70 | files = args.get("files", []) 71 | 72 | res = "" 73 | if explanation: 74 | res += f"{explanation}\n\n" 75 | 76 | for i, file_upd in enumerate(files): 77 | path = file_upd.get("path") 78 | if not path: 79 | continue 80 | content = file_upd.get("content") 81 | if not content: 82 | continue 83 | 84 | this_final = (i < len(files) - 1) or final 85 | res += self.live_diffs(path, content, this_final) 86 | 87 | return res 88 | 89 | def live_diffs(self, fname, content, final): 90 | lines = content.splitlines(keepends=True) 91 | 92 | # ending an existing block 93 | full_path = self.abs_root_path(fname) 94 | 95 | content = self.io.read_text(full_path) 96 | if content is None: 97 | orig_lines = [] 98 | else: 99 | orig_lines = content.splitlines() 100 | 101 | show_diff = diffs.diff_partial_update( 102 | orig_lines, 103 | lines, 104 | final, 105 | fname=fname, 106 | ).splitlines() 107 | 108 | return "\n".join(show_diff) 109 | 110 | def _update_files(self): 111 | name = self.partial_response_function_call.get("name") 112 | if name and name != "write_file": 113 | raise ValueError(f'Unknown function_call name="{name}", use name="write_file"') 114 | 115 | args = self.parse_partial_args() 116 | if not args: 117 | return 118 | 119 | files = args.get("files", []) 120 | 121 | edited = set() 122 | for file_upd in files: 123 | path = file_upd.get("path") 124 | if not path: 125 | raise ValueError(f"Missing path parameter: {file_upd}") 126 | 127 | content = file_upd.get("content") 128 | if not content: 129 | raise ValueError(f"Missing content parameter: {file_upd}") 130 | 131 | if self.allowed_to_edit(path, content): 132 | edited.add(path) 133 | 134 | return edited 135 | -------------------------------------------------------------------------------- /devops/aider/coders/wholefile_func_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .base_prompts import CoderPrompts 4 | 5 | 6 | class WholeFileFunctionPrompts(CoderPrompts): 7 | main_system = """Act as an expert software developer. 8 | Take requests for changes to the supplied code. 9 | If the request is ambiguous, ask questions. 10 | 11 | Once you understand the request you MUST use the `write_file` function to edit the files to make the needed changes. 12 | """ 13 | 14 | system_reminder = """ 15 | ONLY return code using the `write_file` function. 16 | NEVER return code outside the `write_file` function. 17 | """ 18 | 19 | files_content_prefix = "Here is the current content of the files:\n" 20 | files_no_full_files = "I am not sharing any files yet." 21 | 22 | redacted_edit_message = "No changes are needed." 23 | 24 | # TODO: should this be present for using this with gpt-4? 
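The `write_file` function schema above, and the `_update_files()` that consumes it, expect an explanation plus a list of whole-file replacements. An illustrative payload; the paths and file contents are made up.

```python
# Illustrative arguments for the `write_file` function call handled above.
write_file_args = {
    "explanation": "I will move hello() into its own module.",
    "files": [
        {
            "path": "hello.py",
            "content": 'def hello():\n    "print a greeting"\n\n    print("hello")\n',
        },
        {
            "path": "main.py",
            "content": "from hello import hello\n\nhello()\n",
        },
    ],
}
```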
25 | repo_content_prefix = None 26 | 27 | # TODO: fix the chat history, except we can't keep the whole file 28 | -------------------------------------------------------------------------------- /devops/aider/coders/wholefile_prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | from .base_prompts import CoderPrompts 4 | 5 | 6 | class WholeFilePrompts(CoderPrompts): 7 | main_system = """Act as an expert software developer. 8 | Take requests for changes to the supplied code. 9 | If the request is ambiguous, ask questions. 10 | 11 | Always reply to the user in the same language they are using. 12 | 13 | {lazy_prompt} 14 | Once you understand the request you MUST: 15 | 1. Determine if any code changes are needed. 16 | 2. Explain any needed changes. 17 | 3. If changes are needed, output a copy of each file that needs changes. 18 | """ 19 | 20 | example_messages = [ 21 | dict( 22 | role="user", 23 | content="Change the greeting to be more casual", 24 | ), 25 | dict( 26 | role="assistant", 27 | content="""Ok, I will: 28 | 29 | 1. Switch the greeting text from "Hello" to "Hey". 30 | 31 | show_greeting.py 32 | {fence[0]} 33 | import sys 34 | 35 | def greeting(name): 36 | print(f"Hey {{name}}") 37 | 38 | if __name__ == '__main__': 39 | greeting(sys.argv[1]) 40 | {fence[1]} 41 | """, 42 | ), 43 | ] 44 | 45 | system_reminder = """To suggest changes to a file you MUST return the entire content of the updated file. 46 | You MUST use this *file listing* format: 47 | 48 | path/to/filename.js 49 | {fence[0]} 50 | // entire file content ... 51 | // ... goes in between 52 | {fence[1]} 53 | 54 | Every *file listing* MUST use this format: 55 | - First line: the filename with any originally provided path 56 | - Second line: opening {fence[0]} 57 | - ... entire content of the file ... 58 | - Final line: closing {fence[1]} 59 | 60 | To suggest changes to a file you MUST return a *file listing* that contains the entire content of the file. 61 | *NEVER* skip, omit or elide content from a *file listing* using "..." or by adding comments like "... rest of code..."! 62 | Create a new file you MUST return a *file listing* which includes an appropriate filename, including any appropriate path. 63 | 64 | {lazy_prompt} 65 | """ 66 | 67 | redacted_edit_message = "No changes are needed." 
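WholeFilePrompts above asks the model to answer with complete *file listings*, which `WholeFileCoder.get_edits()` (earlier in this directory) parses back into per-file edits. The sketch below builds one such reply string; the fence characters and file content are illustrative.

```python
fence = ("```", "```")  # assumed fence pair

reply = (
    "Ok, I will switch the greeting to be more casual.\n"
    "\n"
    "show_greeting.py\n"
    f"{fence[0]}\n"
    "import sys\n"
    "\n"
    "def greeting(name):\n"
    '    print(f"Hey {name}")\n'
    "\n"
    "if __name__ == '__main__':\n"
    "    greeting(sys.argv[1])\n"
    f"{fence[1]}\n"
)
print(reply)
```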
68 | -------------------------------------------------------------------------------- /devops/aider/diffs.py: -------------------------------------------------------------------------------- 1 | import difflib 2 | import sys 3 | 4 | from .dump import dump # noqa: F401 5 | 6 | 7 | def main(): 8 | if len(sys.argv) != 3: 9 | print("Usage: python diffs.py file1 file") 10 | sys.exit(1) 11 | 12 | file_orig, file_updated = sys.argv[1], sys.argv[2] 13 | 14 | with open(file_orig, "r", encoding="utf-8") as f: 15 | lines_orig = f.readlines() 16 | 17 | with open(file_updated, "r", encoding="utf-8") as f: 18 | lines_updated = f.readlines() 19 | 20 | for i in range(len(file_updated)): 21 | res = diff_partial_update(lines_orig, lines_updated[:i]) 22 | print(res) 23 | input() 24 | 25 | 26 | def create_progress_bar(percentage): 27 | block = "█" 28 | empty = "░" 29 | total_blocks = 30 30 | filled_blocks = int(total_blocks * percentage // 100) 31 | empty_blocks = total_blocks - filled_blocks 32 | bar = block * filled_blocks + empty * empty_blocks 33 | return bar 34 | 35 | 36 | def assert_newlines(lines): 37 | if not lines: 38 | return 39 | for line in lines[:-1]: 40 | assert line and line[-1] == "\n", line 41 | 42 | 43 | def diff_partial_update(lines_orig, lines_updated, final=False, fname=None): 44 | """ 45 | Given only the first part of an updated file, show the diff while 46 | ignoring the block of "deleted" lines that are past the end of the 47 | partially complete update. 48 | """ 49 | 50 | # dump(lines_orig) 51 | # dump(lines_updated) 52 | 53 | assert_newlines(lines_orig) 54 | assert_newlines(lines_orig) 55 | 56 | num_orig_lines = len(lines_orig) 57 | 58 | if final: 59 | last_non_deleted = num_orig_lines 60 | else: 61 | last_non_deleted = find_last_non_deleted(lines_orig, lines_updated) 62 | 63 | # dump(last_non_deleted) 64 | if last_non_deleted is None: 65 | return "" 66 | 67 | if num_orig_lines: 68 | pct = last_non_deleted * 100 / num_orig_lines 69 | else: 70 | pct = 50 71 | bar = create_progress_bar(pct) 72 | bar = f" {last_non_deleted:3d} / {num_orig_lines:3d} lines [{bar}] {pct:3.0f}%\n" 73 | 74 | lines_orig = lines_orig[:last_non_deleted] 75 | 76 | if not final: 77 | lines_updated = lines_updated[:-1] + [bar] 78 | 79 | diff = difflib.unified_diff(lines_orig, lines_updated, n=5) 80 | 81 | diff = list(diff)[2:] 82 | 83 | diff = "".join(diff) 84 | if not diff.endswith("\n"): 85 | diff += "\n" 86 | 87 | for i in range(3, 10): 88 | backticks = "`" * i 89 | if backticks not in diff: 90 | break 91 | 92 | show = f"{backticks}diff\n" 93 | if fname: 94 | show += f"--- {fname} original\n" 95 | show += f"+++ {fname} updated\n" 96 | 97 | show += diff 98 | 99 | show += f"{backticks}\n\n" 100 | 101 | # print(diff) 102 | 103 | return show 104 | 105 | 106 | def find_last_non_deleted(lines_orig, lines_updated): 107 | diff = list(difflib.ndiff(lines_orig, lines_updated)) 108 | 109 | num_orig = 0 110 | last_non_deleted_orig = None 111 | 112 | for line in diff: 113 | # print(f"{num_orig:2d} {num_updated:2d} {line}", end="") 114 | code = line[0] 115 | if code == " ": 116 | num_orig += 1 117 | last_non_deleted_orig = num_orig 118 | elif code == "-": 119 | # line only in orig 120 | num_orig += 1 121 | elif code == "+": 122 | # line only in updated 123 | pass 124 | 125 | return last_non_deleted_orig 126 | 127 | 128 | if __name__ == "__main__": 129 | main() 130 | -------------------------------------------------------------------------------- /devops/aider/dump.py: 
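diffs.py above renders a live diff while only part of an updated file has streamed in, padding the tail with a progress bar. A minimal usage sketch, assuming the package is importable as `aider` (as its own modules assume); the sample lines are illustrative.

```python
from aider.diffs import diff_partial_update

orig = [
    "def greet():\n",
    '    print("Hello")\n',
    "\n",
    "greet()\n",
]
# Only the first two lines of the updated file have arrived so far.
partial = [
    "def greet():\n",
    '    print("Hey there")\n',
]

# Non-final call: trailing "deletions" are ignored and a progress bar is appended.
print(diff_partial_update(orig, partial, fname="greet.py"))
```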
-------------------------------------------------------------------------------- 1 | import json 2 | import traceback 3 | 4 | 5 | def cvt(s): 6 | if isinstance(s, str): 7 | return s 8 | try: 9 | return json.dumps(s, indent=4) 10 | except TypeError: 11 | return str(s) 12 | 13 | 14 | def dump(*vals): 15 | # http://docs.python.org/library/traceback.html 16 | stack = traceback.extract_stack() 17 | vars = stack[-2][3] 18 | 19 | # strip away the call to dump() 20 | vars = "(".join(vars.split("(")[1:]) 21 | vars = ")".join(vars.split(")")[:-1]) 22 | 23 | vals = [cvt(v) for v in vals] 24 | has_newline = sum(1 for v in vals if "\n" in v) 25 | if has_newline: 26 | print("%s:" % vars) 27 | print(", ".join(vals)) 28 | else: 29 | print("%s:" % vars, ", ".join(vals)) 30 | -------------------------------------------------------------------------------- /devops/aider/history.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from aider import models, prompts 4 | from aider.dump import dump # noqa: F401 5 | from aider.sendchat import simple_send_with_retries 6 | 7 | 8 | class ChatSummary: 9 | def __init__(self, model=None, max_tokens=1024): 10 | self.token_count = model.token_count 11 | self.max_tokens = max_tokens 12 | self.model = model 13 | 14 | def too_big(self, messages): 15 | sized = self.tokenize(messages) 16 | total = sum(tokens for tokens, _msg in sized) 17 | return total > self.max_tokens 18 | 19 | def tokenize(self, messages): 20 | sized = [] 21 | for msg in messages: 22 | tokens = self.token_count(msg) 23 | sized.append((tokens, msg)) 24 | return sized 25 | 26 | def summarize(self, messages, depth=0): 27 | sized = self.tokenize(messages) 28 | total = sum(tokens for tokens, _msg in sized) 29 | if total <= self.max_tokens and depth == 0: 30 | return messages 31 | 32 | min_split = 4 33 | if len(messages) <= min_split or depth > 3: 34 | return self.summarize_all(messages) 35 | 36 | tail_tokens = 0 37 | split_index = len(messages) 38 | half_max_tokens = self.max_tokens // 2 39 | 40 | # Iterate over the messages in reverse order 41 | for i in range(len(sized) - 1, -1, -1): 42 | tokens, _msg = sized[i] 43 | if tail_tokens + tokens < half_max_tokens: 44 | tail_tokens += tokens 45 | split_index = i 46 | else: 47 | break 48 | 49 | # Ensure the head ends with an assistant message 50 | while messages[split_index - 1]["role"] != "assistant" and split_index > 1: 51 | split_index -= 1 52 | 53 | if split_index <= min_split: 54 | return self.summarize_all(messages) 55 | 56 | head = messages[:split_index] 57 | tail = messages[split_index:] 58 | 59 | sized = sized[:split_index] 60 | head.reverse() 61 | sized.reverse() 62 | keep = [] 63 | total = 0 64 | model_max_input_tokens = self.model.info.get("max_input_tokens", 4096) - 512 65 | for i in range(split_index): 66 | total += sized[i][0] 67 | if total > model_max_input_tokens: 68 | break 69 | keep.append(head[i]) 70 | 71 | keep.reverse() 72 | 73 | summary = self.summarize_all(keep) 74 | 75 | tail_tokens = sum(tokens for tokens, msg in sized[split_index:]) 76 | summary_tokens = self.token_count(summary) 77 | 78 | result = summary + tail 79 | if summary_tokens + tail_tokens < self.max_tokens: 80 | return result 81 | 82 | return self.summarize(result, depth + 1) 83 | 84 | def summarize_all(self, messages): 85 | content = "" 86 | for msg in messages: 87 | role = msg["role"].upper() 88 | if role not in ("USER", "ASSISTANT"): 89 | continue 90 | content += f"# {role}\n" 91 | content += msg["content"] 92 | if 
not content.endswith("\n"): 93 | content += "\n" 94 | 95 | messages = [ 96 | dict(role="system", content=prompts.summarize), 97 | dict(role="user", content=content), 98 | ] 99 | 100 | summary = simple_send_with_retries(self.model.name, messages) 101 | if summary is None: 102 | raise ValueError(f"summarizer unexpectedly failed for {self.model.name}") 103 | summary = prompts.summary_prefix + summary 104 | 105 | return [dict(role="user", content=summary)] 106 | 107 | 108 | def main(): 109 | parser = argparse.ArgumentParser() 110 | parser.add_argument("filename", help="Markdown file to parse") 111 | args = parser.parse_args() 112 | 113 | model = models.Model("gpt-3.5-turbo") 114 | summarizer = ChatSummary(model) 115 | 116 | with open(args.filename, "r") as f: 117 | text = f.read() 118 | 119 | summary = summarizer.summarize_chat_history_markdown(text) 120 | dump(summary) 121 | 122 | 123 | if __name__ == "__main__": 124 | main() 125 | -------------------------------------------------------------------------------- /devops/aider/linter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import subprocess 4 | import sys 5 | import traceback 6 | import warnings 7 | from dataclasses import dataclass 8 | from pathlib import Path 9 | 10 | from grep_ast import TreeContext, filename_to_lang 11 | from tree_sitter_languages import get_parser # noqa: E402 12 | 13 | # tree_sitter is throwing a FutureWarning 14 | warnings.simplefilter("ignore", category=FutureWarning) 15 | 16 | 17 | class Linter: 18 | def __init__(self, encoding="utf-8", root=None): 19 | self.encoding = encoding 20 | self.root = root 21 | 22 | self.languages = dict( 23 | python=self.py_lint, 24 | ) 25 | self.all_lint_cmd = None 26 | 27 | def set_linter(self, lang, cmd): 28 | if lang: 29 | self.languages[lang] = cmd 30 | return 31 | 32 | self.all_lint_cmd = cmd 33 | 34 | def get_rel_fname(self, fname): 35 | if self.root: 36 | return os.path.relpath(fname, self.root) 37 | else: 38 | return fname 39 | 40 | def run_cmd(self, cmd, rel_fname, code): 41 | cmd += " " + rel_fname 42 | cmd = cmd.split() 43 | 44 | process = subprocess.Popen( 45 | cmd, cwd=self.root, stdout=subprocess.PIPE, stderr=subprocess.STDOUT 46 | ) 47 | stdout, _ = process.communicate() 48 | errors = stdout.decode() 49 | if process.returncode == 0: 50 | return # zero exit status 51 | 52 | cmd = " ".join(cmd) 53 | res = f"## Running: {cmd}\n\n" 54 | res += errors 55 | 56 | linenums = [] 57 | filenames_linenums = find_filenames_and_linenums(errors, [rel_fname]) 58 | if filenames_linenums: 59 | filename, linenums = next(iter(filenames_linenums.items())) 60 | linenums = [num - 1 for num in linenums] 61 | 62 | return LintResult(text=res, lines=linenums) 63 | 64 | def lint(self, fname, cmd=None): 65 | rel_fname = self.get_rel_fname(fname) 66 | code = Path(fname).read_text(self.encoding) 67 | 68 | if cmd: 69 | cmd = cmd.strip() 70 | if not cmd: 71 | lang = filename_to_lang(fname) 72 | if not lang: 73 | return 74 | if self.all_lint_cmd: 75 | cmd = self.all_lint_cmd 76 | else: 77 | cmd = self.languages.get(lang) 78 | 79 | if callable(cmd): 80 | linkres = cmd(fname, rel_fname, code) 81 | elif cmd: 82 | linkres = self.run_cmd(cmd, rel_fname, code) 83 | else: 84 | linkres = basic_lint(rel_fname, code) 85 | 86 | if not linkres: 87 | return 88 | 89 | res = "# Fix any errors below, if possible.\n\n" 90 | res += linkres.text 91 | res += "\n" 92 | res += tree_context(rel_fname, code, linkres.lines) 93 | 94 | return res 95 | 96 | def 
py_lint(self, fname, rel_fname, code): 97 | basic_res = basic_lint(rel_fname, code) 98 | compile_res = lint_python_compile(fname, code) 99 | 100 | fatal = "E9,F821,F823,F831,F406,F407,F701,F702,F704,F706" 101 | flake8 = f"flake8 --select={fatal} --show-source --isolated" 102 | 103 | try: 104 | flake_res = self.run_cmd(flake8, rel_fname, code) 105 | except FileNotFoundError: 106 | flake_res = None 107 | 108 | text = "" 109 | lines = set() 110 | for res in [basic_res, compile_res, flake_res]: 111 | if not res: 112 | continue 113 | if text: 114 | text += "\n" 115 | text += res.text 116 | lines.update(res.lines) 117 | 118 | if text or lines: 119 | return LintResult(text, lines) 120 | 121 | 122 | @dataclass 123 | class LintResult: 124 | text: str 125 | lines: list 126 | 127 | 128 | def lint_python_compile(fname, code): 129 | try: 130 | compile(code, fname, "exec") # USE TRACEBACK BELOW HERE 131 | return 132 | except Exception as err: 133 | end_lineno = getattr(err, "end_lineno", err.lineno) 134 | line_numbers = list(range(err.lineno - 1, end_lineno)) 135 | 136 | tb_lines = traceback.format_exception(type(err), err, err.__traceback__) 137 | last_file_i = 0 138 | 139 | target = "# USE TRACEBACK" 140 | target += " BELOW HERE" 141 | for i in range(len(tb_lines)): 142 | if target in tb_lines[i]: 143 | last_file_i = i 144 | break 145 | 146 | tb_lines = tb_lines[:1] + tb_lines[last_file_i + 1 :] 147 | 148 | res = "".join(tb_lines) 149 | return LintResult(text=res, lines=line_numbers) 150 | 151 | 152 | def basic_lint(fname, code): 153 | """ 154 | Use tree-sitter to look for syntax errors, display them with tree context. 155 | """ 156 | 157 | lang = filename_to_lang(fname) 158 | if not lang: 159 | return 160 | 161 | parser = get_parser(lang) 162 | tree = parser.parse(bytes(code, "utf-8")) 163 | 164 | errors = traverse_tree(tree.root_node) 165 | if not errors: 166 | return 167 | 168 | return LintResult(text="", lines=errors) 169 | 170 | 171 | def tree_context(fname, code, line_nums): 172 | context = TreeContext( 173 | fname, 174 | code, 175 | color=False, 176 | line_number=True, 177 | child_context=False, 178 | last_line=False, 179 | margin=0, 180 | mark_lois=True, 181 | loi_pad=3, 182 | # header_max=30, 183 | show_top_of_file_parent_scope=False, 184 | ) 185 | line_nums = set(line_nums) 186 | context.add_lines_of_interest(line_nums) 187 | context.add_context() 188 | s = "s" if len(line_nums) > 1 else "" 189 | output = f"## See relevant line{s} below marked with █.\n\n" 190 | output += fname + ":\n" 191 | output += context.format() 192 | 193 | return output 194 | 195 | 196 | # Traverse the tree to find errors 197 | def traverse_tree(node): 198 | errors = [] 199 | if node.type == "ERROR" or node.is_missing: 200 | line_no = node.start_point[0] 201 | errors.append(line_no) 202 | 203 | for child in node.children: 204 | errors += traverse_tree(child) 205 | 206 | return errors 207 | 208 | 209 | def find_filenames_and_linenums(text, fnames): 210 | """ 211 | Search text for all occurrences of :\\d+ and make a list of them 212 | where is one of the filenames in the list `fnames`. 
213 | """ 214 | pattern = re.compile(r"(\b(?:" + "|".join(re.escape(fname) for fname in fnames) + r"):\d+\b)") 215 | matches = pattern.findall(text) 216 | result = {} 217 | for match in matches: 218 | fname, linenum = match.rsplit(":", 1) 219 | if fname not in result: 220 | result[fname] = set() 221 | result[fname].add(int(linenum)) 222 | return result 223 | 224 | 225 | def main(): 226 | """ 227 | Main function to parse files provided as command line arguments. 228 | """ 229 | if len(sys.argv) < 2: 230 | print("Usage: python linter.py ...") 231 | sys.exit(1) 232 | 233 | linter = Linter(root=os.getcwd()) 234 | for file_path in sys.argv[1:]: 235 | errors = linter.lint(file_path) 236 | if errors: 237 | print(errors) 238 | 239 | 240 | if __name__ == "__main__": 241 | main() 242 | -------------------------------------------------------------------------------- /devops/aider/litellm.py: -------------------------------------------------------------------------------- 1 | import os 2 | import warnings 3 | 4 | warnings.filterwarnings("ignore", category=UserWarning, module="pydantic") 5 | 6 | os.environ["OR_SITE_URL"] = "http://aider.chat" 7 | os.environ["OR_APP_NAME"] = "Aider" 8 | 9 | import litellm # noqa: E402 10 | 11 | litellm.suppress_debug_info = True 12 | litellm.set_verbose = False 13 | 14 | __all__ = [litellm] 15 | -------------------------------------------------------------------------------- /devops/aider/mdstream.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import io 4 | import time 5 | 6 | from rich.console import Console 7 | from rich.live import Live 8 | from rich.markdown import Markdown 9 | from rich.text import Text 10 | 11 | from aider.dump import dump # noqa: F401 12 | 13 | _text = """ 14 | # Header 15 | 16 | Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum. 17 | 18 | 19 | 20 | ## Sub header 21 | 22 | - List 1 23 | - List 2 24 | - List me 25 | - List you 26 | 27 | 28 | 29 | ```python 30 | import sys 31 | 32 | def greeting(): 33 | print("Hello world!") 34 | ``` 35 | 36 | ## Sub header too 37 | 38 | The end. 
39 | 40 | """ # noqa: E501 41 | 42 | 43 | class MarkdownStream: 44 | live = None 45 | when = 0 46 | min_delay = 0.050 47 | live_window = 6 48 | 49 | def __init__(self, mdargs=None): 50 | self.printed = [] 51 | 52 | if mdargs: 53 | self.mdargs = mdargs 54 | else: 55 | self.mdargs = dict() 56 | 57 | self.live = Live(Text(""), refresh_per_second=1.0 / self.min_delay) 58 | self.live.start() 59 | 60 | def __del__(self): 61 | if self.live: 62 | try: 63 | self.live.stop() 64 | except Exception: 65 | pass 66 | 67 | def update(self, text, final=False): 68 | now = time.time() 69 | if not final and now - self.when < self.min_delay: 70 | return 71 | self.when = now 72 | 73 | string_io = io.StringIO() 74 | console = Console(file=string_io, force_terminal=True) 75 | 76 | markdown = Markdown(text, **self.mdargs) 77 | 78 | console.print(markdown) 79 | output = string_io.getvalue() 80 | 81 | lines = output.splitlines(keepends=True) 82 | num_lines = len(lines) 83 | 84 | if not final: 85 | num_lines -= self.live_window 86 | 87 | if final or num_lines > 0: 88 | num_printed = len(self.printed) 89 | 90 | show = num_lines - num_printed 91 | 92 | if show <= 0: 93 | return 94 | 95 | show = lines[num_printed:num_lines] 96 | show = "".join(show) 97 | show = Text.from_ansi(show) 98 | self.live.console.print(show) 99 | 100 | self.printed = lines[:num_lines] 101 | 102 | if final: 103 | self.live.update(Text("")) 104 | self.live.stop() 105 | self.live = None 106 | else: 107 | rest = lines[num_lines:] 108 | rest = "".join(rest) 109 | # rest = '...\n' + rest 110 | rest = Text.from_ansi(rest) 111 | self.live.update(rest) 112 | 113 | 114 | if __name__ == "__main__": 115 | _text = 5 * _text 116 | 117 | pm = MarkdownStream() 118 | for i in range(6, len(_text)): 119 | pm.update(_text[:i]) 120 | time.sleep(0.01) 121 | 122 | pm.update(_text, final=True) 123 | -------------------------------------------------------------------------------- /devops/aider/prompts.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | 4 | # COMMIT 5 | commit_system = """You are an expert software engineer. 6 | Review the provided context and diffs which are about to be committed to a git repo. 7 | Generate a *SHORT* 1 line, 1 sentence commit message that describes the purpose of the changes. 8 | The commit message MUST be in the past tense. 9 | It must describe the changes *which have been made* in the diffs! 10 | Reply with JUST the commit message, without quotes, comments, questions, etc! 11 | """ 12 | 13 | # COMMANDS 14 | undo_command_reply = ( 15 | "I did `git reset --hard HEAD~1` to discard the last edits. Please wait for further" 16 | " instructions before attempting that change again. Feel free to ask relevant questions about" 17 | " why the changes were reverted." 18 | ) 19 | 20 | added_files = """I added these files to the chat: {fnames}. 21 | 22 | If you need to propose edits to other existing files not already added to the chat, you *MUST* tell the me their full path names and ask me to *add the files to the chat*. End your reply and wait for my approval. You can keep asking if you then decide you need to edit more files.""" 23 | 24 | 25 | run_output = """I ran this command: 26 | 27 | {command} 28 | 29 | And got this output: 30 | 31 | {output} 32 | """ 33 | 34 | # CHAT HISTORY 35 | summarize = """*Briefly* summarize this partial conversation about programming. 36 | Include less detail about older parts and more detail about the most recent messages. 
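prompts.py above holds bare prompt strings; `commit_system`, for example, is presumably paired with the staged diff and sent through sendchat to draft a commit message. A hedged sketch — the model name and diff text are illustrative.

```python
from aider import prompts
from aider.sendchat import simple_send_with_retries

staged_diff = (
    "diff --git a/greet.py b/greet.py\n"
    "-print('Hello')\n"
    "+print('Hey')\n"
)  # illustrative diff text

messages = [
    dict(role="system", content=prompts.commit_system),
    dict(role="user", content=staged_diff),
]

commit_msg = simple_send_with_retries("gpt-3.5-turbo", messages)
print(commit_msg)
```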
37 | Start a new paragraph every time the topic changes! 38 | 39 | This is only part of a longer conversation so *DO NOT* conclude the summary with language like "Finally, ...". Because the conversation continues after the summary. 40 | The summary *MUST* include the function names, libraries, packages that are being discussed. 41 | The summary *MUST* include the filenames that are being referenced by the assistant inside the ```...``` fenced code blocks! 42 | The summaries *MUST NOT* include ```...``` fenced code blocks! 43 | 44 | Phrase the summary with the USER in first person, telling the ASSISTANT about the conversation. 45 | Write *as* the user. 46 | The user should refer to the assistant as *you*. 47 | Start the summary with "I asked you...". 48 | """ 49 | 50 | summary_prefix = "I spoke to you previously about a number of things.\n" 51 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-c-tags.scm: -------------------------------------------------------------------------------- 1 | (struct_specifier name: (type_identifier) @name.definition.class body:(_)) @definition.class 2 | 3 | (declaration type: (union_specifier name: (type_identifier) @name.definition.class)) @definition.class 4 | 5 | (function_declarator declarator: (identifier) @name.definition.function) @definition.function 6 | 7 | (type_definition declarator: (type_identifier) @name.definition.type) @definition.type 8 | 9 | (enum_specifier name: (type_identifier) @name.definition.type) @definition.type 10 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-c_sharp-tags.scm: -------------------------------------------------------------------------------- 1 | (class_declaration 2 | name: (identifier) @name.definition.class 3 | ) @definition.class 4 | 5 | (class_declaration 6 | bases: (base_list (_) @name.reference.class) 7 | ) @reference.class 8 | 9 | (interface_declaration 10 | name: (identifier) @name.definition.interface 11 | ) @definition.interface 12 | 13 | (interface_declaration 14 | bases: (base_list (_) @name.reference.interface) 15 | ) @reference.interface 16 | 17 | (method_declaration 18 | name: (identifier) @name.definition.method 19 | ) @definition.method 20 | 21 | (object_creation_expression 22 | type: (identifier) @name.reference.class 23 | ) @reference.class 24 | 25 | (type_parameter_constraints_clause 26 | target: (identifier) @name.reference.class 27 | ) @reference.class 28 | 29 | (type_constraint 30 | type: (identifier) @name.reference.class 31 | ) @reference.class 32 | 33 | (variable_declaration 34 | type: (identifier) @name.reference.class 35 | ) @reference.class 36 | 37 | (invocation_expression 38 | function: 39 | (member_access_expression 40 | name: (identifier) @name.reference.send 41 | ) 42 | ) @reference.send 43 | 44 | (namespace_declaration 45 | name: (identifier) @name.definition.module 46 | ) @definition.module 47 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-cpp-tags.scm: -------------------------------------------------------------------------------- 1 | (struct_specifier name: (type_identifier) @name.definition.class body:(_)) @definition.class 2 | 3 | (declaration type: (union_specifier name: (type_identifier) @name.definition.class)) @definition.class 4 | 5 | (function_declarator declarator: (identifier) @name.definition.function) @definition.function 6 | 7 | (function_declarator declarator: 
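The *-tags.scm files in this queries/ directory are tree-sitter tag queries; repo-mapping code presumably loads one and runs it over a parsed source file to collect definition and reference tags. A sketch using tree_sitter_languages (already pinned in requirements above); the sample code and query path are illustrative.

```python
from pathlib import Path

from tree_sitter_languages import get_language, get_parser

language = get_language("python")
parser = get_parser("python")

code = b"def greeting(name):\n    print(name)\n\ngreeting('hi')\n"
tree = parser.parse(code)

scm = Path("devops/aider/queries/tree-sitter-python-tags.scm").read_text()
query = language.query(scm)

# Each capture pairs a syntax node with a tag such as name.definition.function.
for node, tag in query.captures(tree.root_node):
    print(tag, node.text.decode())
```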
(field_identifier) @name.definition.function) @definition.function 8 | 9 | (function_declarator declarator: (qualified_identifier scope: (namespace_identifier) @scope name: (identifier) @name.definition.method)) @definition.method 10 | 11 | (type_definition declarator: (type_identifier) @name.definition.type) @definition.type 12 | 13 | (enum_specifier name: (type_identifier) @name.definition.type) @definition.type 14 | 15 | (class_specifier name: (type_identifier) @name.definition.class) @definition.class 16 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-elisp-tags.scm: -------------------------------------------------------------------------------- 1 | ;; defun/defsubst 2 | (function_definition name: (symbol) @name.definition.function) @definition.function 3 | 4 | ;; Treat macros as function definitions for the sake of TAGS. 5 | (macro_definition name: (symbol) @name.definition.function) @definition.function 6 | 7 | ;; Match function calls 8 | (list (symbol) @name.reference.function) @reference.function 9 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-elixir-tags.scm: -------------------------------------------------------------------------------- 1 | ; Definitions 2 | 3 | ; * modules and protocols 4 | (call 5 | target: (identifier) @ignore 6 | (arguments (alias) @name.definition.module) 7 | (#match? @ignore "^(defmodule|defprotocol)$")) @definition.module 8 | 9 | ; * functions/macros 10 | (call 11 | target: (identifier) @ignore 12 | (arguments 13 | [ 14 | ; zero-arity functions with no parentheses 15 | (identifier) @name.definition.function 16 | ; regular function clause 17 | (call target: (identifier) @name.definition.function) 18 | ; function clause with a guard clause 19 | (binary_operator 20 | left: (call target: (identifier) @name.definition.function) 21 | operator: "when") 22 | ]) 23 | (#match? @ignore "^(def|defp|defdelegate|defguard|defguardp|defmacro|defmacrop|defn|defnp)$")) @definition.function 24 | 25 | ; References 26 | 27 | ; ignore calls to kernel/special-forms keywords 28 | (call 29 | target: (identifier) @ignore 30 | (#match? 
@ignore "^(def|defp|defdelegate|defguard|defguardp|defmacro|defmacrop|defn|defnp|defmodule|defprotocol|defimpl|defstruct|defexception|defoverridable|alias|case|cond|else|for|if|import|quote|raise|receive|require|reraise|super|throw|try|unless|unquote|unquote_splicing|use|with)$")) 31 | 32 | ; ignore module attributes 33 | (unary_operator 34 | operator: "@" 35 | operand: (call 36 | target: (identifier) @ignore)) 37 | 38 | ; * function call 39 | (call 40 | target: [ 41 | ; local 42 | (identifier) @name.reference.call 43 | ; remote 44 | (dot 45 | right: (identifier) @name.reference.call) 46 | ]) @reference.call 47 | 48 | ; * pipe into function call 49 | (binary_operator 50 | operator: "|>" 51 | right: (identifier) @name.reference.call) @reference.call 52 | 53 | ; * modules 54 | (alias) @name.reference.module @reference.module 55 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-elm-tags.scm: -------------------------------------------------------------------------------- 1 | (value_declaration (function_declaration_left (lower_case_identifier) @name.definition.function)) @definition.function 2 | 3 | (function_call_expr (value_expr (value_qid) @name.reference.function)) @reference.function 4 | (exposed_value (lower_case_identifier) @name.reference.function)) @reference.function 5 | (type_annotation ((lower_case_identifier) @name.reference.function) (colon)) @reference.function 6 | 7 | (type_declaration ((upper_case_identifier) @name.definition.type) ) @definition.type 8 | 9 | (type_ref (upper_case_qid (upper_case_identifier) @name.reference.type)) @reference.type 10 | (exposed_type (upper_case_identifier) @name.reference.type)) @reference.type 11 | 12 | (type_declaration (union_variant (upper_case_identifier) @name.definition.union)) @definition.union 13 | 14 | (value_expr (upper_case_qid (upper_case_identifier) @name.reference.union)) @reference.union 15 | 16 | 17 | (module_declaration 18 | (upper_case_qid (upper_case_identifier)) @name.definition.module 19 | ) @definition.module 20 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-go-tags.scm: -------------------------------------------------------------------------------- 1 | ( 2 | (comment)* @doc 3 | . 4 | (function_declaration 5 | name: (identifier) @name.definition.function) @definition.function 6 | (#strip! @doc "^//\\s*") 7 | (#set-adjacent! @doc @definition.function) 8 | ) 9 | 10 | ( 11 | (comment)* @doc 12 | . 13 | (method_declaration 14 | name: (field_identifier) @name.definition.method) @definition.method 15 | (#strip! @doc "^//\\s*") 16 | (#set-adjacent! 
@doc @definition.method) 17 | ) 18 | 19 | (call_expression 20 | function: [ 21 | (identifier) @name.reference.call 22 | (parenthesized_expression (identifier) @name.reference.call) 23 | (selector_expression field: (field_identifier) @name.reference.call) 24 | (parenthesized_expression (selector_expression field: (field_identifier) @name.reference.call)) 25 | ]) @reference.call 26 | 27 | (type_spec 28 | name: (type_identifier) @name.definition.type) @definition.type 29 | 30 | (type_identifier) @name.reference.type @reference.type 31 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-java-tags.scm: -------------------------------------------------------------------------------- 1 | (class_declaration 2 | name: (identifier) @name.definition.class) @definition.class 3 | 4 | (method_declaration 5 | name: (identifier) @name.definition.method) @definition.method 6 | 7 | (method_invocation 8 | name: (identifier) @name.reference.call 9 | arguments: (argument_list) @reference.call) 10 | 11 | (interface_declaration 12 | name: (identifier) @name.definition.interface) @definition.interface 13 | 14 | (type_list 15 | (type_identifier) @name.reference.implementation) @reference.implementation 16 | 17 | (object_creation_expression 18 | type: (type_identifier) @name.reference.class) @reference.class 19 | 20 | (superclass (type_identifier) @name.reference.class) @reference.class 21 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-javascript-tags.scm: -------------------------------------------------------------------------------- 1 | ( 2 | (comment)* @doc 3 | . 4 | (method_definition 5 | name: (property_identifier) @name.definition.method) @definition.method 6 | (#not-eq? @name.definition.method "constructor") 7 | (#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$") 8 | (#select-adjacent! @doc @definition.method) 9 | ) 10 | 11 | ( 12 | (comment)* @doc 13 | . 14 | [ 15 | (class 16 | name: (_) @name.definition.class) 17 | (class_declaration 18 | name: (_) @name.definition.class) 19 | ] @definition.class 20 | (#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$") 21 | (#select-adjacent! @doc @definition.class) 22 | ) 23 | 24 | ( 25 | (comment)* @doc 26 | . 27 | [ 28 | (function 29 | name: (identifier) @name.definition.function) 30 | (function_declaration 31 | name: (identifier) @name.definition.function) 32 | (generator_function 33 | name: (identifier) @name.definition.function) 34 | (generator_function_declaration 35 | name: (identifier) @name.definition.function) 36 | ] @definition.function 37 | (#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$") 38 | (#select-adjacent! @doc @definition.function) 39 | ) 40 | 41 | ( 42 | (comment)* @doc 43 | . 44 | (lexical_declaration 45 | (variable_declarator 46 | name: (identifier) @name.definition.function 47 | value: [(arrow_function) (function)]) @definition.function) 48 | (#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$") 49 | (#select-adjacent! @doc @definition.function) 50 | ) 51 | 52 | ( 53 | (comment)* @doc 54 | . 55 | (variable_declaration 56 | (variable_declarator 57 | name: (identifier) @name.definition.function 58 | value: [(arrow_function) (function)]) @definition.function) 59 | (#strip! @doc "^[\\s\\*/]+|^[\\s\\*/]$") 60 | (#select-adjacent! 
@doc @definition.function) 61 | ) 62 | 63 | (assignment_expression 64 | left: [ 65 | (identifier) @name.definition.function 66 | (member_expression 67 | property: (property_identifier) @name.definition.function) 68 | ] 69 | right: [(arrow_function) (function)] 70 | ) @definition.function 71 | 72 | (pair 73 | key: (property_identifier) @name.definition.function 74 | value: [(arrow_function) (function)]) @definition.function 75 | 76 | ( 77 | (call_expression 78 | function: (identifier) @name.reference.call) @reference.call 79 | (#not-match? @name.reference.call "^(require)$") 80 | ) 81 | 82 | (call_expression 83 | function: (member_expression 84 | property: (property_identifier) @name.reference.call) 85 | arguments: (_) @reference.call) 86 | 87 | (new_expression 88 | constructor: (_) @name.reference.class) @reference.class 89 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-ocaml-tags.scm: -------------------------------------------------------------------------------- 1 | ; Modules 2 | ;-------- 3 | 4 | ( 5 | (comment)? @doc . 6 | (module_definition (module_binding (module_name) @name.definition.module) @definition.module) 7 | (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$") 8 | ) 9 | 10 | (module_path (module_name) @name.reference.module) @reference.module 11 | 12 | ; Modules types 13 | ;-------------- 14 | 15 | ( 16 | (comment)? @doc . 17 | (module_type_definition (module_type_name) @name.definition.interface) @definition.interface 18 | (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$") 19 | ) 20 | 21 | (module_type_path (module_type_name) @name.reference.implementation) @reference.implementation 22 | 23 | ; Functions 24 | ;---------- 25 | 26 | ( 27 | (comment)? @doc . 28 | (value_definition 29 | [ 30 | (let_binding 31 | pattern: (value_name) @name.definition.function 32 | (parameter)) 33 | (let_binding 34 | pattern: (value_name) @name.definition.function 35 | body: [(fun_expression) (function_expression)]) 36 | ] @definition.function 37 | ) 38 | (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$") 39 | ) 40 | 41 | ( 42 | (comment)? @doc . 43 | (external (value_name) @name.definition.function) @definition.function 44 | (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$") 45 | ) 46 | 47 | (application_expression 48 | function: (value_path (value_name) @name.reference.call)) @reference.call 49 | 50 | (infix_expression 51 | left: (value_path (value_name) @name.reference.call) 52 | (infix_operator) @reference.call 53 | (#eq? @reference.call "@@")) 54 | 55 | (infix_expression 56 | (infix_operator) @reference.call 57 | right: (value_path (value_name) @name.reference.call) 58 | (#eq? @reference.call "|>")) 59 | 60 | ; Operator 61 | ;--------- 62 | 63 | ( 64 | (comment)? @doc . 65 | (value_definition 66 | (let_binding 67 | pattern: (parenthesized_operator [ 68 | (prefix_operator) 69 | (infix_operator) 70 | (hash_operator) 71 | (indexing_operator) 72 | (let_operator) 73 | (and_operator) 74 | (match_operator) 75 | ] @name.definition.function)) @definition.function) 76 | (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$") 77 | ) 78 | 79 | [ 80 | (prefix_operator) 81 | (sign_operator) 82 | (infix_operator) 83 | (hash_operator) 84 | (indexing_operator) 85 | (let_operator) 86 | (and_operator) 87 | (match_operator) 88 | ] @name.reference.call @reference.call 89 | 90 | ; Classes 91 | ;-------- 92 | 93 | ( 94 | (comment)? @doc . 
95 | [ 96 | (class_definition (class_binding (class_name) @name.definition.class) @definition.class) 97 | (class_type_definition (class_type_binding (class_type_name) @name.definition.class) @definition.class) 98 | ] 99 | (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$") 100 | ) 101 | 102 | [ 103 | (class_path (class_name) @name.reference.class) 104 | (class_type_path (class_type_name) @name.reference.class) 105 | ] @reference.class 106 | 107 | ; Methods 108 | ;-------- 109 | 110 | ( 111 | (comment)? @doc . 112 | (method_definition (method_name) @name.definition.method) @definition.method 113 | (#strip! @doc "^\\(\\*\\*?\\s*|\\s\\*\\)$") 114 | ) 115 | 116 | (method_invocation (method_name) @name.reference.call) @reference.call 117 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-php-tags.scm: -------------------------------------------------------------------------------- 1 | (class_declaration 2 | name: (name) @name.definition.class) @definition.class 3 | 4 | (function_definition 5 | name: (name) @name.definition.function) @definition.function 6 | 7 | (method_declaration 8 | name: (name) @name.definition.function) @definition.function 9 | 10 | (object_creation_expression 11 | [ 12 | (qualified_name (name) @name.reference.class) 13 | (variable_name (name) @name.reference.class) 14 | ]) @reference.class 15 | 16 | (function_call_expression 17 | function: [ 18 | (qualified_name (name) @name.reference.call) 19 | (variable_name (name)) @name.reference.call 20 | ]) @reference.call 21 | 22 | (scoped_call_expression 23 | name: (name) @name.reference.call) @reference.call 24 | 25 | (member_call_expression 26 | name: (name) @name.reference.call) @reference.call 27 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-python-tags.scm: -------------------------------------------------------------------------------- 1 | (class_definition 2 | name: (identifier) @name.definition.class) @definition.class 3 | 4 | (function_definition 5 | name: (identifier) @name.definition.function) @definition.function 6 | 7 | (call 8 | function: [ 9 | (identifier) @name.reference.call 10 | (attribute 11 | attribute: (identifier) @name.reference.call) 12 | ]) @reference.call 13 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-ql-tags.scm: -------------------------------------------------------------------------------- 1 | (classlessPredicate 2 | name: (predicateName) @name.definition.function) @definition.function 3 | 4 | (memberPredicate 5 | name: (predicateName) @name.definition.method) @definition.method 6 | 7 | (aritylessPredicateExpr 8 | name: (literalId) @name.reference.call) @reference.call 9 | 10 | (module 11 | name: (moduleName) @name.definition.module) @definition.module 12 | 13 | (dataclass 14 | name: (className) @name.definition.class) @definition.class 15 | 16 | (datatype 17 | name: (className) @name.definition.class) @definition.class 18 | 19 | (datatypeBranch 20 | name: (className) @name.definition.class) @definition.class 21 | 22 | (qualifiedRhs 23 | name: (predicateName) @name.reference.call) @reference.call 24 | 25 | (typeExpr 26 | name: (className) @name.reference.type) @reference.type 27 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-ruby-tags.scm: -------------------------------------------------------------------------------- 1 | ; Method 
definitions 2 | 3 | ( 4 | (comment)* @doc 5 | . 6 | [ 7 | (method 8 | name: (_) @name.definition.method) @definition.method 9 | (singleton_method 10 | name: (_) @name.definition.method) @definition.method 11 | ] 12 | (#strip! @doc "^#\\s*") 13 | (#select-adjacent! @doc @definition.method) 14 | ) 15 | 16 | (alias 17 | name: (_) @name.definition.method) @definition.method 18 | 19 | (setter 20 | (identifier) @ignore) 21 | 22 | ; Class definitions 23 | 24 | ( 25 | (comment)* @doc 26 | . 27 | [ 28 | (class 29 | name: [ 30 | (constant) @name.definition.class 31 | (scope_resolution 32 | name: (_) @name.definition.class) 33 | ]) @definition.class 34 | (singleton_class 35 | value: [ 36 | (constant) @name.definition.class 37 | (scope_resolution 38 | name: (_) @name.definition.class) 39 | ]) @definition.class 40 | ] 41 | (#strip! @doc "^#\\s*") 42 | (#select-adjacent! @doc @definition.class) 43 | ) 44 | 45 | ; Module definitions 46 | 47 | ( 48 | (module 49 | name: [ 50 | (constant) @name.definition.module 51 | (scope_resolution 52 | name: (_) @name.definition.module) 53 | ]) @definition.module 54 | ) 55 | 56 | ; Calls 57 | 58 | (call method: (identifier) @name.reference.call) @reference.call 59 | 60 | ( 61 | [(identifier) (constant)] @name.reference.call @reference.call 62 | (#is-not? local) 63 | (#not-match? @name.reference.call "^(lambda|load|require|require_relative|__FILE__|__LINE__)$") 64 | ) 65 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-rust-tags.scm: -------------------------------------------------------------------------------- 1 | ; ADT definitions 2 | 3 | (struct_item 4 | name: (type_identifier) @name.definition.class) @definition.class 5 | 6 | (enum_item 7 | name: (type_identifier) @name.definition.class) @definition.class 8 | 9 | (union_item 10 | name: (type_identifier) @name.definition.class) @definition.class 11 | 12 | ; type aliases 13 | 14 | (type_item 15 | name: (type_identifier) @name.definition.class) @definition.class 16 | 17 | ; method definitions 18 | 19 | (declaration_list 20 | (function_item 21 | name: (identifier) @name.definition.method)) @definition.method 22 | 23 | ; function definitions 24 | 25 | (function_item 26 | name: (identifier) @name.definition.function) @definition.function 27 | 28 | ; trait definitions 29 | (trait_item 30 | name: (type_identifier) @name.definition.interface) @definition.interface 31 | 32 | ; module definitions 33 | (mod_item 34 | name: (identifier) @name.definition.module) @definition.module 35 | 36 | ; macro definitions 37 | 38 | (macro_definition 39 | name: (identifier) @name.definition.macro) @definition.macro 40 | 41 | ; references 42 | 43 | (call_expression 44 | function: (identifier) @name.reference.call) @reference.call 45 | 46 | (call_expression 47 | function: (field_expression 48 | field: (field_identifier) @name.reference.call)) @reference.call 49 | 50 | (macro_invocation 51 | macro: (identifier) @name.reference.call) @reference.call 52 | 53 | ; implementations 54 | 55 | (impl_item 56 | trait: (type_identifier) @name.reference.implementation) @reference.implementation 57 | 58 | (impl_item 59 | type: (type_identifier) @name.reference.implementation 60 | !trait) @reference.implementation 61 | -------------------------------------------------------------------------------- /devops/aider/queries/tree-sitter-typescript-tags.scm: -------------------------------------------------------------------------------- 1 | (function_signature 2 | name: (identifier) 
@name.definition.function) @definition.function 3 | 4 | (method_signature 5 | name: (property_identifier) @name.definition.method) @definition.method 6 | 7 | (abstract_method_signature 8 | name: (property_identifier) @name.definition.method) @definition.method 9 | 10 | (abstract_class_declaration 11 | name: (type_identifier) @name.definition.class) @definition.class 12 | 13 | (module 14 | name: (identifier) @name.definition.module) @definition.module 15 | 16 | (interface_declaration 17 | name: (type_identifier) @name.definition.interface) @definition.interface 18 | 19 | (type_annotation 20 | (type_identifier) @name.reference.type) @reference.type 21 | 22 | (new_expression 23 | constructor: (identifier) @name.reference.class) @reference.class 24 | 25 | (function_declaration 26 | name: (identifier) @name.definition.function) @definition.function 27 | 28 | (method_definition 29 | name: (property_identifier) @name.definition.method) @definition.method 30 | 31 | (class_declaration 32 | name: (type_identifier) @name.definition.class) @definition.class 33 | 34 | (interface_declaration 35 | name: (type_identifier) @name.definition.class) @definition.class 36 | 37 | (type_alias_declaration 38 | name: (type_identifier) @name.definition.type) @definition.type 39 | 40 | (enum_declaration 41 | name: (identifier) @name.definition.enum) @definition.enum 42 | -------------------------------------------------------------------------------- /devops/aider/repo.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path, PurePosixPath 3 | 4 | import git 5 | import pathspec 6 | 7 | from aider import prompts, utils 8 | from aider.sendchat import simple_send_with_retries 9 | 10 | from .dump import dump # noqa: F401 11 | 12 | 13 | class GitRepo: 14 | repo = None 15 | aider_ignore_file = None 16 | aider_ignore_spec = None 17 | aider_ignore_ts = 0 18 | 19 | def __init__(self, io, fnames, git_dname, aider_ignore_file=None, models=None): 20 | self.io = io 21 | self.models = models 22 | 23 | if git_dname: 24 | check_fnames = [git_dname] 25 | elif fnames: 26 | check_fnames = fnames 27 | else: 28 | check_fnames = ["."] 29 | 30 | repo_paths = [] 31 | for fname in check_fnames: 32 | fname = Path(fname) 33 | fname = fname.resolve() 34 | 35 | if not fname.exists() and fname.parent.exists(): 36 | fname = fname.parent 37 | 38 | try: 39 | repo_path = git.Repo(fname, search_parent_directories=True).working_dir 40 | repo_path = utils.safe_abs_path(repo_path) 41 | repo_paths.append(repo_path) 42 | except git.exc.InvalidGitRepositoryError: 43 | pass 44 | except git.exc.NoSuchPathError: 45 | pass 46 | 47 | num_repos = len(set(repo_paths)) 48 | 49 | if num_repos == 0: 50 | raise FileNotFoundError 51 | if num_repos > 1: 52 | self.io.tool_error("Files are in different git repos.") 53 | raise FileNotFoundError 54 | 55 | # https://github.com/gitpython-developers/GitPython/issues/427 56 | self.repo = git.Repo(repo_paths.pop(), odbt=git.GitDB) 57 | self.root = utils.safe_abs_path(self.repo.working_tree_dir) 58 | 59 | if aider_ignore_file: 60 | self.aider_ignore_file = Path(aider_ignore_file) 61 | 62 | def commit(self, fnames=None, context=None, prefix=None, message=None): 63 | if not fnames and not self.repo.is_dirty(): 64 | return 65 | 66 | diffs = self.get_diffs(fnames) 67 | if not diffs: 68 | return 69 | 70 | if message: 71 | commit_message = message 72 | else: 73 | commit_message = self.get_commit_message(diffs, context) 74 | 75 | if not commit_message: 76 | 
commit_message = "(no commit message provided)" 77 | 78 | if prefix: 79 | commit_message = prefix + commit_message 80 | 81 | full_commit_message = commit_message 82 | if context: 83 | full_commit_message += "\n\n# Aider chat conversation:\n\n" + context 84 | 85 | cmd = ["-m", full_commit_message, "--no-verify"] 86 | if fnames: 87 | fnames = [str(self.abs_root_path(fn)) for fn in fnames] 88 | for fname in fnames: 89 | self.repo.git.add(fname) 90 | cmd += ["--"] + fnames 91 | else: 92 | cmd += ["-a"] 93 | 94 | self.repo.git.commit(cmd) 95 | commit_hash = self.repo.head.commit.hexsha[:7] 96 | self.io.tool_output(f"Commit {commit_hash} {commit_message}") 97 | 98 | return commit_hash, commit_message 99 | 100 | def get_rel_repo_dir(self): 101 | try: 102 | return os.path.relpath(self.repo.git_dir, os.getcwd()) 103 | except ValueError: 104 | return self.repo.git_dir 105 | 106 | def get_commit_message(self, diffs, context): 107 | if len(diffs) >= 4 * 1024 * 4: 108 | self.io.tool_error("Diff is too large to generate a commit message.") 109 | return 110 | 111 | diffs = "# Diffs:\n" + diffs 112 | 113 | content = "" 114 | if context: 115 | content += context + "\n" 116 | content += diffs 117 | 118 | messages = [ 119 | dict(role="system", content=prompts.commit_system), 120 | dict(role="user", content=content), 121 | ] 122 | 123 | for model in self.models: 124 | commit_message = simple_send_with_retries(model.name, messages) 125 | if commit_message: 126 | break 127 | 128 | if not commit_message: 129 | self.io.tool_error("Failed to generate commit message!") 130 | return 131 | 132 | commit_message = commit_message.strip() 133 | if commit_message and commit_message[0] == '"' and commit_message[-1] == '"': 134 | commit_message = commit_message[1:-1].strip() 135 | 136 | return commit_message 137 | 138 | def get_diffs(self, fnames=None): 139 | # We always want diffs of index and working dir 140 | 141 | current_branch_has_commits = False 142 | try: 143 | active_branch = self.repo.active_branch 144 | try: 145 | commits = self.repo.iter_commits(active_branch) 146 | current_branch_has_commits = any(commits) 147 | except git.exc.GitCommandError: 148 | pass 149 | except TypeError: 150 | pass 151 | 152 | if not fnames: 153 | fnames = [] 154 | 155 | diffs = "" 156 | for fname in fnames: 157 | if not self.path_in_repo(fname): 158 | diffs += f"Added {fname}\n" 159 | 160 | if current_branch_has_commits: 161 | args = ["HEAD", "--"] + list(fnames) 162 | diffs += self.repo.git.diff(*args) 163 | return diffs 164 | 165 | wd_args = ["--"] + list(fnames) 166 | index_args = ["--cached"] + wd_args 167 | 168 | diffs += self.repo.git.diff(*index_args) 169 | diffs += self.repo.git.diff(*wd_args) 170 | 171 | return diffs 172 | 173 | def diff_commits(self, pretty, from_commit, to_commit): 174 | args = [] 175 | if pretty: 176 | args += ["--color"] 177 | 178 | args += [from_commit, to_commit] 179 | diffs = self.repo.git.diff(*args) 180 | 181 | return diffs 182 | 183 | def get_tracked_files(self): 184 | if not self.repo: 185 | return [] 186 | 187 | try: 188 | commit = self.repo.head.commit 189 | except ValueError: 190 | commit = None 191 | 192 | files = [] 193 | if commit: 194 | for blob in commit.tree.traverse(): 195 | if blob.type == "blob": # blob is a file 196 | files.append(blob.path) 197 | 198 | # Add staged files 199 | index = self.repo.index 200 | staged_files = [path for path, _ in index.entries.keys()] 201 | 202 | files.extend(staged_files) 203 | 204 | # convert to appropriate os.sep, since git always normalizes to / 205 | 
res = set(self.normalize_path(path) for path in files) 206 | 207 | res = [fname for fname in res if not self.ignored_file(fname)] 208 | 209 | return res 210 | 211 | def normalize_path(self, path): 212 | return str(Path(PurePosixPath((Path(self.root) / path).relative_to(self.root)))) 213 | 214 | def ignored_file(self, fname): 215 | if not self.aider_ignore_file or not self.aider_ignore_file.is_file(): 216 | return 217 | 218 | try: 219 | fname = self.normalize_path(fname) 220 | except ValueError: 221 | return 222 | 223 | mtime = self.aider_ignore_file.stat().st_mtime 224 | if mtime != self.aider_ignore_ts: 225 | self.aider_ignore_ts = mtime 226 | lines = self.aider_ignore_file.read_text().splitlines() 227 | self.aider_ignore_spec = pathspec.PathSpec.from_lines( 228 | pathspec.patterns.GitWildMatchPattern, 229 | lines, 230 | ) 231 | 232 | return self.aider_ignore_spec.match_file(fname) 233 | 234 | def path_in_repo(self, path): 235 | if not self.repo: 236 | return 237 | 238 | tracked_files = set(self.get_tracked_files()) 239 | return self.normalize_path(path) in tracked_files 240 | 241 | def abs_root_path(self, path): 242 | res = Path(self.root) / path 243 | return utils.safe_abs_path(res) 244 | 245 | def get_dirty_files(self): 246 | """ 247 | Returns a list of all files which are dirty (not committed), either staged or in the working 248 | directory. 249 | """ 250 | dirty_files = set() 251 | 252 | # Get staged files 253 | staged_files = self.repo.git.diff("--name-only", "--cached").splitlines() 254 | dirty_files.update(staged_files) 255 | 256 | # Get unstaged files 257 | unstaged_files = self.repo.git.diff("--name-only").splitlines() 258 | dirty_files.update(unstaged_files) 259 | 260 | return list(dirty_files) 261 | 262 | def is_dirty(self, path=None): 263 | if path and not self.path_in_repo(path): 264 | return True 265 | 266 | return self.repo.is_dirty(path=path) 267 | -------------------------------------------------------------------------------- /devops/aider/scrape.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import re 4 | import sys 5 | 6 | import httpx 7 | import pypandoc 8 | from bs4 import BeautifulSoup 9 | from playwright.sync_api import sync_playwright 10 | 11 | from aider import __version__ 12 | 13 | aider_user_agent = f"Aider/{__version__} +https://aider.chat" 14 | 15 | # Playwright is nice because it has a simple way to install dependencies on most 16 | # platforms. 17 | PLAYWRIGHT_INFO = """ 18 | For better web scraping, install Playwright chromium with this command in your terminal: 19 | 20 | playwright install --with-deps chromium 21 | 22 | See https://aider.chat/docs/install.html#enable-playwright-optional for more info. 23 | """ 24 | 25 | 26 | class Scraper: 27 | pandoc_available = None 28 | playwright_available = None 29 | playwright_instructions_shown = False 30 | 31 | # Public API... 32 | def __init__(self, print_error=None): 33 | """ 34 | `print_error` - a function to call to print error/debug info. 35 | """ 36 | if print_error: 37 | self.print_error = print_error 38 | else: 39 | self.print_error = print 40 | 41 | def scrape(self, url): 42 | """ 43 | Scrape a url and turn it into readable markdown. 44 | 45 | `url` - the URLto scrape. 
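        Illustrative usage (a minimal sketch, not part of the original
        docstring; the URL is a placeholder):

            scraper = Scraper()
            markdown = scraper.scrape("https://example.com")
            if markdown:
                print(markdown)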
46 | """ 47 | self.try_playwright() 48 | 49 | if self.playwright_available: 50 | content = self.scrape_with_playwright(url) 51 | else: 52 | content = self.scrape_with_httpx(url) 53 | 54 | if not content: 55 | return 56 | 57 | self.try_pandoc() 58 | 59 | content = self.html_to_markdown(content) 60 | # content = html_to_text(content) 61 | 62 | return content 63 | 64 | # Internals... 65 | def scrape_with_playwright(self, url): 66 | with sync_playwright() as p: 67 | try: 68 | browser = p.chromium.launch() 69 | except Exception as e: 70 | self.playwright_available = False 71 | self.print_error(e) 72 | return 73 | 74 | page = browser.new_page() 75 | 76 | user_agent = page.evaluate("navigator.userAgent") 77 | user_agent = user_agent.replace("Headless", "") 78 | user_agent = user_agent.replace("headless", "") 79 | user_agent += " " + aider_user_agent 80 | 81 | page = browser.new_page(user_agent=user_agent) 82 | page.goto(url) 83 | content = page.content() 84 | browser.close() 85 | 86 | return content 87 | 88 | def try_playwright(self): 89 | if self.playwright_available is not None: 90 | return 91 | 92 | with sync_playwright() as p: 93 | try: 94 | p.chromium.launch() 95 | self.playwright_available = True 96 | except Exception: 97 | self.playwright_available = False 98 | 99 | def get_playwright_instructions(self): 100 | if self.playwright_available in (True, None): 101 | return 102 | if self.playwright_instructions_shown: 103 | return 104 | 105 | self.playwright_instructions_shown = True 106 | return PLAYWRIGHT_INFO 107 | 108 | def scrape_with_httpx(self, url): 109 | headers = {"User-Agent": f"Mozilla./5.0 ({aider_user_agent})"} 110 | try: 111 | with httpx.Client(headers=headers) as client: 112 | response = client.get(url) 113 | response.raise_for_status() 114 | return response.text 115 | except httpx.HTTPError as http_err: 116 | self.print_error(f"HTTP error occurred: {http_err}") 117 | except Exception as err: 118 | self.print_error(f"An error occurred: {err}") 119 | return None 120 | 121 | def try_pandoc(self): 122 | if self.pandoc_available: 123 | return 124 | 125 | try: 126 | pypandoc.get_pandoc_version() 127 | self.pandoc_available = True 128 | return 129 | except OSError: 130 | pass 131 | 132 | pypandoc.download_pandoc(delete_installer=True) 133 | self.pandoc_available = True 134 | 135 | def html_to_markdown(self, page_source): 136 | soup = BeautifulSoup(page_source, "html.parser") 137 | soup = slimdown_html(soup) 138 | page_source = str(soup) 139 | 140 | md = pypandoc.convert_text(page_source, "markdown", format="html") 141 | 142 | md = re.sub(r"", " ", md) 143 | md = re.sub(r"
", " ", md) 144 | 145 | md = re.sub(r"\n\s*\n", "\n\n", md) 146 | 147 | return md 148 | 149 | 150 | def slimdown_html(soup): 151 | for svg in soup.find_all("svg"): 152 | svg.decompose() 153 | 154 | if soup.img: 155 | soup.img.decompose() 156 | 157 | for tag in soup.find_all(href=lambda x: x and x.startswith("data:")): 158 | tag.decompose() 159 | 160 | for tag in soup.find_all(src=lambda x: x and x.startswith("data:")): 161 | tag.decompose() 162 | 163 | for tag in soup.find_all(True): 164 | for attr in list(tag.attrs): 165 | if attr != "href": 166 | tag.attrs.pop(attr, None) 167 | 168 | return soup 169 | 170 | 171 | # Adapted from AutoGPT, MIT License 172 | # 173 | # https://github.com/Significant-Gravitas/AutoGPT/blob/fe0923ba6c9abb42ac4df79da580e8a4391e0418/autogpts/autogpt/autogpt/commands/web_selenium.py#L173 174 | 175 | 176 | def html_to_text(page_source: str) -> str: 177 | soup = BeautifulSoup(page_source, "html.parser") 178 | 179 | for script in soup(["script", "style"]): 180 | script.extract() 181 | 182 | text = soup.get_text() 183 | lines = (line.strip() for line in text.splitlines()) 184 | chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) 185 | text = "\n".join(chunk for chunk in chunks if chunk) 186 | return text 187 | 188 | 189 | def main(url): 190 | scraper = Scraper() 191 | content = scraper.scrape(url) 192 | print(content) 193 | 194 | 195 | if __name__ == "__main__": 196 | if len(sys.argv) < 2: 197 | print("Usage: python playw.py ") 198 | sys.exit(1) 199 | main(sys.argv[1]) 200 | -------------------------------------------------------------------------------- /devops/aider/sendchat.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import json 3 | 4 | import backoff 5 | import httpx 6 | import openai 7 | 8 | from aider.dump import dump # noqa: F401 9 | from aider.litellm import litellm 10 | 11 | # from diskcache import Cache 12 | 13 | 14 | CACHE_PATH = "~/.aider.send.cache.v1" 15 | CACHE = None 16 | # CACHE = Cache(CACHE_PATH) 17 | 18 | 19 | def should_giveup(e): 20 | if not hasattr(e, "status_code"): 21 | return False 22 | 23 | if type(e) in ( 24 | httpx.ConnectError, 25 | httpx.RemoteProtocolError, 26 | httpx.ReadTimeout, 27 | ): 28 | return False 29 | 30 | return not litellm._should_retry(e.status_code) 31 | 32 | 33 | @backoff.on_exception( 34 | backoff.expo, 35 | ( 36 | httpx.ConnectError, 37 | httpx.RemoteProtocolError, 38 | httpx.ReadTimeout, 39 | litellm.exceptions.APIConnectionError, 40 | litellm.exceptions.APIError, 41 | litellm.exceptions.RateLimitError, 42 | litellm.exceptions.ServiceUnavailableError, 43 | litellm.exceptions.Timeout, 44 | ), 45 | giveup=should_giveup, 46 | max_time=60, 47 | on_backoff=lambda details: print( 48 | f"{details.get('exception','Exception')}\nRetry in {details['wait']:.1f} seconds." 
49 | ), 50 | ) 51 | def send_with_retries(model_name, messages, functions, stream, temperature=0): 52 | kwargs = dict( 53 | model=model_name, 54 | messages=messages, 55 | temperature=temperature, 56 | stream=stream, 57 | ) 58 | if functions is not None: 59 | kwargs["functions"] = functions 60 | 61 | key = json.dumps(kwargs, sort_keys=True).encode() 62 | 63 | # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes 64 | hash_object = hashlib.sha1(key) 65 | 66 | if not stream and CACHE is not None and key in CACHE: 67 | return hash_object, CACHE[key] 68 | 69 | # del kwargs['stream'] 70 | 71 | res = litellm.completion(**kwargs) 72 | 73 | if not stream and CACHE is not None: 74 | CACHE[key] = res 75 | 76 | return hash_object, res 77 | 78 | 79 | def simple_send_with_retries(model_name, messages): 80 | try: 81 | _hash, response = send_with_retries( 82 | model_name=model_name, 83 | messages=messages, 84 | functions=None, 85 | stream=False, 86 | ) 87 | return response.choices[0].message.content 88 | except (AttributeError, openai.BadRequestError): 89 | return 90 | -------------------------------------------------------------------------------- /devops/aider/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | from pathlib import Path 4 | 5 | import git 6 | 7 | from aider.dump import dump # noqa: F401 8 | 9 | IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp"} 10 | 11 | 12 | class IgnorantTemporaryDirectory: 13 | def __init__(self): 14 | self.temp_dir = tempfile.TemporaryDirectory() 15 | 16 | def __enter__(self): 17 | return self.temp_dir.__enter__() 18 | 19 | def __exit__(self, exc_type, exc_val, exc_tb): 20 | try: 21 | self.temp_dir.__exit__(exc_type, exc_val, exc_tb) 22 | except (OSError, PermissionError): 23 | pass # Ignore errors (Windows) 24 | 25 | 26 | class ChdirTemporaryDirectory(IgnorantTemporaryDirectory): 27 | def __init__(self): 28 | try: 29 | self.cwd = os.getcwd() 30 | except FileNotFoundError: 31 | self.cwd = None 32 | 33 | super().__init__() 34 | 35 | def __enter__(self): 36 | res = super().__enter__() 37 | os.chdir(self.temp_dir.name) 38 | return res 39 | 40 | def __exit__(self, exc_type, exc_val, exc_tb): 41 | if self.cwd: 42 | try: 43 | os.chdir(self.cwd) 44 | except FileNotFoundError: 45 | pass 46 | super().__exit__(exc_type, exc_val, exc_tb) 47 | 48 | 49 | class GitTemporaryDirectory(ChdirTemporaryDirectory): 50 | def __enter__(self): 51 | dname = super().__enter__() 52 | self.repo = make_repo(dname) 53 | return dname 54 | 55 | def __exit__(self, exc_type, exc_val, exc_tb): 56 | del self.repo 57 | super().__exit__(exc_type, exc_val, exc_tb) 58 | 59 | 60 | def make_repo(path=None): 61 | if not path: 62 | path = "." 63 | repo = git.Repo.init(path) 64 | repo.config_writer().set_value("user", "name", "Test User").release() 65 | repo.config_writer().set_value("user", "email", "testuser@example.com").release() 66 | 67 | return repo 68 | 69 | 70 | def is_image_file(file_name): 71 | """ 72 | Check if the given file name has an image file extension. 73 | 74 | :param file_name: The name of the file to check. 75 | :return: True if the file is an image, False otherwise. 
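    Illustrative examples (added for clarity; the check is a plain,
    case-sensitive suffix match against IMAGE_EXTENSIONS):

        is_image_file("diagram.png")      # True
        is_image_file(Path("notes.md"))   # False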
76 | """ 77 | file_name = str(file_name) # Convert file_name to string 78 | return any(file_name.endswith(ext) for ext in IMAGE_EXTENSIONS) 79 | 80 | 81 | def safe_abs_path(res): 82 | "Gives an abs path, which safely returns a full (not 8.3) windows path" 83 | res = Path(res).resolve() 84 | return str(res) 85 | 86 | 87 | def show_messages(messages, title=None, functions=None): 88 | if title: 89 | print(title.upper(), "*" * 50) 90 | 91 | for msg in messages: 92 | print() 93 | role = msg["role"].upper() 94 | content = msg.get("content") 95 | if isinstance(content, list): # Handle list content (e.g., image messages) 96 | for item in content: 97 | if isinstance(item, dict) and "image_url" in item: 98 | print(role, "Image URL:", item["image_url"]["url"]) 99 | elif isinstance(content, str): # Handle string content 100 | for line in content.splitlines(): 101 | print(role, line) 102 | content = msg.get("function_call") 103 | if content: 104 | print(role, content) 105 | 106 | if functions: 107 | dump(functions) 108 | 109 | 110 | def split_chat_history_markdown(text, include_tool=False): 111 | messages = [] 112 | user = [] 113 | assistant = [] 114 | tool = [] 115 | lines = text.splitlines(keepends=True) 116 | 117 | def append_msg(role, lines): 118 | lines = "".join(lines) 119 | if lines.strip(): 120 | messages.append(dict(role=role, content=lines)) 121 | 122 | for line in lines: 123 | if line.startswith("# "): 124 | continue 125 | if line.startswith("> "): 126 | append_msg("assistant", assistant) 127 | assistant = [] 128 | append_msg("user", user) 129 | user = [] 130 | tool.append(line[2:]) 131 | continue 132 | # if line.startswith("#### /"): 133 | # continue 134 | 135 | if line.startswith("#### "): 136 | append_msg("assistant", assistant) 137 | assistant = [] 138 | append_msg("tool", tool) 139 | tool = [] 140 | 141 | content = line[5:] 142 | user.append(content) 143 | continue 144 | 145 | append_msg("user", user) 146 | user = [] 147 | append_msg("tool", tool) 148 | tool = [] 149 | 150 | assistant.append(line) 151 | 152 | append_msg("assistant", assistant) 153 | append_msg("user", user) 154 | 155 | if not include_tool: 156 | messages = [m for m in messages if m["role"] != "tool"] 157 | 158 | return messages 159 | -------------------------------------------------------------------------------- /devops/aider/versioncheck.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import packaging.version 4 | import requests 5 | 6 | import aider 7 | 8 | 9 | def check_version(print_cmd): 10 | try: 11 | response = requests.get("https://pypi.org/pypi/aider-chat/json") 12 | data = response.json() 13 | latest_version = data["info"]["version"] 14 | current_version = aider.__version__ 15 | 16 | is_update_available = packaging.version.parse(latest_version) > packaging.version.parse( 17 | current_version 18 | ) 19 | 20 | if is_update_available: 21 | print_cmd( 22 | f"Newer version v{latest_version} is available. 
To upgrade, run:" # noqa: E231 23 | ) 24 | py = sys.executable 25 | if "pipx" in py: 26 | print_cmd("pipx upgrade aider-chat") 27 | else: 28 | print_cmd(f"{py} -m pip install --upgrade aider-chat") 29 | 30 | return is_update_available 31 | except Exception as err: 32 | print_cmd(f"Error checking pypi for new version: {err}") 33 | return False 34 | 35 | 36 | if __name__ == "__main__": 37 | check_version(print) 38 | -------------------------------------------------------------------------------- /devops/aider/voice.py: -------------------------------------------------------------------------------- 1 | import os 2 | import queue 3 | import tempfile 4 | import time 5 | 6 | import numpy as np 7 | 8 | from aider.litellm import litellm 9 | 10 | try: 11 | import soundfile as sf 12 | except (OSError, ModuleNotFoundError): 13 | sf = None 14 | 15 | from prompt_toolkit.shortcuts import prompt 16 | 17 | from .dump import dump # noqa: F401 18 | 19 | 20 | class SoundDeviceError(Exception): 21 | pass 22 | 23 | 24 | class Voice: 25 | max_rms = 0 26 | min_rms = 1e5 27 | pct = 0 28 | 29 | threshold = 0.15 30 | 31 | def __init__(self): 32 | if sf is None: 33 | raise SoundDeviceError 34 | try: 35 | print("Initializing sound device...") 36 | import sounddevice as sd 37 | 38 | self.sd = sd 39 | except (OSError, ModuleNotFoundError): 40 | raise SoundDeviceError 41 | 42 | def callback(self, indata, frames, time, status): 43 | """This is called (from a separate thread) for each audio block.""" 44 | rms = np.sqrt(np.mean(indata**2)) 45 | self.max_rms = max(self.max_rms, rms) 46 | self.min_rms = min(self.min_rms, rms) 47 | 48 | rng = self.max_rms - self.min_rms 49 | if rng > 0.001: 50 | self.pct = (rms - self.min_rms) / rng 51 | else: 52 | self.pct = 0.5 53 | 54 | self.q.put(indata.copy()) 55 | 56 | def get_prompt(self): 57 | num = 10 58 | if np.isnan(self.pct) or self.pct < self.threshold: 59 | cnt = 0 60 | else: 61 | cnt = int(self.pct * 10) 62 | 63 | bar = "░" * cnt + "█" * (num - cnt) 64 | bar = bar[:num] 65 | 66 | dur = time.time() - self.start_time 67 | return f"Recording, press ENTER when done... 
{dur:.1f}sec {bar}" 68 | 69 | def record_and_transcribe(self, history=None, language=None): 70 | try: 71 | return self.raw_record_and_transcribe(history, language) 72 | except KeyboardInterrupt: 73 | return 74 | 75 | def raw_record_and_transcribe(self, history, language): 76 | self.q = queue.Queue() 77 | 78 | filename = tempfile.mktemp(suffix=".wav") 79 | 80 | try: 81 | sample_rate = int(self.sd.query_devices(None, 'input')['default_samplerate']) 82 | except (TypeError, ValueError): 83 | sample_rate = 16000 # fallback to 16kHz if unable to query device 84 | 85 | self.start_time = time.time() 86 | 87 | with self.sd.InputStream(samplerate=sample_rate, channels=1, callback=self.callback): 88 | prompt(self.get_prompt, refresh_interval=0.1) 89 | 90 | with sf.SoundFile(filename, mode="x", samplerate=sample_rate, channels=1) as file: 91 | while not self.q.empty(): 92 | file.write(self.q.get()) 93 | 94 | with open(filename, "rb") as fh: 95 | transcript = litellm.transcription( 96 | model="whisper-1", file=fh, prompt=history, language=language 97 | ) 98 | 99 | text = transcript.text 100 | return text 101 | 102 | 103 | if __name__ == "__main__": 104 | api_key = os.getenv("OPENAI_API_KEY") 105 | if not api_key: 106 | raise ValueError("Please set the OPENAI_API_KEY environment variable.") 107 | print(Voice().record_and_transcribe()) 108 | -------------------------------------------------------------------------------- /devops/aider_chat/benchmark/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/devops/aider_chat/benchmark/__init__.py -------------------------------------------------------------------------------- /devops/aider_chat/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/devops/aider_chat/tests/__init__.py -------------------------------------------------------------------------------- /devops/benchmark/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/devops/benchmark/__init__.py -------------------------------------------------------------------------------- /devops/benchmark/over_time.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import yaml 3 | from imgcat import imgcat 4 | from matplotlib import rc 5 | 6 | 7 | def plot_over_time(yaml_file): 8 | with open(yaml_file, "r") as file: 9 | data = yaml.safe_load(file) 10 | 11 | dates = [] 12 | pass_rates = [] 13 | models = [] 14 | 15 | for entry in data: 16 | if "released" in entry and "pass_rate_2" in entry: 17 | dates.append(entry["released"]) 18 | pass_rates.append(entry["pass_rate_2"]) 19 | models.append(entry["model"].split("(")[0].strip()) 20 | 21 | plt.rcParams["hatch.linewidth"] = 0.5 22 | plt.rcParams["hatch.color"] = "#444444" 23 | 24 | rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"], "size": 10}) 25 | plt.rcParams["text.color"] = "#444444" 26 | 27 | fig, ax = plt.subplots(figsize=(10, 5)) 28 | ax.grid(axis="y", zorder=0, lw=0.2) 29 | for spine in ax.spines.values(): 30 | spine.set_edgecolor("#DDDDDD") 31 | spine.set_linewidth(0.5) 32 | 33 | colors = [ 34 | "red" if "gpt-4" in model else "green" if "gpt-3.5" in model 
else "blue" for model in models 35 | ] 36 | ax.scatter(dates, pass_rates, c=colors, alpha=0.5, s=120) 37 | 38 | for i, model in enumerate(models): 39 | ax.annotate( 40 | model, 41 | (dates[i], pass_rates[i]), 42 | fontsize=12, 43 | alpha=0.75, 44 | xytext=(5, 5), 45 | textcoords="offset points", 46 | ) 47 | 48 | ax.set_xlabel("Model release date", fontsize=18, color="#555") 49 | ax.set_ylabel("Aider code editing benchmark,\npercent completed correctly", fontsize=18, color="#555") 50 | ax.set_title("LLM code editing skill by model release date", fontsize=20) 51 | ax.set_ylim(0, 30) 52 | plt.xticks(fontsize=14) 53 | plt.tight_layout(pad=3.0) 54 | plt.savefig("tmp_over_time.png") 55 | plt.savefig("tmp_over_time.svg") 56 | imgcat(fig) 57 | 58 | 59 | # Example usage 60 | plot_over_time("_data/edit_leaderboard.yml") 61 | -------------------------------------------------------------------------------- /devops/benchmark/prompts.py: -------------------------------------------------------------------------------- 1 | instructions_addendum = """ 2 | #### 3 | 4 | Use the above instructions to modify the supplied files: {file_list} 5 | Don't change the names of existing functions or classes, as they may be referenced from other code like unit tests, etc. 6 | Only use standard python libraries, don't suggest installing any packages. 7 | """ # noqa: E501 8 | 9 | 10 | test_failures = """ 11 | #### 12 | 13 | See the testing errors above. 14 | The tests are correct. 15 | Fix the code in {file_list} to resolve the errors. 16 | """ 17 | -------------------------------------------------------------------------------- /devops/benchmark/refactor_tools.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import ast 4 | import os 5 | import shutil 6 | import sys 7 | from pathlib import Path 8 | 9 | from aider.dump import dump # noqa: F401 10 | 11 | 12 | class ParentNodeTransformer(ast.NodeTransformer): 13 | """ 14 | This transformer sets the 'parent' attribute on each node. 
15 | """ 16 | 17 | def generic_visit(self, node): 18 | for child in ast.iter_child_nodes(node): 19 | child.parent = node 20 | return super(ParentNodeTransformer, self).generic_visit(node) 21 | 22 | 23 | def verify_full_func_at_top_level(tree, func, func_children): 24 | func_nodes = [ 25 | item for item in ast.walk(tree) if isinstance(item, ast.FunctionDef) and item.name == func 26 | ] 27 | assert func_nodes, f"Function {func} not found" 28 | 29 | for func_node in func_nodes: 30 | if not isinstance(func_node.parent, ast.Module): 31 | continue 32 | 33 | num_children = sum(1 for _ in ast.walk(func_node)) 34 | pct_diff_children = abs(num_children - func_children) * 100 / func_children 35 | assert ( 36 | pct_diff_children < 10 37 | ), f"Old method had {func_children} children, new method has {num_children}" 38 | return 39 | 40 | assert False, f"{func} is not a top level function" 41 | 42 | 43 | def verify_old_class_children(tree, old_class, old_class_children): 44 | node = next( 45 | ( 46 | item 47 | for item in ast.walk(tree) 48 | if isinstance(item, ast.ClassDef) and item.name == old_class 49 | ), 50 | None, 51 | ) 52 | assert node is not None, f"Old class {old_class} not found" 53 | 54 | num_children = sum(1 for _ in ast.walk(node)) 55 | 56 | pct_diff_children = abs(num_children - old_class_children) * 100 / old_class_children 57 | assert ( 58 | pct_diff_children < 10 59 | ), f"Old class had {old_class_children} children, new class has {num_children}" 60 | 61 | 62 | def verify_refactor(fname, func, func_children, old_class, old_class_children): 63 | with open(fname, "r") as file: 64 | file_contents = file.read() 65 | tree = ast.parse(file_contents) 66 | ParentNodeTransformer().visit(tree) # Set parent attribute for all nodes 67 | 68 | verify_full_func_at_top_level(tree, func, func_children) 69 | 70 | verify_old_class_children(tree, old_class, old_class_children - func_children) 71 | 72 | 73 | ############################ 74 | 75 | 76 | class SelfUsageChecker(ast.NodeVisitor): 77 | def __init__(self): 78 | self.non_self_methods = [] 79 | self.parent_class_name = None 80 | self.num_class_children = 0 81 | 82 | def visit_FunctionDef(self, node): 83 | # Check if the first argument is 'self' and if it's not used 84 | if node.args.args and node.args.args[0].arg == "self": 85 | self_used = any( 86 | isinstance(expr, ast.Name) and expr.id == "self" 87 | for stmt in node.body 88 | for expr in ast.walk(stmt) 89 | ) 90 | super_used = any( 91 | isinstance(expr, ast.Name) and expr.id == "super" 92 | for stmt in node.body 93 | for expr in ast.walk(stmt) 94 | ) 95 | if not self_used and not super_used: 96 | # Calculate the number of child nodes in the function 97 | num_child_nodes = sum(1 for _ in ast.walk(node)) 98 | res = ( 99 | self.parent_class_name, 100 | node.name, 101 | self.num_class_children, 102 | num_child_nodes, 103 | ) 104 | self.non_self_methods.append(res) 105 | self.generic_visit(node) 106 | 107 | def visit_ClassDef(self, node): 108 | self.parent_class_name = node.name 109 | self.num_class_children = sum(1 for _ in ast.walk(node)) 110 | self.generic_visit(node) 111 | 112 | 113 | def find_python_files(path): 114 | if os.path.isfile(path) and path.endswith(".py"): 115 | return [path] 116 | elif os.path.isdir(path): 117 | py_files = [] 118 | for root, dirs, files in os.walk(path): 119 | for file in files: 120 | if file.endswith(".py"): 121 | full_path = os.path.join(root, file) 122 | py_files.append(full_path) 123 | return py_files 124 | else: 125 | return [] 126 | 127 | 128 | def 
find_non_self_methods(path): 129 | python_files = find_python_files(path) 130 | non_self_methods = [] 131 | for filename in python_files: 132 | with open(filename, "r") as file: 133 | try: 134 | node = ast.parse(file.read(), filename=filename) 135 | except: 136 | pass 137 | checker = SelfUsageChecker() 138 | checker.visit(node) 139 | for method in checker.non_self_methods: 140 | non_self_methods.append([filename] + list(method)) 141 | 142 | return non_self_methods 143 | 144 | 145 | def process(entry): 146 | fname, class_name, method_name, class_children, method_children = entry 147 | if method_children > class_children / 2: 148 | return 149 | if method_children < 250: 150 | return 151 | 152 | fname = Path(fname) 153 | if "test" in fname.stem: 154 | return 155 | 156 | print(f"{fname} {class_name} {method_name} {class_children} {method_children}") 157 | 158 | dname = Path("tmp.benchmarks/refactor-benchmark-spyder") 159 | dname.mkdir(exist_ok=True) 160 | 161 | dname = dname / f"{fname.stem}_{class_name}_{method_name}" 162 | dname.mkdir(exist_ok=True) 163 | 164 | shutil.copy(fname, dname / fname.name) 165 | 166 | docs_dname = dname / ".docs" 167 | docs_dname.mkdir(exist_ok=True) 168 | 169 | ins_fname = docs_dname / "instructions.md" 170 | ins_fname.write_text(f"""# Refactor {class_name}.{method_name} 171 | 172 | Refactor the `{method_name}` method in the `{class_name}` class to be a stand alone, top level function. 173 | Name the new function `{method_name}`, exactly the same name as the existing method. 174 | Update any existing `self.{method_name}` calls to work with the new `{method_name}` function. 175 | """) # noqa: E501 176 | 177 | test_fname = dname / f"{fname.stem}_test.py" 178 | test_fname.write_text(f""" 179 | import unittest 180 | from benchmark.refactor_tools import verify_refactor 181 | from pathlib import Path 182 | 183 | class TheTest(unittest.TestCase): 184 | def test_{method_name}(self): 185 | fname = Path(__file__).parent / "{fname.name}" 186 | method = "{method_name}" 187 | method_children = {method_children} 188 | 189 | class_name = "{class_name}" 190 | class_children = {class_children} 191 | 192 | verify_refactor(fname, method, method_children, class_name, class_children) 193 | 194 | if __name__ == "__main__": 195 | unittest.main() 196 | """) 197 | 198 | 199 | def main(paths): 200 | for path in paths: 201 | methods = find_non_self_methods(path) 202 | # methods = sorted(methods, key=lambda x: x[4]) 203 | 204 | for method in methods: 205 | process(method) 206 | 207 | 208 | if __name__ == "__main__": 209 | main(sys.argv[1:]) 210 | -------------------------------------------------------------------------------- /devops/benchmark/rungrid.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import subprocess 4 | import sys 5 | 6 | from aider.dump import dump # noqa: F401 7 | 8 | 9 | def main(): 10 | models = [ 11 | "gpt-3.5-turbo-0301", 12 | "gpt-3.5-turbo-0613", 13 | # "gpt-3.5-turbo-16k-0613", 14 | "gpt-3.5-turbo-1106", 15 | # "gpt-4-0314", 16 | # "gpt-4-0613", 17 | ] 18 | edit_formats = [ 19 | "diff", 20 | # "diff-func", 21 | # "whole", 22 | # "whole-func", 23 | ] 24 | 25 | # for repeat in range(1, 2, 1): 26 | for model in models: 27 | for edit_format in edit_formats: 28 | # dump(model, edit_format) 29 | 30 | if "-func" in edit_format and "-03" in model: 31 | continue 32 | 33 | # if (model, edit_format) == ("gpt-3.5-turbo-16k-0613", "whole-func"): 34 | # # sublist reliably hangs the API? 
35 | # continue 36 | 37 | dirname = f"rungrid-nov-{model}-{edit_format}" 38 | # dirname = f"rungrid-{model}-{edit_format}-repeat-{repeat}" 39 | run(dirname, model, edit_format) 40 | 41 | 42 | def run(dirname, model, edit_format): 43 | cmd = [ 44 | "./benchmark/benchmark.py", 45 | dirname, 46 | "--model", 47 | model, 48 | "--edit-format", 49 | edit_format, 50 | "--threads", 51 | "10", 52 | "--cont", 53 | ] 54 | print(" ".join(cmd)) 55 | 56 | subprocess.run(cmd, check=True) 57 | 58 | 59 | if __name__ == "__main__": 60 | status = main() 61 | sys.exit(status) 62 | -------------------------------------------------------------------------------- /devops/benchmark/swe_bench.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | import matplotlib.pyplot as plt 5 | from imgcat import imgcat 6 | from matplotlib import rc 7 | 8 | from aider.dump import dump # noqa: F401 9 | 10 | 11 | def plot_swe_bench(data_file, is_lite): 12 | with open(data_file, "r") as file: 13 | lines = file.readlines() 14 | 15 | models = [] 16 | pass_rates = [] 17 | instances = [] 18 | for line in lines: 19 | if line.strip(): 20 | pass_rate, model = line.split("%") 21 | model = model.strip() 22 | if "(" in model: 23 | pieces = model.split("(") 24 | model = pieces[0] 25 | ins = pieces[1].strip(")") 26 | else: 27 | ins = None 28 | instances.insert(0, ins) 29 | model = model.replace("|", "\n") 30 | models.insert(0, model.strip()) 31 | pass_rates.insert(0, float(pass_rate.strip())) 32 | 33 | dump(instances) 34 | 35 | plt.rcParams["hatch.linewidth"] = 0.5 36 | plt.rcParams["hatch.color"] = "#444444" 37 | 38 | font_color = "#555" 39 | font_params = { 40 | "family": "sans-serif", 41 | "sans-serif": ["Helvetica"], 42 | "size": 10, 43 | "weight": "bold", 44 | } 45 | rc("font", **font_params) 46 | plt.rcParams["text.color"] = font_color 47 | 48 | fig, ax = plt.subplots(figsize=(10, 5.5)) 49 | ax.grid(axis="y", zorder=0, lw=0.2) 50 | for spine in ax.spines.values(): 51 | spine.set_edgecolor("#DDDDDD") 52 | spine.set_linewidth(0.5) 53 | 54 | if is_lite: 55 | colors = ["#17965A" if "Aider" in model else "#b3d1e6" for model in models] 56 | else: 57 | colors = ["#1A75C2" if "Aider" in model else "#b3d1e6" for model in models] 58 | 59 | bars = [] 60 | for model, pass_rate, color in zip(models, pass_rates, colors): 61 | alpha = 0.9 if "Aider" in model else 0.3 62 | hatch = "" 63 | # if is_lite: 64 | # hatch = "///" if "(570)" in model else "" 65 | bar = ax.bar(model, pass_rate, color=color, alpha=alpha, zorder=3, hatch=hatch) 66 | bars.append(bar[0]) 67 | 68 | for label in ax.get_xticklabels(): 69 | if "Aider" in str(label): 70 | label.set_fontfamily("Helvetica Bold") 71 | 72 | for model, bar in zip(models, bars): 73 | yval = bar.get_height() 74 | y = yval - 1 75 | va = "top" 76 | color = "#eee" if "Aider" in model else "#555" 77 | fontfamily = "Helvetica Bold" if "Aider" in model else "Helvetica" 78 | ax.text( 79 | bar.get_x() + bar.get_width() / 2, 80 | y, 81 | f"{yval}%", 82 | ha="center", 83 | va=va, 84 | fontsize=16, 85 | color=color, 86 | fontfamily=fontfamily, 87 | ) 88 | 89 | for model, ins, bar in zip(models, instances, bars): 90 | if not ins: 91 | continue 92 | yval = bar.get_height() 93 | y = yval - 2.5 94 | va = "top" 95 | color = "#eee" if "Aider" in model else "#555" 96 | ax.text( 97 | bar.get_x() + bar.get_width() / 2, 98 | y, 99 | f"of {ins}", 100 | ha="center", 101 | va=va, 102 | fontsize=12, 103 | color=color, 104 | ) 105 | 106 | # 
ax.set_xlabel("Models", fontsize=18) 107 | ax.set_ylabel("Pass@1 (%)", fontsize=18, color=font_color) 108 | if is_lite: 109 | title = "SWE Bench Lite" 110 | else: 111 | title = "SWE Bench" 112 | ax.set_title(title, fontsize=20) 113 | # ax.set_ylim(0, 29.9) 114 | plt.xticks( 115 | fontsize=16, 116 | color=font_color, 117 | ) 118 | 119 | plt.tight_layout(pad=3.0) 120 | 121 | out_fname = Path(data_file.replace("-", "_")) 122 | plt.savefig(out_fname.with_suffix(".jpg").name) 123 | plt.savefig(out_fname.with_suffix(".svg").name) 124 | imgcat(fig) 125 | ax.xaxis.label.set_color(font_color) 126 | 127 | 128 | fname = sys.argv[1] 129 | is_lite = "lite" in fname 130 | 131 | plot_swe_bench(fname, is_lite) 132 | -------------------------------------------------------------------------------- /devops/benchmark/test_benchmark.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E501 2 | 3 | import unittest 4 | 5 | from benchmark import cleanup_test_output 6 | 7 | 8 | class TestCleanupTestOutput(unittest.TestCase): 9 | def test_cleanup_test_output(self): 10 | # Test case with timing info 11 | output = "Ran 5 tests in 0.003s\nOK" 12 | expected = "\nOK" 13 | self.assertEqual(cleanup_test_output(output), expected) 14 | 15 | # Test case without timing info 16 | output = "OK" 17 | expected = "OK" 18 | self.assertEqual(cleanup_test_output(output), expected) 19 | 20 | def test_cleanup_test_output_lines(self): 21 | # Test case with timing info 22 | output = """F 23 | ====================================================================== 24 | FAIL: test_cleanup_test_output (test_benchmark.TestCleanupTestOutput.test_cleanup_test_output) 25 | ---------------------------------------------------------------------- 26 | Traceback (most recent call last): 27 | File "/Users/gauthier/Projects/aider/benchmark/test_benchmark.py", line 14, in test_cleanup_test_output 28 | self.assertEqual(cleanup_test_output(output), expected) 29 | AssertionError: 'OK' != 'OKx' 30 | - OK 31 | + OKx 32 | ? + 33 | """ 34 | 35 | expected = """F 36 | ==== 37 | FAIL: test_cleanup_test_output (test_benchmark.TestCleanupTestOutput.test_cleanup_test_output) 38 | ---- 39 | Traceback (most recent call last): 40 | File "/Users/gauthier/Projects/aider/benchmark/test_benchmark.py", line 14, in test_cleanup_test_output 41 | self.assertEqual(cleanup_test_output(output), expected) 42 | AssertionError: 'OK' != 'OKx' 43 | - OK 44 | + OKx 45 | ? 
+ 46 | """ 47 | self.assertEqual(cleanup_test_output(output), expected) 48 | -------------------------------------------------------------------------------- /devops/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/devops/cli/__init__.py -------------------------------------------------------------------------------- /devops/hello.py: -------------------------------------------------------------------------------- 1 | from hello import hello 2 | -------------------------------------------------------------------------------- /devops/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/devops/modules/__init__.py -------------------------------------------------------------------------------- /devops/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.11 3 | # by the following command: 4 | # 5 | # pip-compile requirements.in 6 | # 7 | aiohttp==3.9.5 8 | # via litellm 9 | aiosignal==1.3.1 10 | # via aiohttp 11 | altair==5.3.0 12 | # via streamlit 13 | annotated-types==0.6.0 14 | # via pydantic 15 | anyio==4.3.0 16 | # via 17 | # httpx 18 | # openai 19 | attrs==23.2.0 20 | # via 21 | # aiohttp 22 | # jsonschema 23 | # referencing 24 | backoff==2.2.1 25 | # via -r requirements.in 26 | beautifulsoup4==4.12.3 27 | # via -r requirements.in 28 | blinker==1.8.2 29 | # via streamlit 30 | cachetools==5.3.3 31 | # via 32 | # google-auth 33 | # streamlit 34 | certifi==2024.2.2 35 | # via 36 | # httpcore 37 | # httpx 38 | # requests 39 | cffi==1.16.0 40 | # via 41 | # sounddevice 42 | # soundfile 43 | charset-normalizer==3.3.2 44 | # via requests 45 | click==8.1.7 46 | # via 47 | # litellm 48 | # streamlit 49 | configargparse==1.7 50 | # via -r requirements.in 51 | diff-match-patch==20230430 52 | # via -r requirements.in 53 | diskcache==5.6.3 54 | # via -r requirements.in 55 | distro==1.9.0 56 | # via openai 57 | filelock==3.14.0 58 | # via huggingface-hub 59 | flake8==7.0.0 60 | # via -r requirements.in 61 | frozenlist==1.4.1 62 | # via 63 | # aiohttp 64 | # aiosignal 65 | fsspec==2024.5.0 66 | # via huggingface-hub 67 | gitdb==4.0.11 68 | # via gitpython 69 | gitpython==3.1.43 70 | # via 71 | # -r requirements.in 72 | # streamlit 73 | google-ai-generativelanguage==0.6.4 74 | # via google-generativeai 75 | google-api-core[grpc]==2.19.0 76 | # via 77 | # google-ai-generativelanguage 78 | # google-api-python-client 79 | # google-generativeai 80 | google-api-python-client==2.129.0 81 | # via google-generativeai 82 | google-auth==2.29.0 83 | # via 84 | # google-ai-generativelanguage 85 | # google-api-core 86 | # google-api-python-client 87 | # google-auth-httplib2 88 | # google-generativeai 89 | google-auth-httplib2==0.2.0 90 | # via google-api-python-client 91 | google-generativeai==0.5.4 92 | # via -r requirements.in 93 | googleapis-common-protos==1.63.0 94 | # via 95 | # google-api-core 96 | # grpcio-status 97 | greenlet==3.0.3 98 | # via playwright 99 | grep-ast==0.3.2 100 | # via -r requirements.in 101 | grpcio==1.63.0 102 | # via 103 | # google-api-core 104 | # grpcio-status 105 | grpcio-status==1.62.2 106 | # via google-api-core 107 | h11==0.14.0 108 | # via httpcore 109 | httpcore==1.0.5 110 | # via 
httpx 111 | httplib2==0.22.0 112 | # via 113 | # google-api-python-client 114 | # google-auth-httplib2 115 | httpx==0.27.0 116 | # via openai 117 | huggingface-hub==0.23.0 118 | # via tokenizers 119 | idna==3.7 120 | # via 121 | # anyio 122 | # httpx 123 | # requests 124 | # yarl 125 | importlib-metadata==7.1.0 126 | # via litellm 127 | jinja2==3.1.4 128 | # via 129 | # altair 130 | # litellm 131 | # pydeck 132 | jsonschema==4.22.0 133 | # via 134 | # -r requirements.in 135 | # altair 136 | jsonschema-specifications==2023.12.1 137 | # via jsonschema 138 | litellm==1.37.16 139 | # via -r requirements.in 140 | markdown-it-py==3.0.0 141 | # via rich 142 | markupsafe==2.1.5 143 | # via jinja2 144 | mccabe==0.7.0 145 | # via flake8 146 | mdurl==0.1.2 147 | # via markdown-it-py 148 | multidict==6.0.5 149 | # via 150 | # aiohttp 151 | # yarl 152 | networkx==3.2.1 153 | # via -r requirements.in 154 | numpy==1.26.4 155 | # via 156 | # -r requirements.in 157 | # altair 158 | # pandas 159 | # pyarrow 160 | # pydeck 161 | # scipy 162 | # streamlit 163 | openai==1.30.1 164 | # via 165 | # -r requirements.in 166 | # litellm 167 | packaging==24.0 168 | # via 169 | # -r requirements.in 170 | # altair 171 | # huggingface-hub 172 | # streamlit 173 | pandas==2.2.2 174 | # via 175 | # altair 176 | # streamlit 177 | pathspec==0.12.1 178 | # via 179 | # -r requirements.in 180 | # grep-ast 181 | pillow==10.3.0 182 | # via 183 | # -r requirements.in 184 | # streamlit 185 | playwright==1.44.0 186 | # via -r requirements.in 187 | prompt-toolkit==3.0.43 188 | # via -r requirements.in 189 | proto-plus==1.23.0 190 | # via 191 | # google-ai-generativelanguage 192 | # google-api-core 193 | protobuf==4.25.3 194 | # via 195 | # google-ai-generativelanguage 196 | # google-api-core 197 | # google-generativeai 198 | # googleapis-common-protos 199 | # grpcio-status 200 | # proto-plus 201 | # streamlit 202 | pyarrow==16.1.0 203 | # via streamlit 204 | pyasn1==0.6.0 205 | # via 206 | # pyasn1-modules 207 | # rsa 208 | pyasn1-modules==0.4.0 209 | # via google-auth 210 | pycodestyle==2.11.1 211 | # via flake8 212 | pycparser==2.22 213 | # via cffi 214 | pydantic==2.7.1 215 | # via 216 | # google-generativeai 217 | # openai 218 | pydantic-core==2.18.2 219 | # via pydantic 220 | pydeck==0.9.1 221 | # via streamlit 222 | pyee==11.1.0 223 | # via playwright 224 | pyflakes==3.2.0 225 | # via flake8 226 | pygments==2.18.0 227 | # via rich 228 | pypandoc==1.13 229 | # via -r requirements.in 230 | pyparsing==3.1.2 231 | # via httplib2 232 | python-dateutil==2.9.0.post0 233 | # via pandas 234 | python-dotenv==1.0.1 235 | # via litellm 236 | pytz==2024.1 237 | # via pandas 238 | pyyaml==6.0.1 239 | # via 240 | # -r requirements.in 241 | # huggingface-hub 242 | referencing==0.35.1 243 | # via 244 | # jsonschema 245 | # jsonschema-specifications 246 | regex==2024.5.15 247 | # via tiktoken 248 | requests==2.31.0 249 | # via 250 | # google-api-core 251 | # huggingface-hub 252 | # litellm 253 | # streamlit 254 | # tiktoken 255 | rich==13.7.1 256 | # via 257 | # -r requirements.in 258 | # streamlit 259 | rpds-py==0.18.1 260 | # via 261 | # jsonschema 262 | # referencing 263 | rsa==4.9 264 | # via google-auth 265 | scipy==1.13.0 266 | # via -r requirements.in 267 | six==1.16.0 268 | # via python-dateutil 269 | smmap==5.0.1 270 | # via gitdb 271 | sniffio==1.3.1 272 | # via 273 | # anyio 274 | # httpx 275 | # openai 276 | sounddevice==0.4.6 277 | # via -r requirements.in 278 | soundfile==0.12.1 279 | # via -r requirements.in 280 | soupsieve==2.5 
281 | # via beautifulsoup4 282 | streamlit==1.34.0 283 | # via -r requirements.in 284 | tenacity==8.3.0 285 | # via streamlit 286 | tiktoken==0.7.0 287 | # via 288 | # -r requirements.in 289 | # litellm 290 | tokenizers==0.19.1 291 | # via litellm 292 | toml==0.10.2 293 | # via streamlit 294 | toolz==0.12.1 295 | # via altair 296 | tornado==6.4 297 | # via streamlit 298 | tqdm==4.66.4 299 | # via 300 | # google-generativeai 301 | # huggingface-hub 302 | # openai 303 | tree-sitter==0.21.3 304 | # via 305 | # -r requirements.in 306 | # tree-sitter-languages 307 | tree-sitter-languages==1.10.2 308 | # via grep-ast 309 | typing-extensions==4.11.0 310 | # via 311 | # google-generativeai 312 | # huggingface-hub 313 | # openai 314 | # pydantic 315 | # pydantic-core 316 | # pyee 317 | # streamlit 318 | tzdata==2024.1 319 | # via pandas 320 | uritemplate==4.1.1 321 | # via google-api-python-client 322 | urllib3==2.2.1 323 | # via requests 324 | wcwidth==0.2.13 325 | # via prompt-toolkit 326 | yarl==1.9.4 327 | # via aiohttp 328 | zipp==3.18.2 329 | # via importlib-metadata 330 | -------------------------------------------------------------------------------- /devops/setup.cfg: -------------------------------------------------------------------------------- 1 | [egg_info] 2 | tag_build = 3 | tag_date = 0 4 | 5 | -------------------------------------------------------------------------------- /devops/setup.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from setuptools import find_packages, setup 4 | 5 | with open("requirements.txt") as f: 6 | requirements = f.read().splitlines() 7 | 8 | # Ensure that __version__ is defined in the aider package 9 | from aider import __version__ 10 | 11 | with open("README.md", "r", encoding="utf-8") as f: 12 | long_description = f.read() 13 | long_description = re.sub(r"\n!\[.*\]\(.*\)", "", long_description) 14 | long_description = re.sub(r"\n- \[.*\]\(.*\)", "", long_description) 15 | 16 | setup( 17 | name="aider-chat", 18 | version=__version__, 19 | packages=find_packages(), 20 | include_package_data=True, 21 | package_data={ 22 | "aider": ["queries/*"], 23 | }, 24 | install_requires=requirements, 25 | python_requires=">=3.9,<3.13", 26 | entry_points={ 27 | "console_scripts": [ 28 | "aider = aider.main:main", 29 | ], 30 | }, 31 | description="aider is GPT powered coding in your terminal", 32 | long_description=long_description, 33 | long_description_content_type="text/markdown", 34 | url="https://github.com/paul-gauthier/aider", 35 | classifiers=[ 36 | "Development Status :: 4 - Beta", 37 | "Environment :: Console", 38 | "Intended Audience :: Developers", 39 | "License :: OSI Approved :: Apache Software License", 40 | "Programming Language :: Python :: 3", 41 | "Programming Language :: Python :: 3.10", 42 | "Programming Language :: Python :: 3.11", 43 | "Programming Language :: Python :: 3.12", 44 | "Programming Language :: Python :: 3.9", 45 | "Programming Language :: Python", 46 | "Topic :: Software Development", 47 | ], 48 | ) 49 | -------------------------------------------------------------------------------- /devops/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruvnet/agentic-devops/520da11e3337e234b3193018d343adb33b5299f1/devops/tests/__init__.py -------------------------------------------------------------------------------- /devops/tests/test_io.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | from pathlib import Path 4 | from unittest.mock import MagicMock, patch 5 | 6 | from aider.io import AutoCompleter, InputOutput 7 | from aider.utils import ChdirTemporaryDirectory 8 | 9 | 10 | class TestInputOutput(unittest.TestCase): 11 | def test_no_color_environment_variable(self): 12 | with patch.dict(os.environ, {"NO_COLOR": "1"}): 13 | io = InputOutput() 14 | self.assertFalse(io.pretty) 15 | 16 | def test_autocompleter_with_non_existent_file(self): 17 | root = "" 18 | rel_fnames = ["non_existent_file.txt"] 19 | addable_rel_fnames = [] 20 | commands = None 21 | autocompleter = AutoCompleter(root, rel_fnames, addable_rel_fnames, commands, "utf-8") 22 | self.assertEqual(autocompleter.words, set(rel_fnames)) 23 | 24 | def test_autocompleter_with_unicode_file(self): 25 | with ChdirTemporaryDirectory(): 26 | root = "" 27 | fname = "file.py" 28 | rel_fnames = [fname] 29 | addable_rel_fnames = [] 30 | commands = None 31 | autocompleter = AutoCompleter(root, rel_fnames, addable_rel_fnames, commands, "utf-8") 32 | self.assertEqual(autocompleter.words, set(rel_fnames)) 33 | 34 | Path(fname).write_text("def hello(): pass\n") 35 | autocompleter = AutoCompleter(root, rel_fnames, addable_rel_fnames, commands, "utf-8") 36 | self.assertEqual(autocompleter.words, set(rel_fnames + ["hello"])) 37 | 38 | encoding = "utf-16" 39 | some_content_which_will_error_if_read_with_encoding_utf8 = "ÅÍÎÏ".encode(encoding) 40 | with open(fname, "wb") as f: 41 | f.write(some_content_which_will_error_if_read_with_encoding_utf8) 42 | 43 | autocompleter = AutoCompleter(root, rel_fnames, addable_rel_fnames, commands, "utf-8") 44 | self.assertEqual(autocompleter.words, set(rel_fnames)) 45 | 46 | @patch("aider.io.PromptSession") 47 | def test_get_input_is_a_directory_error(self, MockPromptSession): 48 | # Mock the PromptSession to simulate user input 49 | mock_session = MockPromptSession.return_value 50 | mock_session.prompt.return_value = "test input" 51 | 52 | io = InputOutput(pretty=False) # Windows tests throw UnicodeDecodeError 53 | root = "/" 54 | rel_fnames = ["existing_file.txt"] 55 | addable_rel_fnames = ["new_file.txt"] 56 | commands = MagicMock() 57 | 58 | # Simulate IsADirectoryError 59 | with patch("aider.io.open", side_effect=IsADirectoryError): 60 | result = io.get_input(root, rel_fnames, addable_rel_fnames, commands) 61 | self.assertEqual(result, "test input") 62 | 63 | 64 | if __name__ == "__main__": 65 | unittest.main() 66 | -------------------------------------------------------------------------------- /devops/tests/test_models.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from aider.models import Model 4 | 5 | 6 | class TestModels(unittest.TestCase): 7 | def test_max_context_tokens(self): 8 | model = Model("gpt-3.5-turbo") 9 | self.assertEqual(model.info["max_input_tokens"], 16385) 10 | 11 | model = Model("gpt-3.5-turbo-16k") 12 | self.assertEqual(model.info["max_input_tokens"], 16385) 13 | 14 | model = Model("gpt-3.5-turbo-1106") 15 | self.assertEqual(model.info["max_input_tokens"], 16385) 16 | 17 | model = Model("gpt-4") 18 | self.assertEqual(model.info["max_input_tokens"], 8 * 1024) 19 | 20 | model = Model("gpt-4-32k") 21 | self.assertEqual(model.info["max_input_tokens"], 32 * 1024) 22 | 23 | model = Model("gpt-4-0613") 24 | self.assertEqual(model.info["max_input_tokens"], 8 * 1024) 25 | 26 | 27 | if __name__ == 
"__main__": 28 | unittest.main() 29 | -------------------------------------------------------------------------------- /devops/tests/test_repomap.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | from aider.dump import dump # noqa: F401 5 | from aider.io import InputOutput 6 | from aider.models import Model 7 | from aider.repomap import RepoMap 8 | from aider.utils import IgnorantTemporaryDirectory 9 | 10 | 11 | class TestRepoMap(unittest.TestCase): 12 | def setUp(self): 13 | self.GPT35 = Model("gpt-3.5-turbo") 14 | 15 | def test_get_repo_map(self): 16 | # Create a temporary directory with sample files for testing 17 | test_files = [ 18 | "test_file1.py", 19 | "test_file2.py", 20 | "test_file3.md", 21 | "test_file4.json", 22 | ] 23 | 24 | with IgnorantTemporaryDirectory() as temp_dir: 25 | for file in test_files: 26 | with open(os.path.join(temp_dir, file), "w") as f: 27 | f.write("") 28 | 29 | io = InputOutput() 30 | repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=io) 31 | other_files = [os.path.join(temp_dir, file) for file in test_files] 32 | result = repo_map.get_repo_map([], other_files) 33 | 34 | # Check if the result contains the expected tags map 35 | self.assertIn("test_file1.py", result) 36 | self.assertIn("test_file2.py", result) 37 | self.assertIn("test_file3.md", result) 38 | self.assertIn("test_file4.json", result) 39 | 40 | # close the open cache files, so Windows won't error 41 | del repo_map 42 | 43 | def test_get_repo_map_with_identifiers(self): 44 | # Create a temporary directory with a sample Python file containing identifiers 45 | test_file1 = "test_file_with_identifiers.py" 46 | file_content1 = """\ 47 | class MyClass: 48 | def my_method(self, arg1, arg2): 49 | return arg1 + arg2 50 | 51 | def my_function(arg1, arg2): 52 | return arg1 * arg2 53 | """ 54 | 55 | test_file2 = "test_file_import.py" 56 | file_content2 = """\ 57 | from test_file_with_identifiers import MyClass 58 | 59 | obj = MyClass() 60 | print(obj.my_method(1, 2)) 61 | print(my_function(3, 4)) 62 | """ 63 | 64 | test_file3 = "test_file_pass.py" 65 | file_content3 = "pass" 66 | 67 | with IgnorantTemporaryDirectory() as temp_dir: 68 | with open(os.path.join(temp_dir, test_file1), "w") as f: 69 | f.write(file_content1) 70 | 71 | with open(os.path.join(temp_dir, test_file2), "w") as f: 72 | f.write(file_content2) 73 | 74 | with open(os.path.join(temp_dir, test_file3), "w") as f: 75 | f.write(file_content3) 76 | 77 | io = InputOutput() 78 | repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=io) 79 | other_files = [ 80 | os.path.join(temp_dir, test_file1), 81 | os.path.join(temp_dir, test_file2), 82 | os.path.join(temp_dir, test_file3), 83 | ] 84 | result = repo_map.get_repo_map([], other_files) 85 | 86 | # Check if the result contains the expected tags map with identifiers 87 | self.assertIn("test_file_with_identifiers.py", result) 88 | self.assertIn("MyClass", result) 89 | self.assertIn("my_method", result) 90 | self.assertIn("my_function", result) 91 | self.assertIn("test_file_pass.py", result) 92 | 93 | # close the open cache files, so Windows won't error 94 | del repo_map 95 | 96 | def test_get_repo_map_all_files(self): 97 | test_files = [ 98 | "test_file0.py", 99 | "test_file1.txt", 100 | "test_file2.md", 101 | "test_file3.json", 102 | "test_file4.html", 103 | "test_file5.css", 104 | "test_file6.js", 105 | ] 106 | 107 | with IgnorantTemporaryDirectory() as temp_dir: 108 | for file in test_files: 109 | with 
open(os.path.join(temp_dir, file), "w") as f: 110 | f.write("") 111 | 112 | repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=InputOutput()) 113 | 114 | other_files = [os.path.join(temp_dir, file) for file in test_files] 115 | result = repo_map.get_repo_map([], other_files) 116 | dump(other_files) 117 | dump(repr(result)) 118 | 119 | # Check if the result contains each specific file in the expected tags map without ctags 120 | for file in test_files: 121 | self.assertIn(file, result) 122 | 123 | # close the open cache files, so Windows won't error 124 | del repo_map 125 | 126 | def test_get_repo_map_excludes_added_files(self): 127 | # Create a temporary directory with sample files for testing 128 | test_files = [ 129 | "test_file1.py", 130 | "test_file2.py", 131 | "test_file3.md", 132 | "test_file4.json", 133 | ] 134 | 135 | with IgnorantTemporaryDirectory() as temp_dir: 136 | for file in test_files: 137 | with open(os.path.join(temp_dir, file), "w") as f: 138 | f.write("def foo(): pass\n") 139 | 140 | io = InputOutput() 141 | repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=io) 142 | test_files = [os.path.join(temp_dir, file) for file in test_files] 143 | result = repo_map.get_repo_map(test_files[:2], test_files[2:]) 144 | 145 | dump(result) 146 | 147 | # Check if the result contains the expected tags map 148 | self.assertNotIn("test_file1.py", result) 149 | self.assertNotIn("test_file2.py", result) 150 | self.assertIn("test_file3.md", result) 151 | self.assertIn("test_file4.json", result) 152 | 153 | # close the open cache files, so Windows won't error 154 | del repo_map 155 | 156 | 157 | class TestRepoMapTypescript(unittest.TestCase): 158 | def setUp(self): 159 | self.GPT35 = Model("gpt-3.5-turbo") 160 | 161 | def test_get_repo_map_typescript(self): 162 | # Create a temporary directory with a sample TypeScript file 163 | test_file_ts = "test_file.ts" 164 | file_content_ts = """\ 165 | interface IMyInterface { 166 | someMethod(): void; 167 | } 168 | 169 | type ExampleType = { 170 | key: string; 171 | value: number; 172 | }; 173 | 174 | enum Status { 175 | New, 176 | InProgress, 177 | Completed, 178 | } 179 | 180 | export class MyClass { 181 | constructor(public value: number) {} 182 | 183 | add(input: number): number { 184 | return this.value + input; 185 | return this.value + input; 186 | } 187 | } 188 | 189 | export function myFunction(input: number): number { 190 | return input * 2; 191 | } 192 | """ 193 | 194 | with IgnorantTemporaryDirectory() as temp_dir: 195 | with open(os.path.join(temp_dir, test_file_ts), "w") as f: 196 | f.write(file_content_ts) 197 | 198 | io = InputOutput() 199 | repo_map = RepoMap(main_model=self.GPT35, root=temp_dir, io=io) 200 | other_files = [os.path.join(temp_dir, test_file_ts)] 201 | result = repo_map.get_repo_map([], other_files) 202 | 203 | # Check if the result contains the expected tags map with TypeScript identifiers 204 | self.assertIn("test_file.ts", result) 205 | self.assertIn("IMyInterface", result) 206 | self.assertIn("ExampleType", result) 207 | self.assertIn("Status", result) 208 | self.assertIn("MyClass", result) 209 | self.assertIn("add", result) 210 | self.assertIn("myFunction", result) 211 | 212 | # close the open cache files, so Windows won't error 213 | del repo_map 214 | 215 | 216 | if __name__ == "__main__": 217 | unittest.main() 218 | -------------------------------------------------------------------------------- /devops/tests/test_sendchat.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import MagicMock, patch 3 | 4 | import httpx 5 | 6 | from aider.litellm import litellm 7 | from aider.sendchat import send_with_retries 8 | 9 | 10 | class PrintCalled(Exception): 11 | pass 12 | 13 | 14 | class TestSendChat(unittest.TestCase): 15 | @patch("litellm.completion") 16 | @patch("builtins.print") 17 | def test_send_with_retries_rate_limit_error(self, mock_print, mock_completion): 18 | mock = MagicMock() 19 | mock.status_code = 500 20 | 21 | # Set up the mock to raise 22 | mock_completion.side_effect = [ 23 | litellm.exceptions.RateLimitError( 24 | "rate limit exceeded", 25 | response=mock, 26 | llm_provider="llm_provider", 27 | model="model", 28 | ), 29 | None, 30 | ] 31 | 32 | # Call the send_with_retries method 33 | send_with_retries("model", ["message"], None, False) 34 | mock_print.assert_called_once() 35 | 36 | @patch("litellm.completion") 37 | @patch("builtins.print") 38 | def test_send_with_retries_connection_error(self, mock_print, mock_completion): 39 | # Set up the mock to raise 40 | mock_completion.side_effect = [ 41 | httpx.ConnectError("Connection error"), 42 | None, 43 | ] 44 | 45 | # Call the send_with_retries method 46 | send_with_retries("model", ["message"], None, False) 47 | mock_print.assert_called_once() 48 | -------------------------------------------------------------------------------- /devops/tests/test_udiff.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from aider.coders.udiff_coder import find_diffs 4 | from aider.dump import dump # noqa: F401 5 | 6 | 7 | class TestUnifiedDiffCoder(unittest.TestCase): 8 | def test_find_diffs_single_hunk(self): 9 | # Test find_diffs with a single hunk 10 | content = """ 11 | Some text... 12 | 13 | ```diff 14 | --- file.txt 15 | +++ file.txt 16 | @@ ... @@ 17 | -Original 18 | +Modified 19 | ``` 20 | """ 21 | edits = find_diffs(content) 22 | dump(edits) 23 | self.assertEqual(len(edits), 1) 24 | 25 | edit = edits[0] 26 | self.assertEqual(edit[0], "file.txt") 27 | self.assertEqual(edit[1], ["-Original\n", "+Modified\n"]) 28 | 29 | def test_find_diffs_dev_null(self): 30 | # Test find_diffs with a single hunk 31 | content = """ 32 | Some text... 33 | 34 | ```diff 35 | --- /dev/null 36 | +++ file.txt 37 | @@ ... @@ 38 | -Original 39 | +Modified 40 | ``` 41 | """ 42 | edits = find_diffs(content) 43 | dump(edits) 44 | self.assertEqual(len(edits), 1) 45 | 46 | edit = edits[0] 47 | self.assertEqual(edit[0], "file.txt") 48 | self.assertEqual(edit[1], ["-Original\n", "+Modified\n"]) 49 | 50 | def test_find_diffs_dirname_with_spaces(self): 51 | # Test find_diffs with a single hunk 52 | content = """ 53 | Some text... 54 | 55 | ```diff 56 | --- dir name with spaces/file.txt 57 | +++ dir name with spaces/file.txt 58 | @@ ... @@ 59 | -Original 60 | +Modified 61 | ``` 62 | """ 63 | edits = find_diffs(content) 64 | dump(edits) 65 | self.assertEqual(len(edits), 1) 66 | 67 | edit = edits[0] 68 | self.assertEqual(edit[0], "dir name with spaces/file.txt") 69 | self.assertEqual(edit[1], ["-Original\n", "+Modified\n"]) 70 | 71 | def test_find_multi_diffs(self): 72 | content = """ 73 | To implement the `--check-update` option, I will make the following changes: 74 | 75 | 1. Add the `--check-update` argument to the argument parser in `aider/main.py`. 76 | 2. 
Modify the `check_version` function in `aider/versioncheck.py` to return a boolean indicating whether an update is available. 77 | 3. Use the returned value from `check_version` in `aider/main.py` to set the exit status code when `--check-update` is used. 78 | 79 | Here are the diffs for those changes: 80 | 81 | ```diff 82 | --- aider/versioncheck.py 83 | +++ aider/versioncheck.py 84 | @@ ... @@ 85 | except Exception as err: 86 | print_cmd(f"Error checking pypi for new version: {err}") 87 | + return False 88 | 89 | --- aider/main.py 90 | +++ aider/main.py 91 | @@ ... @@ 92 | other_group.add_argument( 93 | "--version", 94 | action="version", 95 | version=f"%(prog)s {__version__}", 96 | help="Show the version number and exit", 97 | ) 98 | + other_group.add_argument( 99 | + "--check-update", 100 | + action="store_true", 101 | + help="Check for updates and return status in the exit code", 102 | + default=False, 103 | + ) 104 | other_group.add_argument( 105 | "--apply", 106 | metavar="FILE", 107 | ``` 108 | 109 | These changes will add the `--check-update` option to the command-line interface and use the `check_version` function to determine if an update is available, exiting with status code `0` if no update is available and `1` if an update is available. 110 | """ # noqa: E501 111 | 112 | edits = find_diffs(content) 113 | dump(edits) 114 | self.assertEqual(len(edits), 2) 115 | self.assertEqual(len(edits[0][1]), 3) 116 | 117 | 118 | if __name__ == "__main__": 119 | unittest.main() 120 | -------------------------------------------------------------------------------- /misc/cli-null.py: -------------------------------------------------------------------------------- 1 | import os 2 | import click 3 | import subprocess 4 | import pkg_resources 5 | import sys 6 | from azure.identity import DefaultAzureCredential, ClientSecretCredential 7 | from azure.core.exceptions import ClientAuthenticationError 8 | from azure.mgmt.resource import ResourceManagementClient 9 | from azure.mgmt.web import WebSiteManagementClient 10 | 11 | 12 | # Utility functions 13 | def check_installation(): 14 | click.echo("🔍 Checking for installed packages and requirements...") 15 | required_packages = ['click', 'azure-identity', 'azure-mgmt-resource', 'azure-mgmt-web', 'azure-appconfiguration', 'pytest', 'python-dotenv'] 16 | installed_packages = {pkg.key for pkg in pkg_resources.working_set} 17 | for package in required_packages: 18 | if package not in installed_packages: 19 | click.echo(f"📦 Package {package} is not installed. Installing...") 20 | subprocess.check_call([sys.executable, '-m', 'pip', 'install', package]) 21 | else: 22 | click.echo(f"✅ Package {package} is already installed.") 23 | click.echo("✅ All required packages are installed.") 24 | 25 | def check_cli_tools(): 26 | click.echo("🔍 Checking for required CLI tools...") 27 | required_tools = {'gh': 'https://github.com/cli/cli#installation', 'az': 'https://aka.ms/InstallAzureCLIDeb'} 28 | for tool, install_url in required_tools.items(): 29 | if subprocess.call(['which', tool], stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0: 30 | if tool == 'az': 31 | click.echo(f"📦 {tool} CLI is not installed. Installing...") 32 | subprocess.check_call("curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash", shell=True) 33 | else: 34 | click.echo(f"❌ {tool} CLI is not installed. 
Please install {tool} CLI manually from {install_url}.") 35 | sys.exit(1) 36 | click.echo("✅ All required CLI tools are installed.") 37 | 38 | def validate_azure_keys(): 39 | click.echo("🔍 Validating Azure keys...") 40 | required_keys = { 41 | 'AZURE_CLIENT_ID': 'AZURE_CLIENT_ID', 42 | 'AZURE_CLIENT_SECRET': 'AZURE_CLIENT_SECRET', 43 | 'AZURE_TENANT_ID': 'AZURE_TENANT_ID', 44 | 'AZURE_SUBSCRIPTION_ID': 'AZURE_SUBSCRIPTION_ID' 45 | } 46 | for env_var in required_keys: 47 | value = os.getenv(env_var) 48 | if not value: 49 | value = click.prompt(f"🔑 Please enter your {env_var}", hide_input=False) 50 | os.environ[env_var] = value 51 | click.echo("✅ All essential Azure keys are set.") 52 | 53 | def verbose_output(message): 54 | click.echo(f"📝 {message}") 55 | 56 | def run_tests(): 57 | click.echo("🧪 Running tests...") 58 | result = subprocess.run(['pytest'], capture_output=True, text=True) 59 | click.echo(result.stdout) 60 | if result.returncode != 0: 61 | click.echo("❌ Tests failed:", result.stderr) 62 | raise Exception("Tests failed") 63 | click.echo("✅ All tests passed.") 64 | 65 | # Command functions 66 | def get_subscription_id(): 67 | subscription_id = os.getenv('AZURE_SUBSCRIPTION_ID') 68 | if not subscription_id: 69 | subscription_id = click.prompt("🔑 Please enter your AZURE_SUBSCRIPTION_ID", hide_input=False) 70 | os.environ['AZURE_SUBSCRIPTION_ID'] = subscription_id 71 | return subscription_id 72 | 73 | def list_deployments_cmd(): 74 | try: 75 | subscription_id = get_subscription_id() 76 | credential = ClientSecretCredential( 77 | tenant_id=os.getenv('AZURE_TENANT_ID'), 78 | client_id=os.getenv('AZURE_CLIENT_ID'), 79 | client_secret=os.getenv('AZURE_CLIENT_SECRET') 80 | ) 81 | resource_client = ResourceManagementClient(credential, subscription_id) 82 | deployments = resource_client.deployments.list_at_subscription_scope() 83 | for deployment in deployments: 84 | click.echo(f"🌐 Name: {deployment.name}, Resource Group: {deployment.resource_group}, State: {deployment.properties.provisioning_state}") 85 | except ClientAuthenticationError as e: 86 | click.echo(f"⚠️ Authentication Error: {e}") 87 | click.echo("🔧 Please ensure that the client secret is correct and not the client secret ID. You can update the secret and try again.") 88 | click.echo("🔗 Troubleshooting: https://aka.ms/azsdk/python/identity/defaultazurecredential/troubleshoot") 89 | except Exception as e: 90 | click.echo(f"⚠️ Error: {e}") 91 | click.echo("🔧 Please ensure that the service principal has the required permissions. You can assign the 'Owner' role to the service principal using the Azure Portal or Azure CLI.") 92 | click.echo("🔗 Documentation: https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-steps") 93 | 94 | def main_menu(): 95 | click.echo("📋 Main Menu") 96 | click.echo("1. List Deployments") 97 | click.echo("2. Setup Deployment") 98 | click.echo("3. Create Deployment") 99 | click.echo("4. Update Deployment") 100 | click.echo("5. Remove Deployment") 101 | click.echo("6. Exit") 102 | 103 | choice = click.prompt("Enter your choice", type=int) 104 | 105 | if choice == 1: 106 | list_deployments_cmd() 107 | elif choice == 2: 108 | setup_deployment_cmd() 109 | elif choice == 3: 110 | create_deployment_cmd() 111 | elif choice == 4: 112 | update_deployment_cmd() 113 | elif choice == 5: 114 | remove_deployment_cmd() 115 | elif choice == 6: 116 | click.echo("👋 Exiting. Goodbye!") 117 | sys.exit(0) 118 | else: 119 | click.echo("❌ Invalid choice. 
Please try again.") 120 | 121 | @click.command() 122 | def run(): 123 | """ 124 | Start the interactive menu or chat UI. 125 | """ 126 | check_installation() 127 | check_cli_tools() 128 | validate_azure_keys() 129 | 130 | mode = click.prompt("Choose mode (1: Menu, 2: Chat)", type=int) 131 | if mode == 1: 132 | main_menu() 133 | elif mode == 2: 134 | asyncio.run(agentic_chat()) 135 | else: 136 | click.echo("❌ Invalid choice. Please choose 1 for Menu or 2 for Chat.") 137 | 138 | if __name__ == '__main__': 139 | run() 140 | -------------------------------------------------------------------------------- /misc/main copy.py: -------------------------------------------------------------------------------- 1 | import click 2 | import subprocess 3 | import importlib.metadata as metadata 4 | import os 5 | import sys 6 | from lionagi import Session 7 | from azure.identity import DefaultAzureCredential 8 | from azure.core.exceptions import ClientAuthenticationError 9 | from azure.mgmt.resource import ResourceManagementClient 10 | from azure.mgmt.web import WebSiteManagementClient 11 | from azure.appconfiguration import AzureAppConfigurationClient 12 | import asyncio 13 | import re 14 | from concurrent.futures import ThreadPoolExecutor 15 | from cli.ci_cd_pipeline import create_ci_cd_pipeline 16 | from cli.utils import ( 17 | check_installation, 18 | check_cli_tools, 19 | validate_azure_keys, 20 | verbose_output, 21 | run_tests, 22 | list_deployments_cmd, 23 | setup_deployment_cmd, 24 | create_deployment_cmd, 25 | update_deployment_cmd, 26 | remove_deployment_cmd, 27 | get_subscription_id 28 | ) 29 | from cli.agentic import initialize_lionagi_session, handle_user_input 30 | 31 | @click.group() 32 | def cli(): 33 | pass 34 | 35 | @cli.command() 36 | def welcome(): 37 | click.echo(r""" 38 | ___ __ _ ____ 39 | / | ____ ____ ____ / /_(______ / __ \___ _ ______ ____ _____ 40 | / /| |/ __ `/ _ \/ __ \/ __/ / ___/ / / / / _ | | / / __ \/ __ \/ ___/ 41 | / ___ / /_/ / __/ / / / /_/ / /__ / /_/ / __| |/ / /_/ / /_/ (__ ) 42 | /_/ |_\__, /\___/_/ /_/\__/_/\___/ /_____/\___/|___/\____/ .___/____/ 43 | /____/ /_/ 44 | 45 | Welcome to the CLI tool! 46 | """) 47 | 48 | @cli.command() 49 | def create_pipeline(): 50 | create_ci_cd_pipeline() 51 | 52 | @click.command() 53 | def run(): 54 | """ 55 | Start the interactive menu or chat UI. 56 | """ 57 | mode = click.prompt("Choose mode (1: Menu, 2: Chat)", type=int) 58 | if mode == 1: 59 | main_menu() 60 | elif mode == 2: 61 | asyncio.run(agentic_chat()) 62 | else: 63 | click.echo("❌ Invalid choice. Please choose 1 for Menu or 2 for Chat.") 64 | main_menu() 65 | 66 | def main_menu(): 67 | click.echo("\n📋 Main Menu") 68 | click.echo("1. List Deployments") 69 | click.echo("2. Setup Deployment") 70 | click.echo("3. Create Deployment") 71 | click.echo("4. Update Deployment") 72 | click.echo("5. Remove Deployment") 73 | click.echo("6. Load Coder Menu") 74 | click.echo("7. Exit") 75 | 76 | choice = click.prompt("Enter your choice", type=int) 77 | 78 | if choice == 1: 79 | list_deployments_cmd() 80 | elif choice == 2: 81 | setup_deployment_cmd() 82 | elif choice == 3: 83 | create_deployment_cmd() 84 | elif choice == 4: 85 | update_deployment_cmd() 86 | elif choice == 5: 87 | remove_deployment_cmd() 88 | elif choice == 6: 89 | import coder 90 | coder.coder_menu() 91 | elif choice == 7: 92 | click.echo("\n👋 Exiting. Goodbye!\n") 93 | sys.exit(0) 94 | else: 95 | click.echo("\n❌ Invalid choice. 
Please try again.") 96 | main_menu() 97 | 98 | async def agentic_chat(): 99 | session = await initialize_lionagi_session() 100 | click.echo("🚀 Welcome to the interactive DevOps chat! Type 'exit' to leave the chat.") 101 | 102 | while True: 103 | user_input = click.prompt("💬 You", type=str) 104 | 105 | if user_input.lower() in ['exit', 'quit']: 106 | click.echo("👋 Exiting chat mode. Goodbye!") 107 | break 108 | 109 | try: 110 | # Parse important elements from user input 111 | repo_path = None 112 | if "../" in user_input: 113 | repo_path = user_input.split("../")[1].strip() 114 | elif "./" in user_input: 115 | repo_path = user_input.split("./")[1].strip() 116 | 117 | if repo_path: 118 | click.echo(f"🔍 Analyzing directory structure for {repo_path}...") 119 | else: 120 | # Ask for clarification if path not provided 121 | repo_path = click.prompt("Enter the path to the repository directory", type=str, default=".") 122 | click.echo(f"🔍 Analyzing directory structure for {repo_path}...") 123 | 124 | click.echo("📁 Listing files and directories...") 125 | files_and_dirs = subprocess.check_output(f"ls -lR {repo_path}", shell=True, text=True) 126 | 127 | # Chunk the directory information to limit tokens 128 | max_tokens = 2048 129 | chunks = [files_and_dirs[i:i+max_tokens] for i in range(0, len(files_and_dirs), max_tokens)] 130 | 131 | review_results = [] 132 | for chunk in chunks: 133 | click.echo("🤖 Sending directory information to LionAGI for review...") 134 | review_result = await handle_user_input(session, chunk) 135 | review_results.append(review_result) 136 | 137 | click.echo("🦁 LionAGI review:") 138 | for result in review_results: 139 | click.echo(result) 140 | 141 | click.echo("🤔 Generating potential actions based on the request...") 142 | action_prompt = f"User request: {user_input}\nDirectory information:\n{files_and_dirs}" 143 | potential_actions = await handle_user_input(session, action_prompt) 144 | 145 | click.echo("📝 Potential actions:") 146 | click.echo(potential_actions) 147 | 148 | execute_action = click.confirm("Do you want to execute any of the suggested actions?") 149 | if execute_action: 150 | selected_action = click.prompt("Enter the action you want to execute", type=str) 151 | click.echo(f"⚙️ Executing: {selected_action}") 152 | subprocess.run(selected_action, shell=True, check=True) 153 | else: 154 | click.echo("⏭️ Skipping action execution.") 155 | 156 | except subprocess.CalledProcessError as e: 157 | click.echo(f"⚠️ Error: {e.output}") 158 | except Exception as e: 159 | click.echo(f"⚠️ Error: {e}") 160 | 161 | if __name__ == '__main__': 162 | check_installation() 163 | check_cli_tools() 164 | validate_azure_keys() 165 | run() 166 | 167 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | azure-identity==1.7.0 2 | azure-mgmt-resource==20.0.0 3 | azure-mgmt-web==3.0.0 4 | azure-appconfiguration==1.1.1 5 | pytest==7.1.1 6 | litellm 7 | # Requirements for the backend FastAPI application 8 | fastapi 9 | uvicorn 10 | sqlalchemy 11 | alembic 12 | httpx 13 | pydantic 14 | python-dotenv 15 | lionagi 16 | wheel 17 | setuptools 18 | psutil -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import re 2 | from setuptools import find_packages, setup 3 | from pathlib import Path 4 | 5 | # Adjust the paths for the new location of 
setup.py 6 | base_dir = Path(__file__).resolve().parent 7 | 8 | # Read the requirements from the requirements.txt file 9 | requirements_path = base_dir / "devops/requirements.txt" 10 | if not requirements_path.exists(): 11 | raise FileNotFoundError(f"Could not find requirements.txt at {requirements_path}") 12 | 13 | with open(requirements_path) as f: 14 | requirements = f.read().splitlines() 15 | 16 | # Ensure that __version__ is defined in the agentic_devops package 17 | __version__ = "0.0.8" 18 | 19 | # Read the long description from the README.md file 20 | readme_path = base_dir / "README.md" 21 | if not readme_path.exists(): 22 | raise FileNotFoundError(f"Could not find README.md at {readme_path}") 23 | 24 | with open(readme_path, "r", encoding="utf-8") as f: 25 | long_description = f.read() 26 | long_description = re.sub(r"\n!\[.*\]\(.*\)", "", long_description) # Remove images 27 | long_description = re.sub(r"\n- \[.*\]\(.*\)", "", long_description) # Remove links 28 | 29 | # Setup configuration 30 | setup( 31 | name="Agentic-DevOps", 32 | version="0.0.8", 33 | author="rUv", 34 | author_email="null@ruv.net", 35 | description="Agentic DevOps Tool for automating and managing various DevOps tasks and configurations.", 36 | long_description=long_description, 37 | long_description_content_type="text/markdown", 38 | url="https://github.com/ruvnet/agentic-devops", 39 | packages=find_packages(where="devops"), 40 | package_dir={"": "devops"}, 41 | include_package_data=True, 42 | install_requires=requirements, 43 | entry_points={ 44 | "console_scripts": [ 45 | "agentic-devops=aider.main_wrapper:main", 46 | "agentic-devops-cli=aider.main:main", 47 | ], 48 | }, 49 | classifiers=[ 50 | "Development Status :: 3 - Alpha", 51 | "Environment :: Console", 52 | "Intended Audience :: Developers", 53 | "License :: OSI Approved :: Apache Software License", 54 | "Programming Language :: Python :: 3", 55 | "Programming Language :: Python :: 3.10", 56 | "Programming Language :: Python :: 3.11", 57 | "Programming Language :: Python :: 3.12", 58 | "Programming Language :: Python :: 3.9", 59 | "Programming Language :: Python", 60 | "Topic :: Software Development", 61 | ], 62 | ) 63 | --------------------------------------------------------------------------------
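A note on benchmark/refactor_tools.py, shown near the top of this listing: find_non_self_methods() parses each Python file with ast.parse() and walks the tree with a SelfUsageChecker visitor that is defined earlier in that same file, outside this excerpt. The sketch below is a hypothetical illustration of the ast.NodeVisitor pattern that visitor appears to rely on; the class name and the tuple layout are taken from the code above, but the node-counting and self-detection details here are assumptions, not the project's actual implementation.

import ast


class SelfUsageChecker(ast.NodeVisitor):
    # Illustrative sketch only: collect (class_name, method_name,
    # class_size, method_size) for methods that take `self` but never use it.
    def __init__(self):
        self.non_self_methods = []

    def visit_ClassDef(self, node):
        class_children = sum(1 for _ in ast.walk(node))  # rough size of the class
        for item in node.body:
            if not isinstance(item, ast.FunctionDef):
                continue
            if not item.args.args or item.args.args[0].arg != "self":
                continue
            # The `self` parameter itself is an ast.arg node, so any ast.Name
            # with id "self" must be an actual use of self in the method body.
            uses_self = any(
                isinstance(n, ast.Name) and n.id == "self" for n in ast.walk(item)
            )
            if not uses_self:
                method_children = sum(1 for _ in ast.walk(item))  # rough size of the method
                self.non_self_methods.append(
                    (node.name, item.name, class_children, method_children)
                )
        self.generic_visit(node)

Each collected tuple matches the unpacking in process(): find_non_self_methods() prepends the filename, giving (fname, class_name, method_name, class_children, method_children), and process() then keeps only methods whose recorded size is at least 250 and no more than half of their class's size before writing out the per-method refactoring instructions and unit test.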