├── .github └── workflows │ ├── code-review.yml │ └── github-pages.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── THIRD-PARTY ├── action.yml ├── app ├── designIntentionClassification.md ├── package.json ├── src │ ├── ActionExecutor.ts │ ├── ActionTypeDeterminer.ts │ ├── FunctionRegistry.ts │ ├── IntentionClassifier.ts │ ├── LargeLanguageModel.ts │ ├── handler.ts │ ├── index.ts │ └── utilsApp.ts └── tsconfig.json ├── debugging ├── sample.ts ├── testUtils.ts ├── utils.js └── utils.ts ├── dist ├── codeLayout.sh ├── codeReviewInline.d.ts ├── code_layout.sh ├── coverageAnalyzer.d.ts ├── index.d.ts ├── index.js ├── index1.js ├── prGeneration.d.ts ├── preview │ ├── languageModel.d.ts │ ├── prompt-obsolete.d.ts │ ├── promptRefiner.d.ts │ ├── resultCollector.d.ts │ ├── snippetMap.d.ts │ ├── testGenerator.d.ts │ └── testValidator.d.ts ├── prompts.d.ts ├── testGenerator.d.ts ├── testUtils.d.ts ├── testValidator.d.ts ├── ut_py.d.ts ├── ut_ts.d.ts └── utils.d.ts ├── docs ├── designIntentionClassification.md └── designTestGenerator.md ├── jest.config.js ├── jest.setup.js ├── notebook ├── lambda_function.py ├── lambda_function.zip └── llama2-13b.ipynb ├── package-lock.json ├── package.json ├── src ├── PromptGuide.md ├── codeLayout.sh ├── codeReviewInline.ts ├── deprecated-coverageAnalyzer.ts ├── deprecated-testGenerator.ts ├── deprecated-testUtils.ts ├── deprecated-testValidator.ts ├── index.ts ├── prGeneration.ts ├── preview │ ├── languageModel.ts │ ├── prompt-obsolete.ts │ ├── promptRefiner.ts │ ├── resultCollector.ts │ ├── snippetMap.ts │ ├── testGenerator.ts │ └── testValidator.ts ├── prompts.ts └── utils.ts ├── test ├── AUTO_GENERATED_TESTS_README.md ├── debugTestGenerator.ts ├── erroneous_code_test.py ├── sample.test.ts ├── testUtils.test.ts └── utils.test.ts ├── tools └── github_stats.py ├── tsconfig.build.json ├── tsconfig.json └── web ├── .gitignore ├── README.md ├── components ├── CopyableCommand.tsx ├── FAQ.tsx ├── Features.tsx ├── Footer.tsx ├── Header.tsx ├── Hero.tsx └── QuickStart.tsx ├── image.png ├── next-env.d.ts ├── next.config.js ├── package.json ├── pages ├── 404.js ├── _app.tsx └── index.tsx ├── postcss.config.js ├── styles └── globals.css ├── tailwind.config.js ├── tsconfig.json └── vercel.json /.github/workflows/code-review.yml: -------------------------------------------------------------------------------- 1 | name: Intelligent Code Review 2 | # Enable manual trigger 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | types: [opened, synchronize] 7 | 8 | # Avoid running the same workflow on the same branch concurrently 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.ref }} 11 | 12 | jobs: 13 | review: 14 | runs-on: ubuntu-latest 15 | environment: AWS_ROLE_TO_ASSUME 16 | 17 | permissions: 18 | # read repository contents and write pull request comments 19 | id-token: write 20 | # allow github action bot to push new content into existing pull requests 21 | contents: write 22 | # contents: read 23 | pull-requests: write 24 | steps: 25 | - name: Checkout code 26 | uses: actions/checkout@v3 27 | 28 | - name: Set up Node.js 29 | uses: actions/setup-node@v3 30 | with: 31 | node-version: '20' 32 | 33 | - name: Install dependencies @actions/core and @actions/github 34 | run: | 35 | npm install @actions/core 36 | npm install @actions/github 37 | shell: bash 38 | 39 | # check if required dependencies @actions/core and @actions/github are installed 40 | - name: Check if required dependencies are installed 41 | run: | 42 | npm list 
@actions/core 43 | npm list @actions/github 44 | shell: bash 45 | 46 | - name: Debug GitHub Token and environment variables 47 | run: | 48 | if [ -n "${{ secrets.GITHUB_TOKEN }}" ]; then 49 | echo "GitHub Token is set" 50 | else 51 | echo "GitHub Token is not set" 52 | fi 53 | if [ -n "${{ env.AWS_ROLE_TO_ASSUME_VAR }}" ]; then 54 | echo "AWS Role to Assume is set" 55 | else 56 | echo "AWS Role to Assume is not set" 57 | fi 58 | 59 | # assume the specified IAM role and set up the AWS credentials for use in subsequent steps. 60 | - name: Configure AWS Credentials 61 | uses: aws-actions/configure-aws-credentials@v4 62 | with: 63 | # using repository environment variable to get the role arn 64 | role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME_VAR }} 65 | aws-region: us-east-1 66 | 67 | - name: Intelligent GitHub Actions 68 | uses: aws-samples/aws-genai-cicd-suite@stable 69 | with: 70 | # Automatic Provision: The GITHUB_TOKEN is automatically created and provided by GitHub for each workflow run. You don't need to manually create or store this token as a secret. 71 | github-token: ${{ secrets.GITHUB_TOKEN }} 72 | aws-region: us-east-1 73 | model-id: anthropic.claude-3-sonnet-20240229-v1:0 74 | generate-code-review: 'true' 75 | generate-code-review-level: 'detailed' 76 | generate-code-review-exclude-files: '*.md,*.json,*.js' 77 | generate-pr-description: 'true' 78 | generate-unit-test: 'false' 79 | generate-unit-test-source-folder: 'debugging' 80 | # Removed the invalid input 'generate-unit-test-exclude-files' 81 | # output-language: 'zh' 82 | env: 83 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 84 | -------------------------------------------------------------------------------- /.github/workflows/github-pages.yml: -------------------------------------------------------------------------------- 1 | name: Deploy to GitHub Pages 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | paths: 9 | - 'web/**' # Only trigger on changes in web folder 10 | 11 | jobs: 12 | build-and-deploy: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | 17 | - name: Setup Node.js 18 | uses: actions/setup-node@v2 19 | with: 20 | node-version: '20' # or whichever version you're using 21 | 22 | - name: Install dependencies 23 | working-directory: ./web 24 | run: | 25 | if [ -f "package-lock.json" ]; then 26 | npm ci 27 | else 28 | npm install 29 | fi 30 | 31 | - name: Build 32 | working-directory: ./web 33 | run: npm run build 34 | 35 | - name: Export 36 | working-directory: ./web 37 | run: npx next export 38 | 39 | # By adding an empty .nojekyll file to the root of your GitHub Pages branch, we tell GitHub not to process site with Jekyll, ensures that Next.js build is served exactly as it was built, also to preserving underscore prefixed directories 40 | - name: Create .nojekyll file 41 | run: touch ./web/out/.nojekyll 42 | 43 | - name: Deploy to GitHub Pages 44 | uses: peaceiris/actions-gh-pages@v3 45 | with: 46 | github_token: ${{ secrets.GITHUB_TOKEN }} 47 | publish_dir: ./web/out 48 | # This parameter specifies the branch where the built site will be published. By default, it's set to 'gh-pages', but you can uncomment and modify this line if you want to publish to a different branch. For most GitHub Pages deployments, using 'gh-pages' is the standard. 
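          # For example (hypothetical branch name), to publish somewhere other than the default you would set:
          #   publish_branch: docs-site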
49 | publish_branch: gh-pages 50 | force_orphan: true 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | coverage.xml 3 | coverage 4 | .coverage 5 | __pycache__ 6 | downloaded_images 7 | sample_models 8 | # pem files 9 | *.pem 10 | .ragatouille 11 | *.crt 12 | *.key 13 | *.p12 14 | *.context.json 15 | node_modules 16 | build 17 | .vercel 18 | .aider* 19 | /tmp 20 | dist 21 | tmp.rm.later 22 | package-lock.json 23 | 24 | # Next.js, ignore the .next folder 25 | /web/.next 26 | 27 | # JS file in the src/preview folder 28 | /src/preview/*.js 29 | /src/*.js 30 | /test/*.js -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 
38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /action.yml: -------------------------------------------------------------------------------- 1 | name: 'Intelligent DevOps (Intelli-Ops) using Amazon Bedrock' 2 | description: 'AI-powered GitHub Action for code reviews, PR descriptions, unit tests, and issue management using Amazon Bedrock.' 
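# For reference, a minimal consuming workflow step might look like the sketch below.
# It mirrors the usage in .github/workflows/code-review.yml in this repository; the
# region, model ID, and flag values are examples and should be adjusted to your setup:
#
#   - name: Intelligent GitHub Actions
#     uses: aws-samples/aws-genai-cicd-suite@stable
#     with:
#       github-token: ${{ secrets.GITHUB_TOKEN }}
#       aws-region: us-east-1
#       model-id: anthropic.claude-3-sonnet-20240229-v1:0
#       generate-code-review: 'true'
#       generate-pr-description: 'true'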
3 | inputs:
4 |   github-token:
5 |     description: 'GitHub token'
6 |     required: true
7 |   aws-region:
8 |     description: 'AWS Region'
9 |     required: true
10 |     default: 'us-east-1'
11 |   model-id:
12 |     description: 'Model ID'
13 |     required: false
14 |     # Use specific prefix sagemaker. to specify the model hosted in AWS SageMaker
15 |     default: 'anthropic.claude-3-5-sonnet-20240620-v1:0'
16 |   generate-code-review-exclude-files:
17 |     description: 'Exclude file list, separated by comma, e.g. [*.md,*.json]'
18 |     required: false
19 |     # default to an empty string to disable the filter
20 |     default: ''
21 |   generate-code-review-level:
22 |     description: 'Code review level'
23 |     required: false
24 |     default: 'detailed'
25 |   generate-code-review:
26 |     description: 'Generate code review'
27 |     required: false
28 |   generate-pr-description:
29 |     description: 'Generate PR description'
30 |     required: false
31 |     default: 'false'
32 |   generate-unit-test:
33 |     description: 'Whether to generate unit tests'
34 |     required: false
35 |     default: 'false'
36 |   generate-unit-test-source-folder:
37 |     description: 'The folder path where unit tests should be generated based on the source code, only applicable when generate-unit-test is true'
38 |     required: false
39 |     default: ''
40 |   generate-unit-test-exclude-files:
41 |     description: 'Exclude file list, separated by comma, e.g. [*.md,*.json]'
42 |     required: false
43 |     default: ''
44 |   output-language:
45 |     description: 'Output language'
46 |     required: false
47 |     default: 'en'
48 | # The index.js file in the intelligent-code-review action no longer needs to explicitly set AWS credentials. It will use the credentials set up by the configure-aws-credentials action
49 | runs:
50 |   using: 'node20'
51 |   main: 'dist/index.js'
52 | 
53 | branding:
54 |   color: 'purple'
55 |   icon: 'check-circle'
56 | 
--------------------------------------------------------------------------------
/app/designIntentionClassification.md:
--------------------------------------------------------------------------------
1 | # Design Document: Intention Classification and Function Registry for GitHub Operations
2 | 
3 | ## 1. Introduction
4 | 
5 | This document outlines the design of a GitHub App that uses intention classification to interpret user queries and an agent-like framework to execute the appropriate actions. The design aims to create a modular, extensible, and efficient system capable of handling a wide range of GitHub-related tasks.
6 | 
7 | ## 2. System Overview
8 | 
9 | The system consists of several key components:
10 | 1. Intention Classifier: Uses an LLM to classify user intentions.
11 | 2. Action Type Determiner: Determines the appropriate action type based on the classified intention.
12 | 3. Function Registry: Manages the available functions for different action types.
13 | 4. Action Executor: Selects and executes the appropriate function(s) based on the action type and user query.
14 | 5. Large Language Model (LLM) & Prompt Templates: Interfaces with the Large Language Model for various tasks.
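Taken together, these components form a single request path. The following sketch is illustrative glue code only; it mirrors how `app/src/handler.ts` composes the concrete classes under `app/src/`, and the region and model ID are example values:

```typescript
import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
import { IntentionClassifier } from './IntentionClassifier';
import { FunctionRegistry } from './FunctionRegistry';
import { ActionExecutor } from './ActionExecutor';

// Shared Bedrock client and model ID (example values)
const client = new BedrockRuntimeClient({ region: 'us-east-1' });
const modelId = 'anthropic.claude-3-sonnet-20240229-v1:0';

const classifier = new IntentionClassifier(client, modelId);
const registry = new FunctionRegistry();            // functions are registered at startup
const executor = new ActionExecutor(registry, client, modelId);

// Classify the query, then let the executor pick and run a registered function,
// iterating with feedback until the result is satisfactory or retries are exhausted.
async function answer(userQuery: string, context: any): Promise<string> {
  const intention = await classifier.classify(userQuery, context);
  const result = await executor.execute(intention, userQuery, context);
  return result.success ? String(result.result) : `Error: ${result.error}`;
}
```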
15 | 16 | The system works as follows: 17 | ```mermaid 18 | graph TD 19 | A[User Query] --> B[Context Extractor] 20 | B --> C[Inputs Object] 21 | C --> D[Prompts] 22 | D --> E[LLM Claude 3.5 on Amazon Bedrock] 23 | E --> F[Intention Classifier] 24 | F --> G[Action Type Determiner] 25 | G --> H{Action Type} 26 | H -->|LLM-Only| I[LLM-Only Function Executor] 27 | H -->|Codebase-Aware| J[Codebase-Aware Function Executor] 28 | H -->|External API| K[External API Function Executor] 29 | I --> L[Function Registry] 30 | J --> L 31 | K --> L 32 | L --> M[Execute Function] 33 | M --> N[Return Result to User] 34 | ``` 35 | 36 | ## 3. Query Categories 37 | 38 | The system will handle various types of user queries related to GitHub operations, including but not limited to: 39 | 40 | | Intention Category | Action | 41 | |--------------------|--------| 42 | | Code review and analysis | Invoke GitHub API to fetch specific commit/file details or load the whole codebase into memory, then use LLM or registered functions to apply thereon | 43 | | Repository management | Use GitHub API to fetch repository details or load the whole codebase into memory, then use LLM or registered functions to apply thereon | 44 | | Documentation tasks | Use LLM to generate documentation according to a style guide, then use registered functions to apply them | 45 | | GitHub Actions and CI/CD operations | Use GitHub API to fetch workflow details, then use LLM or registered functions to apply thereon | 46 | 47 | Sample queries: 48 | 49 | - Code review and analysis: 50 | -- I pushed a fix in commit , please review it. 51 | -- Generate unit testing code for this file, or read src/utils.ts and generate unit testing code. 52 | 53 | - Repository management: 54 | -- Summarize stats about this repository and render them as a table. Additionally, render a pie chart showing the language distribution in the codebase. 55 | -- Modularize this function. 56 | -- Read the files in the src/scheduler package and generate a class diagram using mermaid and a README in the markdown format. 57 | 58 | - Documentation tasks: 59 | -- Generate a Pull Request description for this PR. 60 | 61 | - GitHub Actions and CI/CD operations: 62 | -- Create a GitHub Actions workflow to deploy this service to AWS Lambda. 63 | 64 | ## 4. Key Components 65 | 66 | ### 4.1 Intention Classifier 67 | 68 | The Intention Classifier uses the LLM to interpret user queries and classify them into predefined categories: 69 | 70 | ```typescript 71 | class IntentionClassifier { 72 | private llm: LargeLanguageModel; 73 | 74 | constructor(llm: LargeLanguageModel) { 75 | this.llm = llm; 76 | } 77 | 78 | async classify(query: string, context: any): Promise { 79 | const inputs = new Inputs(context); 80 | const prompt = Prompts.renderIntentionClassificationPrompt(inputs); 81 | return this.llm.classify(prompt); 82 | } 83 | } 84 | ``` 85 | 86 | ### 4.2 Action Type Determiner 87 | 88 | The Action Type Determiner maps classified intentions to appropriate action types: 89 | 90 | ```typescript 91 | class ActionTypeDeterminer { 92 | determineActionType(intention: string): FunctionType { 93 | // Logic to map intention to action type 94 | } 95 | } 96 | ``` 97 | 98 | We define three main action types: 99 | 1. LLM-Only Actions: Require only LLM interaction. 100 | 2. Codebase-Aware Actions: Need information from the entire codebase. 101 | 3. External API Actions: Invoke registered functions (GitHub API calls, etc.). 
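One straightforward way to map a classified intention onto these action types is a keyword heuristic, which is how `app/src/ActionTypeDeterminer.ts` currently does it (reproduced below as a sketch; a production system could instead rely on the LLM returning a structured action type):

```typescript
export enum FunctionType {
  LLMOnly,
  CodebaseAware,
  ExternalAPI
}

export class ActionTypeDeterminer {
  determineActionType(intention: string): FunctionType {
    // Keyword heuristic: generation/summarization needs only the LLM,
    // review/analysis needs codebase context, everything else goes through external APIs.
    if (intention.includes('generate') || intention.includes('summarize')) {
      return FunctionType.LLMOnly;
    } else if (intention.includes('review') || intention.includes('analyze')) {
      return FunctionType.CodebaseAware;
    }
    return FunctionType.ExternalAPI;
  }
}
```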
102 | 
103 | 
104 | ### 4.3 Function Registry
105 | 
106 | We implement the Function Registry to manage the registration and retrieval of functions:
107 | 
108 | ```typescript
109 | interface RegisteredFunction {
110 |   id: string;
111 |   name: string;
112 |   type: FunctionType;
113 |   execute: (query: string, context: any) => Promise<any>;
114 | }
115 | 
116 | class FunctionRegistry {
117 |   private functions: Map<string, RegisteredFunction> = new Map();
118 | 
119 |   registerFunction(func: RegisteredFunction): void { /* ... */ }
120 |   getFunction(id: string): RegisteredFunction | undefined { /* ... */ }
121 |   listFunctions(type?: FunctionType): RegisteredFunction[] { /* ... */ }
122 |   updateFunction(id: string, updates: Partial<RegisteredFunction>): void { /* ... */ }
123 |   deleteFunction(id: string): boolean { /* ... */ }
124 | }
125 | ```
126 | 
127 | 
128 | ### 4.4 Action Executor
129 | 
130 | The Action Executor is responsible for:
131 | (1) selecting the proper function to execute for the specific user query in each category;
132 | (2) orchestrating the workflow when several function executions are combined with LLM invocations;
133 | (3) handling errors from function execution and iterating with feedback or error messages;
134 | (4) evaluating the output of each iteration and judging whether the final output meets the acceptance criteria.
135 | 
136 | ```typescript
137 | class ActionExecutor {
138 |   private registry: FunctionRegistry;
139 |   private llm: LargeLanguageModel;
140 | 
141 |   constructor(registry: FunctionRegistry, llm: LargeLanguageModel) {
142 |     this.registry = registry;
143 |     this.llm = llm;
144 |   }
145 | 
146 |   async execute(intention: string, query: string, context: any): Promise<any> {
147 |     const actionType = this.determineActionType(intention);
148 |     const functions = this.registry.listFunctions(actionType);
149 |     const selectedFunction = await this.selectFunction(functions, query, context);
150 | 
151 |     let result: any;
152 |     let iterations = 0;
153 |     const maxIterations = 3;
154 | 
155 |     do {
156 |       try {
157 |         result = await this.executeFunction(selectedFunction, query, context);
158 |         const evaluation = await this.evaluateOutput(result, query, context);
159 | 
160 |         if (evaluation.isSatisfactory) {
161 |           return { success: true, result, iterations };
162 |         } else if (iterations < maxIterations) {
163 |           context = { ...context, previousResult: result, feedback: evaluation.feedback };
164 |         } else {
165 |           return { success: false, error: "Max iterations reached without satisfactory result" };
166 |         }
167 |       } catch (error) {
168 |         const errorHandler = await this.determineErrorHandling(error, query, context);
169 |         if (errorHandler.retry && iterations < maxIterations) {
170 |           context = { ...context, error, errorFeedback: errorHandler.feedback };
171 |         } else {
172 |           return { success: false, error: errorHandler.message };
173 |         }
174 |       }
175 |       iterations++;
176 |     } while (iterations < maxIterations);
177 |   }
178 | 
179 |   private async selectFunction(functions: RegisteredFunction[], query: string, context: any): Promise<RegisteredFunction> { /* ... */ }
180 |   private async executeFunction(func: RegisteredFunction, query: string, context: any): Promise<any> { /* ... */ }
181 |   private async evaluateOutput(result: any, query: string, context: any): Promise<{ isSatisfactory: boolean, feedback?: string }> { /* ... */ }
182 |   private async determineErrorHandling(error: any, query: string, context: any): Promise<{ retry: boolean, feedback?: string, message: string }> { /* ... */ }
183 |   private determineActionType(intention: string): FunctionType { /* ...
*/ } 184 | } 185 | ``` 186 | 187 | ### 4.5 Large Language Model (LLM) & Prompt Templates 188 | 189 | We will use Claude 3.5 or a similar LLM through Amazon Bedrock for intention classification, and the LLM will be provided with context about GitHub operations and the user's query. 190 | 191 | We will implement a modular prompt system with two main classes: 192 | 193 | ```typescript 194 | class Inputs { 195 | // Properties to store various input parameters 196 | constructor(context: any) { 197 | // Initialize properties based on context 198 | } 199 | clone(): Inputs { 200 | // Return a deep copy of the inputs 201 | } 202 | render(template: string): string { 203 | // Replace placeholders in template with actual values 204 | } 205 | } 206 | 207 | class Prompts { 208 | static intentionClassificationPrompt = "..."; // Prompt template 209 | static renderIntentionClassificationPrompt(inputs: Inputs): string { 210 | return inputs.render(this.intentionClassificationPrompt); 211 | } 212 | // Other prompt methods... 213 | } 214 | ``` 215 | 216 | ## 5. Future Work 217 | - Develop a versioning system for registered functions 218 | - Implement function chaining for complex operations 219 | - Implement parallel execution for independent actions in complex queries 220 | - Create a visual workflow designer for complex action sequences 221 | - Develop a plugin system for easy addition of new action handlers 222 | -------------------------------------------------------------------------------- /app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "github-chatbot", 3 | "version": "1.0.0", 4 | "main": "src/index.ts", 5 | "scripts": { 6 | "start": "ts-node src/index.ts", 7 | "build": "tsc" 8 | }, 9 | "dependencies": { 10 | "@octokit/rest": "^18.12.0", 11 | "@octokit/webhooks-types": "^6.10.0", 12 | "express": "^4.17.1", 13 | "langchain": "^0.2.17" 14 | }, 15 | "devDependencies": { 16 | "@types/express": "^4.17.21", 17 | "@types/node": "^16.11.12", 18 | "ts-node": "^10.4.0", 19 | "typescript": "^4.5.4" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /app/src/ActionExecutor.ts: -------------------------------------------------------------------------------- 1 | import { FunctionRegistry, RegisteredFunction } from './FunctionRegistry'; 2 | import { LargeLanguageModel } from './LargeLanguageModel'; 3 | import { ActionTypeDeterminer, FunctionType } from './ActionTypeDeterminer'; 4 | import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime'; 5 | 6 | interface ExecutionResult { 7 | success: boolean; 8 | result?: any; 9 | error?: string; 10 | iterations: number; 11 | } 12 | 13 | export class ActionExecutor { 14 | private registry: FunctionRegistry; 15 | private llm: LargeLanguageModel; 16 | private actionTypeDeterminer: ActionTypeDeterminer; 17 | 18 | constructor(registry: FunctionRegistry, client: BedrockRuntimeClient, modelId: string) { 19 | this.registry = registry; 20 | this.llm = new LargeLanguageModel(client, modelId); 21 | this.actionTypeDeterminer = new ActionTypeDeterminer(); 22 | } 23 | 24 | async execute(intention: string, query: string, context: any): Promise { 25 | const actionType = this.actionTypeDeterminer.determineActionType(intention); 26 | const functions = this.registry.listFunctions(actionType); 27 | const selectedFunction = await this.selectFunction(functions, query, context); 28 | 29 | let result: any; 30 | let iterations = 0; 31 | const maxIterations = 3; 32 | 33 | do 
{ 34 | try { 35 | result = await this.executeFunction(selectedFunction, query, context); 36 | const evaluation = await this.evaluateOutput(result, query, context); 37 | 38 | if (evaluation.isSatisfactory) { 39 | return { success: true, result, iterations }; 40 | } else if (iterations < maxIterations) { 41 | context = { ...context, previousResult: result, feedback: evaluation.feedback }; 42 | } else { 43 | return { success: false, error: "Max iterations reached without satisfactory result", iterations }; 44 | } 45 | } catch (error) { 46 | const errorHandler = await this.determineErrorHandling(error, query, context); 47 | if (errorHandler.retry && iterations < maxIterations) { 48 | context = { ...context, error, errorFeedback: errorHandler.feedback }; 49 | } else { 50 | return { success: false, error: errorHandler.message, iterations }; 51 | } 52 | } 53 | iterations++; 54 | } while (iterations < maxIterations); 55 | 56 | return { success: false, error: "Unexpected error", iterations }; 57 | } 58 | 59 | private async selectFunction(functions: RegisteredFunction[], query: string, context: any): Promise { 60 | // For now, just return the first function. This can be improved later. 61 | return functions[0]; 62 | } 63 | 64 | private async executeFunction(func: RegisteredFunction, query: string, context: any): Promise { 65 | return func.execute(query, context); 66 | } 67 | 68 | private async evaluateOutput(result: any, query: string, context: any): Promise<{ isSatisfactory: boolean, feedback?: string }> { 69 | // This is a placeholder. In a real implementation, you might use the LLM to evaluate the output. 70 | return { isSatisfactory: true }; 71 | } 72 | 73 | private async determineErrorHandling(error: any, query: string, context: any): Promise<{ retry: boolean, feedback?: string, message: string }> { 74 | // This is a placeholder. In a real implementation, you might use the LLM to determine how to handle the error. 
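    //
    // A hedged sketch of that LLM-based check (illustrative only, not wired in; the prompt
    // wording and the RETRY/ABORT convention are assumptions, not part of the current code):
    //
    //   const prompt = `The selected function failed for query "${query}" with error: ${error.message}. ` +
    //     `Reply with RETRY plus guidance if the failure looks transient, otherwise reply ABORT with a reason.`;
    //   const verdict = await this.llm.classify(prompt);
    //   if (verdict.startsWith('RETRY')) {
    //     return { retry: true, feedback: verdict, message: error.message };
    //   }
    //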
75 |     return { retry: false, message: error.message || "An error occurred" };
76 |   }
77 | }
78 | 
--------------------------------------------------------------------------------
/app/src/ActionTypeDeterminer.ts:
--------------------------------------------------------------------------------
1 | export enum FunctionType {
2 |   LLMOnly,
3 |   CodebaseAware,
4 |   ExternalAPI
5 | }
6 | 
7 | export class ActionTypeDeterminer {
8 |   determineActionType(intention: string): FunctionType {
9 |     // Simple mapping logic, can be expanded later
10 |     if (intention.includes('generate') || intention.includes('summarize')) {
11 |       return FunctionType.LLMOnly;
12 |     } else if (intention.includes('review') || intention.includes('analyze')) {
13 |       return FunctionType.CodebaseAware;
14 |     } else {
15 |       return FunctionType.ExternalAPI;
16 |     }
17 |   }
18 | }
19 | 
--------------------------------------------------------------------------------
/app/src/FunctionRegistry.ts:
--------------------------------------------------------------------------------
1 | import { FunctionType } from './ActionTypeDeterminer';
2 | 
3 | export interface RegisteredFunction {
4 |   id: string;
5 |   name: string;
6 |   type: FunctionType;
7 |   execute: (query: string, context: any) => Promise<any>;
8 | }
9 | 
10 | export class FunctionRegistry {
11 |   private functions: Map<string, RegisteredFunction> = new Map();
12 | 
13 |   registerFunction(func: RegisteredFunction): void {
14 |     this.functions.set(func.id, func);
15 |   }
16 | 
17 |   getFunction(id: string): RegisteredFunction | undefined {
18 |     return this.functions.get(id);
19 |   }
20 | 
21 |   listFunctions(type?: FunctionType): RegisteredFunction[] {
22 |     if (type === undefined) {
23 |       return Array.from(this.functions.values());
24 |     }
25 |     return Array.from(this.functions.values()).filter(func => func.type === type);
26 |   }
27 | 
28 |   updateFunction(id: string, updates: Partial<RegisteredFunction>): void {
29 |     const func = this.functions.get(id);
30 |     if (func) {
31 |       this.functions.set(id, { ...func, ...updates });
32 |     }
33 |   }
34 | 
35 |   deleteFunction(id: string): boolean {
36 |     return this.functions.delete(id);
37 |   }
38 | }
39 | 
--------------------------------------------------------------------------------
/app/src/IntentionClassifier.ts:
--------------------------------------------------------------------------------
1 | import { Inputs, Prompts } from '../../src/prompts';
2 | import { invokeModel } from '../../src/utils';
3 | import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
4 | 
5 | export class IntentionClassifier {
6 |   private client: BedrockRuntimeClient;
7 |   private modelId: string;
8 | 
9 |   constructor(client: BedrockRuntimeClient, modelId: string) {
10 |     this.client = client;
11 |     this.modelId = modelId;
12 |   }
13 | 
14 |   async classify(query: string, context: any): Promise<string> {
15 |     const inputs = new Inputs();
16 |     const prompts = new Prompts();
17 | 
18 |     inputs.userQuery = query;
19 |     const prompt = prompts.renderIntentionClassificationPrompt(inputs);
20 |     console.log('Intention classification prompt: ', prompt);
21 | 
22 |     const result = await invokeModel(this.client, this.modelId, prompt);
23 |     return result;
24 |   }
25 | }
26 | 
--------------------------------------------------------------------------------
/app/src/LargeLanguageModel.ts:
--------------------------------------------------------------------------------
1 | import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
2 | import { invokeModel } from '../../src/utils';
3 | 
4 | export class LargeLanguageModel {
5 |   private client:
BedrockRuntimeClient; 6 | private modelId: string; 7 | 8 | constructor(client: BedrockRuntimeClient, modelId: string) { 9 | this.client = client; 10 | this.modelId = modelId; 11 | } 12 | 13 | async classify(prompt: string): Promise { 14 | try { 15 | const result = await invokeModel(this.client, this.modelId, prompt); 16 | return result.trim(); 17 | } catch (error) { 18 | console.error('Error occurred while classifying:', error); 19 | throw error; 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /app/src/handler.ts: -------------------------------------------------------------------------------- 1 | import { Octokit } from '@octokit/rest'; 2 | import { WebhookEvent } from '@octokit/webhooks-types'; 3 | import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime'; 4 | import { IntentionClassifier } from './IntentionClassifier'; 5 | import { ActionExecutor } from './ActionExecutor'; 6 | import { FunctionRegistry } from './FunctionRegistry'; 7 | import { FunctionType } from './ActionTypeDeterminer'; 8 | import { generateUnitTestsPerFile, modularizeFunction, generateStats, findConsoleLogStatements, generateClassDiagram, debugBotConfig } from './utilsApp'; 9 | 10 | const bedrockClient = new BedrockRuntimeClient({ region: 'us-east-1' }); 11 | const modelId = 'anthropic.claude-3-sonnet-20240229-v1:0'; 12 | 13 | const intentionClassifier = new IntentionClassifier(bedrockClient, modelId); 14 | const functionRegistry = new FunctionRegistry(); 15 | const actionExecutor = new ActionExecutor(functionRegistry, bedrockClient, modelId); 16 | 17 | // Register functions 18 | functionRegistry.registerFunction({ 19 | id: 'generateUnitTests', 20 | name: 'Generate Unit Tests', 21 | type: FunctionType.CodebaseAware, 22 | execute: async (query: string, context: any) => { 23 | const { repository, issue, comment } = context; 24 | return generateUnitTestsPerFile(repository.full_name, issue.number.toString(), comment.path); 25 | } 26 | }); 27 | 28 | functionRegistry.registerFunction({ 29 | id: 'modularizeFunction', 30 | name: 'Modularize Function', 31 | type: FunctionType.CodebaseAware, 32 | execute: async (query: string, context: any) => { 33 | const { repository, pull_request, comment } = context; 34 | return modularizeFunction(repository.full_name, pull_request.head.ref, comment.path, comment.line); 35 | } 36 | }); 37 | 38 | functionRegistry.registerFunction({ 39 | id: 'generateStats', 40 | name: 'Generate Repository Stats', 41 | type: FunctionType.ExternalAPI, 42 | execute: async (query: string, context: any) => { 43 | const { repository } = context; 44 | return generateStats(repository.full_name); 45 | } 46 | }); 47 | 48 | functionRegistry.registerFunction({ 49 | id: 'findConsoleLogStatements', 50 | name: 'Find Console Log Statements', 51 | type: FunctionType.CodebaseAware, 52 | execute: async (query: string, context: any) => { 53 | const { repository } = context; 54 | return findConsoleLogStatements(repository.full_name); 55 | } 56 | }); 57 | 58 | functionRegistry.registerFunction({ 59 | id: 'generateClassDiagram', 60 | name: 'Generate Class Diagram', 61 | type: FunctionType.CodebaseAware, 62 | execute: async (query: string, context: any) => { 63 | const { repository } = context; 64 | // Extract package path from query or use a default 65 | const packagePath = query.includes('package') ? 
query.split('package')[1].trim() : 'src'; 66 | return generateClassDiagram(repository.full_name, packagePath); 67 | } 68 | }); 69 | 70 | functionRegistry.registerFunction({ 71 | id: 'debugBotConfig', 72 | name: 'Debug Bot Configuration', 73 | type: FunctionType.ExternalAPI, 74 | execute: async (query: string, context: any) => { 75 | const { repository } = context; 76 | return debugBotConfig(repository.full_name); 77 | } 78 | }); 79 | 80 | // Entry point for issue comments raise in PR 81 | export async function handleIssueComment(event: WebhookEvent, octokit: Octokit) { 82 | if ('comment' in event && 'issue' in event) { 83 | const { comment, issue, repository } = event; 84 | const commentBody = comment.body; 85 | const appName = '@IBTBot'; 86 | 87 | if (commentBody.startsWith(appName)) { 88 | console.log('Handling issue comment with body: ', commentBody) 89 | const userQuery = commentBody.replace(appName, '').trim(); 90 | try { 91 | const context = { 92 | repository: repository, 93 | issue: issue, 94 | comment: comment 95 | }; 96 | 97 | const intention = await intentionClassifier.classify(userQuery, context); 98 | console.log('User query intention: ', intention); 99 | 100 | const result = await actionExecutor.execute(intention, userQuery, context); 101 | 102 | let response = ''; 103 | if (result.success) { 104 | response = `Here's the response to your query:\n\n${result.result}`; 105 | } else { 106 | response = `I apologize, but I encountered an error while processing your request: ${result.error}`; 107 | } 108 | 109 | await octokit.issues.createComment({ 110 | owner: repository.owner.login, 111 | repo: repository.name, 112 | issue_number: issue.number, 113 | body: response 114 | }); 115 | } catch (error) { 116 | console.error('Error processing the request:', error); 117 | await octokit.issues.createComment({ 118 | owner: repository.owner.login, 119 | repo: repository.name, 120 | issue_number: issue.number, 121 | body: `I apologize, but I encountered an error while processing your request. Please try again later or contact the repository maintainer if the issue persists.` 122 | }); 123 | } 124 | } 125 | } 126 | } 127 | 128 | // Implement handleReviewComment and handlePullRequest functions 129 | export async function handleReviewComment(event: WebhookEvent, octokit: Octokit) { 130 | // Implementation goes here 131 | } 132 | 133 | export async function handlePullRequest(event: WebhookEvent, octokit: Octokit) { 134 | // Implementation goes here 135 | } 136 | -------------------------------------------------------------------------------- /app/src/index.ts: -------------------------------------------------------------------------------- 1 | import express from 'express'; 2 | import { Octokit } from '@octokit/rest'; 3 | import { WebhookEvent } from '@octokit/webhooks-types'; 4 | import { handleReviewComment, handlePullRequest, handleIssueComment } from './handler' 5 | 6 | const app = express(); 7 | app.use(express.json()); 8 | 9 | const port = process.env.PORT || 3000; 10 | 11 | // Check if GITHUB_APP_TOKEN is set 12 | if (!process.env.GITHUB_APP_TOKEN) { 13 | console.error("Error: GITHUB_APP_TOKEN environment variable is not set"); 14 | process.exit(1); 15 | } 16 | 17 | const octokit = new Octokit({ auth: process.env.GITHUB_APP_TOKEN }); 18 | console.log("GitHub App Token is set:", !!process.env.GITHUB_APP_TOKEN); 19 | 20 | /* 21 | Review comments: Directly reply to a review comment made by IBT Bot. Example: 22 | -- I pushed a fix in commit , please review it. 
23 | -- Generate unit testing code for this file. 24 | Open a follow-up GitHub issue for this discussion. 25 | Files and specific lines of code (under the "Files changed" tab): Tag @IBTBot in a new review comment at the desired location with your query. Examples: 26 | -- @IBTBot generate unit testing code for this file. 27 | -- @IBTBot modularize this function. 28 | PR comments: Tag @IBTBot in a new PR comment to ask questions about the PR branch. For the best results, please provide a very specific query, as very limited context is provided in this mode. Examples: 29 | -- @IBTBot gather interesting stats about this repository and render them as a table. Additionally, render a pie chart showing the language distribution in the codebase. 30 | -- @IBTBot read src/utils.ts and generate unit testing code. 31 | -- @IBTBot read the files in the src/scheduler package and generate a class diagram using mermaid and a README in the markdown format. 32 | -- @IBTBot help me debug IBT Bot configuration file, not for now. 33 | */ 34 | app.post('/webhook', async (req, res) => { 35 | const event = req.body as WebhookEvent; 36 | const githubEvent = req.headers["x-github-event"] as string; 37 | console.log('githubEvent received: ', githubEvent) 38 | try { 39 | switch (githubEvent) { 40 | case "pull_request_review_comment": 41 | await handleReviewComment(event, octokit); 42 | break; 43 | case "pull_request": 44 | if ('action' in event) { 45 | if (event.action === "opened" || event.action === "synchronize") { 46 | await handlePullRequest(event, octokit); 47 | } 48 | } 49 | break; 50 | // Note a new comment in PR will trigger the issue comment event 51 | case "issue_comment": 52 | if ('action' in event && (event.action === "created" || event.action === "edited")) { 53 | await handleIssueComment(event, octokit); 54 | } 55 | break; 56 | default: 57 | console.log(`Unhandled event type: ${githubEvent}`); 58 | } 59 | res.status(200).send("OK"); 60 | } catch (error) { 61 | console.error("Error processing webhook:", error); 62 | res.status(500).send("Internal Server Error"); 63 | } 64 | }); 65 | 66 | app.listen(port, () => { 67 | console.log(`Server running on port ${port}`); 68 | }); 69 | -------------------------------------------------------------------------------- /app/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2018", 4 | "module": "commonjs", 5 | "outDir": "./dist", 6 | "rootDir": "./app", 7 | "strict": true, 8 | "esModuleInterop": true, 9 | "skipLibCheck": true, 10 | "forceConsistentCasingInFileNames": true, 11 | "paths": { 12 | "@/*": ["./*"] 13 | }, 14 | }, 15 | "include": ["app/**/*"], 16 | "exclude": ["node_modules"] 17 | } -------------------------------------------------------------------------------- /debugging/sample.ts: -------------------------------------------------------------------------------- 1 | 2 | export function add(a: number, b: number): number { 3 | return a + b; 4 | } 5 | 6 | export function subtract(a: number, b: number): number { 7 | return a - b; 8 | } 9 | -------------------------------------------------------------------------------- /debugging/testUtils.ts: -------------------------------------------------------------------------------- 1 | export interface TestCase { 2 | name: string; 3 | type: 'direct' | 'indirect' | 'not-testable'; 4 | code: string; 5 | } 6 | 7 | export async function generateFakeResponse(): Promise { 8 | // Return a predefined fake response structure 9 | return 
[ 10 | { 11 | name: 'Default Unit Test due to the model time out during the test generation, most likely due to the prompt being too long', 12 | type: 'direct', 13 | code: "test('default test', () => { expect(true).toBe(true); });", 14 | }, 15 | ]; 16 | } 17 | 18 | export function createPrompt(sourceCode: string): string { 19 | return ` 20 | You are an expert TypeScript developer specializing in unit testing. Your task is to analyze the following TypeScript code and generate comprehensive unit tests using Jest. 21 | 22 | 23 | ${sourceCode} 24 | 25 | 26 | Please follow these steps: 27 | 1. Carefully read and understand the provided TypeScript code. 28 | 2. Categorize each method into one of these types: 29 | a) Methods that can be tested directly 30 | b) Methods that can be tested indirectly 31 | c) Methods that are not unit-testable 32 | 3. For each testable method, create a unit test using Jest. 33 | 4. Structure your response as a JSON array of test cases, where each test case has the following format: 34 | { 35 | "name": "Test name", 36 | "type": "direct" | "indirect" | "not-testable", 37 | "code": "The actual test code" 38 | } 39 | 40 | Important guidelines: 41 | - Ensure your tests are comprehensive and cover various scenarios, including edge cases. 42 | - Use clear and descriptive test names. 43 | - Include comments in your test code to explain the purpose of each test. 44 | - Follow TypeScript and Jest best practices. 45 | - For methods that are not unit-testable, explain why in a comment. 46 | - Make sure to import all necessary dependencies and mock external modules. 47 | - Use jest.mock() to mock external dependencies like fs, path, and child_process. 48 | - Include setup and teardown code (beforeEach, afterEach) where necessary. 49 | - Use appropriate Jest matchers (e.g., toHaveBeenCalledWith, toThrow) for precise assertions. 50 | - Consider using test.each for parameterized tests when appropriate. 51 | - Ensure that async functions are properly tested using async/await syntax. 52 | 53 | Here's an example of the expected output format: 54 | 55 | [ 56 | { 57 | "name": "Test input validation with empty array", 58 | "type": "direct", 59 | "code": "import { runUnitTests } from '../src/yourFile';\nimport * as fs from 'fs';\nimport * as path from 'path';\n\njest.mock('fs');\njest.mock('path');\njest.mock('child_process');\n\ndescribe('runUnitTests', () => {\n beforeEach(() => {\n jest.clearAllMocks();\n console.log = jest.fn();\n });\n\n it('should handle empty input array', async () => {\n // Test that the function handles an empty input array correctly\n await runUnitTests([]);\n expect(console.log).toHaveBeenCalledWith('Input test cases', []);\n expect(console.log).toHaveBeenCalledWith('No test cases to run');\n });\n});" 60 | } 61 | ] 62 | 63 | 64 | After generating the test cases, please review your output and ensure: 65 | 1. The tests are fully executable and correctly written. 66 | 2. The code is thoroughly commented for a beginner to understand. 67 | 3. The tests follow TypeScript and Jest best practices. 68 | 4. All external dependencies are properly mocked. 69 | 5. Edge cases and error scenarios are covered. 70 | 71 | Provide your response as a valid JSON array containing objects with the specified structure. Do not include any explanatory text outside of the JSON array. 
72 | `; 73 | } -------------------------------------------------------------------------------- /debugging/utils.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient, InvokeModelCommand } from "@aws-sdk/client-bedrock-runtime"; 2 | 3 | // Define the LanguageCode type 4 | export type LanguageCode = 'en' | 'zh' | 'ja' | 'es' | 'fr' | 'de' | 'it'; 5 | 6 | // Full definition of PullRequest from GitHub API can be found at https://gist.github.com/GuillaumeFalourd/e53ec9b6bc783cce184bd1eec263799d 7 | export interface PullRequest { 8 | title: string; 9 | number: number; 10 | body: string; 11 | head: { 12 | sha: string; 13 | ref: string; 14 | }; 15 | base: { 16 | sha: string; 17 | }; 18 | } 19 | 20 | export interface PullFile { 21 | filename: string; 22 | status: string; 23 | patch?: string; 24 | } 25 | 26 | // Update the languageCodeToName object with the correct type 27 | export const languageCodeToName: Record = { 28 | 'en': 'English', 29 | 'zh': 'Chinese', 30 | 'ja': 'Japanese', 31 | 'es': 'Spanish', 32 | 'fr': 'French', 33 | 'de': 'German', 34 | 'it': 'Italian', 35 | }; 36 | 37 | // This function splits the content into chunks of maxChunkSize 38 | export function splitContentIntoChunks_deprecated(content: string, maxChunkSize: number): string[] { 39 | const chunks: string[] = []; 40 | let currentChunk = ''; 41 | 42 | content.split('\n').forEach(line => { 43 | if (currentChunk.length + line.length > maxChunkSize) { 44 | chunks.push(currentChunk); 45 | currentChunk = ''; 46 | } 47 | currentChunk += line + '\n'; 48 | }); 49 | 50 | if (currentChunk) { 51 | chunks.push(currentChunk); 52 | } 53 | 54 | return chunks; 55 | } 56 | 57 | export function shouldExcludeFile(filename: string, excludePatterns: string[]): boolean { 58 | return excludePatterns.some(pattern => { 59 | const regex = new RegExp(`^${pattern.replace(/\*/g, '.*')}$`); 60 | return regex.test(filename); 61 | }); 62 | } 63 | 64 | export function splitIntoSoloFile(combinedCode: string): Record { 65 | // split the whole combinedCode content into individual files (index.ts, index_test.ts, index.js) by recognize the character like: "// File: ./index.ts", filter the content with suffix ".tx" and not contain "test" in file name (index.ts), 66 | const fileChunks: Record = {}; 67 | const filePattern = /\/\/ File: \.\/(.+)/; 68 | let currentFile = ''; 69 | let currentContent = ''; 70 | 71 | combinedCode.split('\n').forEach(line => { 72 | const match = line.match(filePattern); 73 | if (match) { 74 | if (currentFile) { 75 | fileChunks[currentFile] = currentContent.trim(); 76 | } 77 | currentFile = match[1] as string; 78 | currentContent = ''; 79 | } else { 80 | currentContent += line + '\n'; 81 | } 82 | }); 83 | 84 | if (currentFile) { 85 | fileChunks[currentFile] = currentContent.trim(); 86 | } 87 | return fileChunks; 88 | } 89 | 90 | export async function extractFunctions(content: string): Promise { 91 | // const functionPattern = /(?:export\s+)?(?:async\s+)?function\s+\w+\s*\([^)]*\)(?:\s*:\s*[^{]*?)?\s*{(?:[^{}]*|\{(?:[^{}]*|\{[^{}]*\})*\})*}/gs; 92 | // const matches = content.match(functionPattern); 93 | // return matches ? matches.map(match => match.trim()) : []; 94 | 95 | // Dummy response for debugging purposes 96 | return [ 97 | 'export async function generateUnitTests(client: BedrockRuntimeClient, modelId: string, sourceCode: string): Promise { ... }', 98 | 'async function runUnitTests(testCases: TestCase[], sourceCode: string): Promise { ... 
}', 99 | 'function generateTestReport(testCases: TestCase[]): Promise { ... }', 100 | ]; 101 | } 102 | 103 | export async function exponentialBackoff( 104 | fn: () => Promise, 105 | maxRetries: number, 106 | initialDelay: number, 107 | functionName: string 108 | ): Promise { 109 | let retries = 0; 110 | while (true) { 111 | try { 112 | const result = await fn(); 113 | console.log(`Function '${functionName}' executed successfully on attempt ${retries + 1}`); 114 | return result; 115 | } catch (error) { 116 | if (retries >= maxRetries) { 117 | console.error(`Max retries (${maxRetries}) reached for function '${functionName}'. Throwing error.`); 118 | throw error; 119 | } 120 | const delay = initialDelay * Math.pow(2, retries); 121 | console.log(`Attempt ${retries + 1} for function '${functionName}' failed. Retrying in ${delay}ms...`); 122 | await new Promise(resolve => setTimeout(resolve, delay)); 123 | retries++; 124 | } 125 | } 126 | } 127 | 128 | // note the default temperature is 1 according to official documentation: https://docs.anthropic.com/en/api/complete 129 | export async function invokeModel(client: BedrockRuntimeClient, modelId: string, payloadInput: string, temperature: number = 0.6): Promise { 130 | const maxRetries = 3; 131 | const initialDelay = 1000; // 1 second 132 | 133 | const invokeWithRetry = async (): Promise => { 134 | try { 135 | // seperate branch to invoke RESTFul endpoint exposed by API Gateway, if the modelId is prefixed with string like "sagemaker..execute-api..amazonaws.com/prod" 136 | if (modelId.startsWith("sagemaker.")) { 137 | // invoke RESTFul endpoint e.g. curl -X POST -H "Content-Type: application/json" -d '{"prompt": "import argparse\ndef main(string: str):\n print(string)\n print(string[::-1])\n if __name__ == \"__main__\":", "parameters": {"max_new_tokens": 256, "temperature": 0.1}}' https://.execute-api..amazonaws.com/prod 138 | const endpoint = modelId.split("sagemaker.")[1]; 139 | 140 | // invoke the RESTFul endpoint with the payload 141 | const payload = { 142 | prompt: payloadInput, 143 | parameters: { 144 | max_new_tokens: 256, 145 | temperature: 0.1, 146 | }, 147 | }; 148 | 149 | const response = await fetch(`https://${endpoint}`, { 150 | method: 'POST', 151 | headers: { 152 | 'Content-Type': 'application/json', 153 | }, 154 | body: JSON.stringify(payload), 155 | }); 156 | 157 | const responseBody = await response.json(); 158 | // extract the generated text from the response, the output payload should be in the format { "generated_text": "..." 
} using codellama model for now 159 | const finalResult = (responseBody as { generated_text: string }).generated_text; 160 | 161 | return finalResult; 162 | } 163 | 164 | const payload = { 165 | anthropic_version: "bedrock-2023-05-31", 166 | max_tokens: 4096, 167 | temperature: temperature, 168 | messages: [ 169 | { 170 | role: "user", 171 | content: [{ 172 | type: "text", 173 | text: payloadInput, 174 | }], 175 | }, 176 | ], 177 | }; 178 | 179 | const command = new InvokeModelCommand({ 180 | // modelId: "anthropic.claude-3-5-sonnet-20240620-v1:0" 181 | modelId: modelId, 182 | contentType: "application/json", 183 | body: JSON.stringify(payload), 184 | }); 185 | 186 | const apiResponse = await client.send(command); 187 | const decodedResponseBody = new TextDecoder().decode(apiResponse.body); 188 | const responseBody = JSON.parse(decodedResponseBody); 189 | return responseBody.content[0].text; 190 | } catch (error) { 191 | if (error instanceof Error && error.name === 'ThrottlingException') { 192 | throw error; // Allow retry for throttling errors 193 | } 194 | console.error('Error occurred while invoking the model', error); 195 | throw error; // Throw other errors without retry 196 | } 197 | }; 198 | 199 | return exponentialBackoff(invokeWithRetry, maxRetries, initialDelay, invokeModel.name); 200 | } -------------------------------------------------------------------------------- /dist/codeLayout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: ./codeLayout.sh [REPO_DIR] [OUTPUT_FILE] [EXTRACT_MODE] [FILE_EXTENSIONS] 3 | # Description: Combine all code files in a repository into a single file, with option to extract functions or include whole file content 4 | # Example: ./codeLayout.sh ~/projects/my_project combined_output.txt functions py js html css java cpp h cs 5 | # Example: ./codeLayout.sh ~/projects/my_project combined_output.txt whole py js html css java cpp h cs 6 | 7 | # Directory of the repository (default to current directory if not specified) 8 | REPO_DIR="${1:-.}" 9 | 10 | # Output file (default to combined_output.txt if not specified) 11 | OUTPUT_FILE="${2:-combined_output.txt}" 12 | 13 | # Extract mode (default to 'functions' if not specified) 14 | EXTRACT_MODE="${3:-functions}" 15 | 16 | # List of file extensions to include (default to a predefined list if not specified) 17 | FILE_EXTENSIONS=("${@:4}") 18 | if [ ${#FILE_EXTENSIONS[@]} -eq 0 ]; then 19 | FILE_EXTENSIONS=("py" "js" "java" "cpp" "ts") 20 | fi 21 | 22 | # Empty the output file if it exists 23 | > "$OUTPUT_FILE" 24 | 25 | # Function to extract functions and combine files 26 | combine_files() { 27 | local dir="$1" 28 | local find_command="find \"$dir\" -type f \\( -name \"*.${FILE_EXTENSIONS[0]}\"" 29 | for ext in "${FILE_EXTENSIONS[@]:1}"; do 30 | find_command+=" -o -name \"*.$ext\"" 31 | done 32 | find_command+=" \\) -not -path \"*/node_modules/*\" -print0" 33 | 34 | eval $find_command | while IFS= read -r -d '' file; do 35 | echo "// File: $file" >> "$OUTPUT_FILE" 36 | if [ "$EXTRACT_MODE" = "functions" ]; then 37 | # Extract functions only 38 | perl -0777 -ne 'print "$&\n\n" while /((?:export\s+)?(?:async\s+)?function\s+\w+\s*\([^)]*\)(?:\s*:\s*[^{]*?)?\s*{(?:[^{}]*|\{(?:[^{}]*|\{[^{}]*\})*\})*})/gs' "$file" >> "$OUTPUT_FILE" 39 | else 40 | # Include whole file content 41 | cat "$file" >> "$OUTPUT_FILE" 42 | fi 43 | echo -e "\n" >> "$OUTPUT_FILE" 44 | done 45 | } 46 | 47 | # Combine the files 48 | combine_files "$REPO_DIR" 49 | 50 | echo "All 
files have been processed and combined into $OUTPUT_FILE" 51 | if [ "$EXTRACT_MODE" = "functions" ]; then 52 | echo "Mode: Extracted functions only" 53 | else 54 | echo "Mode: Included whole file content" 55 | fi -------------------------------------------------------------------------------- /dist/codeReviewInline.d.ts: -------------------------------------------------------------------------------- 1 | import { getOctokit } from '@actions/github'; 2 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 3 | export declare function generateCodeReviewComment(bedrockClient: BedrockRuntimeClient, modelId: string, octokit: ReturnType, excludePatterns: string[], reviewLevel: string, outputLanguage: string): Promise; 4 | -------------------------------------------------------------------------------- /dist/code_layout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: ./code_layout.sh [REPO_DIR] [OUTPUT_FILE] [EXTRACT_MODE] [FILE_EXTENSIONS] 3 | # Description: Combine all code files in a repository into a single file, with option to extract functions or include whole file content 4 | # Example: ./code_layout.sh ~/projects/my_project combined_output.txt functions py js html css java cpp h cs 5 | # Example: ./code_layout.sh ~/projects/my_project combined_output.txt whole py js html css java cpp h cs 6 | 7 | # Directory of the repository (default to current directory if not specified) 8 | REPO_DIR="${1:-.}" 9 | 10 | # Output file (default to combined_output.txt if not specified) 11 | OUTPUT_FILE="${2:-combined_output.txt}" 12 | 13 | # Extract mode (default to 'functions' if not specified) 14 | EXTRACT_MODE="${3:-functions}" 15 | 16 | # List of file extensions to include (default to a predefined list if not specified) 17 | FILE_EXTENSIONS=("${@:4}") 18 | if [ ${#FILE_EXTENSIONS[@]} -eq 0 ]; then 19 | FILE_EXTENSIONS=("py" "js" "java" "cpp" "ts") 20 | fi 21 | 22 | # Empty the output file if it exists 23 | > "$OUTPUT_FILE" 24 | 25 | # Function to extract functions and combine files 26 | combine_files() { 27 | local dir="$1" 28 | local find_command="find \"$dir\" -type f \\( -name \"*.${FILE_EXTENSIONS[0]}\"" 29 | for ext in "${FILE_EXTENSIONS[@]:1}"; do 30 | find_command+=" -o -name \"*.$ext\"" 31 | done 32 | find_command+=" \\) -not -path \"*/node_modules/*\" -print0" 33 | 34 | eval $find_command | while IFS= read -r -d '' file; do 35 | echo "// File: $file" >> "$OUTPUT_FILE" 36 | if [ "$EXTRACT_MODE" = "functions" ]; then 37 | # Extract functions only 38 | perl -0777 -ne 'print "$&\n\n" while /((?:export\s+)?(?:async\s+)?function\s+\w+\s*\([^)]*\)(?:\s*:\s*[^{]*?)?\s*{(?:[^{}]*|\{(?:[^{}]*|\{[^{}]*\})*\})*})/gs' "$file" >> "$OUTPUT_FILE" 39 | else 40 | # Include whole file content 41 | cat "$file" >> "$OUTPUT_FILE" 42 | fi 43 | echo -e "\n" >> "$OUTPUT_FILE" 44 | done 45 | } 46 | 47 | # Combine the files 48 | combine_files "$REPO_DIR" 49 | 50 | echo "All files have been processed and combined into $OUTPUT_FILE" 51 | if [ "$EXTRACT_MODE" = "functions" ]; then 52 | echo "Mode: Extracted functions only" 53 | else 54 | echo "Mode: Included whole file content" 55 | fi -------------------------------------------------------------------------------- /dist/coverageAnalyzer.d.ts: -------------------------------------------------------------------------------- 1 | export declare function analyzeCoverage(testFilePath: string, sourceCode: string): Promise<{ 2 | statements: number; 3 | branches: number; 4 | functions: number; 5 | 
lines: number; 6 | }>; 7 | -------------------------------------------------------------------------------- /dist/index.d.ts: -------------------------------------------------------------------------------- 1 | export {}; 2 | -------------------------------------------------------------------------------- /dist/index1.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { 3 | if (k2 === undefined) k2 = k; 4 | var desc = Object.getOwnPropertyDescriptor(m, k); 5 | if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { 6 | desc = { enumerable: true, get: function() { return m[k]; } }; 7 | } 8 | Object.defineProperty(o, k2, desc); 9 | }) : (function(o, m, k, k2) { 10 | if (k2 === undefined) k2 = k; 11 | o[k2] = m[k]; 12 | })); 13 | var __exportStar = (this && this.__exportStar) || function(m, exports) { 14 | for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); 15 | }; 16 | Object.defineProperty(exports, "__esModule", { value: true }); 17 | var ts_jest_transformer_1 = require("./legacy/ts-jest-transformer"); 18 | __exportStar(require("./config"), exports); 19 | __exportStar(require("./constants"), exports); 20 | __exportStar(require("./legacy/compiler"), exports); 21 | __exportStar(require("./legacy/ts-jest-transformer"), exports); 22 | __exportStar(require("./legacy/config/config-set"), exports); 23 | __exportStar(require("./presets/create-jest-preset"), exports); 24 | __exportStar(require("./raw-compiler-options"), exports); 25 | __exportStar(require("./utils"), exports); 26 | __exportStar(require("./types"), exports); 27 | exports.default = { 28 | createTransformer: function (tsJestConfig) { 29 | return new ts_jest_transformer_1.TsJestTransformer(tsJestConfig); 30 | }, 31 | }; 32 | -------------------------------------------------------------------------------- /dist/prGeneration.d.ts: -------------------------------------------------------------------------------- 1 | import { getOctokit } from '@actions/github'; 2 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 3 | export declare function generatePRDescription(client: BedrockRuntimeClient, modelId: string, octokit: ReturnType): Promise; 4 | -------------------------------------------------------------------------------- /dist/preview/languageModel.d.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 2 | export interface ICompletionModel { 3 | getCompletions(prompt: string, temperature: number): Promise; 4 | } 5 | export declare class LanguageModel implements ICompletionModel { 6 | private client; 7 | private modelId; 8 | constructor(client: BedrockRuntimeClient, modelId: string); 9 | getCompletions(prompt: string, temperature: number): Promise; 10 | } 11 | -------------------------------------------------------------------------------- /dist/preview/prompt-obsolete.d.ts: -------------------------------------------------------------------------------- 1 | export declare class Prompt { 2 | private apiFunction; 3 | readonly id: string; 4 | private snippets; 5 | private docComments; 6 | private functionBody; 7 | private error; 8 | constructor(apiFunction: string, snippets?: string[]); 9 | assemble(): string; 10 | createTestSource(completion: string): string; 11 | 
private extractFunctionName; 12 | hasSnippets(): boolean; 13 | hasDocComments(): boolean; 14 | hasFunctionBody(): boolean; 15 | addSnippets(): Prompt; 16 | addDocComments(): Prompt; 17 | addFunctionBody(): Prompt; 18 | addError(error: string): Prompt; 19 | } 20 | -------------------------------------------------------------------------------- /dist/preview/promptRefiner.d.ts: -------------------------------------------------------------------------------- 1 | import { Prompt } from './prompt-obsolete'; 2 | export declare class PromptRefiner { 3 | static refinePrompt(prompt: Prompt, error: string): Prompt[]; 4 | } 5 | -------------------------------------------------------------------------------- /dist/preview/resultCollector.d.ts: -------------------------------------------------------------------------------- 1 | import { Prompts } from '../prompts'; 2 | import { ICoverageSummary } from './testValidator'; 3 | export interface ITestInfo { 4 | testName: string; 5 | testSource: string; 6 | prompt: Prompts; 7 | } 8 | export interface IPromptInfo { 9 | prompt: Prompts; 10 | completionsCount: number; 11 | } 12 | export interface ITestResultCollector { 13 | recordTestInfo(testInfo: ITestInfo): void; 14 | recordTestResult(testInfo: ITestInfo & { 15 | outcome: { 16 | status: string; 17 | error?: string; 18 | }; 19 | }): void; 20 | recordPromptInfo(prompt: Prompts, completionsCount: number): void; 21 | recordCoverageInfo(coverageSummary: ICoverageSummary): void; 22 | hasPrompt(prompt: Prompts): boolean; 23 | getTestResults(): Array; 29 | getCoverageInfo(): ICoverageSummary; 30 | } 31 | export declare class BaseTestResultCollector implements ITestResultCollector { 32 | private tests; 33 | private prompts; 34 | private testResults; 35 | private coverageInfo; 36 | recordTestInfo(testInfo: ITestInfo): void; 37 | recordTestResult(testInfo: ITestInfo & { 38 | outcome: { 39 | status: string; 40 | error?: string; 41 | }; 42 | }): void; 43 | recordPromptInfo(prompt: Prompts, completionsCount: number): void; 44 | recordCoverageInfo(coverageSummary: ICoverageSummary): void; 45 | hasPrompt(prompt: Prompts): boolean; 46 | getTestResults(): Array; 52 | getTestSource(testName: string): string | null; 53 | getCoverageInfo(): ICoverageSummary; 54 | } 55 | -------------------------------------------------------------------------------- /dist/preview/snippetMap.d.ts: -------------------------------------------------------------------------------- 1 | export declare class SnippetMap { 2 | private snippets; 3 | addSnippet(functionName: string, snippet: string): void; 4 | getSnippets(functionName: string): string[]; 5 | hasSnippets(functionName: string): boolean; 6 | } 7 | -------------------------------------------------------------------------------- /dist/preview/testGenerator.d.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 2 | import { getOctokit } from "@actions/github"; 3 | import { ICompletionModel } from "./languageModel"; 4 | import { TestValidator } from "./testValidator"; 5 | import { ITestResultCollector } from "./resultCollector"; 6 | import { SnippetMap } from "./snippetMap"; 7 | import { ICoverageSummary } from './testValidator'; 8 | import { ITestInfo } from "./resultCollector"; 9 | export declare class TestGenerator { 10 | private temperatures; 11 | private snippetMap; 12 | private model; 13 | private validator; 14 | private collector; 15 | private worklist; 16 | constructor(temperatures: 
number[], snippetMap: SnippetMap, model: ICompletionModel, validator: TestValidator, collector: ITestResultCollector); 17 | generateAndValidateTests(fileMeta: { 18 | fileName: string; 19 | filePath: string; 20 | fileContent: string; 21 | rootDir: string; 22 | }, snippets: string[]): Promise<{ 23 | generatedTests: string[]; 24 | coverageSummary: ICoverageSummary; 25 | testResults: Array; 31 | }>; 32 | private validateCompletion; 33 | private parseExecutableCode; 34 | private extractFunctions; 35 | } 36 | export declare function generateUnitTestsSuite(client: BedrockRuntimeClient, modelId: string, octokit: ReturnType, repo: { 37 | owner: string; 38 | repo: string; 39 | }, unitTestSourceFolder: string): Promise; 40 | export declare function generateTestCasesForFile(client: BedrockRuntimeClient, modelId: string, fileMeta: { 41 | fileName: string; 42 | filePath: string; 43 | fileContent: string; 44 | rootDir: string; 45 | }): Promise<{ 46 | generatedTests: string[]; 47 | coverageSummary: ICoverageSummary; 48 | testResults: Array; 54 | }>; 55 | -------------------------------------------------------------------------------- /dist/preview/testValidator.d.ts: -------------------------------------------------------------------------------- 1 | export interface ICoverageSummary { 2 | lines: { 3 | total: number; 4 | covered: number; 5 | skipped: number; 6 | pct: number; 7 | }; 8 | statements: { 9 | total: number; 10 | covered: number; 11 | skipped: number; 12 | pct: number; 13 | }; 14 | functions: { 15 | total: number; 16 | covered: number; 17 | skipped: number; 18 | pct: number; 19 | }; 20 | branches: { 21 | total: number; 22 | covered: number; 23 | skipped: number; 24 | pct: number; 25 | }; 26 | } 27 | export declare class TestValidator { 28 | private packagePath; 29 | private testDir; 30 | private coverageDirs; 31 | constructor(packagePath?: string); 32 | validateTest(testName: string, testSource: string, rootDir: string): { 33 | status: string; 34 | error?: string; 35 | }; 36 | private ensureTsJestInstalled; 37 | getCoverageSummary(): ICoverageSummary; 38 | private addCoverage; 39 | private calculatePercentages; 40 | } 41 | -------------------------------------------------------------------------------- /dist/prompts.d.ts: -------------------------------------------------------------------------------- 1 | export declare class Inputs { 2 | systemMessage: string; 3 | title: string; 4 | description: string; 5 | rawSummary: string; 6 | shortSummary: string; 7 | fileName: string; 8 | filePath: string; 9 | fileContent: string; 10 | fileDiff: string; 11 | patches: string; 12 | diff: string; 13 | commentChain: string; 14 | comment: string; 15 | languageName: string; 16 | hunkContent: string; 17 | snippets: string[]; 18 | docComments: string; 19 | functionBody: string; 20 | generatedUnitTestCodeExecutionError: string; 21 | generatedUnitTestCode: string; 22 | userQuery: string; 23 | constructor(systemMessage?: string, title?: string, description?: string, rawSummary?: string, shortSummary?: string, fileName?: string, filePath?: string, fileContent?: string, fileDiff?: string, patches?: string, diff?: string, commentChain?: string, comment?: string, languageName?: string, hunkContent?: string, snippets?: string[], docComments?: string, functionBody?: string, generatedUnitTestCodeExecutionError?: string, generatedUnitTestCode?: string, userQuery?: string); 24 | clone(): Inputs; 25 | render(content: string): string; 26 | } 27 | export declare class Prompts { 28 | private apiFunction; 29 | readonly id: 
string; 30 | private snippets; 31 | private docComments; 32 | private functionBody; 33 | refinedPrompt: string; 34 | summarize: string; 35 | summarizeReleaseNotes: string; 36 | constructor(apiFunction?: string, snippets?: string[], docComments?: string, functionBody?: string, summarize?: string, summarizeReleaseNotes?: string, refinedPrompt?: string); 37 | detailedReviewPrompt: string; 38 | conciseReviewPrompt: string; 39 | /** 40 | * Structured representation of a prompt we finally send to the model to generate test cases, which is a generation from another prompt. 41 | * 42 | * ```js 43 | * let mocha = require('mocha'); // -+ 44 | * let assert = require('assert'); // | Imports 45 | * let pkg = require('pkg'); // -+ 46 | * 47 | * // usage #1 // -+ 48 | * ... // | 49 | * // usage #2 // | Usage snippets 50 | * ... // -+ 51 | * 52 | * // this does... // -+ 53 | * // @param foo // | 54 | * // @returns bar // | Doc comment 55 | * ... // -+ 56 | * 57 | * // fn(args) // Signature of the function we're testing 58 | * // function fn(args) { // -+ 59 | * // ... // | Function body (optional) 60 | * // } // -+ 61 | * 62 | * describe('test pkg', function() { // Test suite header 63 | * it('test fn', function(done) { // Test case header 64 | * ``` 65 | * 66 | * The structured representation keeps track of these parts and provides methods 67 | * to assemble them into a textual prompt and complete them into a test case. 68 | */ 69 | preProcessUnitTestGenerationPrompt: string; 70 | unitTestGenerationPrompt: string; 71 | unitTestGenerationRefinedPrompt: string; 72 | intentionClassificationPrompt: string; 73 | renderDetailedReviewPrompt(inputs: Inputs): string; 74 | renderConciseReviewPrompt(inputs: Inputs): string; 75 | renderUnitTestGenerationPrompt(inputs: Inputs): string; 76 | renderUnitTestGenerationRefinedPrompt(inputs: Inputs): string; 77 | renderIntentionClassificationPrompt(inputs: Inputs): string; 78 | } 79 | -------------------------------------------------------------------------------- /dist/testGenerator.d.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 2 | import { TestCase } from './testUtils'; 3 | export declare function generateUnitTests(client: BedrockRuntimeClient, modelId: string, sourceCode: string): Promise; 4 | export declare function runUnitTests(testCases: TestCase[], unitTestSourceFolder: string): Promise; 5 | export declare function generateTestReport(testCases: TestCase[]): Promise; 6 | -------------------------------------------------------------------------------- /dist/testUtils.d.ts: -------------------------------------------------------------------------------- 1 | export interface TestCase { 2 | name: string; 3 | type: 'direct' | 'indirect' | 'not-testable'; 4 | code: string; 5 | } 6 | export declare function generateFakeResponse(): Promise; 7 | export declare function createPrompt(sourceCode: string): string; 8 | -------------------------------------------------------------------------------- /dist/testValidator.d.ts: -------------------------------------------------------------------------------- 1 | import { TestCase } from './testUtils'; 2 | export declare function validateTestCases(testCases: TestCase[], sourceCode: string): Promise; 3 | -------------------------------------------------------------------------------- /dist/ut_py.d.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient } from 
"@aws-sdk/client-bedrock-runtime"; 2 | interface TestCase { 3 | name: string; 4 | type: 'direct' | 'indirect' | 'not-testable'; 5 | code: string; 6 | } 7 | export declare function generateUnitTests(client: BedrockRuntimeClient, modelId: string, sourceCode: string): Promise; 8 | export declare function runUnitTests(testCases: TestCase[]): Promise; 9 | export declare function generateTestReport(testCases: TestCase[]): Promise; 10 | export declare function setupPythonEnvironment(): Promise; 11 | export {}; 12 | -------------------------------------------------------------------------------- /dist/ut_ts.d.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 2 | import { TestCase } from './testUtils'; 3 | export declare function generateUnitTests(client: BedrockRuntimeClient, modelId: string, sourceCode: string): Promise; 4 | export declare function runUnitTests(testCases: TestCase[], unitTestSourceFolder: string): Promise; 5 | export declare function generateTestReport(testCases: TestCase[]): Promise; 6 | -------------------------------------------------------------------------------- /dist/utils.d.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 2 | export type LanguageCode = 'en' | 'zh' | 'ja' | 'es' | 'fr' | 'de' | 'it'; 3 | export interface PullRequest { 4 | title: string; 5 | number: number; 6 | body: string; 7 | head: { 8 | sha: string; 9 | ref: string; 10 | }; 11 | base: { 12 | sha: string; 13 | }; 14 | } 15 | export interface PullFile { 16 | filename: string; 17 | status: string; 18 | patch?: string; 19 | } 20 | export declare const languageCodeToName: Record; 21 | export declare function splitContentIntoChunks_deprecated(content: string, maxChunkSize: number): string[]; 22 | export declare function shouldExcludeFile(filename: string, excludePatterns: string[]): boolean; 23 | export declare function splitIntoSoloFile(combinedCode: string): Record; 24 | export declare function extractFunctions(content: string): Promise; 25 | export declare function exponentialBackoff(fn: () => Promise, maxRetries: number, initialDelay: number, functionName: string): Promise; 26 | export declare function invokeModel(client: BedrockRuntimeClient, modelId: string, payloadInput: string, temperature?: number): Promise; 27 | -------------------------------------------------------------------------------- /docs/designIntentionClassification.md: -------------------------------------------------------------------------------- 1 | # Design Document: Intention Classification and Function Registry for GitHub Operations 2 | 3 | ## 1. Introduction 4 | 5 | This document outlines the design for implementing the GitHub App which uses intention classification to interpret user queries and a agent-alike framework to execute appropriate actions. This design aims to create a modular, extensible, and efficient system capable of handling a wide range of GitHub-related tasks. 6 | 7 | ## 2. System Overview 8 | 9 | The system consists of several key components: 10 | 1. Intention Classifier: Uses LLM to classify user intentions. 11 | 2. Action Type Determiner: Determines the appropriate action type based on the classified intention. 12 | 3. Function Registry: Manages the available functions for different action types. 13 | 4. 
Action Executor: Selects and executes the appropriate function(s) based on the action type and user query. 14 | 5. Large Language Model (LLM) & Prompt Templates: Interfaces with the Large Language Model for various tasks. 15 | 16 | The system works as follows: 17 | ```mermaid 18 | graph TD 19 | A[User Query] --> B[Context Extractor] 20 | B --> C[Inputs Object] 21 | C --> D[Prompts] 22 | D --> E[LLM Claude 3.5 on Amazon Bedrock] 23 | E --> F[Intention Classifier] 24 | F --> G[Action Type Determiner] 25 | G --> H{Action Type} 26 | H -->|LLMOnly| I[LLM-Only Function Executor] 27 | H -->|LLMWithRegisteredFunction| J[LLMWithRegisteredFunction Function Executor] 28 | H -->|LLMWithRegisteredFunctionAndCodebase| K[LLMWithRegisteredFunctionAndCodebase Function Executor] 29 | I --> L[Function Registry] 30 | J --> L 31 | K --> L 32 | L --> M[Execute Function] 33 | M --> N[Return Result to User] 34 | ``` 35 | 36 | ## 3. Query Categories 37 | 38 | The system will handle various types of user queries related to GitHub operations, including but not limited to: 39 | 40 | | Intention Category | Action | 41 | |--------------------|--------| 42 | | Code review and analysis | Invoke GitHub API to fetch specific commit/file details or load the whole codebase into memory, then use LLM or registered functions to apply thereon, set to action type LLMWithRegisteredFunctionAndCodebase | 43 | | Repository management | Use GitHub API to fetch repository details or load the whole codebase into memory, then use LLM or registered functions to apply thereon, set to action type LLMWithRegisteredFunction | 44 | | Documentation tasks | Use LLM to generate documentation according to a style guide, then use registered functions to apply them, set to action type LLMOnly | 45 | | GitHub Actions and CI/CD operations | Use GitHub API to fetch workflow details, then use LLM or registered functions to apply thereon, set to action type LLMWithRegisteredFunction | 46 | | Other (general query) | Use LLM to generate output, set to action type LLMOnly | 47 | 48 | Sample queries: 49 | 50 | - Code review and analysis: 51 | -- I pushed a fix in commit , please review it. 52 | -- Generate unit testing code for this file, or read src/utils.ts and generate unit testing code. 53 | 54 | - Repository management: 55 | -- Summarize stats about this repository and render them as a table. Additionally, render a pie chart showing the language distribution in the codebase. 56 | -- Modularize this function. 57 | -- Read the files in the src/scheduler package and generate a class diagram using mermaid and a README in the markdown format. 58 | 59 | - Documentation tasks: 60 | -- Generate a Pull Request description for this PR. 61 | 62 | - GitHub Actions and CI/CD operations: 63 | -- Create a GitHub Actions workflow to deploy this service to AWS Lambda. 64 | 65 | ## 4. 
Key Components 66 | 67 | ### 4.1 Intention Classifier 68 | 69 | The Intention Classifier uses the LLM to interpret user queries and classify them into predefined categories: 70 | 71 | ```typescript 72 | class IntentionClassifier { 73 | private llm: LargeLanguageModel; 74 | 75 | constructor(llm: LargeLanguageModel) { 76 | this.llm = llm; 77 | } 78 | 79 | async classify(query: string, context: any): Promise { 80 | const inputs = new Inputs(context); 81 | const prompt = Prompts.renderIntentionClassificationPrompt(inputs); 82 | return this.llm.classify(prompt); 83 | } 84 | } 85 | ``` 86 | 87 | ### 4.2 Action Type Determiner 88 | 89 | The Action Type Determiner maps classified intentions to appropriate action types: 90 | 91 | ```typescript 92 | class ActionTypeDeterminer { 93 | determineActionType(intention: string): FunctionType { 94 | // Logic to map intention to action type 95 | } 96 | } 97 | ``` 98 | 99 | We define three main action types: 100 | 1. LLMOnly, only LLM is needed to generate the output 101 | 2. LLMWithRegisteredFunction, LLM is needed together with registered functions, e.g. GitHub API calls 102 | 3. LLMWithRegisteredFunctionAndCodebase, LLM is needed together with registered functions and the whole codebase is loaded into memory 103 | 104 | ### 4.3 Function Registry 105 | 106 | We implement the Function Registry to manage the registration and retrieval of functions: 107 | 108 | ```typescript 109 | interface RegisteredFunction { 110 | id: string; 111 | name: string; 112 | description: string; 113 | type: FunctionType; 114 | execute: (query: string, context: any) => Promise; 115 | } 116 | 117 | class FunctionRegistry { 118 | private functions: Map = new Map(); 119 | 120 | registerFunction(func: RegisteredFunction): void { /* ... */ } 121 | getFunction(id: string): RegisteredFunction | undefined { /* ... */ } 122 | listFunctions(type?: FunctionType): RegisteredFunction[] { /* ... */ } 123 | updateFunction(id: string, updates: Partial): void { /* ... */ } 124 | deleteFunction(id: string): boolean { /* ... */ } 125 | } 126 | ``` 127 | 128 | 129 | ### 4.4 Action Executor 130 | 131 | The Action Executor is responsible for: 132 | (1) select proper function to execute based on specific user query per category; 133 | (2) orchestrate the workflow of possible multiple function execution along with LLM invocation; 134 | (3) error handling of function execution and iteration with feedback or error message; 135 | (4) evaluation on the output per iteration and judgement on the criterion the final output meets. 
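To make the classification-to-execution flow concrete, here is a minimal, illustrative sketch of the intention-to-action-type mapping that section 4.2 leaves as a stub. The `FunctionType` member names mirror the three action types listed above and the category strings follow the table in section 3; both are assumptions for illustration rather than a final contract. The `ActionExecutor` sketch itself follows below.

```typescript
// Hypothetical FunctionType enum; names taken from the three action types in section 4.2.
enum FunctionType {
  LLMOnly = 'LLMOnly',
  LLMWithRegisteredFunction = 'LLMWithRegisteredFunction',
  LLMWithRegisteredFunctionAndCodebase = 'LLMWithRegisteredFunctionAndCodebase',
}

class ActionTypeDeterminer {
  // Maps a classified intention (categories from the table in section 3) to an action type.
  determineActionType(intention: string): FunctionType {
    switch (intention) {
      case 'Code review and analysis':
        return FunctionType.LLMWithRegisteredFunctionAndCodebase;
      case 'Repository management':
      case 'GitHub Actions and CI/CD operations':
        return FunctionType.LLMWithRegisteredFunction;
      case 'Documentation tasks':
      case 'Other (general query)':
      default:
        return FunctionType.LLMOnly;
    }
  }
}
```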
136 | 137 | ```typescript 138 | class ActionExecutor { 139 | private registry: FunctionRegistry; 140 | private llm: LargeLanguageModel; 141 | 142 | constructor(registry: FunctionRegistry, llm: LargeLanguageModel) { 143 | this.registry = registry; 144 | this.llm = llm; 145 | } 146 | 147 | async execute(intention: string, query: string, context: any): Promise { 148 | const actionType = this.determineActionType(intention); 149 | const functions = this.registry.listFunctions(actionType); 150 | const selectedFunction = await this.selectFunction(functions, query, context); 151 | 152 | let result: any; 153 | let iterations = 0; 154 | const maxIterations = 3; 155 | 156 | do { 157 | try { 158 | result = await this.executeFunction(selectedFunction, query, context); 159 | const evaluation = await this.evaluateOutput(result, query, context); 160 | 161 | if (evaluation.isSatisfactory) { 162 | return { success: true, result, iterations }; 163 | } else if (iterations < maxIterations) { 164 | context = { ...context, previousResult: result, feedback: evaluation.feedback }; 165 | } else { 166 | return { success: false, error: "Max iterations reached without satisfactory result" }; 167 | } 168 | } catch (error) { 169 | const errorHandler = await this.determineErrorHandling(error, query, context); 170 | if (errorHandler.retry && iterations < maxIterations) { 171 | context = { ...context, error, errorFeedback: errorHandler.feedback }; 172 | } else { 173 | return { success: false, error: errorHandler.message }; 174 | } 175 | } 176 | iterations++; 177 | } while (iterations < maxIterations); 178 | } 179 | 180 | private async selectFunction(functions: RegisteredFunction[], query: string, context: any): Promise { /* ... */ } 181 | private async executeFunction(func: RegisteredFunction, query: string, context: any): Promise { /* ... */ } 182 | private async evaluateOutput(result: any, query: string, context: any): Promise<{ isSatisfactory: boolean, feedback?: string }> { /* ... */ } 183 | private async determineErrorHandling(error: any, query: string, context: any): Promise<{ retry: boolean, feedback?: string, message: string }> { /* ... */ } 184 | private determineActionType(intention: string): FunctionType { /* ... */ } 185 | } 186 | ``` 187 | 188 | ### 4.5 Large Language Model (LLM) & Prompt Templates 189 | 190 | We will use Claude 3.5 or a similar LLM through Amazon Bedrock for intention classification, and the LLM will be provided with context about GitHub operations and the user's query. 191 | 192 | We will implement a modular prompt system with two main classes: 193 | 194 | ```typescript 195 | class Inputs { 196 | // Properties to store various input parameters 197 | constructor(context: any) { 198 | // Initialize properties based on context 199 | } 200 | clone(): Inputs { 201 | // Return a deep copy of the inputs 202 | } 203 | render(template: string): string { 204 | // Replace placeholders in template with actual values 205 | } 206 | } 207 | 208 | class Prompts { 209 | static intentionClassificationPrompt = "..."; // Prompt template 210 | static renderIntentionClassificationPrompt(inputs: Inputs): string { 211 | return inputs.render(this.intentionClassificationPrompt); 212 | } 213 | // Other prompt methods... 214 | } 215 | ``` 216 | 217 | ## 5. 
Future Work 218 | - Develop a versioning system for registered functions 219 | - Implement function chaining for complex operations 220 | - Implement parallel execution for independent actions in complex queries 221 | - Create a visual workflow designer for complex action sequences 222 | - Develop a plugin system for easy addition of new action handlers 223 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | preset: 'ts-jest', 3 | testEnvironment: 'node', 4 | roots: ['/test'], 5 | testMatch: ['**/*.test.ts'], 6 | transform: { 7 | '^.+\\.ts$': 'ts-jest', 8 | }, 9 | moduleNameMapper: { 10 | '^@/(.*)$': '/src/$1', 11 | }, 12 | setupFilesAfterEnv: ['/jest.setup.js'], 13 | testPathIgnorePatterns: ['/node_modules/', '/dist/'], 14 | } -------------------------------------------------------------------------------- /jest.setup.js: -------------------------------------------------------------------------------- 1 | jest.mock('@actions/github', () => ({ 2 | context: { 3 | repo: { 4 | owner: 'testOwner', 5 | repo: 'testRepo', 6 | }, 7 | payload: { 8 | pull_request: { 9 | number: 1, 10 | head: { 11 | sha: 'testSha', 12 | ref: 'testRef', 13 | }, 14 | }, 15 | }, 16 | }, 17 | getOctokit: jest.fn(), 18 | })); 19 | 20 | jest.mock('@aws-sdk/client-bedrock-runtime', () => ({ 21 | BedrockRuntimeClient: jest.fn(), 22 | InvokeModelCommand: jest.fn(), 23 | })); -------------------------------------------------------------------------------- /notebook/lambda_function.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import boto3 4 | # Initialize SageMaker Runtime client 5 | smr_client = boto3.client('sagemaker-runtime') 6 | 7 | def lambda_handler(event, context): 8 | 9 | # Get the endpoint name from environment variable or configuration 10 | endpoint_name = os.environ.get('SAGEMAKER_ENDPOINT_NAME') 11 | 12 | # Parse the input from the API Gateway event 13 | body = json.loads(event['body']) 14 | prompt = body.get('prompt', '') 15 | params = body.get('parameters', { 16 | "max_new_tokens": 256, 17 | "temperature": 0.1 18 | }) 19 | 20 | try: 21 | # Invoke the SageMaker endpoint 22 | response = smr_client.invoke_endpoint( 23 | EndpointName=endpoint_name, 24 | Body=json.dumps({ 25 | "inputs": prompt, 26 | "parameters": params 27 | }), 28 | ContentType="application/json" 29 | ) 30 | 31 | # Parse and return the result 32 | result = json.loads(response['Body'].read().decode("utf8")) 33 | 34 | return { 35 | 'statusCode': 200, 36 | 'body': json.dumps(result) 37 | } 38 | except Exception as e: 39 | return { 40 | 'statusCode': 500, 41 | 'body': json.dumps({'error': str(e)}) 42 | } 43 | -------------------------------------------------------------------------------- /notebook/lambda_function.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-cicd-suite/d15da72c3ab2cacc50e21ca5e4866b7dcecae990/notebook/lambda_function.zip -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "intelli-ops", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "dist/index.js", 6 | "scripts": { 7 | "prebuild": "npm install --save-dev @vercel/ncc", 8 | "build": "tsc -p tsconfig.build.json", 9 | 
"lint": "eslint .", 10 | "package": "npm run build && rm ./dist/index.js || true && cp ./build/index.js ./dist/ && ncc build --license THIRD-PARTY -o dist && copyup -E dist/THIRD-PARTY . || true && del-cli dist/THIRD-PARTY || true && cp ./src/codeLayout.sh ./dist/", 11 | "test": "jest" 12 | }, 13 | "keywords": [], 14 | "author": "", 15 | "license": "MIT", 16 | "dependencies": { 17 | "@actions/core": "^1.10.1", 18 | "@actions/github": "^6.0.0", 19 | "@aws-sdk/client-bedrock-runtime": "^3.621.0", 20 | "copyfiles": "^2.4.1", 21 | "del-cli": "^5.1.0", 22 | "ts-jest": "^29.2.5" 23 | }, 24 | "devDependencies": { 25 | "@types/jest": "^29.5.12", 26 | "@types/node": "^20.14.11", 27 | "@vercel/ncc": "^0.38.2", 28 | "aws-sdk-client-mock": "^4.0.1", 29 | "jest": "^29.7.0", 30 | "jest-fetch-mock": "^3.0.3", 31 | "ts-node": "^10.9.2", 32 | "typescript": "^5.5.4" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/codeLayout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: ./codeLayout.sh [REPO_DIR] [OUTPUT_FILE] [EXTRACT_MODE] [FILE_EXTENSIONS] 3 | # Description: Combine all code files in a repository into a single file, with option to extract functions or include whole file content 4 | # Example: ./codeLayout.sh ~/projects/my_project combined_output.txt functions py js html css java cpp h cs 5 | # Example: ./codeLayout.sh ~/projects/my_project combined_output.txt whole py js html css java cpp h cs 6 | 7 | # Directory of the repository (default to current directory if not specified) 8 | REPO_DIR="${1:-.}" 9 | 10 | # Output file (default to combined_output.txt if not specified) 11 | OUTPUT_FILE="${2:-combined_output.txt}" 12 | 13 | # Extract mode (default to 'functions' if not specified) 14 | EXTRACT_MODE="${3:-functions}" 15 | 16 | # List of file extensions to include (default to a predefined list if not specified) 17 | FILE_EXTENSIONS=("${@:4}") 18 | if [ ${#FILE_EXTENSIONS[@]} -eq 0 ]; then 19 | FILE_EXTENSIONS=("py" "js" "java" "cpp" "ts") 20 | fi 21 | 22 | # Empty the output file if it exists 23 | > "$OUTPUT_FILE" 24 | 25 | # Function to extract functions and combine files 26 | combine_files() { 27 | local dir="$1" 28 | local find_command="find \"$dir\" -type f \\( -name \"*.${FILE_EXTENSIONS[0]}\"" 29 | for ext in "${FILE_EXTENSIONS[@]:1}"; do 30 | find_command+=" -o -name \"*.$ext\"" 31 | done 32 | find_command+=" \\) -not -path \"*/node_modules/*\" -print0" 33 | 34 | eval $find_command | while IFS= read -r -d '' file; do 35 | echo "// File: $file" >> "$OUTPUT_FILE" 36 | if [ "$EXTRACT_MODE" = "functions" ]; then 37 | # Extract functions only 38 | perl -0777 -ne 'print "$&\n\n" while /((?:export\s+)?(?:async\s+)?function\s+\w+\s*\([^)]*\)(?:\s*:\s*[^{]*?)?\s*{(?:[^{}]*|\{(?:[^{}]*|\{[^{}]*\})*\})*})/gs' "$file" >> "$OUTPUT_FILE" 39 | else 40 | # Include whole file content 41 | cat "$file" >> "$OUTPUT_FILE" 42 | fi 43 | echo -e "\n" >> "$OUTPUT_FILE" 44 | done 45 | } 46 | 47 | # Combine the files 48 | combine_files "$REPO_DIR" 49 | 50 | echo "All files have been processed and combined into $OUTPUT_FILE" 51 | if [ "$EXTRACT_MODE" = "functions" ]; then 52 | echo "Mode: Extracted functions only" 53 | else 54 | echo "Mode: Included whole file content" 55 | fi -------------------------------------------------------------------------------- /src/codeReviewInline.ts: -------------------------------------------------------------------------------- 1 | import * as core from '@actions/core'; 
2 | import { getOctokit, context } from '@actions/github'; 3 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 4 | import { invokeModel, PullRequest, PullFile, shouldExcludeFile, languageCodeToName, LanguageCode } from '@/src/utils'; 5 | import { Inputs, Prompts} from '@/src/prompts'; 6 | 7 | const CODE_REVIEW_HEADER = "🔍 AI Code Review (Powered by Amazon Bedrock)"; 8 | 9 | export async function generateCodeReviewComment(bedrockClient: BedrockRuntimeClient, modelId: string, octokit: ReturnType, excludePatterns: string[], reviewLevel: string, outputLanguage: string): Promise { 10 | 11 | const pullRequest = context.payload.pull_request as PullRequest; 12 | const repo = context.repo; 13 | 14 | // fetch the list of files changed in the PR each time since the file can be changed in operation like unit test generation etc. 15 | const { data: files } = await octokit.rest.pulls.listFiles({ 16 | ...repo, 17 | pull_number: pullRequest.number, 18 | }); 19 | 20 | let reviewComments: { path: string; position: number; body: string }[] = []; 21 | let ignoredFilesCount = 0; 22 | let selectedFilesCount = 0; 23 | let additionalCommentsCount = 0; 24 | let ignoredFilesDetails: string[] = []; 25 | let selectedFilesDetails: string[] = []; 26 | let additionalCommentsDetails: string[] = []; 27 | 28 | const inputs: Inputs = new Inputs() 29 | const prompts: Prompts = new Prompts() 30 | 31 | for (const file of files as PullFile[]) { 32 | // The sample contents of file.patch, which contains a unified diff representation of the changes made to a file in a pull request, with multiple hunks in the file 33 | // diff --git a/file1.txt b/file1.txt 34 | // index 7cfc5c8..e69de29 100644 35 | // --- a/file1.txt 36 | // +++ b/file1.txt 37 | 38 | // @@ -1,3 +1,2 @@ 39 | // -This is the original line 1. 40 | // -This is the original line 2. 41 | // +This is the new line 1. 42 | // This is an unchanged line. 43 | // @@ -10,3 +10,2 @@ 44 | // -This is the original line 10. 45 | // -This is the original line 11. 46 | // +This is the new line 10. 47 | // This is an unchanged line. 48 | 49 | // @@ is the hunk header that shows where the changes are and how many lines are changed. In this case, it indicates that the changes start at line 1 of the old file and affect 3 lines, and start at line 1 of the new file and affect 2 lines. 50 | 51 | // console.log(`File patch content: ${file.patch} for file: ${file.filename}`); 52 | if (file.status !== 'removed' && file.patch && !shouldExcludeFile(file.filename, excludePatterns)) { 53 | selectedFilesCount++; 54 | 55 | // Split the patch into hunks, but keep the hunk headers 56 | const hunks = file.patch.split(/(?=^@@\s+-\d+,\d+\s+\+\d+,\d+\s+@@)/m); 57 | // console.log(`=========================================== File patch for ${file.filename} ===========================================`); 58 | // console.log(`Hunks: ${hunks}`); 59 | // console.log(`=========================================== File patch for ${file.filename} ===========================================`); 60 | selectedFilesDetails.push(`${file.filename} (${hunks.length} hunks)`); 61 | 62 | let totalPosition = 0; 63 | for (const [hunkIndex, hunk] of hunks.entries()) { 64 | // hunkLines and hunkContent indeed contain the same information, just in different formats: 65 | // hunkLines is an array of strings, where each string represents a line of the hunk. 66 | // hunkContent is a single string, which is the result of joining all the lines in hunkLines with newline characters. 
67 | const hunkLines: string[] = hunk.split('\n') 68 | const hunkContent: string = hunkLines.join('\n'); 69 | const languageName = languageCodeToName[outputLanguage as LanguageCode] || 'English'; 70 | if (!(outputLanguage in languageCodeToName)) { 71 | core.warning(`Unsupported output language: ${outputLanguage}. Defaulting to English.`); 72 | } 73 | 74 | // console.log(`=========================================== Hunk ${hunkIndex} of ${file.filename} ===========================================`); 75 | // console.log(`Hunk: ${hunk}`); 76 | // console.log(`Hunk lines: ${hunkLines}`); 77 | // console.log(`Hunk content: ${hunkContent}`); 78 | // console.log(`=========================================== Hunk ${hunkIndex} of ${file.filename} ===========================================`); 79 | 80 | // Assemble the inputs for the prompt 81 | inputs.title = pullRequest.title; 82 | inputs.description = pullRequest.body; 83 | // inputs.rawSummary = pullRequest.body; 84 | // inputs.shortSummary = pullRequest.body; 85 | inputs.fileName = file.filename; 86 | // inputs.fileContent = file.patch; 87 | // inputs.fileDiff = file.patch; 88 | inputs.hunkContent = hunkContent; 89 | // inputs.patches = file.patch; 90 | // inputs.diff = file.patch; 91 | // inputs.commentChain = file.patch; 92 | // inputs.comment = file.patch; 93 | inputs.languageName = languageName; 94 | 95 | var finalPromt = reviewLevel === 'detailed' ? prompts.renderDetailedReviewPrompt(inputs) : prompts.renderConciseReviewPrompt(inputs); 96 | var review = await invokeModel(bedrockClient, modelId, finalPromt); 97 | 98 | if (!review || review.trim() == '') { 99 | console.warn(`No review comments generated for hunk ${hunkIndex} in file ${file.filename}, skipping`); 100 | continue; 101 | } 102 | 103 | // TODO, this is a temporary workaround to remove all the xml tag with content "review comments" in the output if it exists, e.g. , , , and , , , 104 | review = review.replace(/|<\/Review Comments>||<\/review Comments>||<\/review comment>||<\/Review comment>/g, '').trim(); 105 | 106 | if (review.includes('Looks Good To Me')) { 107 | additionalCommentsCount++; 108 | // add delimiter symbol "---" per file end to make the output more readable 109 | additionalCommentsDetails.push(`${file.filename} (hunk index: ${hunkIndex}):\n${review}\n\n---\n`); 110 | console.log("The full review skipped due to LGTM is: ", review); 111 | continue; 112 | } 113 | console.log("Review for file: ", file.filename, "hunk: ", hunkIndex, "is: ", review); 114 | // Parse multiple comments from the review according to current prompt template, example output: 115 | /* 116 | 8-8: 117 | **Use type annotations for function parameters and return types.** TypeScript provides type annotations to help catch potential bugs during development and improve code maintainability. 118 | 119 | ```typescript 120 | export function add(a: number, b: number): number { 121 | return a + b; 122 | } 123 | ``` 124 | 125 | 6-7: 126 | **Consider using a more descriptive function name for `subtract`.** A function name like `minus` or `difference` might better convey the operation being performed. 127 | 128 | ```typescript 129 | export function subtract(a: number, b: number): number { 130 | return a - b; 131 | } 132 | ``` 133 | */ 134 | const comments = parseReviewComments(review); 135 | 136 | for (const comment of comments) { 137 | const { startLine, endLine, body } = comment; 138 | // Calculate the actual position in the file 139 | const hunkHeaderMatch = hunkLines[0] ? 
hunkLines[0].match(/^@@ -\d+,\d+ \+(\d+),/) : null; 140 | const hunkStartLine = hunkHeaderMatch && hunkHeaderMatch[1] ? parseInt(hunkHeaderMatch[1]) : 1; 141 | // We add 1 to calculate the correct position because: 1. GitHub's API uses 1-based indexing for line numbers; 2. The position should account for the hunk header line 142 | const position = totalPosition + (startLine - hunkStartLine + 1); 143 | // Prepend the header to each review comment 144 | const reviewWithHeader = `${CODE_REVIEW_HEADER}\n\n${body}`; 145 | // The position value equals the number of lines down from the first "@@" hunk header in the file you want to add a comment. The line just below the "@@" line is position 1, the next line is position 2, and so on. The position in the diff continues to increase through lines of whitespace and additional hunks until the beginning of a new file. 146 | reviewComments.push({ 147 | path: file.filename, 148 | position: position, 149 | body: reviewWithHeader, 150 | }); 151 | } 152 | 153 | totalPosition += hunkLines.length - 1; // -1 to account for the hunk header 154 | } 155 | } else { 156 | ignoredFilesCount++; 157 | console.log(`Skipping file: ${file.filename} due to the file being removed or explicitly excluded`); 158 | ignoredFilesDetails.push(`${file.filename} is excluded by exclude rules`); 159 | } 160 | } 161 | 162 | // we always post the summary even if there is no review comments, so that we can let the user know the review level and the number of files processed 163 | if (reviewComments.length > 0 || additionalCommentsCount > 0) { 164 | let summaryTemplate = ` 165 | {{CODE_REVIEW_HEADER}} 166 | 167 | Actionable comments posted: ${reviewComments.length} 168 |
169 | <details><summary>Review Details</summary>
170 | <details>
171 | <summary>Review option chosen</summary>
172 | 
173 | - **Configuration used: GitHub Actions**
174 | - **Code review level: ${reviewLevel}**
175 | </details>
176 | <details>
177 | <summary>Commits</summary>
178 | Files that changed from the base of the PR and between ${pullRequest.base.sha} to ${pullRequest.head.sha}
179 | </details>
180 | <details>
181 | <summary>Files ignored due to path filters (${ignoredFilesCount})</summary>
182 | 
183 | ${ignoredFilesDetails.map(file => `- ${file}`).join('\n')}
184 | </details>
185 | <details>
186 | <summary>Files selected for processing (${selectedFilesCount})</summary>
187 | 
188 | ${selectedFilesDetails.map(file => `- ${file}`).join('\n')}
189 | </details>
190 | <details>
191 | <summary>Additional comments not posted (${additionalCommentsCount})</summary>
192 | 
193 | ${additionalCommentsDetails.map(file => `- ${file}`).join('\n')}
194 | </details>
195 | </details>
196 | `; 197 | summaryTemplate = summaryTemplate.replace('{{CODE_REVIEW_HEADER}}', CODE_REVIEW_HEADER); 198 | try { 199 | await octokit.rest.pulls.createReview({ 200 | ...repo, 201 | pull_number: pullRequest.number, 202 | commit_id: pullRequest.head.sha, 203 | body: summaryTemplate, 204 | event: 'COMMENT', 205 | // The review comment here will be empty if all the review comments are skipped due to "Looks Good To Me" 206 | comments: reviewComments, 207 | headers: { 208 | 'X-GitHub-Api-Version': '2022-11-28' 209 | } 210 | }); 211 | console.log('Code review comments posted successfully.'); 212 | } catch (error) { 213 | console.error('Error posting code review comments:', error); 214 | throw error; 215 | } 216 | } else { 217 | console.log('No review comments to post.'); 218 | } 219 | } 220 | 221 | function parseReviewComments(review: string): { startLine: number; endLine: number; body: string }[] { 222 | const comments = []; 223 | const lines = review.split('\n'); 224 | let currentComment = null; 225 | 226 | for (const line of lines) { 227 | const match = line.match(/^(\d+)-(\d+):/); 228 | if (match) { 229 | if (currentComment) { 230 | comments.push(currentComment); 231 | } 232 | currentComment = { 233 | startLine: parseInt(match[1] ?? ''), 234 | endLine: parseInt(match[2] ?? ''), 235 | body: line.slice(match[0].length).trim() 236 | }; 237 | } else if (currentComment) { 238 | currentComment.body += '\n' + line.trim(); 239 | } 240 | } 241 | 242 | if (currentComment) { 243 | comments.push(currentComment); 244 | } 245 | return comments; 246 | } 247 | -------------------------------------------------------------------------------- /src/deprecated-coverageAnalyzer.ts: -------------------------------------------------------------------------------- 1 | import { execSync } from 'child_process'; 2 | import * as fs from 'fs'; 3 | import * as path from 'path'; 4 | 5 | export async function analyzeCoverage(testFilePath: string, sourceCode: string): Promise<{ 6 | statements: number; 7 | branches: number; 8 | functions: number; 9 | lines: number; 10 | }> { 11 | const coverageDir = path.join(__dirname, '..', 'coverage'); 12 | if (!fs.existsSync(coverageDir)) { 13 | fs.mkdirSync(coverageDir, { recursive: true }); 14 | } 15 | 16 | const sourceFilePath = path.join(__dirname, '..', 'temp_source.ts'); 17 | fs.writeFileSync(sourceFilePath, sourceCode); 18 | 19 | try { 20 | execSync(`npx jest ${testFilePath} --coverage --coverageReporters="json-summary" --collectCoverageFrom=${sourceFilePath}`, { 21 | stdio: 'inherit', 22 | env: { ...process.env, NODE_ENV: 'test' }, 23 | }); 24 | 25 | const coverageSummary = JSON.parse(fs.readFileSync(path.join(coverageDir, 'coverage-summary.json'), 'utf-8')); 26 | const fileCoverage = coverageSummary[sourceFilePath]; 27 | 28 | return { 29 | statements: fileCoverage.statements.pct, 30 | branches: fileCoverage.branches.pct, 31 | functions: fileCoverage.functions.pct, 32 | lines: fileCoverage.lines.pct, 33 | }; 34 | } catch (error) { 35 | console.error('Error analyzing coverage:', error); 36 | return { 37 | statements: 0, 38 | branches: 0, 39 | functions: 0, 40 | lines: 0, 41 | }; 42 | } finally { 43 | fs.unlinkSync(sourceFilePath); 44 | } 45 | } -------------------------------------------------------------------------------- /src/deprecated-testGenerator.ts: -------------------------------------------------------------------------------- 1 | import * as fs from 'fs'; 2 | import * as path from 'path'; 3 | import { BedrockRuntimeClient, InvokeModelCommand } from 
"@aws-sdk/client-bedrock-runtime"; 4 | import { execSync } from 'child_process'; 5 | import { setTimeout } from 'timers/promises'; 6 | import { validateTestCases } from './deprecated-testValidator'; 7 | import { analyzeCoverage } from './deprecated-coverageAnalyzer'; 8 | import { TestCase, generateFakeResponse, createPrompt } from './deprecated-testUtils'; 9 | import { invokeModel } from './utils'; 10 | // Remove the duplicate TestCase interface and generateFakeResponse function 11 | 12 | export async function generateUnitTests(client: BedrockRuntimeClient, modelId: string, sourceCode: string): Promise { 13 | const prompt = createPrompt(sourceCode); 14 | console.log('Generating unit tests with total prompt length:', prompt.length); 15 | 16 | try { 17 | const finalResult = await invokeModel(client, modelId, prompt); 18 | if (finalResult === undefined) { 19 | console.log('Request timed out, returning fake response'); 20 | return await generateFakeResponse(); 21 | } 22 | try { 23 | const parsedTestCases = JSON.parse(finalResult.replace(/\n/g, '\\n')) as TestCase[]; 24 | if (!Array.isArray(parsedTestCases)) { 25 | throw new Error('Parsed result is not an array'); 26 | } 27 | const validatedTestCases = await validateTestCases(parsedTestCases, sourceCode); 28 | console.log('Generated and validated test cases:', validatedTestCases); 29 | return validatedTestCases; 30 | } catch (error) { 31 | console.error('Failed to parse or validate AI response:', error); 32 | console.log('Raw AI response:', finalResult); 33 | // Attempt to extract test cases manually in consideration of the inconsistent format of the AI response 34 | const extractedTestCases = extractTestCases(finalResult); 35 | if (extractedTestCases.length > 0) { 36 | console.log('Extracted test cases manually:', extractedTestCases); 37 | return extractedTestCases; 38 | } 39 | return []; 40 | } 41 | } catch (error) { 42 | console.error('Error occurred while generating unit tests:', error); 43 | return []; 44 | } 45 | } 46 | 47 | function extractTestCases(rawResponse: string): TestCase[] { 48 | const testCases: TestCase[] = []; 49 | const regex = /\{\s*"name":\s*"([^"]+)",\s*"type":\s*"([^"]+)",\s*"code":\s*"([^"]*)"\s*\}/g; 50 | let match; 51 | while ((match = regex.exec(rawResponse)) !== null) { 52 | if (match[1] && match[2] && match[3]) { 53 | testCases.push({ 54 | name: match[1], 55 | type: match[2] as 'direct' | 'indirect' | 'not-testable', 56 | code: match[3].replace(/\\n/g, '\n').replace(/\\"/g, '"') 57 | }); 58 | } 59 | } 60 | return testCases; 61 | } 62 | 63 | export async function runUnitTests(testCases: TestCase[], unitTestSourceFolder: string): Promise { 64 | 65 | const sourceFilePath = path.join(process.cwd(), unitTestSourceFolder); 66 | let sourceCode = ''; 67 | 68 | try { 69 | const files = fs.readdirSync(sourceFilePath); 70 | for (const file of files) { 71 | if (file.endsWith('.ts') || file.endsWith('.js')) { 72 | const filePath = path.join(sourceFilePath, file); 73 | const fileContent = fs.readFileSync(filePath, 'utf-8'); 74 | sourceCode += fileContent + '\n\n'; 75 | } 76 | } 77 | } catch (error) { 78 | console.error(`Error reading source files: ${error}`); 79 | return; 80 | } 81 | 82 | if (sourceCode === '') { 83 | console.warn('No source code files found in the specified directory. 
Skipping unit tests execution and report generation.'); 84 | return; 85 | } 86 | 87 | if (!Array.isArray(testCases) || testCases.length === 0) { 88 | console.log('Input test cases', testCases); 89 | console.log('No test cases to run'); 90 | return; 91 | } 92 | // note this is the temporary directory for storing the generated test cases while the actual test cases pushed to the repo are 'test/unit_tests.ts' handled the main function 93 | const testDir = path.join(__dirname, '..', 'generated_tests'); 94 | if (!fs.existsSync(testDir)) { 95 | fs.mkdirSync(testDir, { recursive: true }); 96 | } 97 | console.log('Writing test cases to:', testDir, testCases); 98 | const testFilePath = path.join(testDir, 'generated.test.ts'); 99 | const testFileContent = testCases 100 | .filter(tc => tc.type !== 'not-testable') 101 | .map(tc => tc.code) 102 | .join('\n\n'); 103 | 104 | fs.writeFileSync(testFilePath, testFileContent); 105 | 106 | try { 107 | // log out the execution result of the test 108 | execSync(`npx jest ${testFilePath}`, { stdio: 'inherit' }); 109 | console.log('Tests passed successfully'); 110 | const coverage = await analyzeCoverage(testFilePath, sourceCode); 111 | console.log('Test coverage:', coverage); 112 | } catch (error) { 113 | console.error('Error running tests:', error); 114 | } 115 | } 116 | 117 | export async function generateTestReport(testCases: TestCase[]): Promise { 118 | if (!Array.isArray(testCases)) { 119 | console.log('Invalid test cases input. Skipping report generation.'); 120 | return; 121 | } 122 | const report = { 123 | totalTests: testCases.length, 124 | directTests: testCases.filter(tc => tc.type === 'direct').length, 125 | indirectTests: testCases.filter(tc => tc.type === 'indirect').length, 126 | notTestable: testCases.filter(tc => tc.type === 'not-testable').length, 127 | }; 128 | 129 | const reportDir = path.join(__dirname, '..', 'reports'); 130 | if (!fs.existsSync(reportDir)) { 131 | fs.mkdirSync(reportDir, { recursive: true }); 132 | } 133 | 134 | const reportPath = path.join(reportDir, 'report.json'); 135 | fs.writeFileSync(reportPath, JSON.stringify(report, null, 2)); 136 | 137 | // TODO: upload the artifact from the report directory as an artifact named "logs", using actions/upload-artifact@v4 138 | console.log('Test report generated:', report); 139 | } -------------------------------------------------------------------------------- /src/deprecated-testUtils.ts: -------------------------------------------------------------------------------- 1 | export interface TestCase { 2 | name: string; 3 | type: 'direct' | 'indirect' | 'not-testable'; 4 | code: string; 5 | } 6 | 7 | export async function generateFakeResponse(): Promise { 8 | // Return a predefined fake response structure 9 | return [ 10 | { 11 | name: 'Default Unit Test due to the model time out during the test generation, most likely due to the prompt being too long', 12 | type: 'direct', 13 | code: "test('default test', () => { expect(true).toBe(true); });", 14 | }, 15 | ]; 16 | } 17 | 18 | export function createPrompt(sourceCode: string): string { 19 | return ` 20 | You are an expert TypeScript developer specializing in unit testing. Your task is to analyze the following TypeScript code and generate comprehensive unit tests using Jest. 21 | 22 | 23 | ${sourceCode} 24 | 25 | 26 | Please follow these steps: 27 | 1. Carefully read and understand the provided TypeScript code. 28 | 2. 
Categorize each method into one of these types: 29 | a) Methods that can be tested directly 30 | b) Methods that can be tested indirectly 31 | c) Methods that are not unit-testable 32 | 3. For each testable method, create a unit test using Jest. 33 | 4. Structure your response as a JSON array of test cases, where each test case has the following format: 34 | { 35 | "name": "Test name", 36 | "type": "direct" | "indirect" | "not-testable", 37 | "code": "The actual test code" 38 | } 39 | 40 | Important guidelines: 41 | - Ensure your tests are comprehensive and cover various scenarios, including edge cases. 42 | - Use clear and descriptive test names. 43 | - Include comments in your test code to explain the purpose of each test. 44 | - Follow TypeScript and Jest best practices. 45 | - For methods that are not unit-testable, explain why in a comment. 46 | - Make sure to import all necessary dependencies and mock external modules. 47 | - Use jest.mock() to mock external dependencies like fs, path, and child_process. 48 | - Include setup and teardown code (beforeEach, afterEach) where necessary. 49 | - Use appropriate Jest matchers (e.g., toHaveBeenCalledWith, toThrow) for precise assertions. 50 | - Consider using test.each for parameterized tests when appropriate. 51 | - Ensure that async functions are properly tested using async/await syntax. 52 | 53 | Here's an example of the expected output format: 54 | 55 | [ 56 | { 57 | "name": "Test input validation with empty array", 58 | "type": "direct", 59 | "code": "import { runUnitTests } from '../src/yourFile';\nimport * as fs from 'fs';\nimport * as path from 'path';\n\njest.mock('fs');\njest.mock('path');\njest.mock('child_process');\n\ndescribe('runUnitTests', () => {\n beforeEach(() => {\n jest.clearAllMocks();\n console.log = jest.fn();\n });\n\n it('should handle empty input array', async () => {\n // Test that the function handles an empty input array correctly\n await runUnitTests([]);\n expect(console.log).toHaveBeenCalledWith('Input test cases', []);\n expect(console.log).toHaveBeenCalledWith('No test cases to run');\n });\n});" 60 | } 61 | ] 62 | 63 | 64 | After generating the test cases, please review your output and ensure: 65 | 1. The tests are fully executable and correctly written. 66 | 2. The code is thoroughly commented for a beginner to understand. 67 | 3. The tests follow TypeScript and Jest best practices. 68 | 4. All external dependencies are properly mocked. 69 | 5. Edge cases and error scenarios are covered. 70 | 71 | Provide your response as a valid JSON array containing objects with the specified structure. Do not include any explanatory text outside of the JSON array. 
72 | `; 73 | } -------------------------------------------------------------------------------- /src/deprecated-testValidator.ts: -------------------------------------------------------------------------------- 1 | import * as ts from 'typescript'; 2 | import { TestCase } from './deprecated-testUtils'; 3 | 4 | export async function validateTestCases(testCases: TestCase[], sourceCode: string): Promise { 5 | const validatedTestCases: TestCase[] = []; 6 | 7 | for (const testCase of testCases) { 8 | if (isValidTestCase(testCase, sourceCode)) { 9 | validatedTestCases.push(testCase); 10 | } else { 11 | console.warn(`Invalid test case: ${testCase.name}`); 12 | } 13 | } 14 | 15 | return validatedTestCases; 16 | } 17 | 18 | function isValidTestCase(testCase: TestCase, sourceCode: string): boolean { 19 | // Check if the test case has all required properties 20 | if (!testCase.name || !testCase.type || !testCase.code) { 21 | return false; 22 | } 23 | 24 | // Check if the test case type is valid 25 | if (!['direct', 'indirect', 'not-testable'].includes(testCase.type)) { 26 | return false; 27 | } 28 | 29 | // Parse the source code and test code 30 | const sourceFile = ts.createSourceFile('source.ts', sourceCode, ts.ScriptTarget.Latest, true); 31 | const testFile = ts.createSourceFile('test.ts', testCase.code, ts.ScriptTarget.Latest, true); 32 | 33 | // Check if the test code references functions or classes from the source code 34 | const sourceSymbols = extractSymbols(sourceFile); 35 | const testSymbols = extractSymbols(testFile); 36 | 37 | return testSymbols.some(symbol => sourceSymbols.includes(symbol)); 38 | } 39 | 40 | function extractSymbols(sourceFile: ts.SourceFile): string[] { 41 | const symbols: string[] = []; 42 | 43 | function visit(node: ts.Node) { 44 | if (ts.isFunctionDeclaration(node) || ts.isClassDeclaration(node)) { 45 | if (node.name) { 46 | symbols.push(node.name.text); 47 | } 48 | } 49 | ts.forEachChild(node, visit); 50 | } 51 | 52 | visit(sourceFile); 53 | return symbols; 54 | } -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import * as core from '@actions/core'; 2 | import { getOctokit, context } from '@actions/github'; 3 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 4 | import { setTimeout } from 'timers/promises'; 5 | 6 | // current we support typescript and python, while the python library is not available yet, we will use typescript as the default language 7 | // using abosolute path to import the functions from testGenerator.ts 8 | import { generatePRDescription } from '@/src/prGeneration'; 9 | import { generateCodeReviewComment } from '@/src/codeReviewInline'; 10 | import { generateUnitTestsSuite } from '@/src/preview/testGenerator'; 11 | import { PullRequest } from '@/src/utils'; 12 | 13 | async function run(): Promise { 14 | try { 15 | const githubToken = core.getInput('github-token'); 16 | const awsRegion = core.getInput('aws-region'); 17 | const modelId = core.getInput('model-id'); 18 | const excludeFiles = core.getInput('generate-code-review-exclude-files'); 19 | const excludePatterns = excludeFiles ? 
excludeFiles.split(',').map(p => p.trim()) : []; 20 | const reviewLevel = core.getInput('generate-code-review-level'); 21 | const generateCodeReview = core.getInput('generate-code-review'); 22 | const generatePrDescription = core.getInput('generate-pr-description'); 23 | const generateUnitTest = core.getInput('generate-unit-test'); 24 | const outputLanguage = core.getInput('output-language'); 25 | const unitTestSourceFolder = core.getInput('generate-unit-test-source-folder'); 26 | const unitTestExcludeFiles = core.getInput('generate-unit-test-exclude-files'); 27 | const unitTestExcludePatterns = unitTestExcludeFiles ? unitTestExcludeFiles.split(',').map(p => p.trim()) : []; 28 | 29 | console.log(`GitHub Token: ${githubToken ? 'Token is set' : 'Token is not set'}`); 30 | console.log(`AWS Region: ${awsRegion}`); 31 | console.log(`Model ID: ${modelId}`); 32 | console.log(`Excluded files: ${excludeFiles}`); 33 | console.log(`Code review: ${generateCodeReview}`); 34 | console.log(`Output language: ${outputLanguage}`); 35 | console.log(`Review level: ${reviewLevel}`); 36 | console.log(`Generate PR description: ${generatePrDescription.toLowerCase() === 'true' ? 'true' : 'false'}`); 37 | console.log(`Generate unit test suite: ${generateUnitTest.toLowerCase() === 'true' ? 'true' : 'false'}`); 38 | console.log(`Generate unit test source folder: ${unitTestSourceFolder}`); 39 | console.log(`Generate unit test exclude files: ${unitTestExcludeFiles}`); 40 | 41 | if (!githubToken) { 42 | throw new Error('GitHub token is not set'); 43 | } 44 | 45 | const bedrockClient = new BedrockRuntimeClient({ region: awsRegion || 'us-east-1' }); 46 | const octokit = getOctokit(githubToken); 47 | 48 | if (!context.payload.pull_request) { 49 | console.log('No pull request found in the context. 
This action should be run only on pull request events.'); 50 | return; 51 | } 52 | 53 | const pullRequest = context.payload.pull_request as PullRequest; 54 | const repo = context.repo; 55 | 56 | console.log(`Reviewing PR #${pullRequest.number} in ${repo.owner}/${repo.repo}`); 57 | 58 | // branch to generate PR description 59 | if (generatePrDescription.toLowerCase() === 'true') { 60 | await generatePRDescription(bedrockClient, modelId, octokit); 61 | } 62 | 63 | // branch to generate code review comments 64 | if (generateCodeReview.toLowerCase() === 'true') { 65 | await generateCodeReviewComment(bedrockClient, modelId, octokit, excludePatterns, reviewLevel, outputLanguage); 66 | } 67 | 68 | // branch to generate unit tests suite 69 | if (generateUnitTest.toLowerCase() === 'true') { 70 | console.log('Start to generate unit test suite'); 71 | if (!unitTestSourceFolder) { 72 | throw new Error('Test folder path is not specified'); 73 | } 74 | /* 75 | export async function generateUnitTestsSuite( 76 | client: BedrockRuntimeClient, 77 | modelId: string, 78 | octokit: ReturnType, 79 | excludePatterns: string[], 80 | repo: { owner: string, repo: string }, 81 | unitTestSourceFolder: string 82 | ) 83 | */ 84 | await generateUnitTestsSuite(bedrockClient, modelId, octokit, repo, unitTestSourceFolder); 85 | } 86 | 87 | } catch (error) { 88 | if (error instanceof Error) { 89 | core.setFailed(`Error: ${error.message}`); 90 | console.error('Stack trace:', error.stack); 91 | } else { 92 | core.setFailed('An unknown error occurred'); 93 | } 94 | } 95 | } 96 | 97 | run(); 98 | -------------------------------------------------------------------------------- /src/prGeneration.ts: -------------------------------------------------------------------------------- 1 | import { getOctokit, context } from '@actions/github'; 2 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 3 | // using abosolute path to import the functions from testGenerator.ts 4 | import { invokeModel, PullRequest } from '@/src/utils'; 5 | 6 | const PR_DESCRIPTION_HEADER = "🤖 AI-Generated PR Description (Powered by Amazon Bedrock)"; 7 | 8 | const pr_generation_prompt = 9 | ` 10 | 11 | You are a developer tasked with creating a pull request (PR) for a software project. Your primary goal is to provide a clear and informative description of the changes you are proposing. 12 | 13 | 14 | 15 | Maintain a professional and informative tone. Be clear and concise in your descriptions. 16 | 17 | 18 | 19 | This pull request includes the following changes, in format of file name: file status: 20 | [Insert the code change to be referenced in the PR description] 21 | 22 | 23 | 24 | Please include a summary of the changes in one of the following categories: 25 | - Bug fix (non-breaking change which fixes an issue) 26 | - New feature (non-breaking change which adds functionality) 27 | - Breaking change (fix or feature that would cause existing functionality to not work as expected) 28 | - This change requires a documentation update 29 | 30 | Please also include relevant motivation and context. List any dependencies that are required for this change. 
31 | 32 | 33 | 34 | Provide your PR description in the following format: 35 | # Description 36 | [Insert the PR description here] 37 | 38 | ## Type of change 39 | [Select one of the following options in the checkbox] 40 | - [ ] Bug fix (non-breaking change which fixes an issue) 41 | - [ ] New feature (non-breaking change which adds functionality) 42 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 43 | - [ ] This change requires a documentation update 44 | 45 | `; 46 | 47 | let statsSummary: {file: string, added: number, removed: number, summary?: string}[] = []; 48 | 49 | function calculateFilePatchNumLines(fileChange: string): { added: number, removed: number } { 50 | const lines = fileChange.split('\n'); 51 | let added = 0; 52 | let removed = 0; 53 | 54 | lines.forEach(line => { 55 | if (line.startsWith('+')) { 56 | added++; 57 | } else if (line.startsWith('-')) { 58 | removed++; 59 | } 60 | }); 61 | 62 | return { added, removed }; 63 | } 64 | 65 | async function generateFileSummary(client: BedrockRuntimeClient, modelId: string, patch: string): Promise { 66 | const prompt = `Summarize the following code changes into concise and clear description in less than 30 words:\n\n${patch}`; 67 | return await invokeModel(client, modelId, prompt); 68 | } 69 | 70 | export async function generatePRDescription(client: BedrockRuntimeClient, modelId: string, octokit: ReturnType): Promise { 71 | const pullRequest = context.payload.pull_request as PullRequest; 72 | const repo = context.repo; 73 | 74 | // Fetch the current PR description 75 | const { data: currentPR } = await octokit.rest.pulls.get({ 76 | ...repo, 77 | pull_number: pullRequest.number, 78 | }); 79 | const originalDescription = currentPR.body || ''; 80 | 81 | // fetch the list of files changed in the PR each time since the file can be changed in operation like unit test generation, code review, etc. 
82 | const { data: files } = await octokit.rest.pulls.listFiles({ 83 | ...repo, 84 | pull_number: pullRequest.number, 85 | }); 86 | 87 | const fileNameAndStatus = await Promise.all(files.map(async (file) => { 88 | try { 89 | if (file.status === 'removed') { 90 | const { added, removed } = calculateFilePatchNumLines(file.patch as string); 91 | statsSummary.push({file: file.filename, added: 0, removed: removed, summary: 'This file is removed in this PR'}); 92 | return `${file.filename}: removed`; 93 | } else { 94 | const { data: content } = await octokit.rest.repos.getContent({ 95 | ...repo, 96 | path: file.filename, 97 | ref: pullRequest.head.sha, 98 | }); 99 | const { added, removed } = calculateFilePatchNumLines(file.patch as string); 100 | const summary = await generateFileSummary(client, modelId, file.patch as string); 101 | statsSummary.push({file: file.filename, added: added, removed: removed, summary: summary}); 102 | return `${file.filename}: ${file.status}`; 103 | } 104 | } catch (error) { 105 | if ((error as any).status === 404) { 106 | console.log(`File ${file.filename} not found in the repository`); 107 | return `${file.filename}: not found`; 108 | } 109 | return `${file.filename}: error`; 110 | } 111 | })); 112 | 113 | const prDescriptionTemplate = pr_generation_prompt.replace('[Insert the code change to be referenced in the PR description]', fileNameAndStatus.join('\n')); 114 | 115 | // Generate the new PR description 116 | const payloadInput = prDescriptionTemplate; 117 | const newPrDescription = await invokeModel(client, modelId, payloadInput); 118 | 119 | // Fix the table column width using div element and inline HTML 120 | const fixedDescription = ` 121 | ## File Stats Summary 122 | 123 | File number involved in this PR: *{{FILE_NUMBER}}*, unfold to see the details: 124 | 125 |
126 | <details> 127 | The file changes summary is as follows: 128 | 129 | | Files | Changes | Change Summary | 130 | |:-------|:--------|:--------------| 131 | {{FILE_CHANGE_SUMMARY}} 132 | 133 | </details> 134 | `; 135 | 136 | const fileChangeSummary = statsSummary.map(file => { 137 | const fileName = file.file; 138 | const changes = `${file.added} added, ${file.removed} removed`; 139 | return `| ${fileName} | ${changes} | ${file.summary || ''} |`; 140 | }).join('\n'); 141 | const fileNumber = statsSummary.length.toString(); 142 | const updatedDescription = fixedDescription 143 | .replace('{{FILE_CHANGE_SUMMARY}}', fileChangeSummary) 144 | .replace('{{FILE_NUMBER}}', fileNumber); 145 | 146 | // Combine the new PR description with the stats 147 | const aiGeneratedContent = newPrDescription + updatedDescription; 148 | 149 | // Create the foldable AI-generated content 150 | const foldableContent = ` 151 | <details> 152 | <summary>${PR_DESCRIPTION_HEADER}</summary> 153 | 154 | ${aiGeneratedContent} 155 | 156 | </details>
157 | `; 158 | 159 | // Combine the original description with the foldable AI-generated content 160 | const finalDescription = `${originalDescription}\n\n${foldableContent}`; 161 | 162 | // Update the PR with the combined description 163 | await octokit.rest.pulls.update({ 164 | ...repo, 165 | pull_number: pullRequest.number, 166 | body: finalDescription, 167 | }); 168 | console.log('PR description updated successfully with appended AI-generated content.'); 169 | } 170 | -------------------------------------------------------------------------------- /src/preview/languageModel.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 2 | import { invokeModel } from "../utils"; 3 | 4 | export interface ICompletionModel { 5 | getCompletions(prompt: string, temperature: number): Promise; 6 | } 7 | 8 | export class LanguageModel implements ICompletionModel { 9 | constructor( 10 | private client: BedrockRuntimeClient, 11 | private modelId: string 12 | ) {} 13 | 14 | async getCompletions(prompt: string, temperature: number): Promise { 15 | try { 16 | const completion = await invokeModel(this.client, this.modelId, prompt, temperature); 17 | // return the array of completions, only one completion for now 18 | return [completion]; 19 | } catch (error) { 20 | console.error("Error getting completions:", error); 21 | return []; 22 | } 23 | } 24 | } -------------------------------------------------------------------------------- /src/preview/prompt-obsolete.ts: -------------------------------------------------------------------------------- 1 | export class Prompt { 2 | readonly id: string; 3 | private snippets: string[]; 4 | private docComments: string = ''; 5 | private functionBody: string = ''; 6 | private error: string = ''; 7 | 8 | constructor(private apiFunction: string, snippets: string[] = []) { 9 | this.id = `prompt_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`; 10 | this.snippets = snippets; 11 | } 12 | 13 | assemble(): string { 14 | let assembledPrompt = `Write a unit test for the following API function:\n\n${this.apiFunction}\n\n`; 15 | 16 | if (this.snippets.length > 0) { 17 | assembledPrompt += "Usage examples:\n" + this.snippets.join("\n") + "\n\n"; 18 | } 19 | 20 | if (this.docComments) { 21 | assembledPrompt += "Function documentation:\n" + this.docComments + "\n\n"; 22 | } 23 | 24 | if (this.functionBody) { 25 | assembledPrompt += "Function body:\n" + this.functionBody + "\n\n"; 26 | } 27 | 28 | if (this.error) { 29 | assembledPrompt += "Previous error:\n" + this.error + "\n\nPlease address this error in the new test.\n\n"; 30 | } 31 | 32 | assembledPrompt += "Generate a complete, runnable unit test for this function:"; 33 | 34 | return assembledPrompt; 35 | } 36 | 37 | createTestSource(completion: string): string { 38 | return ` 39 | const assert = require('assert'); 40 | const { ${this.extractFunctionName(this.apiFunction)} } = require('../src/your-module'); 41 | 42 | describe('${this.extractFunctionName(this.apiFunction)} Tests', () => { 43 | ${completion} 44 | }); 45 | `; 46 | } 47 | 48 | private extractFunctionName(apiFunction: string): string { 49 | const match = apiFunction.match(/function\s+(\w+)/); 50 | return match && match[1] ? 
match[1] : 'UnknownFunction'; 51 | } 52 | 53 | hasSnippets(): boolean { 54 | return this.snippets.length > 0; 55 | } 56 | 57 | hasDocComments(): boolean { 58 | return this.docComments !== ''; 59 | } 60 | 61 | hasFunctionBody(): boolean { 62 | return this.functionBody !== ''; 63 | } 64 | 65 | addSnippets(): Prompt { 66 | // In a real implementation, you'd fetch snippets from somewhere 67 | const newSnippets = [...this.snippets, "const result = apiFunction(arg1, arg2);"]; 68 | return new Prompt(this.apiFunction, newSnippets); 69 | } 70 | 71 | addDocComments(): Prompt { 72 | const newPrompt = new Prompt(this.apiFunction, this.snippets); 73 | newPrompt.docComments = "/** This function does something important */"; 74 | return newPrompt; 75 | } 76 | 77 | addFunctionBody(): Prompt { 78 | const newPrompt = new Prompt(this.apiFunction, this.snippets); 79 | newPrompt.functionBody = "function body { /* implementation */ }"; 80 | return newPrompt; 81 | } 82 | 83 | addError(error: string): Prompt { 84 | const newPrompt = new Prompt(this.apiFunction, this.snippets); 85 | newPrompt.error = error; 86 | return newPrompt; 87 | } 88 | } -------------------------------------------------------------------------------- /src/preview/promptRefiner.ts: -------------------------------------------------------------------------------- 1 | import { Prompt } from './prompt-obsolete'; 2 | 3 | export class PromptRefiner { 4 | static refinePrompt(prompt: Prompt, error: string): Prompt[] { 5 | const refinedPrompts: Prompt[] = []; 6 | 7 | // SnippetIncluder 8 | if (!prompt.hasSnippets()) { 9 | refinedPrompts.push(prompt.addSnippets()); 10 | } 11 | 12 | // RetryWithError 13 | refinedPrompts.push(prompt.addError(error)); 14 | 15 | // DocCommentIncluder 16 | if (!prompt.hasDocComments()) { 17 | refinedPrompts.push(prompt.addDocComments()); 18 | } 19 | 20 | // FunctionBodyIncluder 21 | if (!prompt.hasFunctionBody()) { 22 | refinedPrompts.push(prompt.addFunctionBody()); 23 | } 24 | 25 | return refinedPrompts; 26 | } 27 | } -------------------------------------------------------------------------------- /src/preview/resultCollector.ts: -------------------------------------------------------------------------------- 1 | import { Prompts } from '../prompts'; 2 | import { ICoverageSummary } from './testValidator'; 3 | 4 | export interface ITestInfo { 5 | testName: string; 6 | testSource: string; 7 | prompt: Prompts; 8 | } 9 | 10 | export interface IPromptInfo { 11 | prompt: Prompts; 12 | completionsCount: number; 13 | } 14 | 15 | export interface ITestResultCollector { 16 | recordTestInfo(testInfo: ITestInfo): void; 17 | recordTestResult(testInfo: ITestInfo & { outcome: { status: string; error?: string } }): void; 18 | recordPromptInfo(prompt: Prompts, completionsCount: number): void; 19 | recordCoverageInfo(coverageSummary: ICoverageSummary): void; 20 | hasPrompt(prompt: Prompts): boolean; 21 | getTestResults(): Array; 22 | getCoverageInfo(): ICoverageSummary; 23 | } 24 | 25 | export class BaseTestResultCollector implements ITestResultCollector { 26 | private tests: Map = new Map(); 27 | private prompts: Map = new Map(); 28 | private testResults: Array = []; 29 | private coverageInfo: ICoverageSummary | null = null; 30 | 31 | recordTestInfo(testInfo: ITestInfo): void { 32 | this.tests.set(testInfo.testName, testInfo); 33 | } 34 | 35 | recordTestResult(testInfo: ITestInfo & { outcome: { status: string; error?: string } }): void { 36 | this.testResults.push(testInfo); 37 | } 38 | 39 | recordPromptInfo(prompt: Prompts, 
completionsCount: number): void { 40 | this.prompts.set(prompt.id, { prompt, completionsCount }); 41 | } 42 | 43 | recordCoverageInfo(coverageSummary: ICoverageSummary): void { 44 | this.coverageInfo = coverageSummary; 45 | } 46 | 47 | hasPrompt(prompt: Prompts): boolean { 48 | return this.prompts.has(prompt.id); 49 | } 50 | 51 | getTestResults(): Array { 52 | return this.testResults; 53 | } 54 | 55 | getTestSource(testName: string): string | null { 56 | const testInfo = this.tests.get(testName); 57 | return testInfo ? testInfo.testSource : null; 58 | } 59 | 60 | getCoverageInfo(): ICoverageSummary { 61 | return this.coverageInfo || { 62 | lines: { total: 0, covered: 0, skipped: 0, pct: 0 }, 63 | statements: { total: 0, covered: 0, skipped: 0, pct: 0 }, 64 | functions: { total: 0, covered: 0, skipped: 0, pct: 0 }, 65 | branches: { total: 0, covered: 0, skipped: 0, pct: 0 } 66 | }; 67 | } 68 | } -------------------------------------------------------------------------------- /src/preview/snippetMap.ts: -------------------------------------------------------------------------------- 1 | export class SnippetMap { 2 | private snippets: Map = new Map(); 3 | 4 | addSnippet(functionName: string, snippet: string): void { 5 | if (!this.snippets.has(functionName)) { 6 | this.snippets.set(functionName, []); 7 | } 8 | this.snippets.get(functionName)!.push(snippet); 9 | } 10 | 11 | getSnippets(functionName: string): string[] { 12 | return this.snippets.get(functionName) || []; 13 | } 14 | 15 | hasSnippets(functionName: string): boolean { 16 | return this.snippets.has(functionName) && this.snippets.get(functionName)!.length > 0; 17 | } 18 | } -------------------------------------------------------------------------------- /src/preview/testValidator.ts: -------------------------------------------------------------------------------- 1 | import * as fs from 'fs'; 2 | import * as path from 'path'; 3 | import { spawnSync } from 'child_process'; 4 | import * as os from 'os'; 5 | 6 | export interface ICoverageSummary { 7 | lines: { total: number; covered: number; skipped: number; pct: number }; 8 | statements: { total: number; covered: number; skipped: number; pct: number }; 9 | functions: { total: number; covered: number; skipped: number; pct: number }; 10 | branches: { total: number; covered: number; skipped: number; pct: number }; 11 | } 12 | 13 | export class TestValidator { 14 | private testDir: string; 15 | private coverageDirs: string[] = []; 16 | /* For GitHub Action, the process.cwd() will return the directory of the runner, the file hierarchy will be as follows along with the repo code: 17 | /home/runner/work/repo-name/repo-name 18 | 19 | To execute the generated unit tests in fix test/ folder for code in specified folder (rootDir), e.g. src/, we need to configure the tsConfigFile, jestConfigFile accordingly. 20 | 21 | The typical file hierarchy will be as follows: 22 | /home/runner/work/[repository-name]/[repository-name]/ 23 | │ 24 | ├── .github/ 25 | │ └── workflows/ 26 | │ └── main.yml 27 | │ 28 | ├── dist/ (generated by ts-build, this is the actual folder that will be used in the github action) 29 | │ ├── src/ 30 | │ │ ├── index.js 31 | │ │ ├── index.d.ts (this is the entry file for github action) 32 | │ └── ... 33 | │ 34 | ├── src/ (original source code) 35 | │ ├── file-1.ts 36 | │ ├── file-2.ts 37 | │ ├── ... 38 | │ └── file-n.ts 39 | │ 40 | ├── unitTestGenerated/ (generated unit test for the external source code) 41 | │ ├── file-1.test.ts 42 | │ ├── file-2.test.ts 43 | │ ├── ... 
44 | │ └── file-n.test.ts 45 | │ 46 | ├── test/ (unit test for the original source code) 47 | ├── .gitignore 48 | ├── package.json 49 | ├── README.md 50 | └── LICENSE 51 | */ 52 | 53 | constructor(private packagePath: string = process.cwd()) { 54 | // this.testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'test-validator-')); 55 | this.testDir = path.join(this.packagePath, 'preflight-tests'); 56 | if (!fs.existsSync(this.testDir)) { 57 | fs.mkdirSync(this.testDir, { recursive: true }); 58 | } 59 | } 60 | 61 | validateTest(testName: string, testSource: string, rootDir: string): { status: string; error?: string } { 62 | console.log('Validating test: ', testName, '\nTest source: ', testSource, '\nRoot dir: ', rootDir, '\nTest dir: ', this.testDir, '\nCurrent folder hierarchy: ', fs.readdirSync(this.packagePath)); 63 | const testFile = path.join(this.testDir, `${testName}.test.ts`); 64 | fs.writeFileSync(testFile, testSource); 65 | // const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "jest-validator")); 66 | const tmpDir = path.join(this.testDir, "jest-validator"); 67 | if (!fs.existsSync(tmpDir)) { 68 | fs.mkdirSync(tmpDir, { recursive: true }); 69 | } 70 | const coverageDir = path.join(tmpDir, "coverage"); 71 | const reportFile = path.join(tmpDir, "report.json"); 72 | 73 | // Create a temporary tsconfig.json file, 74 | const tsConfigFile = path.join(this.testDir, 'tsconfig.json'); 75 | const tsConfig = { 76 | compilerOptions: { 77 | target: "es2018", 78 | module: "commonjs", 79 | strict: true, 80 | esModuleInterop: true, 81 | skipLibCheck: true, 82 | forceConsistentCasingInFileNames: true, 83 | baseUrl: this.packagePath, 84 | paths: { 85 | "@/*": ["./*"], 86 | [`${rootDir}/*`]: ["./*"] 87 | }, 88 | moduleResolution: "node", 89 | resolveJsonModule: true 90 | }, 91 | include: [ 92 | "./**/*.ts", 93 | "../**/*.ts" 94 | ], 95 | exclude: ["node_modules"] 96 | }; 97 | fs.writeFileSync(tsConfigFile, JSON.stringify(tsConfig, null, 2)); 98 | 99 | // Create a temporary Jest config file 100 | const jestConfigFile = path.join(this.testDir, 'jest.config.js'); 101 | const jestConfig = ` 102 | module.exports = { 103 | preset: 'ts-jest', 104 | testEnvironment: 'node', 105 | transform: { 106 | '^.+\\.tsx?$': ['ts-jest', { 107 | tsconfig: '${tsConfigFile}' 108 | }], 109 | }, 110 | moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'], 111 | moduleDirectories: ['node_modules', '${path.join(this.packagePath, rootDir)}'], 112 | rootDir: '${path.join(this.packagePath, rootDir)}', 113 | modulePaths: ['${path.join(this.packagePath, rootDir)}'], 114 | testMatch: ['**/*.test.ts'], 115 | moduleNameMapper: { 116 | '^@/(.*)$': '/$1', 117 | '^${rootDir}/(.*)$': '/$1' 118 | } 119 | };`; 120 | fs.writeFileSync(jestConfigFile, jestConfig); 121 | 122 | // Ensure ts-jest is installed 123 | this.ensureTsJestInstalled(); 124 | 125 | const res = spawnSync( 126 | 'npx', 127 | [ 128 | 'jest', 129 | '--coverage', 130 | '--coverageDirectory', coverageDir, 131 | '--json', 132 | '--outputFile', reportFile, 133 | '--rootDir', this.packagePath, 134 | '--config', jestConfigFile, 135 | testFile 136 | ], 137 | { timeout: 30000, encoding: 'utf-8', cwd: this.packagePath } 138 | ); 139 | // Log out the actual npx command for debugging 140 | console.log(`Executed command: npx jest --coverage --coverageDirectory ${coverageDir} --json --outputFile ${reportFile} --rootDir ${this.packagePath} --config ${jestConfigFile} ${testFile}`); 141 | if (res.status !== 0) { 142 | return { status: 'FAILED', error: res.stderr || res.stdout }; 143 | 
} 144 | 145 | const report = JSON.parse(fs.readFileSync(reportFile, 'utf-8')); 146 | 147 | if (report.numFailedTests > 0) { 148 | const failedTestResult = report.testResults[0].assertionResults.find((result: any) => result.status === 'failed'); 149 | return { status: 'FAILED', error: failedTestResult ? failedTestResult.failureMessages.join('\n') : 'Unknown error' }; 150 | } 151 | 152 | // Only record the coverage directory if all tests passed 153 | this.coverageDirs.push(coverageDir); 154 | return { status: 'PASSED' }; 155 | } 156 | 157 | private ensureTsJestInstalled() { 158 | try { 159 | require.resolve('ts-jest'); 160 | } catch (e) { 161 | console.log('ts-jest not found. Installing...'); 162 | spawnSync('npm', ['install', '--save-dev', 'ts-jest'], { stdio: 'inherit', cwd: this.packagePath }); 163 | } 164 | } 165 | 166 | getCoverageSummary(): ICoverageSummary { 167 | let aggregatedCoverage: ICoverageSummary = { 168 | lines: { total: 0, covered: 0, skipped: 0, pct: 0 }, 169 | statements: { total: 0, covered: 0, skipped: 0, pct: 0 }, 170 | functions: { total: 0, covered: 0, skipped: 0, pct: 0 }, 171 | branches: { total: 0, covered: 0, skipped: 0, pct: 0 } 172 | }; 173 | 174 | for (const coverageDir of this.coverageDirs) { 175 | const coverageFile = path.join(coverageDir, 'coverage-final.json'); 176 | if (fs.existsSync(coverageFile)) { 177 | const coverage = JSON.parse(fs.readFileSync(coverageFile, 'utf-8')); 178 | 179 | for (const fileCoverage of Object.values(coverage)) { 180 | const summary = (fileCoverage as any).summary; 181 | if (summary) { 182 | this.addCoverage(aggregatedCoverage.lines, summary.lines); 183 | this.addCoverage(aggregatedCoverage.statements, summary.statements); 184 | this.addCoverage(aggregatedCoverage.functions, summary.functions); 185 | this.addCoverage(aggregatedCoverage.branches, summary.branches); 186 | } 187 | } 188 | } 189 | } 190 | 191 | this.calculatePercentages(aggregatedCoverage.lines); 192 | this.calculatePercentages(aggregatedCoverage.statements); 193 | this.calculatePercentages(aggregatedCoverage.functions); 194 | this.calculatePercentages(aggregatedCoverage.branches); 195 | 196 | return aggregatedCoverage; 197 | } 198 | 199 | private addCoverage(target: { total: number; covered: number; skipped: number; pct: number }, source: { total: number; covered: number; skipped: number; pct: number }) { 200 | target.total += source.total; 201 | target.covered += source.covered; 202 | target.skipped += source.skipped; 203 | } 204 | 205 | private calculatePercentages(coverage: { total: number; covered: number; skipped: number; pct: number }) { 206 | coverage.pct = coverage.total === 0 ? 
100 : (coverage.covered / coverage.total) * 100; 207 | } 208 | } -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient, InvokeModelCommand } from "@aws-sdk/client-bedrock-runtime"; 2 | 3 | // Define the LanguageCode type 4 | export type LanguageCode = 'en' | 'zh' | 'ja' | 'es' | 'fr' | 'de' | 'it'; 5 | 6 | // Full definition of PullRequest from GitHub API can be found at https://gist.github.com/GuillaumeFalourd/e53ec9b6bc783cce184bd1eec263799d 7 | export interface PullRequest { 8 | title: string; 9 | number: number; 10 | body: string; 11 | head: { 12 | sha: string; 13 | ref: string; 14 | }; 15 | base: { 16 | sha: string; 17 | }; 18 | } 19 | 20 | export interface PullFile { 21 | filename: string; 22 | status: string; 23 | patch?: string; 24 | } 25 | 26 | // Update the languageCodeToName object with the correct type 27 | export const languageCodeToName: Record = { 28 | 'en': 'English', 29 | 'zh': 'Chinese', 30 | 'ja': 'Japanese', 31 | 'es': 'Spanish', 32 | 'fr': 'French', 33 | 'de': 'German', 34 | 'it': 'Italian', 35 | }; 36 | 37 | // This function splits the content into chunks of maxChunkSize 38 | export function splitContentIntoChunks_deprecated(content: string, maxChunkSize: number): string[] { 39 | const chunks: string[] = []; 40 | let currentChunk = ''; 41 | 42 | content.split('\n').forEach(line => { 43 | if (currentChunk.length + line.length > maxChunkSize) { 44 | chunks.push(currentChunk); 45 | currentChunk = ''; 46 | } 47 | currentChunk += line + '\n'; 48 | }); 49 | 50 | if (currentChunk) { 51 | chunks.push(currentChunk); 52 | } 53 | 54 | return chunks; 55 | } 56 | 57 | export function shouldExcludeFile(filename: string, excludePatterns: string[]): boolean { 58 | return excludePatterns.some(pattern => { 59 | const regex = new RegExp(`^${pattern.replace(/\*/g, '.*')}$`); 60 | return regex.test(filename); 61 | }); 62 | } 63 | 64 | export function splitIntoSoloFile(combinedCode: string): Record { 65 | // split the whole combinedCode content into individual files (index.ts, index_test.ts, index.js) by recognize the character like: "// File: ./index.ts", filter the content with suffix ".tx" and not contain "test" in file name (index.ts), 66 | const fileChunks: Record = {}; 67 | const filePattern = /\/\/ File: \.\/(.+)/; 68 | let currentFile = ''; 69 | let currentContent = ''; 70 | 71 | combinedCode.split('\n').forEach(line => { 72 | const match = line.match(filePattern); 73 | if (match) { 74 | if (currentFile) { 75 | fileChunks[currentFile] = currentContent.trim(); 76 | } 77 | currentFile = match[1] as string; 78 | currentContent = ''; 79 | } else { 80 | currentContent += line + '\n'; 81 | } 82 | }); 83 | 84 | if (currentFile) { 85 | fileChunks[currentFile] = currentContent.trim(); 86 | } 87 | return fileChunks; 88 | } 89 | 90 | export async function extractFunctions(content: string): Promise { 91 | // const functionPattern = /(?:export\s+)?(?:async\s+)?function\s+\w+\s*\([^)]*\)(?:\s*:\s*[^{]*?)?\s*{(?:[^{}]*|\{(?:[^{}]*|\{[^{}]*\})*\})*}/gs; 92 | // const matches = content.match(functionPattern); 93 | // return matches ? matches.map(match => match.trim()) : []; 94 | 95 | // Dummy response for debugging purposes 96 | return [ 97 | 'export async function generateUnitTests(client: BedrockRuntimeClient, modelId: string, sourceCode: string): Promise { ... 
}', 98 | 'async function runUnitTests(testCases: TestCase[], sourceCode: string): Promise { ... }', 99 | 'function generateTestReport(testCases: TestCase[]): Promise { ... }', 100 | ]; 101 | } 102 | 103 | export async function exponentialBackoff( 104 | fn: () => Promise, 105 | maxRetries: number, 106 | initialDelay: number, 107 | functionName: string 108 | ): Promise { 109 | let retries = 0; 110 | while (true) { 111 | try { 112 | const result = await fn(); 113 | console.log(`Function '${functionName}' executed successfully on attempt ${retries + 1}`); 114 | return result; 115 | } catch (error) { 116 | if (retries >= maxRetries) { 117 | console.error(`Max retries (${maxRetries}) reached for function '${functionName}'. Throwing error.`); 118 | throw error; 119 | } 120 | const delay = initialDelay * Math.pow(2, retries); 121 | console.log(`Attempt ${retries + 1} for function '${functionName}' failed. Retrying in ${delay}ms...`); 122 | await new Promise(resolve => setTimeout(resolve, delay)); 123 | retries++; 124 | } 125 | } 126 | } 127 | 128 | // note the default temperature is 1 according to official documentation: https://docs.anthropic.com/en/api/complete 129 | export async function invokeModel(client: BedrockRuntimeClient, modelId: string, payloadInput: string, temperature: number = 0.6): Promise { 130 | const maxRetries = 3; 131 | const initialDelay = 1000; // 1 second 132 | 133 | const invokeWithRetry = async (): Promise => { 134 | try { 135 | // seperate branch to invoke RESTFul endpoint exposed by API Gateway, if the modelId is prefixed with string like "sagemaker..execute-api..amazonaws.com/prod" 136 | if (modelId.startsWith("sagemaker.")) { 137 | // invoke RESTFul endpoint e.g. curl -X POST -H "Content-Type: application/json" -d '{"prompt": "import argparse\ndef main(string: str):\n print(string)\n print(string[::-1])\n if __name__ == \"__main__\":", "parameters": {"max_new_tokens": 256, "temperature": 0.1}}' https://.execute-api..amazonaws.com/prod 138 | const endpoint = modelId.split("sagemaker.")[1]; 139 | 140 | // invoke the RESTFul endpoint with the payload 141 | const payload = { 142 | prompt: payloadInput, 143 | parameters: { 144 | max_new_tokens: 256, 145 | temperature: 0.1, 146 | }, 147 | }; 148 | 149 | const response = await fetch(`https://${endpoint}`, { 150 | method: 'POST', 151 | headers: { 152 | 'Content-Type': 'application/json', 153 | }, 154 | body: JSON.stringify(payload), 155 | }); 156 | 157 | const responseBody = await response.json(); 158 | // extract the generated text from the response, the output payload should be in the format { "generated_text": "..." 
} using codellama model for now 159 | const finalResult = (responseBody as { generated_text: string }).generated_text; 160 | 161 | return finalResult; 162 | } 163 | 164 | const payload = { 165 | anthropic_version: "bedrock-2023-05-31", 166 | max_tokens: 4096, 167 | temperature: temperature, 168 | messages: [ 169 | { 170 | role: "user", 171 | content: [{ 172 | type: "text", 173 | text: payloadInput, 174 | }], 175 | }, 176 | ], 177 | }; 178 | 179 | const command = new InvokeModelCommand({ 180 | // modelId: "anthropic.claude-3-5-sonnet-20240620-v1:0" 181 | modelId: modelId, 182 | contentType: "application/json", 183 | body: JSON.stringify(payload), 184 | }); 185 | 186 | const apiResponse = await client.send(command); 187 | const decodedResponseBody = new TextDecoder().decode(apiResponse.body); 188 | const responseBody = JSON.parse(decodedResponseBody); 189 | return responseBody.content[0].text; 190 | } catch (error) { 191 | if (error instanceof Error && error.name === 'ThrottlingException') { 192 | throw error; // Allow retry for throttling errors 193 | } 194 | console.error('Error occurred while invoking the model', error); 195 | throw error; // Throw other errors without retry 196 | } 197 | }; 198 | 199 | return exponentialBackoff(invokeWithRetry, maxRetries, initialDelay, invokeModel.name); 200 | } -------------------------------------------------------------------------------- /test/AUTO_GENERATED_TESTS_README.md: -------------------------------------------------------------------------------- 1 | # Auto-Generated Unit Tests 2 | 3 | This document provides an overview of the automatically generated unit tests for this project. 4 | 5 | ## Generated Test Suites 6 | 7 | - **sample.test.ts**: Tests for `sample.ts` 8 | - Location: `test/sample.test.ts` 9 | - Source file: `debugging/sample.ts` 10 | 11 | - **testUtils.test.ts**: Tests for `testUtils.ts` 12 | - Location: `test/testUtils.test.ts` 13 | - Source file: `debugging/testUtils.ts` 14 | 15 | - **utils.test.ts**: Tests for `utils.ts` 16 | - Location: `test/utils.test.ts` 17 | - Source file: `debugging/utils.ts` 18 | 19 | ## Test Coverage 20 | 21 | The following test coverage was achieved during the pre-flight phase: 22 | 23 | ``` 24 | { 25 | "lines": { 26 | "total": 0, 27 | "covered": 0, 28 | "skipped": 0, 29 | "pct": 100 30 | }, 31 | "statements": { 32 | "total": 0, 33 | "covered": 0, 34 | "skipped": 0, 35 | "pct": 100 36 | }, 37 | "functions": { 38 | "total": 0, 39 | "covered": 0, 40 | "skipped": 0, 41 | "pct": 100 42 | }, 43 | "branches": { 44 | "total": 0, 45 | "covered": 0, 46 | "skipped": 0, 47 | "pct": 100 48 | } 49 | } 50 | ``` 51 | 52 | ## Test Results Summary 53 | 54 | Total tests: 33 55 | Passed tests: 3 56 | Failed tests: 30 57 | 58 | ## Running Tests Manually 59 | 60 | To run these unit tests manually, follow these steps: 61 | 62 | 1. Ensure you have Node.js and npm installed on your system. 63 | 2. Navigate to the project root directory in your terminal. 64 | 3. Install the necessary dependencies by running: 65 | ``` 66 | npm install 67 | ``` 68 | 4. Run the tests using the following command: 69 | ``` 70 | npm test 71 | ``` 72 | 73 | This will execute all the unit tests in the `test` directory. 
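To run a single generated suite instead of the whole directory, you can also point Jest directly at one file (a usage sketch, assuming the repository's existing Jest configuration at the project root is picked up):
```
npx jest test/sample.test.ts
npx jest test/sample.test.ts --coverage
```
The second command additionally collects coverage for just that run.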
74 | -------------------------------------------------------------------------------- /test/debugTestGenerator.ts: -------------------------------------------------------------------------------- 1 | import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; 2 | import { generateUnitTestsSuite } from "../src/preview/testGenerator"; 3 | import { getOctokit } from "@actions/github"; 4 | import { context } from "@actions/github"; 5 | import * as fs from 'fs'; 6 | 7 | const bedrockClient = new BedrockRuntimeClient({ region: 'us-east-1' }); 8 | 9 | // Sample file content 10 | const sampleFileContent = ` 11 | export function add(a: number, b: number): number { 12 | return a + b; 13 | } 14 | 15 | export function subtract(a: number, b: number): number { 16 | return a - b; 17 | } 18 | `; 19 | 20 | // Mock Octokit 21 | const mockOctokit = { 22 | rest: { 23 | repos: { 24 | listTags: async () => ({ data: [] }), 25 | getContent: async ({ path }) => { 26 | if (path === 'test') { 27 | return { 28 | data: [ 29 | { 30 | type: 'file', 31 | name: 'sample.ts', 32 | path: 'test/sample.ts', 33 | }, 34 | ], 35 | }; 36 | } else if (path === 'test/sample.ts') { 37 | return { 38 | data: { 39 | content: Buffer.from(sampleFileContent).toString('base64'), 40 | encoding: 'base64', 41 | }, 42 | }; 43 | } 44 | }, 45 | createOrUpdateFileContents: async ({ path, content }) => { 46 | if (path.endsWith('.test.ts')) { 47 | fs.writeFileSync('sample.test.ts', Buffer.from(content, 'base64').toString('utf8')); 48 | console.log('Generated tests written to sample.test.ts'); 49 | } else if (path === 'test/AUTO_GENERATED_TESTS_README.md') { 50 | fs.writeFileSync('AUTO_GENERATED_TESTS_README.md', Buffer.from(content, 'base64').toString('utf8')); 51 | console.log('Generated README written to AUTO_GENERATED_TESTS_README.md'); 52 | } 53 | return {}; 54 | }, 55 | }, 56 | pulls: { 57 | listFiles: async () => ({ data: [] }), 58 | }, 59 | }, 60 | } as unknown as ReturnType; 61 | 62 | // Mock GitHub context 63 | (context as any).payload = { 64 | pull_request: { 65 | head: { 66 | ref: 'feature-branch', 67 | sha: 'abc123', 68 | }, 69 | number: 1, 70 | }, 71 | }; 72 | 73 | async function main() { 74 | 75 | // Setup the test environment, create the sample.ts to be tested 76 | fs.writeFileSync('sample.ts', sampleFileContent); 77 | 78 | // Run the test generation 79 | try { 80 | await generateUnitTestsSuite( 81 | bedrockClient, 82 | "anthropic.claude-3-sonnet-20240229-v1:0", // or any other model ID you're using 83 | mockOctokit, 84 | { owner: "testuser", repo: "testrepo" }, 85 | "test" 86 | ); 87 | console.log("Unit tests generation completed"); 88 | } catch (error) { 89 | console.error("Error generating unit tests:", error); 90 | } 91 | 92 | // Check if test cases are generated 93 | const testCases = fs.readFileSync('sample.test.ts', 'utf8'); 94 | console.log("The generated test cases are: ", testCases); 95 | 96 | // Clean up the test environment 97 | // fs.unlinkSync('sample.ts'); 98 | // fs.unlinkSync('sample.test.ts'); 99 | // fs.unlinkSync('AUTO_GENERATED_TESTS_README.md'); 100 | } 101 | 102 | main(); -------------------------------------------------------------------------------- /test/erroneous_code_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | 4 | def calculate_factorial(n): 5 | if n == 0: 6 | return 1 7 | else: 8 | return n * calculate_factorial(n - 1) 9 | 10 | def find_largest_number(numbers): 11 | largest = numbers[0] 12 | for num in numbers: 
13 | if num > largest: 14 | largest = num 15 | return largest 16 | 17 | def inefficient_sort(arr): 18 | n = len(arr) 19 | for i in range(n): 20 | for j in range(0, n-i-1): 21 | if arr[j] > arr[j+1]: 22 | arr[j], arr[j+1] = arr[j+1], arr[j] 23 | return arr 24 | 25 | class User: 26 | def __init__(self, name, age): 27 | self.name = name 28 | self.age = age 29 | 30 | def print_user_info(self): 31 | print(f"Name: {self.name}, Age: {self.age}") 32 | 33 | def process_data(data): 34 | result = [] 35 | for item in data: 36 | if item % 2 == 0: 37 | result.append(item * 2) 38 | else: 39 | result.append(item * 3) 40 | return result 41 | 42 | def generate_random_numbers(n): 43 | numbers = [] 44 | for i in range(n): 45 | numbers.append(random.randint(1, 100)) 46 | return numbers 47 | 48 | def calculate_average(numbers): 49 | total = sum(numbers) 50 | count = len(numbers) 51 | average = total / count 52 | return average 53 | 54 | def main(): 55 | # Inefficient factorial calculation 56 | print(calculate_factorial(20)) 57 | 58 | # Unnecessary loop for finding largest number 59 | numbers = [3, 7, 2, 9, 1, 5] 60 | print(find_largest_number(numbers)) 61 | 62 | # Inefficient sorting algorithm 63 | unsorted_list = [64, 34, 25, 12, 22, 11, 90] 64 | print(inefficient_sort(unsorted_list)) 65 | 66 | # Inconsistent naming convention 67 | user1 = User("John Doe", 30) 68 | user1.print_user_info() 69 | 70 | # Redundant if-else structure 71 | data = [1, 2, 3, 4, 5] 72 | print(process_data(data)) 73 | 74 | # Inefficient random number generation 75 | random_numbers = generate_random_numbers(1000000) 76 | print(f"Generated {len(random_numbers)} random numbers") 77 | 78 | # Potential division by zero 79 | empty_list = [] 80 | print(calculate_average(empty_list)) 81 | 82 | # Unnecessary time delay 83 | time.sleep(5) 84 | print("Finished processing after 5 seconds") 85 | 86 | if __name__ == "__main__": 87 | main() 88 | -------------------------------------------------------------------------------- /test/sample.test.ts: -------------------------------------------------------------------------------- 1 | import { add, subtract } from '/home/runner/work/aws-genai-cicd-suite/aws-genai-cicd-suite/debugging/sample'; 2 | 3 | describe('add', () => { 4 | it('should add two positive numbers correctly', () => { 5 | expect(add(2, 3)).toBe(5); 6 | }); 7 | 8 | it('should add two negative numbers correctly', () => { 9 | expect(add(-2, -3)).toBe(-5); 10 | }); 11 | 12 | it('should add a positive and a negative number correctly', () => { 13 | expect(add(2, -3)).toBe(-1); 14 | }); 15 | 16 | it('should add zero to a number correctly', () => { 17 | expect(add(0, 5)).toBe(5); 18 | expect(add(5, 0)).toBe(5); 19 | }); 20 | }); 21 | 22 | describe('subtract', () => { 23 | it('should subtract two positive numbers correctly', () => { 24 | expect(subtract(5, 3)).toBe(2); 25 | }); 26 | 27 | it('should subtract two negative numbers correctly', () => { 28 | expect(subtract(-5, -3)).toBe(-2); 29 | }); 30 | 31 | it('should subtract a negative number from a positive number correctly', () => { 32 | expect(subtract(5, -3)).toBe(8); 33 | }); 34 | 35 | it('should subtract a positive number from a negative number correctly', () => { 36 | expect(subtract(-5, 3)).toBe(-8); 37 | }); 38 | 39 | it('should subtract zero from a number correctly', () => { 40 | expect(subtract(5, 0)).toBe(5); 41 | expect(subtract(0, 5)).toBe(-5); 42 | }); 43 | }); 44 | 45 | import { subtract } from '/home/runner/work/aws-genai-cicd-suite/aws-genai-cicd-suite/debugging/sample'; 46 | 47 | 
describe('subtract', () => { 48 | it('should subtract two positive numbers correctly', () => { 49 | expect(subtract(5, 3)).toBe(2); 50 | }); 51 | 52 | it('should subtract two negative numbers correctly', () => { 53 | expect(subtract(-10, -5)).toBe(-5); 54 | }); 55 | 56 | it('should subtract a positive number from a negative number correctly', () => { 57 | expect(subtract(-8, 3)).toBe(-11); 58 | }); 59 | 60 | it('should subtract a negative number from a positive number correctly', () => { 61 | expect(subtract(10, -4)).toBe(14); 62 | }); 63 | 64 | it('should return 0 when subtracting the same number', () => { 65 | expect(subtract(7, 7)).toBe(0); 66 | }); 67 | 68 | it('should handle large numbers correctly', () => { 69 | expect(subtract(1000000000, 500000000)).toBe(500000000); 70 | }); 71 | 72 | it('should handle floating-point numbers correctly', () => { 73 | expect(subtract(3.14, 1.57)).toBeCloseTo(1.57, 5); 74 | }); 75 | }); -------------------------------------------------------------------------------- /test/testUtils.test.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-cicd-suite/d15da72c3ab2cacc50e21ca5e4866b7dcecae990/test/testUtils.test.ts -------------------------------------------------------------------------------- /test/utils.test.ts: -------------------------------------------------------------------------------- 1 | import { shouldExcludeFile } from '@/debugging/utils'; 2 | 3 | describe('shouldExcludeFile', () => { 4 | it('should return true if the filename matches any of the exclude patterns', () => { 5 | const filename = 'src/components/Button.test.tsx'; 6 | const excludePatterns = ['**/node_modules/**', '**/dist/**', '**/*.test.*']; 7 | 8 | expect(shouldExcludeFile(filename, excludePatterns)).toBe(true); 9 | }); 10 | 11 | it('should return false if the filename does not match any of the exclude patterns', () => { 12 | const filename = 'src/components/Button.tsx'; 13 | const excludePatterns = ['**/node_modules/**', '**/dist/**']; 14 | 15 | expect(shouldExcludeFile(filename, excludePatterns)).toBe(false); 16 | }); 17 | 18 | it('should handle wildcard patterns correctly', () => { 19 | const filename = 'src/utils/helpers.ts'; 20 | const excludePatterns = ['**/node_modules/**', '**/dist/**', 'src/utils/*']; 21 | 22 | expect(shouldExcludeFile(filename, excludePatterns)).toBe(true); 23 | }); 24 | 25 | it('should handle empty exclude patterns', () => { 26 | const filename = 'src/components/Button.tsx'; 27 | const excludePatterns: string[] = []; 28 | 29 | expect(shouldExcludeFile(filename, excludePatterns)).toBe(false); 30 | }); 31 | 32 | test.each` 33 | filename | excludePatterns | expected 34 | ${'src/components/Button.tsx'} | ${['**/node_modules/**', '**/dist/**', '**/*.tsx']} | ${true} 35 | ${'src/utils/helpers.ts'} | ${['**/node_modules/**', '**/dist/**', 'src/utils/*']} | ${true} 36 | ${'src/index.tsx'} | ${['**/node_modules/**', '**/dist/**', '**/*.test.*']} | ${false} 37 | ${'src/tests/Button.test.tsx'} | ${['**/node_modules/**', '**/dist/**', '**/*.test.*']} | ${true} 38 | ${'src/styles/global.css'} | ${[]} | ${false} 39 | `('should return $expected for filename $filename and excludePatterns $excludePatterns', ({ filename, excludePatterns, expected }) => { 40 | expect(shouldExcludeFile(filename, excludePatterns)).toBe(expected); 41 | }); 42 | }); -------------------------------------------------------------------------------- /tools/github_stats.py: 
-------------------------------------------------------------------------------- 1 | import requests 2 | import os 3 | from datetime import datetime, timedelta 4 | import logging 5 | import time 6 | import sys 7 | 8 | # Set up logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 10 | 11 | def fetch_data(url, headers=None, params=None): 12 | while True: 13 | try: 14 | # Convert any integer values in params to strings 15 | if params: 16 | params = {k: str(v) if isinstance(v, int) else v for k, v in params.items()} 17 | 18 | response = requests.get(url, headers=headers, params=params, timeout=10) 19 | response.raise_for_status() 20 | remaining = int(response.headers.get('X-RateLimit-Remaining', 0)) 21 | if remaining < 2: 22 | reset_time = int(response.headers.get('X-RateLimit-Reset', 0)) 23 | sleep_time = max(reset_time - time.time(), 0) + 1 24 | logging.warning(f"Rate limit almost exhausted. Sleeping for {sleep_time:.2f} seconds.") 25 | sleep_with_progress(sleep_time) 26 | return response.json(), response.headers 27 | except requests.exceptions.RequestException as e: 28 | if hasattr(e, 'response') and e.response is not None and e.response.status_code == 403: 29 | reset_time = int(e.response.headers.get('X-RateLimit-Reset', 0)) 30 | sleep_time = max(reset_time - time.time(), 0) + 1 31 | logging.warning(f"Rate limit exceeded. Sleeping for {sleep_time:.2f} seconds.") 32 | sleep_with_progress(sleep_time) 33 | else: 34 | logging.error(f"Error fetching data: {e}") 35 | return None, None 36 | 37 | def sleep_with_progress(sleep_time): 38 | for i in range(int(sleep_time)): 39 | # round the time to 0.1 seconds 40 | sleep_time = round((sleep_time - i)/1000, 1) 41 | sys.stdout.write(f"\rSleeping: {sleep_time:.1f} seconds remaining") 42 | sys.stdout.flush() 43 | time.sleep(1) 44 | sys.stdout.write("\rResuming fetching data... 
\n") 45 | sys.stdout.flush() 46 | 47 | def get_github_stats(repo, days=30): 48 | # GitHub API endpoint 49 | api_url = f"https://api.github.com/repos/{repo}" 50 | 51 | # Get GitHub token from environment variable 52 | github_token = os.environ.get("GITHUB_TOKEN") 53 | if not github_token: 54 | raise ValueError("GITHUB_TOKEN environment variable is not set") 55 | 56 | # Set up headers for authentication 57 | headers = { 58 | "Authorization": f"token {github_token}", 59 | "Accept": "application/vnd.github.v3+json" 60 | } 61 | 62 | # Calculate the date range 63 | end_date = datetime.utcnow() 64 | start_date = end_date - timedelta(days=days) 65 | 66 | # Initialize counters 67 | stats = { 68 | "pr_created": 0, 69 | "pr_updated": 0, 70 | "pr_closed": 0, 71 | "issues_opened": 0, 72 | "issues_closed": 0, 73 | "issue_comments": 0 74 | } 75 | 76 | # Fetch pull requests 77 | pr_url = f"{api_url}/pulls" 78 | pr_params = { 79 | "state": "all", 80 | "sort": "updated", 81 | "direction": "desc", 82 | "per_page": 100 83 | } 84 | 85 | page = 1 86 | while True: 87 | logging.info(f"Fetching pull requests page {page}") 88 | prs, headers = fetch_data(pr_url, headers=headers, params=pr_params) 89 | if not prs: 90 | break 91 | 92 | for pr in prs: 93 | pr_created_at = datetime.strptime(pr["created_at"], "%Y-%m-%dT%H:%M:%SZ") 94 | pr_updated_at = datetime.strptime(pr["updated_at"], "%Y-%m-%dT%H:%M:%SZ") 95 | 96 | if start_date <= pr_created_at <= end_date: 97 | stats["pr_created"] += 1 98 | 99 | if start_date <= pr_updated_at <= end_date: 100 | stats["pr_updated"] += 1 101 | 102 | if pr["closed_at"]: 103 | pr_closed_at = datetime.strptime(pr["closed_at"], "%Y-%m-%dT%H:%M:%SZ") 104 | if start_date <= pr_closed_at <= end_date: 105 | stats["pr_closed"] += 1 106 | 107 | if 'next' not in requests.utils.parse_header_links(headers.get('Link', '')): 108 | break 109 | page += 1 110 | pr_params['page'] = page 111 | 112 | logging.info(f"Current stats after pull requests: {stats}") 113 | 114 | # Fetch issues 115 | issue_url = f"{api_url}/issues" 116 | issue_params = { 117 | "state": "all", 118 | "sort": "updated", 119 | "direction": "desc", 120 | "per_page": 100 121 | } 122 | 123 | page = 1 124 | while True: 125 | logging.info(f"Fetching issues page {page}") 126 | issues, headers = fetch_data(issue_url, headers=headers, params=issue_params) 127 | if not issues: 128 | break 129 | 130 | for issue in issues: 131 | if "pull_request" in issue: 132 | continue # Skip pull requests 133 | 134 | issue_created_at = datetime.strptime(issue["created_at"], "%Y-%m-%dT%H:%M:%SZ") 135 | 136 | if start_date <= issue_created_at <= end_date: 137 | stats["issues_opened"] += 1 138 | 139 | if issue["closed_at"]: 140 | issue_closed_at = datetime.strptime(issue["closed_at"], "%Y-%m-%dT%H:%M:%SZ") 141 | if start_date <= issue_closed_at <= end_date: 142 | stats["issues_closed"] += 1 143 | 144 | # Fetch issue comments 145 | comments_url = issue["comments_url"] 146 | comments, _ = fetch_data(comments_url, headers=headers) 147 | if comments: 148 | for comment in comments: 149 | comment_created_at = datetime.strptime(comment["created_at"], "%Y-%m-%dT%H:%M:%SZ") 150 | if start_date <= comment_created_at <= end_date: 151 | stats["issue_comments"] += 1 152 | 153 | if headers and 'Link' in headers: 154 | if 'next' not in requests.utils.parse_header_links(headers['Link']): 155 | break 156 | else: 157 | break 158 | page += 1 159 | issue_params['page'] = page 160 | 161 | return stats 162 | 163 | if __name__ == "__main__": 164 | repo = input("Enter the GitHub 
repository (format: owner/repo): ") 166 | days = int(input("Enter the number of days to analyze (default 30): ") or 30) 167 | 168 | try: 169 | stats = get_github_stats(repo, days) 170 | print(f"\nGitHub Stats for {repo} (last {days} days):") 171 | print(f"Pull Requests Created: {stats['pr_created']}") 172 | print(f"Pull Requests Updated: {stats['pr_updated']}") 173 | print(f"Pull Requests Closed: {stats['pr_closed']}") 174 | print(f"Issues Opened: {stats['issues_opened']}") 175 | print(f"Issues Closed: {stats['issues_closed']}") 176 | print(f"Issue Comments: {stats['issue_comments']}") 177 | except Exception as e: 178 | logging.exception(f"An error occurred: {str(e)}") -------------------------------------------------------------------------------- /tsconfig.build.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "exclude": [ 4 | "test/**/*.ts" 5 | ], 6 | "compilerOptions": { 7 | "rootDir": "src" 8 | }, 9 | } 10 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowUnreachableCode": false, 4 | "allowUnusedLabels": false, 5 | "baseUrl": ".", 6 | "paths": { 7 | "@/*": ["./*"] 8 | }, 9 | "strict": true, 10 | "exactOptionalPropertyTypes": false, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitOverride": true, 13 | "noImplicitReturns": true, 14 | "noPropertyAccessFromIndexSignature": true, 15 | "noUncheckedIndexedAccess": true, 16 | "noUnusedLocals": false, 17 | "noUnusedParameters": false, 18 | "module": "CommonJS", 19 | "resolveJsonModule": true, 20 | "outDir": "build", 21 | "declaration": true, 22 | "newLine": "lf", 23 | "noEmitOnError": true, 24 | "sourceMap": true, 25 | "disableSizeLimit": true, 26 | "forceConsistentCasingInFileNames": true, 27 | "lib": [ "ES2020" ], 28 | "target": "ES2020", 29 | "noErrorTruncation": true, 30 | "esModuleInterop": true 31 | }, 32 | "include": [ 33 | "src/**/*.ts", 34 | // "test/**/*.ts" 35 | ], 36 | "exclude": [], 37 | } -------------------------------------------------------------------------------- /web/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | # Next.js 5 | .next 6 | 7 | # Production 8 | /build 9 | 10 | # Misc 11 | .DS_Store 12 | *.pem 13 | 14 | # Debug 15 | npm-debug.log* 16 | yarn-debug.log* 17 | yarn-error.log* 18 | 19 | # Local env files 20 | .env*.local 21 | 22 | # Vercel 23 | .vercel -------------------------------------------------------------------------------- /web/README.md: -------------------------------------------------------------------------------- 1 | # Migrate from ViteJS to NextJS 2 | 3 | ## Why migrate from ViteJS to NextJS 4 | 5 | The simple answer is to support a **full-stack framework in the future**, meaning we can use the same framework to build both the website and the backend service. 6 | 7 | More generally, the choice between Vite.js and Next.js depends on your specific project requirements: 8 | 9 | - If you're focusing solely on frontend development and prioritize speed and flexibility in choosing frontend frameworks, Vite.js might be more suitable. 10 | - If you're building a full-stack application, especially one that requires server-side rendering, SEO optimization, and integrated backend functionality, Next.js would be a more appropriate choice (a minimal API route sketch follows below). 
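To make the "integrated backend functionality" point concrete, here is a minimal, hypothetical sketch of a Next.js API route; the file name `pages/api/hello.ts`, the types, and the handler body are illustrative assumptions and are not part of this repository:

```
// pages/api/hello.ts (hypothetical example, not a file in this repository)
// In Next.js, any file under pages/api becomes a server-side endpoint that ships
// with the same application as the frontend pages.
import type { NextApiRequest, NextApiResponse } from 'next';

type HelloResponse = {
  message: string;
};

export default function handler(
  req: NextApiRequest,
  res: NextApiResponse<HelloResponse>
) {
  // Server-side logic (auth checks, database calls, AWS SDK calls, etc.) could live here.
  res.status(200).json({ message: `Handled ${req.method ?? 'GET'} on the server` });
}
```

With Vite.js, an equivalent endpoint would typically have to live in a separate backend service.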
11 | 12 | A detailed comparison is listed below: 13 | - Targeted Audience: 14 | Vite.js does not provide built-in backend capabilities and is primarily focused on frontend development. For backend functionality, you would need to use a separate backend solution. 15 | Next.js offers API routes, allowing you to build backend functionality directly within your Next.js application, making it suitable for full-stack development. 16 | 17 | - Development Speed: 18 | Vite.js is known for its extremely fast development server and build times, which can significantly speed up the development process for frontend applications. 19 | Next.js, while also performant, may have slightly longer build times due to its additional features like SSR and SSG. 20 | 21 | - Learning Curve: 22 | Vite.js is purely a frontend framework, so it has a relatively low learning curve, especially for developers already familiar with modern JavaScript frameworks. 23 | Next.js is a React-specific framework, so it has a steeper learning curve due to its full-stack nature and additional features, but it provides more out-of-the-box functionality. 24 | 25 | - Flexibility: 26 | Vite.js is more flexible in terms of frontend framework choice, supporting multiple frameworks, e.g. React, Vue, etc. 27 | Next.js is specifically designed for React applications, offering deep integration with the React ecosystem, with features like SSR and SSG. 28 | 29 | 30 | - Deployment: 31 | Vite.js projects are typically deployed as static sites or SPAs, requiring a separate backend service if needed. 32 | Next.js applications can be easily deployed to various platforms with built-in support for serverless functions and edge computing. 33 | 34 | ## How to migrate 35 | 36 | The full diff is listed below for your reference; it contains the changes between the ViteJS and NextJS versions, and you can check the details in this [PR](https://github.com/yike5460/intelli-ops/pull/31) 37 | 38 | ``` 39 | Changes not staged for commit: 40 | (use "git add/rm ..." to update what will be committed) 41 | (use "git restore ..." to discard changes in working directory) 42 | modified: web/.gitignore 43 | deleted: web/index.html 44 | modified: web/package-lock.json 45 | modified: web/package.json 46 | deleted: web/src/App.jsx 47 | deleted: web/src/components/CopyableCommand.jsx 48 | deleted: web/src/components/FAQ.jsx 49 | deleted: web/src/components/Features.jsx 50 | deleted: web/src/components/Footer.jsx 51 | deleted: web/src/components/Header.jsx 52 | deleted: web/src/components/Hero.jsx 53 | deleted: web/src/components/Pricing.jsx 54 | deleted: web/src/components/QuickStart.jsx 55 | deleted: web/src/index.js 56 | deleted: web/src/index.jsx 57 | deleted: web/src/main.jsx 58 | deleted: web/vite.config.js 59 | 60 | Untracked files: 61 | (use "git add ..." to include in what will be committed) 62 | web/.next/ 63 | web/README.md 64 | web/components/ 65 | web/next-env.d.ts 66 | web/next.config.js 67 | web/pages/ 68 | web/postcss.config.js 69 | web/styles/ 70 | web/tailwind.config.js 71 | web/tsconfig.json 72 | ``` 73 | 74 | Note that the configuration in the Vercel hosting platform also needs to be updated in "Settings" -> "Build & Development Settings" -> "Environment Variables". 75 | 76 | ## Multiplex the website to GitHub Pages 77 | 78 | We are multiplexing the same code base in the web folder to two different destinations: 79 | - GitHub Pages: using GitHub Actions to deploy the website to GitHub Pages. 
The workflow file is defined in `.github/workflows/github-pages.yml`; note that you need to enable GitHub Pages in the repository settings and set gh-pages as the source branch. 80 | - Vercel: using Vercel to host the website. The configuration is in `vercel.json`. 81 | 82 | Note that we add `.vercelignore` to configure Vercel to ignore the gh-pages branch entirely, thus avoiding unnecessary builds in Vercel; alternatively, add the following command in the Vercel project settings -> "Git" -> "Ignored Build Step" -> "Custom": 83 | ``` 84 | if [ "$VERCEL_GIT_COMMIT_REF" = "gh-pages" ]; then echo "Skipping deploy for gh-pages branch"; exit 0; else exit 1; fi 85 | ``` -------------------------------------------------------------------------------- /web/components/CopyableCommand.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState } from 'react'; 2 | 3 | interface CopyableCommandProps { 4 | command: string; 5 | } 6 | 7 | const CopyableCommand: React.FC<CopyableCommandProps> = ({ command }) => { 8 | const [copied, setCopied] = useState(false); 9 | 10 | const copyToClipboard = () => { 11 | navigator.clipboard.writeText(command); 12 | setCopied(true); 13 | setTimeout(() => setCopied(false), 2000); 14 | }; 15 | 16 | return ( 17 |
18 |
19 |         {command}
20 |       
21 | 27 |
28 | ); 29 | }; 30 | 31 | export default CopyableCommand; -------------------------------------------------------------------------------- /web/components/FAQ.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | const FAQItem: React.FC<{ question: string; answer: string }> = ({ question, answer }) => ( 4 |
5 |

{question}

6 |

{answer}

7 |
8 | ); 9 | 10 | const FAQ: React.FC = () => { 11 | const faqs = [ 12 | { 13 | question: "How do I set up AWS GenAI CI/CD Suite?", 14 | answer: "Setting up AWS GenAI CI/CD Suite involves configuring IAM to trust GitHub, setting up AWS credentials, cloning and publishing the action, and configuring your GitHub workflow. Detailed steps are provided in the Quick Start guide." 15 | }, 16 | { 17 | question: "What AWS services does AWS GenAI CI/CD Suite use?", 18 | answer: "AWS GenAI CI/CD Suite primarily uses AWS Bedrock API for its AI-driven features. It also requires proper IAM configuration for secure access to AWS services." 19 | } 20 | ]; 21 | 22 | return ( 23 |
24 |
25 |

Frequently Asked Questions

26 |
27 | {faqs.map((faq, index) => ( 28 | 29 | ))} 30 |
31 |
32 |
33 | ); 34 | }; 35 | 36 | export default FAQ; -------------------------------------------------------------------------------- /web/components/Features.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | const FeatureItem: React.FC<{ title: string; description: string; icon: React.ReactNode }> = ({ title, description, icon }) => ( 4 |
5 |
6 |
{icon}
7 |

{title}

8 |
9 |

{description}

10 |
11 | ); 12 | 13 | const ComparisonCard: React.FC<{ title: string; traditional: string[]; innovative: string[] }> = ({ title, traditional, innovative }) => ( 14 |
15 |

{title}

16 |
17 |
18 |
19 |

Traditional Approach

20 | 21 | 22 | 23 |
24 |
    25 | {traditional.map((item, index) => ( 26 |
  • 27 | 28 | {item} 29 |
  • 30 | ))} 31 |
32 |
33 |
34 |
35 |

AWS GenAI CI/CD Suite Approach

36 | 37 | 38 | 39 |
40 |
    41 | {innovative.map((item, index) => ( 42 |
  • 43 | 44 | {item} 45 |
  • 46 | ))} 47 |
48 |
49 |
50 |
51 | ); 52 | 53 | const Features: React.FC = () => { 54 | return ( 55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |

63 | Streamline Your Development Workflow 64 |

65 |
66 |
67 |
68 | } 72 | /> 73 | } 77 | /> 78 | } 82 | /> 83 | } 87 | /> 88 |
89 | 90 |
91 |
92 |
93 |
94 |
95 |

96 | Legacy vs Innovative 97 |

98 |
99 |
100 |
101 | 116 | 131 |
132 |
133 |
134 | ); 135 | }; 136 | 137 | export default Features; -------------------------------------------------------------------------------- /web/components/Footer.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | const Footer: React.FC = () => { 4 | return ( 5 |
6 |
7 |
8 |

9 | © 2024 AWS Industry Builder - Aaron Yi 10 | 11 | All rights reserved. 12 |

13 | 17 |
18 |
19 |
20 | ); 21 | }; 22 | 23 | export default Footer; -------------------------------------------------------------------------------- /web/components/Header.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | const Header: React.FC = () => { 4 | return ( 5 |
6 |
7 |

AWS GenAI CI/CD Suite

8 | 23 |
24 |
25 | ); 26 | }; 27 | 28 | export default Header; -------------------------------------------------------------------------------- /web/components/Hero.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | const Hero = () => { 4 | return ( 5 |
6 |
7 |
8 |

9 | Elevate Your DevOps Pipeline with Generative AI 10 |

11 |

Unleash the power of our AI-driven DevOps solution to streamline your workflows, boost productivity, and transform your IT landscape.

12 | 16 |
17 | 18 |
19 |
20 | 28 |
29 |
30 |
31 |
32 | ); 33 | }; 34 | 35 | export default Hero; -------------------------------------------------------------------------------- /web/components/QuickStart.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState } from 'react'; 2 | import CopyableCommand from './CopyableCommand'; 3 | 4 | interface FoldableCommandProps { 5 | title: string; 6 | command: string; 7 | } 8 | 9 | const FoldableCommand: React.FC<FoldableCommandProps> = ({ title, command }) => { 10 | const [isOpen, setIsOpen] = useState(false); 11 | 12 | return ( 13 |
14 | 21 | {isOpen && ( 22 |
23 | 24 |
25 | )} 26 |
27 | ); 28 | }; 29 | 30 | const QuickStart: React.FC = () => { 31 | return ( 32 | <> 33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |

41 | Quick Start 42 |

43 |
44 |
45 |
46 |

1. Configuring IAM to trust GitHub

47 |

48 | To use GitHub's OIDC provider, you must first set up federation with the provider as an IAM IdP. Here's a CloudFormation template that will create a role with trust relationship to GitHub OIDC provider, and add the permissions to invoke the Bedrock API: 49 |

50 | 118 |

2. Setting up the GitHub Actions

119 |

120 | Here's a complete workflow sample that includes configuring AWS credentials and using the AWS GenAI CI/CD Suite GitHub Action. 121 | Go to your repo {'->'} Settings {'->'} Secrets and variables {'->'} Actions {'->'} New repository secret, then add the secret name as e.g. AWS_ROLE_TO_ASSUME, and the value as the role arn, e.g. arn:aws:iam::123456789012:role/role-name created in the previous step, then reference it in the workflow as {'{{'} secrets.AWS_ROLE_TO_ASSUME {'}}'} 122 |

123 | 189 |

190 | After the GitHub action is set up, you can trigger the workflow by pushing a new commit or opening a new pull request. The sample workflow will generate a PR description, provide inline code review, and generate unit test code. 191 |

192 |
193 |
194 |
195 | 196 |
197 |
198 |
199 |
200 |
201 |
202 |
203 |

204 | Further Explore 205 |

206 |
207 |
208 |
209 |

1. Customize Your Own (Optional)

210 |

If you want to customize the AWS GenAI CI/CD Suite GitHub Action, you can clone the repository and publish your own release:

211 | 217 |

Make sure to replace "your-username" with your actual GitHub username and adjust the version number as needed.

218 | 219 |

2. Starting the GitHub App Server

220 |

Before interacting with the GitHub App, you need to start the server that handles user requests:

221 | 224 |

225 | This will start the server locally. For a more stable user experience, consider hosting the code as a container or daemon process in a separate infrastructure. 226 |

227 | 228 |

3. Interacting with GitHub App (Note this feature is still under development with limited functionality)

229 |

Once the server is running, you can interact with the GitHub App by commenting on pull requests. Here are some example commands:

230 |
    231 |
  • @IBTBot generate interesting stats about this repository and render them as a table.
  • 232 |
  • @IBTBot show all the console.log statements in this repository.
  • 233 |
  • @IBTBot generate unit testing code for this file.
  • 234 |
  • @IBTBot read src/utils.ts and generate unit testing code.
  • 235 |
  • @IBTBot read the files in the src/scheduler package and generate a class diagram using mermaid and a README in the markdown format.
  • 236 |
  • @IBTBot modularize this function.
  • 237 |
238 |

239 | Note: Ensure that your GitHub App is properly configured and has the necessary permissions to interact with your repository. 240 |

241 |
242 |
243 |
244 | 245 | ); 246 | }; 247 | 248 | export default QuickStart; -------------------------------------------------------------------------------- /web/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-genai-cicd-suite/d15da72c3ab2cacc50e21ca5e4866b7dcecae990/web/image.png -------------------------------------------------------------------------------- /web/next-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | /// 3 | 4 | // NOTE: This file should not be edited 5 | // see https://nextjs.org/docs/basic-features/typescript for more information. 6 | -------------------------------------------------------------------------------- /web/next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = { 3 | reactStrictMode: true, 4 | } 5 | 6 | if (process.env.GITHUB_ACTIONS) { 7 | nextConfig.output = 'export' 8 | nextConfig.basePath = '/aws-genai-cicd-suite' 9 | nextConfig.assetPrefix = '/aws-genai-cicd-suite/' 10 | } else { 11 | nextConfig.output = 'standalone' 12 | } 13 | 14 | module.exports = nextConfig 15 | -------------------------------------------------------------------------------- /web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aws-genai-cicd-suite-landing-page", 3 | "version": "1.0.0", 4 | "scripts": { 5 | "dev": "next dev", 6 | "build": "next build", 7 | "start": "next start", 8 | "lint": "next lint", 9 | "export": "next build && next export" 10 | }, 11 | "dependencies": { 12 | "@vercel/analytics": "^1.3.1", 13 | "next": "^13.4.0", 14 | "react": "^18.2.0", 15 | "react-dom": "^18.2.0" 16 | }, 17 | "devDependencies": { 18 | "@types/node": "^20.0.0", 19 | "@types/react": "^18.2.0", 20 | "@types/react-dom": "^18.2.0", 21 | "autoprefixer": "^10.4.14", 22 | "postcss": "^8.4.23", 23 | "tailwindcss": "^3.3.2", 24 | "typescript": "^5.0.4" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /web/pages/404.js: -------------------------------------------------------------------------------- 1 | import Link from 'next/link' 2 | import Head from 'next/head' 3 | 4 | export default function Custom404() { 5 | return ( 6 |
7 | 8 | 404 - Page Not Found | AWS GenAI CICD Suite 9 | 10 | 11 |
12 |

404

13 |

Page Not Found

14 |

Oops! The page you are looking for does not exist or has been moved.

15 | 16 | Go back to Home 17 | 18 |
19 |
20 |

AWS GenAI CICD Suite - AI-driven GitHub Actions for automated code reviews and more

21 |
22 |
23 | ) 24 | } 25 | -------------------------------------------------------------------------------- /web/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import '@/styles/globals.css' 2 | import type { AppProps } from 'next/app' 3 | import { Analytics } from '@vercel/analytics/react'; 4 | 5 | export default function App({ Component, pageProps }: AppProps) { 6 | return ( 7 | <> 8 | 9 | 10 | 11 | ) 12 | } -------------------------------------------------------------------------------- /web/pages/index.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import Header from '../components/Header'; 3 | import Hero from '../components/Hero'; 4 | import Features from '../components/Features'; 5 | import QuickStart from '../components/QuickStart'; 6 | import FAQ from '../components/FAQ'; 7 | import Footer from '../components/Footer'; 8 | 9 | export default function Home() { 10 | return ( 11 |
12 |
13 |
{/* Add overflow-x-hidden to prevent horizontal scrolling on mobile */} 14 | 15 | 16 | 17 | 18 |
19 |
21 | ); 22 | } -------------------------------------------------------------------------------- /web/postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | } -------------------------------------------------------------------------------- /web/styles/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; -------------------------------------------------------------------------------- /web/tailwind.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | module.exports = { 3 | content: [ 4 | './pages/**/*.{js,ts,jsx,tsx,mdx}', 5 | './components/**/*.{js,ts,jsx,tsx,mdx}', 6 | './app/**/*.{js,ts,jsx,tsx,mdx}', 7 | ], 8 | theme: { 9 | extend: {}, 10 | }, 11 | plugins: [], 12 | } -------------------------------------------------------------------------------- /web/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "forceConsistentCasingInFileNames": true, 9 | "noEmit": true, 10 | "esModuleInterop": true, 11 | "module": "esnext", 12 | "moduleResolution": "node", 13 | "resolveJsonModule": true, 14 | "isolatedModules": true, 15 | "jsx": "preserve", 16 | "incremental": true, 17 | "paths": { 18 | "@/*": ["./*"] 19 | } 20 | }, 21 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], 22 | "exclude": ["node_modules"] 23 | } -------------------------------------------------------------------------------- /web/vercel.json: -------------------------------------------------------------------------------- 1 | { 2 | "buildCommand": "next build", 3 | "outputDirectory": ".next", 4 | "devCommand": "next dev", 5 | "installCommand": "npm install" 6 | } --------------------------------------------------------------------------------