├── .github └── workflows │ ├── eslint.yml │ ├── publish.yml │ ├── scorecard.yml │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── PRIVACY.md ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── audit_workflow_runs.js ├── audit_workflow_runs.test.js ├── audit_workflow_runs_utils.js ├── eslint.config.js ├── find_compromised_secrets.js ├── find_compromised_secrets.test.js ├── find_compromised_secrets_utils.js ├── package-lock.json ├── package.json └── testFile.json /.github/workflows/eslint.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 5 | # ESLint is a tool for identifying and reporting on patterns 6 | # found in ECMAScript/JavaScript code. 7 | # More details at https://github.com/eslint/eslint 8 | # and https://eslint.org 9 | 10 | name: ESLint 11 | 12 | on: 13 | push: 14 | branches: [ "main" ] 15 | pull_request: 16 | # The branches below must be a subset of the branches above 17 | branches: [ "main" ] 18 | schedule: 19 | - cron: '18 22 * * 0' 20 | 21 | permissions: 22 | contents: read 23 | 24 | jobs: 25 | eslint: 26 | name: Run eslint scanning 27 | runs-on: ubuntu-latest 28 | permissions: 29 | security-events: write 30 | actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status 31 | steps: 32 | - name: Checkout code 33 | uses: actions/checkout@4.2.2 34 | 35 | - name: Install ESLint 36 | run: | 37 | npm install eslint@9.23.0 38 | npm install @microsoft/eslint-formatter-sarif@3.1.0 39 | npm install eslint-plugin-github@6.0.0 40 | 41 | - name: Run ESLint 42 | run: | 43 | npx eslint . 
\ 44 | --config eslint.config.js \ 45 | --ext .js,.jsx,.ts,.tsx \ 46 | --format @microsoft/eslint-formatter-sarif \ 47 | --output-file eslint-results.sarif 48 | continue-on-error: true 49 | 50 | - name: Upload analysis results to GitHub 51 | uses: github/codeql-action/upload-sarif@3.28.15 52 | with: 53 | sarif_file: eslint-results.sarif 54 | wait-for-processing: true 55 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to npm 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | permissions: 8 | contents: read 9 | 10 | jobs: 11 | publish-npm: 12 | runs-on: ubuntu-latest 13 | permissions: 14 | id-token: write 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@4.2.2 18 | - name: Setup Node 19 | uses: actions/setup-node@4.3.0 20 | with: 21 | node-version: 22 22 | registry-url: https://registry.npmjs.org/ 23 | cache: npm 24 | - run: npm ci 25 | - run: npm test 26 | - run: | 27 | echo "Publishing $TAG_NAME" 28 | npm version ${TAG_NAME} --git-tag-version=false 29 | env: 30 | TAG_NAME: ${{github.event.release.tag_name}} 31 | - run: npm whoami; npm publish --provenance --access public 32 | env: 33 | NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} 34 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. They are provided 2 | # by a third-party and are governed by separate terms of service, privacy 3 | # policy, and support documentation. 4 | 5 | name: Scorecard supply-chain security 6 | on: 7 | # For Branch-Protection check. Only the default branch is supported. 
See 8 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection 9 | branch_protection_rule: 10 | # To guarantee Maintained check is occasionally updated. See 11 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained 12 | schedule: 13 | - cron: '42 17 * * 5' 14 | push: 15 | branches: [ "main" ] 16 | 17 | # Declare default permissions as read only. 18 | permissions: read-all 19 | 20 | jobs: 21 | analysis: 22 | name: Scorecard analysis 23 | runs-on: ubuntu-latest 24 | permissions: 25 | # Needed for Code scanning upload 26 | security-events: write 27 | # Needed for GitHub OIDC token if publish_results is true 28 | id-token: write 29 | 30 | steps: 31 | - name: "Checkout code" 32 | uses: actions/checkout@4.2.2 33 | with: 34 | persist-credentials: false 35 | 36 | - name: "Run analysis" 37 | uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 38 | with: 39 | results_file: results.sarif 40 | results_format: sarif 41 | # Scorecard team runs a weekly scan of public GitHub repos, 42 | # see https://github.com/ossf/scorecard#public-data. 43 | # Setting `publish_results: true` helps us scale by leveraging your workflow to 44 | # extract the results instead of relying on our own infrastructure to run scans. 45 | # And it's free for you! 46 | publish_results: true 47 | 48 | # Upload the results as artifacts (optional). Commenting out will disable 49 | # uploads of run results in SARIF format to the repository Actions tab. 50 | # https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts 51 | - name: "Upload artifact" 52 | uses: actions/upload-artifact@4.6.2 53 | with: 54 | name: SARIF file 55 | path: results.sarif 56 | retention-days: 5 57 | 58 | # Upload the results to GitHub's code scanning dashboard (optional). 
59 | # Commenting out will disable upload of results to your repo's Code Scanning dashboard 60 | - name: "Upload to code-scanning" 61 | uses: github/codeql-action/upload-sarif@3.28.12 62 | with: 63 | sarif_file: results.sarif 64 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test with npm 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | # The branches below must be a subset of the branches above 8 | branches: [ "main" ] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | test: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@4.2.2 19 | - name: Setup Node 20 | uses: actions/setup-node@4.3.0 21 | with: 22 | node-version: 22 23 | registry-url: https://registry.npmjs.org/ 24 | cache: npm 25 | - run: npm ci 26 | - run: npm test 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | .DS_Store 3 | *.sljson 4 | *.log -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## 2025-05-28 4 | 5 | Updated audit script to take JSON input to filter by Actions and commits. 6 | 7 | Added unit tests for the audit script. 8 | 9 | Added supporting documentation. 10 | 11 | ## 2025-05-20 12 | 13 | Added script to allow decoding secrets from workflows affected by a particular set of compromises in March 2025. 14 | 15 | Made searching for Actions downloads more efficient. The search now stops after any consecutive lines seen that show an Action was downloaded, and avoids searching the rest of the log file. 
16 | 17 | ## 2025-05-18 18 | 19 | Added searching for logs in the top level `0_` file, if the `1_Set up job.txt` is no longer available in the logs zip file 20 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # This project is maintained by: 2 | * @aegilops 3 | * @advanced-security/fss-advanced-security 4 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at opensource@github.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [http://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: http://contributor-covenant.org 74 | [version]: http://contributor-covenant.org/version/1/4/ 75 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Security 4 | 5 | For security issues, see [SECURITY](SECURITY.md). 6 | 7 | ## Bugs and issues 8 | 9 | Please raise non-security bugs and suggestions in the Issues on the GitHub-hosted repository. 
10 | 11 | ## Developing 12 | 13 | Please: 14 | 15 | * sign commits 16 | * format code with Prettier 17 | * lint code with ESLint, using the GitHub ESLint plugin 18 | * add unit tests with the `assert` module, and run the with `npm test` 19 | * do end-to-end testing with the script against a test organization, Enterprise and repository, and check the results are as expected 20 | 21 | ## Submitting changes 22 | 23 | Please fork the repository, and raise a Pull Request (PR) for review. 24 | 25 | Remember to update the [README](README.md) and [CHANGELOG](CHANGELOG.md). 26 | 27 | Your changes must be acceptable under the [LICENSE](LICENSE) of the project. 28 | 29 | ## Code of conduct 30 | 31 | Follow the [Code of Conduct](CODE_OF_CONDUCT.md). 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright GitHub, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /PRIVACY.md: -------------------------------------------------------------------------------- 1 | # Privacy Policy 2 | 3 | Please see the [GitHub Privacy Statement](https://docs.github.com/en/site-policy/privacy-policies/github-privacy-statement) for GitHub's overall GitHub privacy policy. 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Audit GitHub Actions used in workflow runs for an organization, Enterprise or repository 2 | 3 | Discover which versions of GitHub Actions were used in workflow runs, down to the exact commit. 4 | 5 | Checks the audit log for a GitHub Enterprise/organization (or just lists the runs, for a repository) for workflow runs created between the start date and end date. 6 | 7 | Lists the Actions and specific versions and commits used in them. 8 | 9 | Optionally, filters by particular Actions, possibly including one or more commit SHAs of interest. 10 | 11 | > [!NOTE] 12 | > This is an _unofficial_ tool created by Field Security Specialists, and is not officially supported by GitHub. 13 | 14 | ## Usage 15 | 16 | Clone this repository locally. 17 | 18 | For all scripts, you must set a `GITHUB_TOKEN` in the environment with appropriate access to the audit log on your org or Enterprise, or the repository you are interested in. It can be convenient to use the [`gh` CLI](https://cli.github.com/) to get a token, with [`gh auth login`](https://cli.github.com/manual/gh_auth_login) and [`gh auth token`](https://cli.github.com/manual/gh_auth_token). 
19 | 20 | For Enterprise Server or Data Residency users, please set `GITHUB_BASE_URL` in your environment, e.g. `https://github.acme-inc.example/api/v3`. 21 | 22 | ### audit_workflow_runs.js 23 | 24 | ```text 25 | node audit_workflow_runs.js [] [] 26 | ``` 27 | 28 | Results are printed to the console in CSV, for convenience, and also appended to a single-line JSON file in the current directory. This is named `workflow_audit_results.sljson` by default, and can be set with the optional `output-file` parameter. 29 | 30 | The CSV output has the headers: 31 | 32 | ```csv 33 | org,repo,workflow,run_id,created_at,name,version,sha 34 | ``` 35 | 36 | By default all Actions are listed, but you can filter by particular Actions using a JSON formatted input file. 37 | 38 | For example: 39 | 40 | ```bash 41 | node audit_workflow_runs.js github org 2025-03-13 2025-03-15 github_actions_audit.sljson 42 | ``` 43 | 44 | ```bash 45 | node audit_workflow_runs.js github org 2025-03-13 2025-03-15 github_actions_audit.sljson actions_to_find.json 46 | ``` 47 | 48 | #### JSON input file format 49 | 50 | The JSON input file should an object with the keys being the name of the Action, and the value being an array of the commits you are interested in. 51 | 52 | Use the Action name in the format `owner/repo` or `owner/repo/path`, where `path` can contain any number of slashes. 53 | 54 | You can express some wildcards - use `*` after the first `/` in the Action to include all repositories under the owner, and use `*` in the commit array (or leave it empty) to include all commits. 55 | 56 | An Action name given without a path will match any Action in that repository, whether or not it has a path. You can also explictly use `*` in the path to match any path. 
57 | 58 | ```json 59 | { 60 | "actions/setup-node": ["*"], 61 | "actions/checkout": ["*"], 62 | "actions/setup-python": ["0000000000000000000000000000000000000000"], 63 | } 64 | ``` 65 | 66 | ### find_compromised_secrets.js 67 | 68 | > [!NOTE] 69 | > This is relevant only to secrets leaked because of the `tj-actions/changed-files` and `reviewdog` compromises in March 2025. 70 | 71 | This script takes the structured single-line JSON output of `audit_workflow_runs.js` (not the convenience CSV output) and searches for secrets in the format that was leaked in those workflow runs (doubly base64 encoded, with predictable content). 72 | 73 | ```text 74 | node find_compromised_secrets.js < 75 | ``` 76 | 77 | Results are printed to the console, and written to a file in the current directory, named `compromised_secrets.sljson`. 78 | 79 | For example: 80 | 81 | ```bash 82 | node find_compromised_secrets.js < workflow_audit_results.sljson 83 | ``` 84 | 85 | ## License 86 | 87 | This project is licensed under the terms of the MIT open source license. Please refer to the [LICENSE](LICENSE) for the full terms. 88 | 89 | ## Maintainers 90 | 91 | See [CODEOWNERS](CODEOWNERS) for the list of maintainers. 92 | 93 | ## Support 94 | 95 | > [!NOTE] 96 | > This is an _unofficial_ tool created by Field Security Specialists, and is not officially supported by GitHub. 97 | 98 | See the [SUPPORT](SUPPORT.md) file. 99 | 100 | ## Background 101 | 102 | See the [CHANGELOG](CHANGELOG.md), [CONTRIBUTING](CONTRIBUTING.md), [SECURITY](SECURITY.md), [SUPPORT](SUPPORT.md), [CODE OF CONDUCT](CODE_OF_CONDUCT.md) and [PRIVACY](PRIVACY.md) files for more information. 103 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security 2 | 3 | Thanks for helping make GitHub safe for everyone. 
4 | 5 | GitHub takes the security of our software products and services seriously, including all of the open source code repositories managed through our GitHub organizations, such as [GitHub](https://github.com/GitHub). 6 | 7 | Even though [open source repositories are outside of the scope of our bug bounty program](https://bounty.github.com/index.html#scope) and therefore not eligible for bounty rewards, we will ensure that your finding gets passed along to the appropriate maintainers for remediation. 8 | 9 | ## Reporting Security Issues 10 | 11 | If you believe you have found a security vulnerability in any GitHub-owned repository, please report it to us through coordinated disclosure. 12 | 13 | **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.** 14 | 15 | Instead, please send an email to opensource-security[@]github.com. 16 | 17 | Please include as much of the information listed below as you can to help us better understand and resolve the issue: 18 | 19 | * The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting) 20 | * Full paths of source file(s) related to the manifestation of the issue 21 | * The location of the affected source code (tag/branch/commit or direct URL) 22 | * Any special configuration required to reproduce the issue 23 | * Step-by-step instructions to reproduce the issue 24 | * Proof-of-concept or exploit code (if possible) 25 | * Impact of the issue, including how an attacker might exploit the issue 26 | 27 | This information will help us triage your report more quickly. 
28 | 29 | ## Policy 30 | 31 | See [GitHub's Safe Harbor Policy](https://docs.github.com/en/github/site-policy/github-bug-bounty-program-legal-safe-harbor#1-safe-harbor-terms) 32 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # Support 2 | 3 | > [!NOTE] 4 | > This is an _unofficial_ tool and is not officially supported by GitHub 5 | 6 | ## How to file issues and get help 7 | 8 | This project uses GitHub issues to track bugs and feature requests. Please search the existing issues before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new issue. 9 | 10 | For help or questions about using this project, please open a discussion. 11 | 12 | - This repo is unofficially maintained by GitHub staff. We will do our best to respond to support, feature requests, and community questions in a timely manner. 13 | 14 | ## GitHub Support Policy 15 | 16 | Support for this project is limited to the resources listed above. 
17 | -------------------------------------------------------------------------------- /audit_workflow_runs.js: -------------------------------------------------------------------------------- 1 | import { Octokit } from "@octokit/rest"; 2 | import { throttling } from "@octokit/plugin-throttling"; 3 | import fs from "fs"; 4 | import AdmZip from "adm-zip"; 5 | import { 6 | parseFromInputFile, 7 | matchActionsToAuditTargets, 8 | searchForSetUpJob, 9 | searchForTopLevelLog, 10 | } from "./audit_workflow_runs_utils.js"; 11 | 12 | const OctokitWithThrottling = Octokit.plugin(throttling); 13 | 14 | // Initialize Octokit with a personal access token 15 | const octokit = new OctokitWithThrottling({ 16 | auth: process.env.GITHUB_TOKEN, 17 | baseUrl: process.env.GITHUB_BASE_URL, 18 | throttle: { 19 | onRateLimit: (retryAfter, options, octokit, retryCount) => { 20 | octokit.log.warn( 21 | `Request quota exhausted for request ${options.method} ${options.url}` 22 | ); 23 | 24 | if (retryCount < 1) { 25 | // only retries once 26 | octokit.log.info(`Retrying after ${retryAfter} seconds!`); 27 | return true; 28 | } 29 | }, 30 | onSecondaryRateLimit: (retryAfter, options, octokit) => { 31 | // does not retry, only logs a warning 32 | octokit.log.warn( 33 | `SecondaryRateLimit detected for request ${options.method} ${options.url}` 34 | ); 35 | }, 36 | }, 37 | }); 38 | 39 | octokit.log.warn = () => {}; 40 | octokit.log.error = () => {}; 41 | 42 | // Helper function to extract Actions used from workflow logs 43 | async function extractActionsFromLogs(logUrl) { 44 | let retries = 3; 45 | 46 | while (retries > 0) { 47 | try { 48 | const response = await octokit.request(`GET ${logUrl}`, { 49 | headers: { Accept: "application/vnd.github+json" }, 50 | }); 51 | 52 | // get the zip file content 53 | const zipBuffer = Buffer.from(response.data); 54 | 55 | // Unzip the file 56 | const zip = new AdmZip(zipBuffer); 57 | const logEntries = zip.getEntries(); // Get all entries in the zip file 58 | 
59 | const [success, actions] = searchForSetUpJob(logEntries); 60 | 61 | if (!success) { 62 | actions.push(...searchForTopLevelLog(logEntries)); 63 | } 64 | 65 | return actions; 66 | } catch (error) { 67 | if (error.status == 404) { 68 | console.error( 69 | `Failed to fetch logs from ${logUrl}: 404. This may be due to the logs being too old, or the workflow having not run due to an error.` 70 | ); 71 | return []; 72 | } else if (error.message.startsWith("Connect Timeout Error ") || error.message === "read ECONNRESET" || error.message === "read ETIMEDOUT") { 73 | console.error( 74 | `Connection timeout/reset. Retrying, attempt ${4 - retries}/3. Waiting 30 seconds...` 75 | ); 76 | retries--; 77 | // sleep 30 seconds 78 | await new Promise((resolve) => setTimeout(resolve, 30000)); 79 | continue; 80 | } 81 | console.error(`Failed to fetch logs from ${logUrl}:`, error.message); 82 | return []; 83 | } 84 | } 85 | } 86 | 87 | async function createActionsRunResults(owner, repo, run, actions) { 88 | const action_run_results = []; 89 | 90 | for (const action of actions) { 91 | const workflow = await octokit.request(`GET ${run.workflow_url}`); 92 | 93 | if (workflow.status != 200) { 94 | console.error( 95 | `Error fetching workflow ${run.workflow_url}: `, 96 | workflow.status 97 | ); 98 | continue; 99 | } 100 | 101 | const workflow_path = workflow.data.path; 102 | 103 | action_run_results.push({ 104 | org: owner, 105 | repo: repo, 106 | workflow: workflow_path, 107 | run_id: run.id, 108 | created_at: run.created_at, 109 | name: action[0], 110 | version: action[1], 111 | sha: action[2], 112 | }); 113 | } 114 | return action_run_results; 115 | } 116 | 117 | // Main function to query an organization and its repositories without using the audit log 118 | async function* auditOrganizationWithoutAuditLog(orgName, startDate, endDate) { 119 | try { 120 | // Step 1: Get all repositories in the organization 121 | const repos = await octokit.repos.listForOrg({ 122 | org: orgName, 123 | 
per_page: 100, 124 | }); 125 | 126 | if (repos.status != 200) { 127 | console.error(`Error listing repos for org ${orgName}: `, repos.status); 128 | return; 129 | } 130 | 131 | for (const repo of repos.data) { 132 | // Step 2: Get all workflow runs in the repository within the date range 133 | for await (const result of _auditRepo( 134 | orgName, 135 | repo.name, 136 | startDate, 137 | endDate 138 | )) { 139 | yield result; 140 | } 141 | } 142 | } catch (error) { 143 | console.error(`Error auditing organization ${orgName}: `, error.message); 144 | } 145 | } 146 | 147 | // audit a single repository 148 | async function* auditRepo(repoName, startDate, endDate) { 149 | const [org, repo] = repoName.split("/"); 150 | 151 | for await (const result of _auditRepo(org, repo, startDate, endDate)) { 152 | yield result; 153 | } 154 | } 155 | 156 | // audit a single repository, using the orgname and repo name 157 | async function* _auditRepo(org, repo, startDate, endDate) { 158 | try { 159 | const workflowRuns = await octokit.actions.listWorkflowRunsForRepo({ 160 | owner: org, 161 | repo: repo, 162 | per_page: 100, 163 | created: `${startDate}..${endDate}`, 164 | }); 165 | 166 | for (const run of workflowRuns.data.workflow_runs) { 167 | const actions = await extractActionsFromLogs(run.logs_url); 168 | 169 | const action_run_results = await createActionsRunResults( 170 | org, 171 | repo, 172 | run, 173 | actions 174 | ); 175 | 176 | for (const result of action_run_results) { 177 | yield result; 178 | } 179 | } 180 | } catch (error) { 181 | console.error(`Error auditing repo ${org}/${repo}: `, error.message); 182 | } 183 | } 184 | 185 | // use the Enterprise/Organization audit log to list all workflow runs in that period 186 | // for each workflow run, extract the actions used 187 | // get the audit log, searching for `worklows` category, workflows.prepared_workflow_job being created 188 | async function* auditEnterpriseOrOrg( 189 | entOrOrgName, 190 | entOrOrg, 191 | startDate, 
192 | endDate 193 | ) { 194 | try { 195 | const phrase = `actions:workflows.prepared_workflow_job+created:${startDate}..${endDate}`; 196 | const workflow_jobs = await octokit.paginate( 197 | `GET /${ 198 | entOrOrg.startsWith("ent") ? "enterprises" : "orgs" 199 | }/${entOrOrgName}/audit-log`, 200 | { 201 | phrase, 202 | per_page: 100, 203 | } 204 | ); 205 | 206 | for (const job of workflow_jobs) { 207 | if (job.action == "workflows.created_workflow_run") { 208 | const run_id = job.workflow_run_id; 209 | const [owner, repo] = job.repo.split("/"); 210 | 211 | try { 212 | // get the workflow run log with the REST API 213 | const run = await octokit.actions.getWorkflowRun({ 214 | owner: owner, 215 | repo: repo, 216 | run_id, 217 | }); 218 | 219 | const actions = await extractActionsFromLogs(run.data.logs_url); 220 | 221 | const action_run_results = await createActionsRunResults( 222 | owner, 223 | repo, 224 | run.data, 225 | actions 226 | ); 227 | 228 | for (const result of action_run_results) { 229 | yield result; 230 | } 231 | } catch (error) { 232 | console.error( 233 | `Error fetching workflow run ${owner}/${repo}#${run_id}:`, 234 | error.status 235 | ); 236 | continue; 237 | } 238 | } 239 | } 240 | } catch (error) { 241 | console.error( 242 | `Error auditing ${entOrOrg.startsWith("ent") ? 
"enterprise" : "org"}:`, 243 | error.message 244 | ); 245 | } 246 | } 247 | 248 | async function main() { 249 | // Parse CLI arguments 250 | const args = process.argv.slice(2); 251 | 252 | if (args.length < 4) { 253 | const script_name = process.argv[1].split("/").pop(); 254 | console.error( 255 | `Usage: node ${script_name} [] []` 256 | ); 257 | return; 258 | } 259 | 260 | const [ 261 | orgOrEntName, 262 | orgOrEnt, 263 | startDate, 264 | endDate, 265 | argsOutputFilename, 266 | actionsToAuditFilename, 267 | ] = args; 268 | 269 | if (!["ent", "org", "repo"].includes(orgOrEnt)) { 270 | console.error(" must be 'ent', 'org', 'repo'"); 271 | return; 272 | } 273 | 274 | const actionsToAudit = parseFromInputFile(actionsToAuditFilename); 275 | 276 | const outputFilename = argsOutputFilename || "workflow_audit_results.sljson"; 277 | 278 | const action_run_results = 279 | orgOrEnt != "repo" 280 | ? auditEnterpriseOrOrg(orgOrEntName, orgOrEnt, startDate, endDate) 281 | : auditRepo(orgOrEntName, startDate, endDate); 282 | 283 | console.log("org,repo,workflow,run_id,created_at,name,version,sha"); 284 | 285 | const checkActions = Object.keys(actionsToAudit).length > 0; 286 | 287 | for await (const result of action_run_results) { 288 | if (checkActions) { 289 | if (!matchActionsToAuditTargets(result, actionsToAudit)) { 290 | continue; 291 | } 292 | } 293 | 294 | console.log(Object.values(result).join(",")); 295 | fs.appendFileSync(outputFilename, JSON.stringify(result) + "\n"); 296 | } 297 | } 298 | 299 | await main(); 300 | -------------------------------------------------------------------------------- /audit_workflow_runs.test.js: -------------------------------------------------------------------------------- 1 | import { 2 | matchActionsToAuditTargets, 3 | parseFromInputFile, 4 | searchForActionsLines, 5 | } from "./audit_workflow_runs_utils.js"; 6 | import assert from "assert"; 7 | 8 | function runTests() { 9 | console.log("Running tests for 
import {
  matchActionsToAuditTargets,
  parseFromInputFile,
  searchForActionsLines,
} from "./audit_workflow_runs_utils.js";
import assert from "assert";

// Test driver: run each suite in order, reporting progress as we go.
function runTests() {
  console.log("Running tests for matchActionsToAuditTargets...");
  testMatchActionsToAuditTargets();
  console.log("Running tests for parseFromInputFile...");
  testParseFromInputFile();
  console.log("Running tests for searchForActionsLines...");
  testSearchForActionsLines();
  console.log("All tests passed!");
}

// Table-driven checks for the owner/repo/path/sha matching logic.
function testMatchActionsToAuditTargets() {
  const actionsToAudit = {
    owner1: {
      repo1: {
        "path/to/action": ["sha1", "sha2"],
        "*": ["*"],
      },
    },
  };

  // Each case: [result under test, expected verdict, assertion message].
  const cases = [
    [
      { name: "owner1/repo1/path/to/action", sha: "sha1" },
      true,
      "Should return true for a matching owner, repo, path, and sha",
    ],
    [
      { name: "owner1/repo1/another/path", sha: "sha1" },
      true,
      "Should return true for a matching owner, repo, wildcard path, and sha",
    ],
    [
      { name: "owner2/repo1/path/to/action", sha: "sha1" },
      false,
      "Should return false for a non-matching owner",
    ],
    [
      { name: "owner1/repo1/path/to/action", sha: "sha3" },
      false,
      "Should return false for a non-matching sha",
    ],
  ];

  for (const [result, expected, message] of cases) {
    assert.strictEqual(
      matchActionsToAuditTargets(result, actionsToAudit),
      expected,
      message
    );
  }
}

// Parsing the checked-in fixture should yield the nested lookup structure.
function testParseFromInputFile() {
  const parsed = parseFromInputFile("testFile.json");

  assert.deepStrictEqual(
    parsed,
    {
      owner1: {
        repo1: {
          "path/to/action": ["sha1", "sha2"],
        },
      },
    },
    "Should parse a valid JSON file into the expected structure"
  );
}

// Table-driven checks for extracting action downloads from raw log text.
function testSearchForActionsLines() {
  // Each case: [log content, expected triples, assertion message].
  const cases = [
    [
      `2025-03-28T12:00:00Z Download action repository 'actions/checkout@v4' (SHA:11bd71901bbe5b1630ceea73d27597364c9af683)
2025-03-28T12:00:01Z Download action repository 'actions/setup-node@v3' (SHA:22cd71901bbe5b1630ceea73d27597364c9af684)
2025-03-28T12:00:02Z Some other log line
`,
      [
        ["actions/checkout", "v4", "11bd71901bbe5b1630ceea73d27597364c9af683"],
        ["actions/setup-node", "v3", "22cd71901bbe5b1630ceea73d27597364c9af684"],
      ],
      "Should extract actions and their details from log content",
    ],
    [
      `2025-03-28T12:00:00Z Some random log line
2025-03-28T12:00:01Z Another random log line
`,
      [],
      "Should return an empty array if no actions are found",
    ],
    [
      `2025-03-28T12:00:00Z Download action repository 'actions/checkout@v4' (SHA:11bd71901bbe5b1630ceea73d27597364c9af683)
2025-03-28T12:00:01Z Some other log line
2025-03-28T12:00:02Z Download action repository 'actions/setup-node@v3' (SHA:22cd71901bbe5b1630ceea73d27597364c9af684)
`,
      [["actions/checkout", "v4", "11bd71901bbe5b1630ceea73d27597364c9af683"]],
      "Should stop processing after the first non-action line if actions were found",
    ],
    ["", [], "Should handle empty log content gracefully"],
    // We don't care that the SHA is invalid — it is reported as-is.
    [
      `2025-03-28T12:00:00Z Download action repository 'actions/checkout@v4' (SHA:invalid_sha)
2025-03-28T12:00:01Z Malformed log line
`,
      [["actions/checkout", "v4", "invalid_sha"]],
      "Should handle malformed log lines gracefully",
    ],
  ];

  for (const [logContent, expected, message] of cases) {
    assert.deepStrictEqual(searchForActionsLines(logContent), expected, message);
  }
}

// Run the tests
runTests();
"*" 28 | : ""; 29 | 30 | const hashes = actionsToAudit[owner][matchedRepo][matchedPath]; 31 | 32 | if ( 33 | hashes.includes(sha) || 34 | hashes.includes("*") || 35 | hashes.length == 0 36 | ) { 37 | return true; 38 | } 39 | } 40 | } 41 | } 42 | return false; 43 | } 44 | 45 | export function parseFromInputFile(actionsToAuditFilename) { 46 | const actionsToAudit = {}; 47 | if (actionsToAuditFilename) { 48 | const actionsToAuditFile = fs.readFileSync(actionsToAuditFilename, "utf-8"); 49 | const actionsToAuditRaw = JSON.parse(actionsToAuditFile); 50 | for (const [action, hashes] of Object.entries(actionsToAuditRaw)) { 51 | const [org, repo] = action.split("/"); 52 | const path = action.split("/").slice(2).join("/"); 53 | actionsToAudit[org] ??= {}; 54 | actionsToAudit[org][repo] ??= {}; 55 | actionsToAudit[org][repo][path] = hashes; 56 | } 57 | } 58 | return actionsToAudit; 59 | } 60 | 61 | // Regex to spot, e.g. Download action repository 'actions/checkout@v4' (SHA:11bd71901bbe5b1630ceea73d27597364c9af683) 62 | const actionRegex = /^Download action repository '(.+?)' \(SHA:(.+?)\)/; 63 | 64 | export function searchForActionsLines(logContent) { 65 | const logLines = logContent.split("\n"); 66 | const actions = []; 67 | let foundActions = false; 68 | 69 | for (const line of logLines) { 70 | // separate the timestamp from the data 71 | const data = line.split(" ").slice(1).join(" "); 72 | if (data == undefined) { 73 | continue; 74 | } 75 | if (data.startsWith("Download action repository '")) { 76 | foundActions = true; 77 | const match = actionRegex.exec(data); 78 | if (match) { 79 | const action = match[1]; 80 | const sha = match[2]; 81 | 82 | const [repo, version] = action.split("@"); 83 | actions.push([repo, version, sha]); 84 | } 85 | // quit processing the log after the first line that is not an action, if we already found actions 86 | } else if (foundActions) { 87 | break; 88 | } 89 | } 90 | 91 | return actions; 92 | } 93 | 94 | export function 
searchForSetUpJob(logEntries) { 95 | let foundSetUpJob = false; 96 | const actions = []; 97 | 98 | // Iterate through each file in the zip 99 | for (const entry of logEntries) { 100 | if (!entry.isDirectory) { 101 | const fileName = entry.entryName; // Get the file name 102 | if (fileName === undefined) { 103 | continue; 104 | } 105 | // get the base name of the file 106 | const baseName = fileName.split("/").pop(); 107 | if (baseName == "1_Set up job.txt") { 108 | foundSetUpJob = true; 109 | const logContent = entry.getData().toString("utf8"); 110 | actions.push(...searchForActionsLines(logContent)); 111 | } 112 | } 113 | } 114 | 115 | return [foundSetUpJob, actions]; 116 | } 117 | 118 | export function searchForTopLevelLog(logEntries) { 119 | const actions = []; 120 | 121 | // Iterate through each file in the zip 122 | for (const entry of logEntries) { 123 | if (!entry.isDirectory) { 124 | const fileName = entry.entryName; // Get the file name 125 | if (fileName !== undefined && fileName.startsWith("0_")) { 126 | const logContent = entry.getData().toString("utf8"); 127 | actions.push(...searchForActionsLines(logContent)); 128 | } 129 | } 130 | } 131 | 132 | return actions; 133 | } 134 | -------------------------------------------------------------------------------- /eslint.config.js: -------------------------------------------------------------------------------- 1 | import github from 'eslint-plugin-github'; 2 | 3 | export default [ 4 | { 5 | plugins: { 6 | github 7 | }, 8 | }, 9 | ]; 10 | -------------------------------------------------------------------------------- /find_compromised_secrets.js: -------------------------------------------------------------------------------- 1 | /* Script to find compromised secrets in an Actions workflow run. 2 | 3 | Only relevant to a particular series of incidents, where a malicious actor 4 | pushed a commit to a repository that contained a workflow that leaked secrets 5 | into the logs. 
/* Script to find compromised secrets in an Actions workflow run.

Only relevant to a particular series of incidents, where a malicious actor
pushed a commit to a repository that contained a workflow that leaked secrets
into the logs.

They were doubly-Base64 encoded, so we need to spot Base64 strings and decode them.
*/

import { Octokit } from "@octokit/rest";
import fs from "fs";
import AdmZip from "adm-zip";
import { findSecretsInLines } from "./find_compromised_secrets_utils.js";

// Initialize Octokit with a personal access token
const octokit = new Octokit({
  auth: process.env.GITHUB_TOKEN, // Set your GitHub token in an environment variable
  baseUrl: process.env.GITHUB_BASE_URL, // Set the GitHub base URL, e.g. for Enterprise Server, in an env var
});

/**
 * Download the zipped logs for a workflow run and scan the top-level
 * ("0_*") log files for doubly-Base64-encoded secrets.
 *
 * Best-effort: any failure (network, unzip) is logged and yields [].
 *
 * @param {string} logUrl - API path of the run's logs endpoint
 * @returns {Promise<Array<object>>} decoded secret objects found in the logs
 */
async function extractSecretsFromLogs(logUrl) {
  try {
    const response = await octokit.request(`GET ${logUrl}`, {
      headers: { Accept: "application/vnd.github+json" },
    });

    // The endpoint responds with the zipped log archive.
    const zipBuffer = Buffer.from(response.data);

    // Unzip the file
    const logEntries = new AdmZip(zipBuffer).getEntries();

    const secrets = [];

    // Only top-level "0_*" files hold the combined run log we care about.
    for (const entry of logEntries) {
      if (!entry.isDirectory && entry.entryName.startsWith("0_")) {
        const logContent = entry.getData().toString("utf8");
        secrets.push(...findSecretsInLines(logContent.split("\n")));
      }
    }
    return secrets;
  } catch (error) {
    console.error(`Failed to fetch logs from ${logUrl}:`, error.message);
    return [];
  }
}

/**
 * CLI entry point. Reads workflow runs as single-line JSON from STDIN
 * (e.g. the output of audit_workflow_runs.js), scans each run's logs,
 * prints any secrets found, and appends them to compromised_secrets.sljson.
 */
async function main() {
  // This script takes no CLI arguments; input arrives on STDIN.
  const args = process.argv.slice(2);

  if (args.length > 0) {
    const script_name = process.argv[1].split("/").pop();
    console.error(`Usage: node ${script_name} < <workflow_audit_results.sljson>`);
    return;
  }

  // read the actions runs from STDIN, in single-line JSON format
  const actions_run_lines = fs.readFileSync(0).toString().split("\n");

  const all_secrets = [];

  for (const line of actions_run_lines) {
    // Skip blank lines and anything that is clearly not a JSON object.
    if (line === "" || !line.startsWith("{")) {
      continue;
    }

    try {
      const actions_run = JSON.parse(line);

      const owner = actions_run.org;
      const repo = actions_run.repo;
      const run_id = actions_run.run_id;

      console.log(`Processing actions run ${owner}/${repo}#${run_id}...`);

      // get the logs for the run
      const logUrl = `/repos/${owner}/${repo}/actions/runs/${run_id}/logs`;
      const secrets = await extractSecretsFromLogs(logUrl);

      console.log(
        `Found ${secrets.length} secrets in log for ${owner}/${repo}#${run_id}`
      );

      for (const secret of secrets) {
        console.log(secret);
      }

      all_secrets.push(...secrets);
    } catch (error) {
      // Include the cause so malformed input is distinguishable from other failures.
      console.error(`Failed to parse line: ${line}`, error.message);
      continue;
    }
  }

  // Persist everything found, one JSON object per line.
  for (const secret of all_secrets) {
    fs.appendFileSync(
      "compromised_secrets.sljson",
      JSON.stringify(secret) + "\n"
    );
  }
}

await main();
Some log message", 15 | "2025-03-20T12:02:00Z Another log message", 16 | "", 17 | ]; 18 | 19 | const data = 20 | "SWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; 21 | 22 | const match = base64Regex1.exec(data); 23 | 24 | assert(match, "Failed to match base64 data"); 25 | 26 | // Expected secrets after decoding 27 | const expectedSecrets = [ 28 | { 29 | github_token: { 30 | isSecret: true, 31 | value: "ghs_000000000000000000000000000000000", 32 | }, 33 | }, 34 | { 35 | 0: { 36 | isSecret: true, 37 | value: "a", 38 | }, 39 | }, 40 | ]; 41 | 42 | // Call the function 43 | const secrets = findSecretsInLines(lines); 44 | 45 | // Assert the results 46 | assert.deepStrictEqual( 47 | secrets, 48 | expectedSecrets, 49 | "The secrets extracted from the lines do not match the expected output." 50 | ); 51 | 52 | console.log("Test passed!"); 53 | } 54 | 55 | // Run the test 56 | function main() { 57 | console.log("Running tests..."); 58 | testFindSecretsInLines(); 59 | } 60 | 61 | main(); 62 | -------------------------------------------------------------------------------- /find_compromised_secrets_utils.js: -------------------------------------------------------------------------------- 1 | // base64 strings were used to leak the secrets 2 | export const base64Regex1 = 3 | /^SW[A-Za-z0-9+/]{2}(?:[A-Za-z0-9+/]{4}){15,}(?:[A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)\s*$/; 4 | 5 | export const base64Regex2 = 6 | /^I[A-Za-z0-9+/]{3}(?:[A-Za-z0-9+/]{4}){9,}(?:[A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)\s*$/; 7 | 8 | export function findSecretsInLines(lines) { 9 | const secrets = []; 10 | 11 | let foundSecrets = false; 12 | 13 | for (const line of lines) { 14 | if (line == "") { 15 | continue; 16 | } 17 | 18 | // separate the timestamp from the data 19 | const data = line.split(" ").slice(1).join(" "); 20 | 21 | if (data == undefined) { 22 | console.warn("Failed to parse log line: " + line); 23 | continue; 24 | } 25 | 26 | const match = 
base64Regex1.exec(data); 27 | if (!match) { 28 | // stop processing the log after the first line that does not match the regex, if we already found secrets 29 | if (foundSecrets) { 30 | break; 31 | } 32 | continue; 33 | } 34 | const secret = match[0]; 35 | 36 | // Base64 decode the secret 37 | try { 38 | const decodedOnce = Buffer.from(secret, "base64").toString(); 39 | 40 | const match2 = base64Regex2.exec(decodedOnce); 41 | if (!match2) { 42 | console.log( 43 | "Failed to match base64 data after first decode: " + decodedOnce 44 | ); 45 | continue; 46 | } 47 | 48 | const decoded = Buffer.from(decodedOnce, "base64").toString(); 49 | 50 | // json decode it 51 | try { 52 | const jsonDecoded = JSON.parse("{" + decoded + "}"); 53 | if (Object.keys(jsonDecoded).length > 0) { 54 | foundSecrets = true; 55 | secrets.push(jsonDecoded); 56 | } 57 | } catch (error) { 58 | console.log( 59 | "Failed to decode JSON data after second decode: " + decoded 60 | ); 61 | continue; 62 | } 63 | } catch (error) { 64 | continue; 65 | } 66 | } 67 | 68 | return secrets; 69 | } 70 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "@octokit/core": "^6.1.4", 4 | "@octokit/plugin-throttling": "^9.6.0", 5 | "@octokit/rest": "^21.1.1", 6 | "adm-zip": "^0.5.16" 7 | }, 8 | "type": "module", 9 | "scripts": { 10 | "test": "node audit_workflow_runs.test.js && node find_compromised_secrets.test.js" 11 | }, 12 | "name": "@github/audit-actions-workflow-runs", 13 | "version": "1.0.0", 14 | "devDependencies": { 15 | "@microsoft/eslint-formatter-sarif": "^3.1.0", 16 | "eslint": "^9.23.0", 17 | "eslint-plugin-github": "^6.0.0" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /testFile.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"owner1/repo1/path/to/action": ["sha1", "sha2"] 3 | } 4 | --------------------------------------------------------------------------------