├── .gitattributes ├── .github └── workflows │ ├── mdbook.yml │ ├── release.yml │ └── run-test.yaml ├── .gitignore ├── .golangci.yaml ├── .goreleaser.yaml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── RELEASE.md ├── Taskfile.yml ├── book.toml ├── cmd ├── bundle.go ├── cli-config.go ├── config.go ├── download.go ├── gatecheck │ └── main.go ├── list.go ├── root.go └── validate.go ├── demos ├── bundle.tape ├── list.tape ├── validate.tape └── version.tape ├── docs ├── SUMMARY.md ├── assets │ ├── gatecheck-logo-splash-dark.png │ ├── screenshot-grype-list.png │ └── screenshot-grype-table.png ├── cli-refactor.md ├── configuration.md ├── gatecheck-bundle.md ├── installation.md ├── list-reports.md ├── supported-reports.md ├── title-page.md ├── usage.md └── validation.md ├── go.mod ├── go.sum ├── justfile ├── pkg ├── archive │ ├── bundle.go │ └── bundle_test.go ├── artifacts │ ├── cyclonedx.go │ ├── gitleaks.go │ ├── grype.go │ ├── lcov.go │ └── semgrep.go ├── epss │ └── epss.go ├── format │ ├── matrix.go │ ├── strings.go │ └── strings_test.go ├── gatecheck │ ├── bundle.go │ ├── config.go │ ├── download.go │ ├── list.go │ ├── logo.go │ ├── metadata.go │ ├── validate.go │ └── validate_test.go ├── kev │ └── kev.go └── validate │ ├── validate.go │ └── validate_test.go ├── static ├── gatecheck-logo-dark.png ├── gatecheck-logo-light.png ├── gatecheck-logo-splash-dark.png └── gatecheck-logo-splash-light.png └── test ├── cyclonedx-grype-sbom.json ├── cyclonedx-syft-sbom.json ├── cyclonedx-trivy-sbom.json ├── epss_scores-2023-06-01.csv ├── gatecheck.yaml ├── gitleaks-report.json ├── grype-report.json ├── known_exploited_vulnerabilities.csv ├── known_exploited_vulnerabilities.json └── semgrep-sast-report.json /.gitattributes: -------------------------------------------------------------------------------- 1 | # Default behavior for text files 2 | * text=auto 3 | 4 | *.go text eol=lf 5 | 
-------------------------------------------------------------------------------- /.github/workflows/mdbook.yml: -------------------------------------------------------------------------------- 1 | # Sample workflow for building and deploying a mdBook site to GitHub Pages 2 | # 3 | # To get started with mdBook see: https://rust-lang.github.io/mdBook/index.html 4 | # 5 | name: Deploy mdBook site to Pages 6 | 7 | on: 8 | # Runs on pushes targeting the default branch 9 | push: 10 | branches: ["main"] 11 | 12 | # Allows you to run this workflow manually from the Actions tab 13 | workflow_dispatch: 14 | 15 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 16 | permissions: 17 | contents: read 18 | pages: write 19 | id-token: write 20 | 21 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 22 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 23 | concurrency: 24 | group: "pages" 25 | cancel-in-progress: false 26 | 27 | jobs: 28 | # Build job 29 | build: 30 | runs-on: ubuntu-latest 31 | env: 32 | MDBOOK_VERSION: 0.4.36 33 | steps: 34 | - uses: actions/checkout@v4 35 | - name: Install mdBook 36 | run: | 37 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 38 | rustup update 39 | cargo install --version ${MDBOOK_VERSION} mdbook 40 | - name: Setup Pages 41 | id: pages 42 | uses: actions/configure-pages@v4 43 | - name: Build with mdBook 44 | run: mdbook build 45 | - name: Upload artifact 46 | uses: actions/upload-pages-artifact@v3 47 | with: 48 | path: ./book 49 | 50 | # Deployment job 51 | deploy: 52 | environment: 53 | name: github-pages 54 | url: ${{ steps.deployment.outputs.page_url }} 55 | runs-on: ubuntu-latest 56 | needs: build 57 | steps: 58 | - name: Deploy to GitHub Pages 59 | id: deployment 60 | uses: actions/deploy-pages@v4 61 | --------------------------------------------------------------------------------
/.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # .github/workflows/release.yml 2 | name: release 3 | 4 | on: 5 | push: 6 | # run only against tags 7 | tags: 8 | - "*" 9 | 10 | permissions: 11 | contents: write 12 | 13 | jobs: 14 | goreleaser: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | with: 20 | fetch-depth: 0 21 | - name: Set up Go 22 | uses: actions/setup-go@v5 23 | with: 24 | go-version: stable 25 | - name: Run GoReleaser 26 | uses: goreleaser/goreleaser-action@v5 27 | with: 28 | distribution: goreleaser 29 | version: latest 30 | args: release --clean 31 | env: 32 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 33 | -------------------------------------------------------------------------------- /.github/workflows/run-test.yaml: -------------------------------------------------------------------------------- 1 | name: CICD 2 | on: 3 | pull_request: 4 | jobs: 5 | Describe: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event." 9 | - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!" 10 | - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}." 11 | - name: Check out repository code 12 | uses: actions/checkout@v3 13 | - run: echo "💡 The ${{ github.repository }} repository has been cloned to the runner." 14 | - run: echo "🖥️ The workflow is now ready to test your code on the runner." 15 | - name: List files in the repository 16 | run: | 17 | ls ${{ github.workspace }} 18 | - run: echo "🍏 This job's status is ${{ job.status }}." 
19 | test-unix: 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | platform: 24 | - ubuntu 25 | - macOS 26 | go: 27 | - 21 28 | name: '${{ matrix.platform }} | 1.${{ matrix.go }}.x' 29 | runs-on: ${{ matrix.platform }}-latest 30 | steps: 31 | - uses: actions/checkout@v3 32 | - uses: actions/setup-go@v3 33 | with: 34 | go-version: 1.${{ matrix.go }}.x 35 | cache: true 36 | - run: | 37 | export GOBIN=$HOME/go/bin 38 | case "${{ matrix.go }}" in 39 | 14|15) _version='';; 40 | *) _version='@latest';; 41 | esac 42 | go install github.com/kyoh86/richgo"${_version}" 43 | go install github.com/mitchellh/gox"${_version}" 44 | - run: RICHGO_FORCE_COLOR=1 PATH=$HOME/go/bin/:$PATH make test 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | dist/ 3 | book/ 4 | cover.cov 5 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | # Make sure to check the documentation at https://goreleaser.com 2 | # To run, tag the branch with vX.X.X and do `goreleaser release --clean` 3 | version: 1 4 | 5 | before: 6 | hooks: 7 | # You may remove this if you don't use go modules. 
8 | - go mod tidy 9 | 10 | builds: 11 | - main: ./cmd/gatecheck 12 | env: 13 | - CGO_ENABLED=0 14 | goos: 15 | - linux 16 | - windows 17 | - darwin 18 | ldflags: > 19 | -X 'main.cliVersion={{.Version}}' 20 | -X 'main.gitCommit={{.ShortCommit}}' 21 | -X 'main.buildDate={{.Date}}' 22 | 23 | archives: 24 | - name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" 25 | checksum: 26 | name_template: 'checksums.txt' 27 | snapshot: 28 | name_template: "{{ incpatch .Version }}-next" 29 | changelog: 30 | use: github 31 | sort: asc 32 | filters: 33 | exclude: 34 | - '^docs:' 35 | - '^test:' 36 | 37 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [UNRELEASED] 9 | 10 | ## [0.8.1] - 2025-04-09 11 | 12 | ### Fixed 13 | 14 | - Updated EPSS schema to work with 3.14.2025 15 | 16 | ## [0.8.0] - 2024-10-23 17 | 18 | * Fixed a bug where validation failures were not resulting in a non-zero exit code 19 | * Renamed the --file / -f CLI argument to --config / -f for consistency with other tools 20 | * improvements to validate output 21 | These "improvements" are short term hacks. There is a need for a fundamental 22 | overhaul of how output is generated to improve usability 23 | * Implemented code coverage support. 
24 | * Updated the docs to reflect the removal of the --all flag 25 | 26 | ## [0.7.6] - 2024-09-08 27 | 28 | ### Fixed 29 | 30 | - Crash when running `gatecheck bundle add` with no tags 31 | 32 | ## [0.7.5] - 2024-06-18 33 | 34 | ### Fixed 35 | 36 | - Missing `slog.Error` for KEV validations 37 | - Use base EPSS url instead of a specific date to get the latest data 38 | - Use a *os.File for kev and epss validation to simplify CLI runtime logic 39 | 40 | ## [0.7.0] - 2024-05-17 41 | 42 | ### Changed 43 | 44 | - Use ConfigKit with a metaconfig for all commands 45 | 46 | ### Added 47 | 48 | - Gatecheck Config Encoder & Decoder 49 | 50 | ## [0.6.2] - 2024-05-15 51 | 52 | ### Changed 53 | 54 | - Remove debug information from semgrep decoding 55 | 56 | ## [0.6.1] - 2024-05-15 57 | 58 | ### Changed 59 | 60 | - Use a table writing package instead of the builtin package 61 | 62 | ### Added 63 | 64 | - `gatecheck list --markdown` support for rendering markdown tables 65 | 66 | ## [0.6.0] - 2024-04-26 67 | 68 | ### Changed 69 | 70 | - Removed legacy CLI and deprecated pkg code 71 | 72 | ## [0.5.0] - 2024-04-26 73 | 74 | ### Fixed 75 | 76 | - Semgrep parsing error by switching to any type instead of []string 77 | - Slog validation errors for clarity 78 | 79 | ## [0.4.1] - 2024-04-02 80 | 81 | ### Fixed 82 | 83 | - EPSS Score URL default date go's std lib date parsing uses '02' for day 84 | - Viper flag collision between list and validate commands 85 | - EPSS URL env var use 86 | 87 | ## [0.4.0] - 2024-03-19 88 | 89 | ### Added 90 | 91 | - New CLI v1 behind feature flag, in development 92 | - More robust version information 93 | - CLI v1 debug and silent flags for logging 94 | - CLI v1 config init 95 | - CLI v1 config info 96 | - CLI v1 config convert 97 | - CLI v1 bundle 98 | - CLI v1 list 99 | - CLI v1 list all with epss scores 100 | 101 | ### Fixed 102 | 103 | - Wrap msg in bytes with string for logging 104 | - Grype & Cyclonedx validation errors 105 | 106 | ### Changed 107 
| 108 | - Old CLI files moved to cmd/v0 109 | - Deprecated existing CLI 110 | - package organization 111 | - rules execution order is more structured 112 | 113 | ## [0.3.0] - 2023-10-17 114 | 115 | ### Deprecated 116 | 117 | - Defect Dojo export will be deprecated in the future in favor of just using the API, notice added 118 | 119 | ### Added 120 | 121 | - New Environment Variables for Defect Dojo 122 | 123 | ### Changed 124 | 125 | - Defect Dojo exporting to include mandatory values 126 | - Time Zone fix with exporting 127 | 128 | ## [0.2.2] - 2023-10-3 129 | 130 | ### Changed 131 | 132 | - Added additional logging for defect dojo export 133 | 134 | ### Fixed 135 | 136 | - Removed lazy reader in favor of fileOrEmptyBuf for better error handling 137 | - Verbose flag bug where the CLI would always be log level debug instead of warn 138 | - Grype decoder checkReport will use descriptor Timestamp instead of name since name is not a required field 139 | 140 | ## [0.2.1] - 2023-09-12 141 | 142 | ### Changed 143 | 144 | - Update to go 1.21.1 to avoid packaging errors 145 | 146 | ## [0.2.0] - 2023-09-05 147 | 148 | ### Changed 149 | 150 | - Update to go 1.21 151 | - Custom logger implementation using zerolog to std lib slog 152 | - Release process documentation 153 | 154 | ### Added 155 | 156 | - EnableSimpleRiskAcceptance Products API settings option 157 | - DeduplicationOnEngagement Engagements API settings option 158 | - CloseOldFindings Import-Scan API settings option 159 | - CloseOldFindingsProductScope Import-Scan API settings option 160 | - CreateFindingGroupsForAllFindings Import-Scan API settings option 161 | - Documentation for exported functions and structs 162 | 163 | ### Fixed 164 | 165 | - Bug where Gitleaks report with no secrets aren't properly decoded 166 | - A bunch of golangci-lint complaints 167 | 168 | ### Changed 169 | 170 | - Update DefectDojo Export Service calls and unit-tests 171 | - Update README documentation 172 | 173 | ## [0.1.3] - 
2023-08-04 174 | 175 | ### Fixed 176 | 177 | - Bug with EPSS Time Zone (may need further discovery down the line) 178 | - LazyReader Export bug (AWS API wants to seek on the body which doesn't work on the LazyReader) 179 | 180 | ## [0.1.2] - 2023-08-02 181 | 182 | ### Changed 183 | 184 | - Updated dependencies 185 | - Bug fix, EPSS to use current UTC time 186 | - Bug fix, validation command has a separate bundle function to prevent error overwriting on recursive calls 187 | 188 | ### Added 189 | 190 | - Lazy File Reader in internal/io to open file errors at read 191 | 192 | ## [0.1.0] - 2023-07-26 193 | 194 | ### Changed 195 | 196 | - _Major Refactoring_ 197 | - Bundling is now a gzipped tarball with a manifest file 198 | - Using "Agents" for KEV and EPSS downloading, simplify interface 199 | - Encoding package refactor, using generics 200 | - Common validation pattern between artifacts 201 | - Report artifacts as isolated packages instead 202 | - Table refactor for simplified table formatting 203 | - Table sorting pattern updated 204 | - Table printing to use unicode pretty borders 205 | - Fully refactored the validation pattern 206 | 207 | ### Added 208 | 209 | - EPSS Allow and Deny Thresholds 210 | - Validation rules via functions that can be layered 211 | 212 | ### Removed 213 | 214 | - Config object in favor of using a map[string]any which makes it easier to support new reports in the future 215 | - Encoding package that relied decodeBytes functions 216 | 217 | ## [0.0.10] - 2023-06-07 218 | 219 | ### Changed 220 | 221 | - New ASCII Logo 222 | - Bundle logging to use internal logger 223 | - Sort Grype print by Severity, then by Package 224 | - EPSS Service will write to existing CVE slice instead of querying 225 | - Simplified EPSS API Queries with a better async strategy 226 | - Added Data Store that can query an imported CSV file for EPSS Scores 227 | - Added a download command that will pull the CSV file from the API 228 | - Semgrep table ordering and 
prefix clipping 229 | - "CleanAndAbreviate" rename to ClipLeft or ClipRight 230 | 231 | ### Added 232 | 233 | - Version Command with Logo Output 234 | - Basic Logging Capabilities with custom logger, (Zerolog abstraction) package in internal/log 235 | - Global Verbose flag and elapsed execution time tracking 236 | - Debugs in CLI commands 237 | - Make commands for test and coverage 238 | - Allow Deny List for Grype reports 239 | - 'allow-missing' flag to bundle command 240 | - Sort tables by single or multiple columns in ascending, descending or custom order 241 | - Export to AWS S3 242 | - Support CycloneDX BOM and Vulnerabilities in Print, Bundle, Export, and Validate 243 | - Some debug logs focused on measuring performance 244 | - bundle extract command 245 | 246 | ## [0.0.9] - 2023-02-06 247 | 248 | ### Added 249 | 250 | - Additional debug information for bad status codes on export 251 | 252 | ### Changed 253 | 254 | - Marked config flag in validate command as required 255 | - Upgrade to go 1.20, no functional updates or changes to code 256 | 257 | ### Fixed 258 | 259 | - Bug in dojo export causing the open file to be read twice resulting in a blank file upload 260 | 261 | ## [0.0.8] - 2023-01-24 262 | 263 | ### Added 264 | 265 | - Defect Dojo Export has a exponential backoff between queries 266 | - Gatecheck Bundle 267 | - Validation in Bundle 268 | - Predictive encoding to avoid the need to label each file type 269 | - KEV Blacklisting 270 | - EPSS Table 271 | - Strings package for pretty table printing 272 | 273 | ### Changed 274 | 275 | - Exporter interface to allow retries on failed exports 276 | - Validation strategy 277 | - Removed implementation side interfacing for export services and epss in favor of caller side interfacing 278 | - Main function moved to cmd/gatecheck for better package conformation 279 | 280 | ### Removed 281 | 282 | - Gatecheck Report in favor of Gatecheck Bundle 283 | - The concept of Assets, treating everything as artifacts 284 | 
- Unnecessary complexity in Defect Dojo Export Service 285 | 286 | ## [0.0.7] - 2022-11-9 287 | 288 | ### Added 289 | 290 | - Gitleaks support, has the config option to allow secrets 291 | - Gitleaks test report generated from Juice Shop v14.3.0-4-g2c757a928 292 | - Gitleaks to CLI 293 | - Gitleaks as Export target to Defect Dojo 294 | - Blacklist Validation using KEVs from CISA 295 | - Dates to change log releases 296 | - CI/CD GitHub actions to auto release on tag 297 | 298 | ### Changed 299 | 300 | - YAML 2.0 to 3.0 in all places 301 | - TODO: Retry option for export command at CLI level 302 | - Use pointers for pkg/artifact values to allow nil 303 | - Use pointers for pkg/config values to allow nil 304 | - Unit tests to prevent nil pointer issues 305 | - Silence Usage text on error 306 | - Use std err in main for proper highlighting 307 | 308 | ## [0.0.6] 309 | 310 | ### Added 311 | 312 | - Semgrep add to report command and unit tests 313 | 314 | ## [0.0.5] 315 | 316 | ### Changed 317 | 318 | - Use json and yaml decoders and encoders instead of wrapping with the reader, writer pattern 319 | - Unit tests 320 | - fields/CVE to finding for use in other modules 321 | - Deprecated 'WithAsset' on Grype 322 | - Added 'WithScanReport' to Artifacts 323 | - Refactored the cmd to use the new IO functions 324 | - Refactor unit tests in cmd package to be more uniform 325 | - Removed test utility and internal packages in favor of IO functions 326 | - Move config, report, and validator to pkg/gatecheck for simplified folder structure 327 | - Moved validate responsibility to the artifact 328 | - Converted ExportGrype in exporter to just Export using a scan type enum for better support for multiple file types 329 | 330 | ### Added 331 | 332 | - JSON struct tags to config for additional support 333 | - Entity Documentation to README 334 | - Semgrep Artifact 335 | - Semgrep Entity 336 | - Generic Asset wrapper for files 337 | - cmd package now has a IO file to consolidate common 
operations 338 | - Semgrep command to CLI 339 | 340 | ## [0.0.4] 341 | 342 | ### Fixed 343 | 344 | - A new report command takes the project name from the config file 345 | 346 | ## [0.0.3] 347 | 348 | ### Removed 349 | 350 | - Debug prints in report command 351 | - Use of ioutil which was deprecated 352 | 353 | ### Changed 354 | 355 | - Handle edge case of timezone not being able to load due to lack of tzdata pkg/exporter/defectDojo/exporter 356 | 357 | ## [0.0.2] 358 | 359 | ### Removed 360 | 361 | - init function from all commands to prevent unexpected behaviors during test 362 | 363 | ### Changed 364 | 365 | - Commands have a wrapper function to inject arguments 366 | - Internal/Util test package uses a ReadCloser interface 367 | - Updated cmd unit tests to use ReadCloser to open test files 368 | 369 | ### Added 370 | 371 | - Exporter pkg 372 | - Defect Dojo Exporter pkg 373 | - Export command to CLI 374 | - Environment variables for Defect Dojo exporter 375 | - GitHub Action for testing 376 | 377 | ## [0.0.1] - 2022-06-28 378 | 379 | ### Added 380 | 381 | - Artifact pkg 382 | - Config pkg 383 | - Report pkg 384 | - internal utility file system functions 385 | - Initial CLI functions using Cobra 386 | - Validator for Grype 387 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish to make via issue, 4 | email, or any other method with the owners of this repository before making a change. 5 | 6 | Please note we have a code of conduct, please follow it in all your interactions with the project. 7 | 8 | ## Trunk Based Development 9 | 10 | This project only uses a single branch 'main'. 11 | If you would like to make contributions, fork the project and submit a PR to main. 12 | 13 | ## Pull Request Process 14 | 15 | 1. 
Ensure any install or build dependencies are removed before the end of the layer when doing a 16 | build. 17 | 2. Update the README.md with details of changes to the interface, this includes new environment 18 | variables, exposed ports, useful file locations and container parameters. 19 | 3. Increase the version numbers in any examples files and the README.md to the new version that this 20 | Pull Request would represent. The versioning scheme we use is [SemVer](http://semver.org/). 21 | 4. Only project maintainers can merge to main. 22 | 23 | ## Release Process 24 | 25 | When a project maintainer feels that a release is needed to capture a specific suite of features, 26 | a release commit will be created and tagged. 27 | This project uses GitHub Actions to release the project with goreleaser whenever a tag is pushed. 28 | Since this project has not been released to v1.0.0, the stability and long term support of the 29 | application is not guaranteed. 30 | 31 | ## Code of Conduct 32 | 33 | ### Our Pledge 34 | 35 | In the interest of fostering an open and welcoming environment, we as 36 | contributors and maintainers pledge to making participation in our project and 37 | our community a harassment-free experience for everyone, regardless of age, body 38 | size, disability, ethnicity, gender identity and expression, level of experience, 39 | nationality, personal appearance, race, religion, or sexual identity and 40 | orientation. 
41 | 42 | ### Our Standards 43 | 44 | Examples of behavior that contributes to creating a positive environment 45 | include: 46 | 47 | * Using welcoming and inclusive language 48 | * Being respectful of differing viewpoints and experiences 49 | * Gracefully accepting constructive criticism 50 | * Focusing on what is best for the community 51 | * Showing empathy towards other community members 52 | 53 | Examples of unacceptable behavior by participants include: 54 | 55 | * The use of sexualized language or imagery and unwelcome sexual attention or 56 | advances 57 | * Trolling, insulting/derogatory comments, and personal or political attacks 58 | * Public or private harassment 59 | * Publishing others' private information, such as a physical or electronic 60 | address, without explicit permission 61 | * Other conduct which could reasonably be considered inappropriate in a 62 | professional setting 63 | 64 | ### Our Responsibilities 65 | 66 | Project maintainers are responsible for clarifying the standards of acceptable 67 | behavior and are expected to take appropriate and fair corrective action in 68 | response to any instances of unacceptable behavior. 69 | 70 | Project maintainers have the right and responsibility to remove, edit, or 71 | reject comments, commits, code, wiki edits, issues, and other contributions 72 | that are not aligned to this Code of Conduct, or to ban temporarily or 73 | permanently any contributor for other behaviors that they deem inappropriate, 74 | threatening, offensive, or harmful. 75 | 76 | ### Scope 77 | 78 | This Code of Conduct applies both within project spaces and in public spaces 79 | when an individual is representing the project or its community. Examples of 80 | representing a project or community include using an official project e-mail 81 | address, posting via an official social media account, or acting as an appointed 82 | representative at an online or offline event. 
Representation of a project may be 83 | further defined and clarified by project maintainers. 84 | 85 | ### Enforcement 86 | 87 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 88 | reported by contacting the project team at team@gatecheck.dev. All 89 | complaints will be reviewed and investigated and will result in a response that 90 | is deemed necessary and appropriate to the circumstances. The project team is 91 | obligated to maintain confidentiality with regard to the reporter of an incident. 92 | Further details of specific enforcement policies may be posted separately. 93 | 94 | Project maintainers who do not follow or enforce the Code of Conduct in good 95 | faith may face temporary or permanent repercussions as determined by other 96 | members of the project's leadership. 97 | 98 | ### Attribution 99 | 100 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 101 | available at [http://contributor-covenant.org/version/1/4][version] 102 | 103 | [homepage]: http://contributor-covenant.org 104 | [version]: http://contributor-covenant.org/version/1/4/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2022 Clarity Innovations Inc. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SRC := $(shell find . -type f -name '*.go') 2 | MAIN_PACKAGE_PATH := ./cmd/gatecheck 3 | BINARY_NAME := ./bin/gatecheck 4 | 5 | .PHONY: format test dependencies clean coverage open-coverage build release-snapshot release all 6 | 7 | default: all 8 | 9 | all: format test build 10 | 11 | format: 12 | $(info ******************** Checking formatting ********************) 13 | @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) 14 | 15 | test: dependencies 16 | $(info ******************** Running tests ********************) 17 | go test -cover ./... 18 | 19 | coverage: 20 | $(info ******************** Generating test coverage ********************) 21 | go test -coverprofile=coverage.out ./... 
22 | 23 | open-coverage: coverage 24 | go tool cover -html=coverage.out 25 | 26 | dependencies: 27 | $(info ******************** Downloading dependencies ********************) 28 | go mod download 29 | 30 | build: 31 | $(info ******************** Compiling binary to ./bin ********************) 32 | go build -ldflags="-X 'main.cliVersion=$$(git describe --tags)' -X 'main.gitCommit=$$(git rev-parse HEAD)' -X 'main.buildDate=$$(date -u +%Y-%m-%dT%H:%M:%SZ)' -X 'main.gitDescription=$$(git log -1 --pretty=%B)'" -o ${BINARY_NAME} ${MAIN_PACKAGE_PATH} 33 | 34 | release-snapshot: 35 | goreleaser release --snapshot --rm-dist 36 | 37 | release: 38 | goreleaser release --rm-dist 39 | 40 | clean: 41 | rm -rf ${BINARY_NAME} coverage.out 42 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Gatecheck 2 | [![CICD Pipeline](https://github.com/gatecheckdev/gatecheck/actions/workflows/run-test.yaml/badge.svg?branch=main)](https://github.com/gatecheckdev/gatecheck/actions/workflows/run-test.yaml) 3 | [![Go Reference](https://pkg.go.dev/badge/github.com/gatecheckdev/gatecheck.svg)](https://pkg.go.dev/github.com/gatecheckdev/gatecheck) 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/gatecheckdev/gatecheck)](https://goreportcard.com/report/github.com/gatecheckdev/gatecheck) 5 | 6 | ![Gatecheck Logo](https://static.gatecheck.dev/gatecheck-logo-splash-dark.png) 7 | 8 | Gatecheck automates report validation in a CI/CD Pipeline by comparing security findings to pre-determined thresholds. 9 | It also provides report aggregation, artifact integrity, and deployment validation. 10 | Gatecheck is stateless so self-hosting and provisioning servers is not required. 
11 | 12 | ![Gatecheck Version](https://static.gatecheck.dev/gatecheck-version.gif) 13 | 14 | ## Getting Started 15 | 16 | The fastest way to get started with Gatecheck is to download the pre-built binaries for your target system. 17 | 18 | ```shell 19 | cd 20 | curl -L .tar.gz | tar xz 21 | ./gatecheck 22 | ./gatecheck --help 23 | ``` 24 | 25 | The Gatecheck CLI supports ```--help``` for every command for more detail usage. 26 | 27 | Generate a configuration file with the default thresholds set 28 | 29 | ```shell 30 | gatecheck config init > gatecheck.yaml 31 | ``` 32 | 33 | ### Summarize Reports with List 34 | 35 | ```shell 36 | gatechec ls --help 37 | ``` 38 | 39 | List with EPSS Scores is support for Grype and Cyclondex reports 40 | 41 | ![Gatecheck Version](https://static.gatecheck.dev/gatecheck-list.gif) 42 | 43 | 44 | ### Validation 45 | 46 | List with EPSS Scores is support for Grype and Cyclondex reports 47 | 48 | ![Gatecheck Validate](https://static.gatecheck.dev/gatecheck-validate.gif) 49 | 50 | ### Bundling Artifacts 51 | 52 | Bundling Artifacts produces a gzipped tarball with an associated manifest for metadata. 53 | 54 | ![Gatecheck Bundle](https://static.gatecheck.dev/gatecheck-bundle.gif) 55 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release Instructions 2 | 3 | Releasing is done by maintainers with permissions to bypass the PR only rule. 4 | By pushing a tag to the main repository, the release action is triggered which 5 | packages the app and releases it to the GitHub UI. 6 | 7 | 1. Run `task upgrade` to update dependencies and tidy modules 8 | 1. Run `task test` to make sure unit testing still passes after upgrading 9 | 1. Commit the changes `git commit -am "chore: upgrade dependencies"` 10 | 1. Update CHANGELOG.md 11 | 1. Release commit (optional) `git commit -m "release: vx.x.x && git push` 12 | 1. 
Release tag `git tag -s -a vX.X.X -m "Release version X.X.X"` 13 | 1. git push tag `git push vX.X.X` 14 | 15 | # References 16 | 17 | [Semantic Versioning](https://semver.org/) 18 | 19 | [Coventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) 20 | 21 | [goreleaser](https://goreleaser.com/) 22 | -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | env: 4 | INSTALL_DIR: "/usr/local/bin" 5 | IMAGE_TAG: "gatecheck:latest" 6 | 7 | tasks: 8 | build: 9 | desc: "Build the gatecheck binary" 10 | vars: 11 | BUILD_DATE: '{{dateInZone "2006-01-02T15:04:05Z" now "UTC"}}' 12 | CLI_VERSION: 13 | sh: git describe --tags || git rev-parse --short HEAD || "v0.0.0-unknown" 14 | GIT_COMMIT: 15 | sh: git rev-parse HEAD 16 | GIT_DESCRIPTION: 17 | sh: git log -1 --pretty=%B 18 | cmds: 19 | - mkdir -p bin 20 | - go build -ldflags="-X 'main.cliVersion={{.CLI_VERSION}}' -X 'main.gitCommit={{.GIT_COMMIT}}' -X 'main.buildDate={{.BUILD_DATE}}' -X 'main.gitDescription={{.GIT_DESCRIPTION}}'" -o ./bin ./cmd/gatecheck 21 | sources: 22 | - cmd/* 23 | - pkg/* 24 | - go* 25 | generates: 26 | - bin/gatecheck 27 | 28 | install: 29 | desc: "Install the gatecheck binary" 30 | prompt: 'Install gatecheck binary to {{joinPath .INSTALL_DIR "gatecheck"}} ...Continue?' 31 | deps: 32 | - build 33 | cmds: 34 | - cp ./bin/gatecheck {{joinPath .INSTALL_DIR "gatecheck"}} 35 | sources: 36 | - ./bin/gatecheck 37 | generates: 38 | - '{{joinPath .INSTALL_DIR "gatecheck"}}' 39 | 40 | uninstall: 41 | desc: "Uninstall the gatecheck binary" 42 | prompt: 'Will remove binary {{joinPath .INSTALL_DIR "gatecheck"}}' 43 | cmds: 44 | - rm '{{joinPath .INSTALL_DIR "gatecheck"}}' 45 | 46 | test: 47 | desc: "Run unit tests with coverage" 48 | cmds: 49 | - go test -cover ./... 
50 | 51 | lint: 52 | desc: "Run golangci-lint in view-only mode" 53 | cmds: 54 | - golangci-lint run 55 | 56 | fix: 57 | desc: "Fix linting errors and format code" 58 | cmds: 59 | - golangci-lint run --fix 60 | 61 | release-snapshot: 62 | desc: "Create a snapshot release" 63 | cmds: 64 | - goreleaser release --snapshot --rm-dist 65 | 66 | release: 67 | desc: "Create a release" 68 | cmds: 69 | - goreleaser release --rm-dist 70 | 71 | upgrade: 72 | desc: "Upgrade package dependencies" 73 | preconditions: 74 | - sh: git diff --quiet && git diff --cached --quiet 75 | msg: "Repository is dirty, commit changes before upgrading." 76 | cmds: 77 | - go get -u ./... 78 | - go mod tidy 79 | 80 | serve-docs: 81 | desc: "Serve documentation locally" 82 | cmds: 83 | - mdbook serve 84 | 85 | clean: 86 | desc: "Clean up build directory" 87 | cmds: 88 | - rm -rf ./bin 89 | -------------------------------------------------------------------------------- /book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Bacchus Jackson"] 3 | language = "en" 4 | multilingual = false 5 | src = "docs" 6 | title = "Gatecheck" 7 | 8 | [output.html] 9 | default-theme = "dark" 10 | preferred-dark-theme = "navy" 11 | curly-quotes = true 12 | mathjax-support = false 13 | copy-fonts = true 14 | no-section-label = false 15 | git-repository-url = "https://github.com/gatecheckdev/gatecheck" 16 | git-repository-icon = "fa-github" 17 | edit-url-template = "https://github.com/gatecheckdev/gatecheck/edit/master/guide/{path}" 18 | -------------------------------------------------------------------------------- /cmd/bundle.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "log/slog" 5 | "os" 6 | "path" 7 | 8 | "github.com/gatecheckdev/gatecheck/pkg/gatecheck" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | var bundleCmd = &cobra.Command{ 13 | Use: "bundle", 14 | Short: "create and 
manage a gatecheck bundle", 15 | } 16 | 17 | var bundleCreateCmd = &cobra.Command{ 18 | Use: "create BUNDLE_FILE TARGET_FILE", 19 | Short: "create a new bundle with a new file", 20 | Aliases: []string{"init"}, 21 | Args: cobra.ExactArgs(2), 22 | PreRunE: func(cmd *cobra.Command, args []string) error { 23 | bundleFilename := args[0] 24 | targetFilename := args[1] 25 | 26 | bundleFile, err := os.OpenFile(bundleFilename, os.O_CREATE|os.O_WRONLY, 0o644) 27 | if err != nil { 28 | return err 29 | } 30 | targetFile, err := os.Open(targetFilename) 31 | if err != nil { 32 | return err 33 | } 34 | 35 | RuntimeConfig.bundleFile = bundleFile 36 | RuntimeConfig.targetFile = targetFile 37 | RuntimeConfig.BundleTagValue = RuntimeConfig.BundleTag.Value().([]string) 38 | return nil 39 | }, 40 | RunE: func(cmd *cobra.Command, args []string) error { 41 | targetFilename := args[1] 42 | label := path.Base(targetFilename) 43 | bf, tf := RuntimeConfig.bundleFile, RuntimeConfig.targetFile 44 | tags := RuntimeConfig.BundleTagValue 45 | return gatecheck.CreateBundle(bf, tf, label, tags) 46 | }, 47 | } 48 | 49 | var bundleAddCmd = &cobra.Command{ 50 | Use: "add BUNDLE_FILE TARGET_FILE", 51 | Short: "add a file to a bundle", 52 | Args: cobra.ExactArgs(2), 53 | PreRunE: func(cmd *cobra.Command, args []string) error { 54 | bundleFilename := args[0] 55 | targetFilename := args[1] 56 | 57 | bundleFile, err := os.OpenFile(bundleFilename, os.O_RDWR, 0o644) 58 | if err != nil { 59 | return err 60 | } 61 | targetFile, err := os.Open(targetFilename) 62 | if err != nil { 63 | return err 64 | } 65 | 66 | RuntimeConfig.bundleFile = bundleFile 67 | RuntimeConfig.targetFile = targetFile 68 | RuntimeConfig.BundleTagValue = RuntimeConfig.BundleTag.Value().([]string) 69 | return nil 70 | }, 71 | RunE: func(cmd *cobra.Command, args []string) error { 72 | targetFilename := args[1] 73 | slog.Info("bundle tag", "environment", os.Getenv("GATECHECK_BUNDLE_TAG")) 74 | label := path.Base(targetFilename) 75 | bf, tf 
:= RuntimeConfig.bundleFile, RuntimeConfig.targetFile 76 | tags := RuntimeConfig.BundleTagValue 77 | return gatecheck.AppendToBundle(bf, tf, label, tags) 78 | }, 79 | } 80 | 81 | var bundleRemoveCmd = &cobra.Command{ 82 | Use: "remove BUNDLE_FILE TARGET_FILE", 83 | Short: "remove a file from a bundle by label", 84 | Aliases: []string{"rm"}, 85 | Args: cobra.ExactArgs(2), 86 | PreRunE: func(cmd *cobra.Command, args []string) error { 87 | bundleFilename := args[0] 88 | 89 | bundleFile, err := os.OpenFile(bundleFilename, os.O_RDWR, 0o644) 90 | if err != nil { 91 | return err 92 | } 93 | RuntimeConfig.bundleFile = bundleFile 94 | return nil 95 | }, 96 | RunE: func(cmd *cobra.Command, args []string) error { 97 | label := args[1] 98 | return gatecheck.RemoveFromBundle(RuntimeConfig.bundleFile, label) 99 | }, 100 | } 101 | 102 | func newBundleCommand() *cobra.Command { 103 | RuntimeConfig.BundleTag.SetupCobra(bundleCreateCmd) 104 | RuntimeConfig.BundleTag.SetupCobra(bundleAddCmd) 105 | 106 | bundleCmd.AddCommand(bundleCreateCmd, bundleAddCmd, bundleRemoveCmd) 107 | return bundleCmd 108 | } 109 | -------------------------------------------------------------------------------- /cmd/cli-config.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "io" 5 | "os" 6 | "strings" 7 | 8 | "github.com/gatecheckdev/configkit" 9 | "github.com/gatecheckdev/gatecheck/pkg/gatecheck" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | var ( 14 | metadataFlagUsage = "flag_usage" 15 | metadataFieldType = "field_type" 16 | metadataActionInputName = "action_input_name" 17 | // metadataRequired = "required" 18 | ) 19 | 20 | type metaConfig struct { 21 | BundleTag configkit.MetaField 22 | EPSSURL configkit.MetaField 23 | KEVURL configkit.MetaField 24 | EPSSFilename configkit.MetaField 25 | KEVFilename configkit.MetaField 26 | Verbose configkit.MetaField 27 | Silent configkit.MetaField 28 | ConfigFilename configkit.MetaField 29 | 
Audit configkit.MetaField 30 | BundleTagValue []string 31 | bundleFile *os.File 32 | targetFile *os.File 33 | epssFile *os.File 34 | kevFile *os.File 35 | listSrcReader io.Reader 36 | listSrcName string 37 | listFormat string 38 | gatecheckConfig *gatecheck.Config 39 | // listAll bool 40 | // configOutputWriter io.Writer 41 | // configOutputFormat string 42 | } 43 | 44 | var RuntimeConfig = metaConfig{ 45 | BundleTag: configkit.MetaField{ 46 | FieldName: "BundleTag", 47 | EnvKey: "GATECHECK_BUNDLE_TAG", 48 | DefaultValue: []string{}, 49 | FlagValueP: new([]string), 50 | EnvToValueFunc: func(s string) any { 51 | return strings.Split(s, ",") 52 | }, 53 | Metadata: map[string]string{ 54 | metadataFlagUsage: "file properties for metadata", 55 | metadataFieldType: "string", 56 | metadataActionInputName: "bundle_tag", 57 | }, 58 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 59 | valueP := f.FlagValueP.(*[]string) 60 | usage := f.Metadata[metadataFlagUsage] 61 | cmd.Flags().StringSliceVarP(valueP, "tag", "t", []string{}, usage) 62 | }, 63 | }, 64 | EPSSURL: configkit.MetaField{ 65 | FieldName: "EPSSURL", 66 | EnvKey: "GATECHECK_EPSS_URL", 67 | DefaultValue: "", 68 | FlagValueP: new(string), 69 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 70 | valueP := f.FlagValueP.(*string) 71 | usage := f.Metadata[metadataFlagUsage] 72 | cmd.Flags().StringVar(valueP, "epss-url", "", usage) 73 | }, 74 | Metadata: map[string]string{ 75 | metadataFlagUsage: "The url for the FIRST.org EPSS API (\"\" will use FIRST.org official API)", 76 | metadataFieldType: "string", 77 | metadataActionInputName: "epss_url", 78 | }, 79 | }, 80 | KEVURL: configkit.MetaField{ 81 | FieldName: "KEVURL", 82 | EnvKey: "GATECHECK_KEV_URL", 83 | DefaultValue: "", 84 | FlagValueP: new(string), 85 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 86 | valueP := f.FlagValueP.(*string) 87 | usage := f.Metadata[metadataFlagUsage] 88 | 
cmd.Flags().StringVar(valueP, "kev-url", "", usage) 89 | }, 90 | Metadata: map[string]string{ 91 | metadataFlagUsage: "The url for the CISA KEV API (\"\" will use CISA Official API)", 92 | metadataFieldType: "string", 93 | metadataActionInputName: "kev_url", 94 | }, 95 | }, 96 | EPSSFilename: configkit.MetaField{ 97 | FieldName: "EPSSFilename", 98 | EnvKey: "GATECHECK_EPSS_FILENAME", 99 | DefaultValue: "", 100 | FlagValueP: new(string), 101 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 102 | valueP := f.FlagValueP.(*string) 103 | usage := f.Metadata[metadataFlagUsage] 104 | cmd.Flags().StringVar(valueP, "epss-filename", "", usage) 105 | }, 106 | Metadata: map[string]string{ 107 | metadataFlagUsage: "the filename for a FIRST.org EPSS csv file", 108 | metadataFieldType: "string", 109 | metadataActionInputName: "epss_filename", 110 | }, 111 | }, 112 | KEVFilename: configkit.MetaField{ 113 | FieldName: "KEVFilename", 114 | EnvKey: "GATECHECK_KEV_FILENAME", 115 | DefaultValue: "", 116 | FlagValueP: new(string), 117 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 118 | valueP := f.FlagValueP.(*string) 119 | usage := f.Metadata[metadataFlagUsage] 120 | cmd.Flags().StringVar(valueP, "kev-filename", "", usage) 121 | }, 122 | Metadata: map[string]string{ 123 | metadataFlagUsage: "the filename for a CISA KEV json file", 124 | metadataFieldType: "string", 125 | metadataActionInputName: "kev_filename", 126 | }, 127 | }, 128 | Verbose: configkit.MetaField{ 129 | FieldName: "Verbose", 130 | EnvKey: "GATECHECK_VERBOSE", 131 | DefaultValue: false, 132 | FlagValueP: new(bool), 133 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 134 | valueP := f.FlagValueP.(*bool) 135 | usage := f.Metadata[metadataFlagUsage] 136 | cmd.PersistentFlags().BoolVarP(valueP, "verbose", "v", false, usage) 137 | }, 138 | Metadata: map[string]string{ 139 | metadataFlagUsage: "log level set to debug", 140 | metadataFieldType: "bool", 141 |
metadataActionInputName: "verbose", 142 | }, 143 | }, 144 | Silent: configkit.MetaField{ 145 | FieldName: "Silent", 146 | EnvKey: "GATECHECK_SILENT", 147 | DefaultValue: false, 148 | FlagValueP: new(bool), 149 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 150 | valueP := f.FlagValueP.(*bool) 151 | usage := f.Metadata[metadataFlagUsage] 152 | cmd.PersistentFlags().BoolVar(valueP, "silent", false, usage) 153 | }, 154 | Metadata: map[string]string{ 155 | metadataFlagUsage: "log level set to only warnings & errors", 156 | metadataFieldType: "bool", 157 | metadataActionInputName: "silent", 158 | }, 159 | }, 160 | ConfigFilename: configkit.MetaField{ 161 | FieldName: "ConfigFilename", 162 | EnvKey: "GATECHECK_CONFIG_FILENAME", 163 | DefaultValue: "", 164 | FlagValueP: new(string), 165 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 166 | valueP := f.FlagValueP.(*string) 167 | usage := f.Metadata[metadataFlagUsage] 168 | cmd.PersistentFlags().StringVarP(valueP, "config", "f", "", usage) 169 | }, 170 | Metadata: map[string]string{ 171 | metadataFlagUsage: "a validation configuration file", 172 | metadataFieldType: "string", 173 | metadataActionInputName: "config_filename", 174 | }, 175 | }, 176 | Audit: configkit.MetaField{ 177 | FieldName: "Audit", 178 | EnvKey: "GATECHECK_AUDIT", 179 | DefaultValue: false, 180 | FlagValueP: new(bool), 181 | CobraSetupFunc: func(f configkit.MetaField, cmd *cobra.Command) { 182 | valueP := f.FlagValueP.(*bool) 183 | usage := f.Metadata[metadataFlagUsage] 184 | cmd.PersistentFlags().BoolVarP(valueP, "audit", "a", false, usage) 185 | }, 186 | Metadata: map[string]string{ 187 | metadataFlagUsage: "audit mode - will run all rules but wil always exit 0 for validation failures", 188 | metadataFieldType: "bool", 189 | metadataActionInputName: "audit", 190 | }, 191 | }, 192 | } 193 | -------------------------------------------------------------------------------- /cmd/config.go: 
-------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/gatecheckdev/gatecheck/pkg/gatecheck" 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var configCmd = &cobra.Command{ 11 | Use: "config", 12 | Short: "manage the gatecheck configuration file", 13 | } 14 | 15 | var configInitCmd = &cobra.Command{ 16 | Use: "init", 17 | Short: "output an example configuration file", 18 | RunE: func(cmd *cobra.Command, args []string) error { 19 | output, _ := cmd.Flags().GetString("output") 20 | 21 | switch output { 22 | case "json", ".json": 23 | output = ".json" 24 | case "toml", ".toml": 25 | output = ".toml" 26 | case "yaml", "yml", ".yaml", ".yml": 27 | output = ".yaml" 28 | default: 29 | return errors.New("invalid --output format, must be json,toml,yaml, or yml") 30 | } 31 | 32 | return gatecheck.NewConfigEncoder(cmd.OutOrStdout(), output).Encode(gatecheck.NewDefaultConfig()) 33 | }, 34 | } 35 | 36 | var configConvertCmd = &cobra.Command{ 37 | Use: "convert", 38 | Short: "convert and existing configuration file into another format", 39 | PreRunE: func(cmd *cobra.Command, args []string) error { 40 | configFilename, _ := cmd.Flags().GetString("file") 41 | RuntimeConfig.gatecheckConfig = &gatecheck.Config{} 42 | err := gatecheck.NewConfigDecoder(configFilename).Decode(RuntimeConfig.gatecheckConfig) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | return nil 48 | }, 49 | RunE: func(cmd *cobra.Command, args []string) error { 50 | output, _ := cmd.Flags().GetString("output") 51 | 52 | switch output { 53 | case "json", ".json": 54 | output = ".json" 55 | case "toml", ".toml": 56 | output = ".toml" 57 | case "yaml", "yml", ".yaml", ".yml": 58 | output = ".yaml" 59 | default: 60 | return errors.New("invalid --output format, must be json, toml, yaml, or yml") 61 | } 62 | 63 | return gatecheck.NewConfigEncoder(cmd.OutOrStdout(), output).Encode(RuntimeConfig.gatecheckConfig) 64 | }, 65 | } 66 | 
67 | func newConfigCommand() *cobra.Command { 68 | configConvertCmd.Flags().StringP("file", "f", "gatecheck.yaml", "gatecheck validation config file") 69 | configConvertCmd.Flags().StringP("output", "o", "yaml", "Format to convert into formats=[json yaml yml toml]") 70 | configInitCmd.Flags().StringP("output", "o", "yaml", "Format to convert into formats=[json yaml yml toml]") 71 | 72 | _ = configConvertCmd.MarkFlagFilename("file", "json", "yaml", "yml", "toml") 73 | _ = configInitCmd.MarkFlagFilename("file", "json", "yaml", "yml", "toml") 74 | 75 | configCmd.AddCommand(configInitCmd, configConvertCmd) 76 | return configCmd 77 | } 78 | -------------------------------------------------------------------------------- /cmd/download.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/gatecheckdev/gatecheck/pkg/gatecheck" 5 | "github.com/spf13/cobra" 6 | ) 7 | 8 | var downloadCmd = &cobra.Command{ 9 | Use: "download", 10 | Short: "output data from supported APIs", 11 | } 12 | 13 | var downloadEPSSCmd = &cobra.Command{ 14 | Use: "epss", 15 | Short: "download epss data from FIRST API as csv to STDOUT", 16 | RunE: func(cmd *cobra.Command, args []string) error { 17 | url := RuntimeConfig.EPSSURL.Value().(string) 18 | return gatecheck.DownloadEPSS(cmd.OutOrStdout(), gatecheck.WithEPSSURL(url)) 19 | }, 20 | } 21 | 22 | var downloadKEVCmd = &cobra.Command{ 23 | Use: "kev", 24 | Short: "download kev catalog from CISA as json to STDOUT", 25 | RunE: func(cmd *cobra.Command, args []string) error { 26 | url := RuntimeConfig.KEVURL.Value().(string) 27 | return gatecheck.DownloadKEV(cmd.OutOrStdout(), gatecheck.WithKEVURL(url)) 28 | }, 29 | } 30 | 31 | func newDownloadCommand() *cobra.Command { 32 | RuntimeConfig.EPSSURL.SetupCobra(downloadEPSSCmd) 33 | RuntimeConfig.KEVURL.SetupCobra(downloadKEVCmd) 34 | downloadCmd.AddCommand(downloadEPSSCmd, downloadKEVCmd) 35 | return downloadCmd 36 | } 37 | 
-------------------------------------------------------------------------------- /cmd/gatecheck/main.go: -------------------------------------------------------------------------------- 1 | // Package main executes the CLI for gatecheck 2 | package main 3 | 4 | import ( 5 | "errors" 6 | "fmt" 7 | "log/slog" 8 | "os" 9 | "runtime" 10 | "time" 11 | 12 | "github.com/lmittmann/tint" 13 | 14 | "github.com/gatecheckdev/gatecheck/cmd" 15 | "github.com/gatecheckdev/gatecheck/pkg/gatecheck" 16 | ) 17 | 18 | const ( 19 | exitSystemFail int = -1 20 | exitOk int = 0 21 | exitValidationFail int = 1 22 | exitFileAccessFail int = 2 23 | ) 24 | 25 | // GatecheckVersion see CHANGELOG.md 26 | const GatecheckVersion = "[Not Provided]" 27 | 28 | // all variables here are provided as build-time arguments, with clear default values 29 | var ( 30 | cliVersion = "[Not Provided]" 31 | buildDate = "[Not Provided]" 32 | gitCommit = "[Not Provided]" 33 | gitDescription = "[Not Provided]" 34 | ) 35 | 36 | func main() { 37 | os.Exit(run()) 38 | } 39 | 40 | func run() int { 41 | cmd.ApplicationMetadata = gatecheck.ApplicationMetadata{ 42 | CLIVersion: cliVersion, 43 | GitCommit: gitCommit, 44 | BuildDate: buildDate, 45 | GitDescription: gitDescription, 46 | Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), 47 | GoVersion: runtime.Version(), 48 | Compiler: runtime.Compiler, 49 | } 50 | 51 | // Colorized logging output for the CLI 52 | logHandler := tint.NewHandler(os.Stderr, &tint.Options{Level: cmd.LogLeveler, TimeFormat: time.TimeOnly}) 53 | slog.SetDefault(slog.New(logHandler)) 54 | 55 | command := cmd.NewGatecheckCommand() 56 | 57 | err := command.Execute() 58 | if errors.Is(err, gatecheck.ErrValidationFailure) { 59 | return exitValidationFail 60 | } 61 | if err != nil { 62 | return exitSystemFail 63 | } 64 | return exitOk 65 | } 66 | -------------------------------------------------------------------------------- /cmd/list.go: 
-------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "log/slog" 7 | "os" 8 | "slices" 9 | "strings" 10 | 11 | "github.com/gatecheckdev/gatecheck/pkg/gatecheck" 12 | "github.com/spf13/cobra" 13 | ) 14 | 15 | var supportedTypes = []string{"grype", "semgrep", "gitleaks", "syft", "cyclonedx", "bundle", "gatecheck"} 16 | 17 | var listCmd = &cobra.Command{ 18 | Use: "list", 19 | Short: "print a table of the findings in a report or files in a gatecheck bundle", 20 | Aliases: []string{"ls", "print"}, 21 | PreRunE: func(cmd *cobra.Command, args []string) error { 22 | inputType, _ := cmd.Flags().GetString("input-type") 23 | if inputType == "" && len(args) == 0 { 24 | return errors.New("need either input-type for STDIN or filename as argument") 25 | } 26 | 27 | var err error = nil 28 | 29 | if len(args) == 0 { 30 | RuntimeConfig.listSrcReader = cmd.InOrStdin() 31 | RuntimeConfig.listSrcName = fmt.Sprintf("stdin:%s", inputType) 32 | } else { 33 | RuntimeConfig.listSrcReader, err = os.Open(args[0]) 34 | RuntimeConfig.listSrcName = args[0] 35 | } 36 | 37 | if err != nil { 38 | return err 39 | } 40 | 41 | RuntimeConfig.listFormat = "ascii" 42 | 43 | if markdownFlag, _ := cmd.Flags().GetBool("markdown"); markdownFlag { 44 | RuntimeConfig.listFormat = "markdown" 45 | } 46 | 47 | if epss, _ := cmd.Flags().GetBool("epss"); !epss { 48 | return nil 49 | } 50 | 51 | RuntimeConfig.epssFile = nil 52 | 53 | epssFilename := RuntimeConfig.EPSSFilename.Value().(string) 54 | 55 | if epssFilename == "" { 56 | return nil 57 | } 58 | 59 | RuntimeConfig.epssFile, err = os.Open(epssFilename) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | return nil 65 | }, 66 | RunE: func(cmd *cobra.Command, args []string) error { 67 | epss, _ := cmd.Flags().GetBool("epss") 68 | 69 | dst := cmd.OutOrStdout() 70 | src := RuntimeConfig.listSrcReader 71 | srcName := RuntimeConfig.listSrcName 72 | displayOpt := 
gatecheck.WithDisplayFormat(RuntimeConfig.listFormat) 73 | 74 | if !epss { 75 | return gatecheck.List(dst, src, srcName, displayOpt) 76 | } 77 | 78 | epssURL := RuntimeConfig.EPSSURL.Value().(string) 79 | epssFile := RuntimeConfig.epssFile 80 | 81 | // if file is nil, API will be used 82 | // if epssURL is empty, default API will be used 83 | epssOpt, err := gatecheck.WithEPSS(epssFile, epssURL) 84 | if err != nil { 85 | return err 86 | } 87 | return gatecheck.List(dst, src, srcName, displayOpt, epssOpt) 88 | }, 89 | } 90 | 91 | var listAllCmd = &cobra.Command{ 92 | Use: "list-all [FILE...]", 93 | Short: "list multiple report files", 94 | RunE: func(cmd *cobra.Command, args []string) error { 95 | epss, _ := cmd.Flags().GetBool("epss") 96 | markdown, _ := cmd.Flags().GetBool("markdown") 97 | slog.Debug("run list all", "epss", fmt.Sprintf("%v", epss), "markdown", fmt.Sprintf("%v", markdown)) 98 | 99 | for _, filename := range args { 100 | supportedFunc := func(s string) bool { 101 | return strings.Contains(filename, s) 102 | } 103 | cmd.Printf("%s\n", filename) 104 | if !slices.ContainsFunc(supportedTypes, supportedFunc) { 105 | slog.Warn("file not supported, skip", "filename", filename) 106 | continue 107 | } 108 | 109 | if _, err := os.Stat(filename); errors.Is(err, os.ErrNotExist) { 110 | slog.Error("file not found, skip", "filename", filename) 111 | continue 112 | } 113 | 114 | epssURL := RuntimeConfig.EPSSURL.Value().(string) 115 | epssFile := RuntimeConfig.epssFile 116 | 117 | opts := []gatecheck.ListOptionFunc{} 118 | displayOpt := gatecheck.WithDisplayFormat("ascii") 119 | if markdown { 120 | displayOpt = gatecheck.WithDisplayFormat("markdown") 121 | } 122 | opts = append(opts, displayOpt) 123 | 124 | if epss && slices.ContainsFunc([]string{"grype", "cyclonedx"}, supportedFunc) { 125 | epssOpt, err := gatecheck.WithEPSS(epssFile, epssURL) 126 | if err != nil { 127 | slog.Error("epss fetch failure, skip", "filename", filename, "error", err) 128 | continue 129 
| } 130 | opts = append(opts, epssOpt) 131 | } 132 | 133 | dst := cmd.OutOrStdout() 134 | src, err := os.Open(filename) 135 | if err != nil { 136 | slog.Error("cannot open file, skip", "filename", filename, "error", err) 137 | continue 138 | } 139 | 140 | err = gatecheck.List(dst, src, filename, opts...) 141 | if err != nil { 142 | slog.Error("cannot list report, skip", "filename", filename, "error", err) 143 | continue 144 | } 145 | 146 | } 147 | return nil 148 | }, 149 | } 150 | 151 | func newListAllCommand() *cobra.Command { 152 | listAllCmd.Flags().Bool("markdown", false, "print as a markdown table") 153 | listAllCmd.Flags().Bool("epss", false, "List with EPSS data") 154 | return listAllCmd 155 | } 156 | 157 | func newListCommand() *cobra.Command { 158 | listCmd.Flags().StringP("input-type", "i", "", "the input filetype if using STDIN [grype|semgrep|gitleaks|syft|bundle]") 159 | listCmd.Flags().Bool("markdown", false, "print as a markdown table") 160 | listCmd.Flags().Bool("epss", false, "List with EPSS data") 161 | RuntimeConfig.EPSSURL.SetupCobra(listCmd) 162 | RuntimeConfig.EPSSFilename.SetupCobra(listCmd) 163 | return listCmd 164 | } 165 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | // Package cmd contains the CLI code for Gatecheck 2 | // 3 | // # Organization Methodology 4 | // 5 | // The goal for this package is readability, ease of maintenance, and 6 | // separation of concerns for easier testing and debugging. 7 | // 8 | // newCommand functions should only build the command structure 9 | // to include flag, cli options, and viper bindings. 10 | // sub commands can also be included here as determined by the complexity 11 | // of the command. 12 | // 13 | // run functions are specific to cobra's runE functions 14 | // they handle parsing arguments, opening files, and returning errors early.
15 | // These commands eventually result in calls to functions in the package 16 | // github.com/gatecheckdev/gatecheck/pkg/gatecheck 17 | 18 | // The root file contains common helper functions used by other commands. 19 | // Major commands can be in separate files for ease of readability. 20 | package cmd 21 | 22 | import ( 23 | "log/slog" 24 | 25 | "github.com/gatecheckdev/gatecheck/pkg/gatecheck" 26 | "github.com/spf13/cobra" 27 | "github.com/spf13/viper" 28 | ) 29 | 30 | var ( 31 | ApplicationMetadata gatecheck.ApplicationMetadata 32 | LogLeveler *slog.LevelVar = &slog.LevelVar{} 33 | ) 34 | 35 | var gatecheckCmd = &cobra.Command{ 36 | Use: "gatecheck", 37 | Short: "Report validation tool", 38 | PersistentPreRun: func(cmd *cobra.Command, args []string) { 39 | verbose := RuntimeConfig.Verbose.Value().(bool) 40 | silent := RuntimeConfig.Silent.Value().(bool) 41 | 42 | switch { 43 | case verbose: 44 | LogLeveler.Set(slog.LevelDebug) 45 | slog.Debug("debug logging enabled") 46 | case silent: 47 | LogLeveler.Set(slog.LevelError) 48 | slog.Debug("silent logging enabled") 49 | } 50 | }, 51 | RunE: func(cmd *cobra.Command, args []string) error { 52 | versionFlag, _ := cmd.Flags().GetBool("version") 53 | if versionFlag { 54 | return versionCmd.RunE(cmd, args) 55 | } 56 | return nil 57 | }, 58 | } 59 | 60 | var versionCmd = &cobra.Command{ 61 | Use: "version", 62 | Short: "print version and build information", 63 | RunE: func(cmd *cobra.Command, args []string) error { 64 | _, err := ApplicationMetadata.WriteTo(cmd.OutOrStdout()) 65 | return err 66 | }, 67 | } 68 | 69 | // NewGatecheckCommand the root for all CLI commands 70 | func NewGatecheckCommand() *cobra.Command { 71 | RuntimeConfig.Verbose.SetupCobra(gatecheckCmd) 72 | RuntimeConfig.Silent.SetupCobra(gatecheckCmd) 73 | 74 | gatecheckCmd.MarkFlagsMutuallyExclusive("verbose", "silent") 75 | gatecheckCmd.Flags().Bool("version", false, "print version and build information") 76 | 77 | _ = viper.BindEnv("cli.audit",
"GATECHECK_CLI_AUDIT") 78 | 79 | _ = viper.BindEnv("cli.list.epss-file", "GATECHECK_EPSS_FILE") 80 | _ = viper.BindEnv("cli.validate.epss-file", "GATECHECK_EPSS_FILE") 81 | 82 | _ = viper.BindEnv("cli.validate.kev-file", "GATECHECK_KEV_FILE") 83 | 84 | _ = viper.BindEnv("api.epss-url", "GATECHECK_EPSS_URL") 85 | _ = viper.BindEnv("api.kev-url", "GATECHECK_KEV_URL") 86 | 87 | gatecheckCmd.SilenceUsage = true 88 | 89 | gatecheckCmd.AddCommand( 90 | versionCmd, 91 | newConfigCommand(), 92 | newListCommand(), 93 | newListAllCommand(), 94 | newBundleCommand(), 95 | newValidateCommand(), 96 | newDownloadCommand(), 97 | ) 98 | return gatecheckCmd 99 | } 100 | -------------------------------------------------------------------------------- /cmd/validate.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "os" 7 | 8 | "github.com/gatecheckdev/gatecheck/pkg/gatecheck" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | var validateCmd = &cobra.Command{ 13 | Use: "validate [FILE]", 14 | Short: "compare vulnerabilities to configured thresholds", 15 | Args: cobra.ExactArgs(1), 16 | PreRunE: func(cmd *cobra.Command, args []string) error { 17 | configFilename := RuntimeConfig.ConfigFilename.Value().(string) 18 | 19 | RuntimeConfig.gatecheckConfig = gatecheck.NewDefaultConfig() 20 | if configFilename != "" { 21 | err := gatecheck.NewConfigDecoder(configFilename).Decode(RuntimeConfig.gatecheckConfig) 22 | if err != nil { 23 | return err 24 | } 25 | } 26 | 27 | var err error 28 | 29 | epssFilename := RuntimeConfig.EPSSFilename.Value().(string) 30 | if epssFilename != "" { 31 | RuntimeConfig.epssFile, err = os.Open(epssFilename) 32 | } 33 | if err != nil { 34 | return err 35 | } 36 | 37 | kevFilename := RuntimeConfig.KEVFilename.Value().(string) 38 | if kevFilename != "" { 39 | RuntimeConfig.kevFile, err = os.Open(kevFilename) 40 | } 41 | if err != nil { 42 | return err 43 | } 44 | 45 | 
targetFilename := args[0] 46 | slog.Debug("open target file", "filename", targetFilename) 47 | RuntimeConfig.targetFile, err = os.Open(targetFilename) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | return nil 53 | }, 54 | RunE: func(cmd *cobra.Command, args []string) error { 55 | err := gatecheck.Validate( 56 | RuntimeConfig.gatecheckConfig, 57 | RuntimeConfig.targetFile, 58 | args[0], 59 | gatecheck.WithEPSSURL(RuntimeConfig.EPSSURL.Value().(string)), 60 | gatecheck.WithKEVURL(RuntimeConfig.KEVURL.Value().(string)), 61 | gatecheck.WithEPSSFile(RuntimeConfig.epssFile), // TODO: fix this 62 | gatecheck.WithKEVFile(RuntimeConfig.kevFile), 63 | ) 64 | 65 | audit := RuntimeConfig.Audit.Value().(bool) 66 | if audit && err != nil { 67 | slog.Error("validation failure in audit mode") 68 | _, err = fmt.Fprintln(cmd.ErrOrStderr(), err) 69 | return err 70 | } 71 | 72 | return err 73 | }, 74 | } 75 | 76 | func newValidateCommand() *cobra.Command { 77 | 78 | RuntimeConfig.ConfigFilename.SetupCobra(validateCmd) 79 | RuntimeConfig.EPSSFilename.SetupCobra(validateCmd) 80 | RuntimeConfig.KEVFilename.SetupCobra(validateCmd) 81 | RuntimeConfig.Audit.SetupCobra(validateCmd) 82 | 83 | return validateCmd 84 | } 85 | -------------------------------------------------------------------------------- /demos/bundle.tape: -------------------------------------------------------------------------------- 1 | Set Theme "Catppuccin Mocha" 2 | 3 | Set FontSize 14 4 | Set FontFamily "JetBrainsMonoNL Nerd Font Mono" 5 | Set Margin 20 6 | Set MarginFill "#89B4FA" 7 | Set BorderRadius 10 8 | 9 | Set Width 1800 10 | 11 | Output dist/gatecheck-bundle.gif 12 | 13 | Type "gatecheck ls grype-report.json | less" 14 | Sleep 1 15 | Enter 16 | Sleep 5 17 | Space 18 | Sleep 2 19 | Space 20 | Type "q" 21 | Enter 22 | Sleep 1 23 | 24 | Type "gatecheck ls semgrep-sast-report.json | less" 25 | Sleep 1 26 | Enter 27 | Sleep 5 28 | Space 29 | Sleep 2 30 | Space 31 | Type "q" 32 | Enter 33 | Sleep 1 34 | 35 | 
Type "gatecheck ls gitleaks-report.json | less" 36 | Sleep 1 37 | Enter 38 | Sleep 5 39 | Space 40 | Sleep 2 41 | Space 42 | Type "q" 43 | Enter 44 | Sleep 1 45 | 46 | Type "gatecheck bundle create gatecheck-bundle.tar.gz gatecheck.yaml" 47 | Sleep 1 48 | Enter 49 | 50 | Type "gatecheck bundle add gatecheck-bundle.tar.gz grype-report.json" 51 | Sleep 1 52 | Enter 53 | 54 | Type "gatecheck bundle add gatecheck-bundle.tar.gz semgrep-sast-report.json" 55 | Sleep 1 56 | Enter 57 | 58 | Type "gatecheck bundle add gatecheck-bundle.tar.gz gitleaks-report.json" 59 | Sleep 1 60 | Enter 61 | Sleep 4 62 | 63 | Type "gatecheck ls gatecheck-bundle.tar.gz" 64 | Sleep 1 65 | Enter 66 | Sleep 10 67 | -------------------------------------------------------------------------------- /demos/list.tape: -------------------------------------------------------------------------------- 1 | Set Theme "Catppuccin Mocha" 2 | 3 | Set FontSize 14 4 | Set FontFamily "JetBrainsMonoNL Nerd Font Mono" 5 | Set Margin 20 6 | Set MarginFill "#89B4FA" 7 | Set BorderRadius 10 8 | 9 | Set Width 1600 10 | 11 | Output dist/gatecheck-list.gif 12 | 13 | Type "grype ubuntu:latest -o json | gatecheck ls -i grype | less" 14 | Sleep 1 15 | Enter 16 | 17 | Sleep 15 18 | -------------------------------------------------------------------------------- /demos/validate.tape: -------------------------------------------------------------------------------- 1 | Set Theme "Catppuccin Mocha" 2 | 3 | Set FontSize 14 4 | Set FontFamily "JetBrainsMonoNL Nerd Font Mono" 5 | Set Margin 20 6 | Set MarginFill "#89B4FA" 7 | Set BorderRadius 10 8 | 9 | Set Width 1700 10 | 11 | Output dist/gatecheck-validate.gif 12 | 13 | Type "gatecheck ls grype-report.json | less" 14 | Sleep 1 15 | Enter 16 | Sleep 5 17 | Space 18 | Sleep 2 19 | Space 20 | Type "q" 21 | Enter 22 | Sleep 1 23 | 24 | Type "cat gatecheck-severity-limit.yaml" 25 | Sleep 1 26 | Enter 27 | Sleep 5 28 | 29 | Type "gatecheck validate -f gatecheck-severity-limit.yaml 
grype-report.json" 30 | Sleep 1 31 | Enter 32 | Sleep 8 33 | 34 | Type "cat gatecheck-epss-limit.yaml" 35 | Sleep 1 36 | Enter 37 | Sleep 5 38 | 39 | Type "gatecheck validate -f gatecheck-epss-limit.yaml grype-report.json" 40 | Sleep 1 41 | Enter 42 | Sleep 10 43 | -------------------------------------------------------------------------------- /demos/version.tape: -------------------------------------------------------------------------------- 1 | Set Theme "Catppuccin Mocha" 2 | 3 | Set FontSize 14 4 | Set FontFamily "JetBrainsMonoNL Nerd Font Mono" 5 | Set Margin 20 6 | Set MarginFill "#89B4FA" 7 | Set BorderRadius 10 8 | 9 | Output dist/gatecheck-version.gif 10 | 11 | Type "gatecheck version" 12 | 13 | Sleep 500ms 14 | 15 | Enter 16 | 17 | Sleep 10 18 | -------------------------------------------------------------------------------- /docs/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | [Gatecheck](./title-page.md) 4 | 5 | - [Installation](./installation.md) 6 | - [Usage](./usage.md) 7 | - [Introducing CLI v1](./cli-refactor.md) 8 | - [List Reports](./list-reports.md) 9 | - [Gatecheck Bundle](./gatecheck-bundle.md) 10 | - [Validation](./validation.md) 11 | - [Supported Reports](./supported-reports.md) 12 | - [Configuration](./configuration.md) 13 | -------------------------------------------------------------------------------- /docs/assets/gatecheck-logo-splash-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gatecheckdev/gatecheck/222be345966b3f3ab30d6f2c0e8003cc7ed67e86/docs/assets/gatecheck-logo-splash-dark.png -------------------------------------------------------------------------------- /docs/assets/screenshot-grype-list.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/gatecheckdev/gatecheck/222be345966b3f3ab30d6f2c0e8003cc7ed67e86/docs/assets/screenshot-grype-list.png -------------------------------------------------------------------------------- /docs/assets/screenshot-grype-table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gatecheckdev/gatecheck/222be345966b3f3ab30d6f2c0e8003cc7ed67e86/docs/assets/screenshot-grype-table.png -------------------------------------------------------------------------------- /docs/cli-refactor.md: -------------------------------------------------------------------------------- 1 | # Gatecheck Refactor 2 | 3 | ## Package Updates 4 | 5 | The Async Decoder is no longer used for a number of reasons, mostly for code readability. 6 | The user experience improvements and performance improvements were not enough to justify the complexity of the code 7 | required to keep it. 8 | 9 | ## Config Updates 10 | 11 | The new configuration file is more expressive than the original with the ability to selectively enable/disable certain 12 | rules. 13 | 14 | See the [Configuration Guide](./configuration.md) for details. 15 | 16 | ## New CLI 17 | 18 | Gatecheck is currently going through a refactor which will give the CLI some much-needed love. 19 | The more streamlined CLI will reduce the complexity with usage and provide a clear use case for every command. 20 | 21 | 22 | ## Deprecation Schedule 23 | 24 | The existing Gatecheck CLI is now considered deprecated; however, users can still access the legacy CLI by 25 | setting the environment variable `GATECHECK_FF_LEGACY_CLI_ENABLED=1` as of version 0.4.0. 26 | 27 | This Legacy CLI and packages are scheduled to be removed after version 0.5.0.
28 | -------------------------------------------------------------------------------- /docs/configuration.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | ## Header 4 | 5 | ```yaml 6 | # The configuration version, reserved for future use but not required in v1 7 | version: "1" 8 | # Option metadata for the config that doesn't impact functionality 9 | metadata: 10 | tags: 11 | - auto generated from CLI 12 | ``` 13 | 14 | ## Grype Configuration 15 | 16 | ```yaml 17 | grype: 18 | # Severity Limit Rule sets a limit for how many vulnerabilities are allowed in a report 19 | # each severity level can have a different limit 20 | severityLimit: 21 | critical: 22 | enabled: false 23 | limit: 0 24 | high: 25 | enabled: false 26 | limit: 0 27 | medium: 28 | enabled: false 29 | limit: 0 30 | low: 31 | enabled: false 32 | limit: 0 33 | # EPSS Limit Rule sets a limit for the max score allowed for each vulnerability 34 | epssLimit: 35 | enabled: false 36 | score: 0 37 | # KEV Limit Rule fails validation if any vulnerability matches to the 38 | # Known Exploited Vulnerability Catalog 39 | kevLimitEnabled: false 40 | # CVE Limit Rule fails validation if any vulnerability ID matches 41 | # to any CVE in this list 42 | cveLimit: 43 | enabled: false 44 | cves: 45 | - ID: CVE-example-2024-1 46 | Metadata: 47 | Tags: 48 | - Some example tag 49 | # EPSS Risk Acceptance Rule skips validation for vulnerabilities with 50 | # EPSS score less than this score limit 51 | epssRiskAcceptance: 52 | enabled: false 53 | score: 0 54 | # CVE Risk Acceptance Rule skips validation for vulnerability ID that matches 55 | cveRiskAcceptance: 56 | enabled: false 57 | cves: 58 | - ID: CVE-example-2024-2 59 | Metadata: 60 | Tags: 61 | - Some example tag 62 | ``` 63 | 64 | ## Cyclonedx Configuration 65 | 66 | ```yaml 67 | cyclonedx: 68 | # Severity Limit Rule sets a limit for how many vulnerabilities are allowed in a report 69 | # each severity 
level can have a different limit 70 | severityLimit: 71 | critical: 72 | enabled: false 73 | limit: 0 74 | high: 75 | enabled: false 76 | limit: 0 77 | medium: 78 | enabled: false 79 | limit: 0 80 | low: 81 | enabled: false 82 | limit: 0 83 | # EPSS Limit Rule sets a limit for the max score allowed for each vulnerability 84 | epssLimit: 85 | enabled: false 86 | score: 0 87 | # KEV Limit Rule fails validation if any vulnerability matches to the 88 | # Known Exploited Vulnerability Catalog 89 | kevLimitEnabled: false 90 | # CVE Limit Rule fails validation if any vulnerability ID matches 91 | # to any CVE in this list 92 | cveLimit: 93 | enabled: false 94 | cves: [] 95 | # EPSS Risk Acceptance Rule skips validation for vulnerabilities with 96 | # EPSS score less than this score limit 97 | epssRiskAcceptance: 98 | enabled: false 99 | score: 0 100 | # CVE Risk Acceptance Rule skips validation for vulnerability ID that matches 101 | cveRiskAcceptance: 102 | enabled: false 103 | cves: [] 104 | ``` 105 | 106 | ## Semgrep Configuration 107 | 108 | ```yaml 109 | semgrep: 110 | # Severity Limits can be applied for each level 111 | # if there are more findings than the limit permits, 112 | # it will result in validation failure 113 | severityLimit: 114 | error: 115 | enabled: false 116 | limit: 0 117 | warning: 118 | enabled: false 119 | limit: 0 120 | info: 121 | enabled: false 122 | limit: 0 123 | # Impact Risk Acceptance permits findings based 124 | # on their impact level 125 | impactRiskAcceptance: 126 | enabled: false 127 | high: false 128 | medium: false 129 | low: false 130 | ``` 131 | 132 | ## GitLeaks Configuration 133 | 134 | GitLeaks secrets detection validation can be turned on or off. 135 | When the limit is enabled, the presence of any non-ignored finding will result in a validation failure.
136 | 137 | ```yaml 138 | gitleaks: 139 | limitEnabled: false 140 | ``` 141 | -------------------------------------------------------------------------------- /docs/gatecheck-bundle.md: -------------------------------------------------------------------------------- 1 | # Gatecheck Bundle 2 | 3 | in progress 4 | -------------------------------------------------------------------------------- /docs/installation.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | ## Prerequisites 4 | 5 | Before installing Gatecheck, make sure your system meets the following requirements: 6 | 7 | - **Operating System**: Windows, macOS, or Linux. 8 | - **Go**: Version 1.22.0 or newer (only required if you plan to build the application from source). 9 | 10 | ## Installation Options 11 | 12 | Gatecheck can be installed either by downloading the pre-compiled binary for your operating system or by compiling 13 | the source code. 14 | 15 | ### Option 1: Installing from Binary 16 | 17 | 1. **Download the Binary**: Visit the Gatecheck GitHub releases page at 18 | `https://github.com/gatecheckdev/gatecheck/releases` and download the latest version for your operating system. 19 | 2. **Unpack the Binary** (if necessary): For Windows and Linux, you may need to unpack the `.zip` or `.tar.gz` file. 20 | 3. **Move the Binary to a Bin Directory**: 21 | - **Windows**: Move `gatecheck.exe` to a directory within your PATH, such as `C:\Windows`. 22 | - **macOS/Linux**: Move `gatecheck` to a location in your PATH, such as `/usr/local/bin`. 23 | You can use the command `mv gatecheck /usr/local/bin` in the terminal. 24 | 25 | 4. **Verify Installation**: Open a terminal or command prompt and type `gatecheck --version` to ensure the application 26 | is installed correctly. 27 | 28 | ### Option 2: Building from Source 29 | 30 | 1. 
**Clone the Repository**: Clone the Gatecheck repository to your local machine using Git: 31 | ``` 32 | git clone https://github.com/gatecheckdev/gatecheck 33 | ``` 34 | 2. **Navigate to the Repository Directory**: 35 | ``` 36 | cd gatecheck 37 | ``` 38 | 3. **Build the Application**: Run the following command to compile Gatecheck with appropriate load flags: 39 | ``` 40 | go build -ldflags="-X 'main.cliVersion=$(git describe --tags)' -X 'main.gitCommit=$(git rev-parse HEAD)' -X 'main.buildDate=$(date -u +%Y-%m-%dT%H:%M:%SZ)' -X 'main.gitDescription=$(git log -1 --pretty=%B)'" -o ./bin ./cmd/gatecheck 41 | ``` 42 | 4. **Move the Binary to a Bin Directory** (as described in Option 1, step 3). 43 | 44 | 5. **Verify Installation**: Check the application version to confirm successful installation: 45 | ``` 46 | ./gatecheck --version 47 | ``` 48 | 49 | ### Option 3: Use Just Recipe 50 | 51 | [Just Command Runner](https://github.com/casey/just) 52 | 53 | ```shell 54 | git clone https://github.com/gatecheckdev/gatecheck 55 | cd gatecheck 56 | just install 57 | ``` 58 | 59 | Will default to `/usr/local/bin` as the install directory, but this can be changed. 60 | 61 | ```shell 62 | INSTALL_DIR='custom/location/bin' just install 63 | ``` 64 | 65 | ## Post-Installation Steps 66 | 67 | After installing Gatecheck, you can begin using it by typing `gatecheck` followed by the necessary commands and 68 | options in your terminal or command prompt. For a list of available commands and their descriptions, use: 69 | 70 | ``` 71 | gatecheck --help 72 | ``` 73 | 74 | ## Troubleshooting 75 | 76 | If you encounter any issues during the installation process, ensure that you have the correct permissions to 77 | install software on your system and that your Go environment is properly configured. 78 | For further assistance, please visit the Gatecheck GitHub issues page or contact support. 
79 | 80 | For more information on using Gatecheck, refer to the user documentation or the GitHub repository for examples and 81 | advanced usage. 82 | 83 | -------------------------------------------------------------------------------- /docs/list-reports.md: -------------------------------------------------------------------------------- 1 | # List Reports 2 | 3 | Most scanning tools have options to display a formatted report. 4 | Take Grype for example: 5 | 6 | ```shell 7 | grype bkimminich/juice-shop:latest -o table 8 | ``` 9 | ![Screenshot Example Grype Table](assets/screenshot-grype-table.png) 10 | 11 | The report can be printed in a formatted table instead of 6k line JSON file. 12 | 13 | JSON can be piped directly into gatecheck for supported reports. 14 | 15 | ```shell 16 | grype bkimminich/juice-shop:latest -o json | gatecheck ls -i grype 17 | ``` 18 | Or from an existing report 19 | 20 | ```shell 21 | gatecheck ls grype-scan-report.json 22 | ``` 23 | 24 | ![Screenshot Example List](assets/screenshot-grype-list.png) 25 | -------------------------------------------------------------------------------- /docs/supported-reports.md: -------------------------------------------------------------------------------- 1 | # Supported Reports 2 | -------------------------------------------------------------------------------- /docs/title-page.md: -------------------------------------------------------------------------------- 1 | {{#include ../README.md}} 2 | -------------------------------------------------------------------------------- /docs/usage.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | Gatecheck is designed as a lightweight CLI utility first with configuration flexibility to work in any environment. 4 | 5 | Developers can use it on the command line to summarize lengthy reports for a quick view or run an audit locally before 6 | pushing code to be handed off to CI/CD. 
7 | 8 | It simplifies cross-referencing with [FIRST's Exploit Prediction Scoring System (EPSS)](https://www.first.org/epss/) 9 | API to match CVEs to vulnerability scores that provide more context than just 'critical, high, medium, etc.'. 10 | 11 | Gatecheck also has some additional features to make bundling reports for attestation and archiving easy. 12 | -------------------------------------------------------------------------------- /docs/validation.md: -------------------------------------------------------------------------------- 1 | # Validation 2 | 3 | ## Rules Order of Precedence 4 | 5 | 1. **CVE Limit**: Any Matching vulnerabilities will fail validation 6 | 2. **CVE Risk Acceptance**: Any Matching vulnerabilities will remove the CVE from subsequent rules, risk accepted 7 | 3. **KEV Limit**: Any Matching vulnerabilities will fail validation 8 | 4. **EPSS Risk Acceptance**: Any matching vulnerabilities that are below the risk acceptance will be removed from subsequent rules, risk accepted 9 | 5. **EPSS Limit**: Any matching vulnerabilities that exceed the limit will fail validation 10 | 6. 
**Severity Limit**: A count of severities that exceed the limit in any severity category will fail validation 11 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/gatecheckdev/gatecheck 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.23.3 6 | 7 | require ( 8 | github.com/dustin/go-humanize v1.0.1 9 | github.com/easy-up/go-coverage v0.0.0-20241018034313-3de592d59a78 10 | github.com/gatecheckdev/configkit v0.0.0-20240517005856-da14389dd06a 11 | github.com/lmittmann/tint v1.0.7 12 | github.com/olekukonko/tablewriter v0.0.5 13 | github.com/pelletier/go-toml/v2 v2.2.4 14 | github.com/spf13/cobra v1.9.1 15 | github.com/spf13/viper v1.20.1 16 | gopkg.in/yaml.v3 v3.0.1 17 | ) 18 | 19 | require ( 20 | github.com/fsnotify/fsnotify v1.9.0 // indirect 21 | github.com/go-viper/mapstructure/v2 v2.2.1 // indirect 22 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 23 | github.com/mattn/go-runewidth v0.0.16 // indirect 24 | github.com/rivo/uniseg v0.4.7 // indirect 25 | github.com/rogpeppe/go-internal v1.12.0 // indirect 26 | github.com/sagikazarmark/locafero v0.9.0 // indirect 27 | github.com/sourcegraph/conc v0.3.0 // indirect 28 | github.com/spf13/afero v1.14.0 // indirect 29 | github.com/spf13/cast v1.7.1 // indirect 30 | github.com/spf13/pflag v1.0.6 // indirect 31 | github.com/subosito/gotenv v1.6.0 // indirect 32 | go.uber.org/multierr v1.11.0 // indirect 33 | golang.org/x/sys v0.32.0 // indirect 34 | golang.org/x/text v0.24.0 // indirect 35 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 36 | ) 37 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 2 | github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 5 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 6 | github.com/easy-up/go-coverage v0.0.0-20241018034313-3de592d59a78 h1:e2x+TfIgebN3zfr8wGqAYI9lK4ql7Rut6OTEhBmJr5k= 7 | github.com/easy-up/go-coverage v0.0.0-20241018034313-3de592d59a78/go.mod h1:fsSINOc273zPnsBaKNjNffZXZpicAArpv/cTiFYgPys= 8 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 9 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 10 | github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= 11 | github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 12 | github.com/gatecheckdev/configkit v0.0.0-20240517005856-da14389dd06a h1:SHelO0R65cDxh2CiLHvnvvLi1fkBkO0fpo1g3/eRerQ= 13 | github.com/gatecheckdev/configkit v0.0.0-20240517005856-da14389dd06a/go.mod h1:bS1zFCUnYr3X/8Fd4qWKRnpeD/wawvfQo+HpzVbiX4A= 14 | github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= 15 | github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= 16 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 17 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 18 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 19 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 20 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 21 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 22 | github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 23 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 24 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 25 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 26 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 27 | github.com/lmittmann/tint v1.0.7 h1:D/0OqWZ0YOGZ6AyC+5Y2kD8PBEzBk6rFHVSfOqCkF9Y= 28 | github.com/lmittmann/tint v1.0.7/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= 29 | github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= 30 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= 31 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 32 | github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= 33 | github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= 34 | github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= 35 | github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= 36 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 37 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 38 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 39 | github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= 40 | github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 41 | github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= 42 | github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= 43 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 44 | 
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k= 45 | github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= 46 | github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= 47 | github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= 48 | github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= 49 | github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= 50 | github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= 51 | github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= 52 | github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= 53 | github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= 54 | github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= 55 | github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 56 | github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= 57 | github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= 58 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 59 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 60 | github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= 61 | github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 62 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 63 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 64 | golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= 65 | golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 66 | golang.org/x/text v0.24.0 
h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= 67 | golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= 68 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 69 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 70 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 71 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 72 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 73 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | INSTALL_DIR := env('INSTALL_DIR', '/usr/local/bin') 2 | 3 | # build gatecheck binary 4 | build: 5 | mkdir -p bin 6 | go build -ldflags="-X 'main.cliVersion=$(git describe --tags)' -X 'main.gitCommit=$(git rev-parse HEAD)' -X 'main.buildDate=$(date -u +%Y-%m-%dT%H:%M:%SZ)' -X 'main.gitDescription=$(git log -1 --pretty=%B)'" -o ./bin ./cmd/gatecheck 7 | 8 | # build and install binary 9 | install: build 10 | cp ./bin/gatecheck {{ INSTALL_DIR }}/gatecheck 11 | 12 | # unit testing with coverage 13 | test: 14 | go test -cover ./... 15 | 16 | # golangci-lint view only 17 | lint: 18 | golangci-lint run --fast 19 | 20 | # golangci-lint fix linting errors and format if possible 21 | fix: 22 | golangci-lint run --fast --fix 23 | 24 | release-snapshot: 25 | goreleaser release --snapshot --rm-dist 26 | 27 | release: 28 | goreleaser release --rm-dist 29 | 30 | upgrade: 31 | git status --porcelain | grep -q . && echo "Repository is dirty, commit changes before upgrading." && exit 1 || exit 0 32 | go get -u ./... 
33 | go mod tidy 34 | 35 | # Locally serve documentation 36 | serve-docs: 37 | mdbook serve 38 | -------------------------------------------------------------------------------- /pkg/archive/bundle.go: -------------------------------------------------------------------------------- 1 | // Package archive provides the logic for Gatecheck Bundles 2 | package archive 3 | 4 | import ( 5 | "archive/tar" 6 | "bytes" 7 | "compress/gzip" 8 | "crypto/sha256" 9 | "encoding/hex" 10 | "encoding/json" 11 | "errors" 12 | "fmt" 13 | "io" 14 | "log/slog" 15 | "os" 16 | "sort" 17 | "strings" 18 | "time" 19 | 20 | "github.com/olekukonko/tablewriter" 21 | 22 | "github.com/dustin/go-humanize" 23 | "github.com/gatecheckdev/gatecheck/pkg/format" 24 | ) 25 | 26 | // FileType in plain text 27 | const FileType = "Gatecheck Bundle" 28 | 29 | // BundleVersion the version support by this archive format 30 | const BundleVersion = "1" 31 | 32 | // ManifestFilename the file name to be used as a default 33 | const ManifestFilename = "gatecheck-manifest.json" 34 | 35 | // DefaultBundleFilename the bundle name to be used as a default 36 | const DefaultBundleFilename = "gatecheck-bundle.tar.gz" 37 | 38 | // Manifest is created and loaded into a bundle which contains information on the files 39 | type Manifest struct { 40 | Created time.Time `json:"createdAt"` 41 | Version string `json:"version"` 42 | Files map[string]fileDescriptor `json:"files"` 43 | } 44 | 45 | type fileDescriptor struct { 46 | Added time.Time `json:"addedAt"` 47 | // Deprecated: use tags instead of properties 48 | Properties map[string]string `json:"properties"` 49 | Tags []string `json:"tags"` 50 | // Deprecated: assume file label has the file type 51 | FileType string `json:"fileType"` 52 | Digest string `json:"digest"` 53 | } 54 | 55 | // Bundle uses tar and gzip to collect reports and files into a single file 56 | type Bundle struct { 57 | content map[string][]byte 58 | manifest Manifest 59 | } 60 | 61 | // NewBundle ... 
62 | func NewBundle() *Bundle { 63 | return &Bundle{ 64 | content: make(map[string][]byte), 65 | manifest: Manifest{Created: time.Now(), Version: BundleVersion, Files: make(map[string]fileDescriptor)}, 66 | } 67 | } 68 | 69 | // Manifest generated by the bundle 70 | func (b *Bundle) Manifest() Manifest { 71 | return b.manifest 72 | } 73 | 74 | // WriteFileTo Used to write files inside of the bundle to a writer 75 | func (b *Bundle) WriteFileTo(w io.Writer, fileLabel string) (int64, error) { 76 | fileBytes, ok := b.content[fileLabel] 77 | if !ok { 78 | return 0, fmt.Errorf("gatecheck bundle: Label '%s' not found in bundle", fileLabel) 79 | } 80 | return bytes.NewReader(fileBytes).WriteTo(w) 81 | } 82 | 83 | func (b *Bundle) FileBytes(fileLabel string) []byte { 84 | fileBytes, ok := b.content[fileLabel] 85 | if !ok { 86 | slog.Warn("file label not found in bundle", "file_label", fileLabel) 87 | } 88 | return fileBytes 89 | } 90 | 91 | // FileSize get the file size for a specific label 92 | func (b *Bundle) FileSize(fileLabel string) int { 93 | fileBytes, ok := b.content[fileLabel] 94 | slog.Debug("bundle calculate file size", "label", fileLabel, "content_in_bundle", ok) 95 | if !ok { 96 | return 0 97 | } 98 | return len(fileBytes) 99 | } 100 | 101 | // AddFrom reads files into the bundle 102 | func (b *Bundle) AddFrom(r io.Reader, label string, properties map[string]string) error { 103 | hasher := sha256.New() 104 | p, err := io.ReadAll(r) 105 | _, _ = bytes.NewReader(p).WriteTo(hasher) 106 | if err != nil { 107 | return err 108 | } 109 | digest := fmt.Sprintf("%x", hasher.Sum(nil)) 110 | 111 | b.manifest.Files[label] = fileDescriptor{Added: time.Now(), Properties: properties, Digest: digest} 112 | 113 | b.content[label] = p 114 | return nil 115 | } 116 | 117 | func (b *Bundle) Add(content []byte, label string, tags []string) { 118 | hasher := sha256.New() 119 | n, hashErr := hasher.Write(content) 120 | slog.Debug("bundle add hash content", "error", hashErr, 
"bytes_hashed", n) 121 | digest := hex.EncodeToString(hasher.Sum(nil)) 122 | 123 | b.manifest.Files[label] = fileDescriptor{ 124 | Added: time.Now(), 125 | Tags: tags, 126 | Digest: digest, 127 | } 128 | 129 | b.content[label] = content 130 | } 131 | 132 | // Remove a file from the bundle and manifest by label 133 | // 134 | // If the file doesn't exist, it will log a warning 135 | func (b *Bundle) Remove(label string) { 136 | if _, ok := b.content[label]; !ok { 137 | slog.Error("file does not exist", "label", label) 138 | } 139 | delete(b.content, label) 140 | delete(b.manifest.Files, label) 141 | } 142 | 143 | // Delete will remove files from the bundle by label 144 | // 145 | // Deprecated: use Remove 146 | func (b *Bundle) Delete(label string) { 147 | delete(b.content, label) 148 | delete(b.manifest.Files, label) 149 | } 150 | 151 | func (b *Bundle) Content() string { 152 | matrix := format.NewSortableMatrix(make([][]string, 0), 0, format.AlphabeticLess) 153 | 154 | for label, descriptor := range b.Manifest().Files { 155 | fileSize := humanize.Bytes(uint64(b.FileSize(label))) 156 | tags := strings.Join(descriptor.Tags, ", ") 157 | row := []string{label, descriptor.Digest, tags, fileSize} 158 | matrix.Append(row) 159 | } 160 | 161 | sort.Sort(matrix) 162 | buf := new(bytes.Buffer) 163 | header := []string{"Label", "Digest", "Tags", "Size"} 164 | table := tablewriter.NewWriter(buf) 165 | table.SetHeader(header) 166 | matrix.Table(table) 167 | table.Render() 168 | return buf.String() 169 | } 170 | 171 | func TarGzipBundle(dst io.Writer, bundle *Bundle) (int64, error) { 172 | if bundle == nil { 173 | return 0, errors.New("cannot write nil bundle") 174 | } 175 | tarballBuffer := new(bytes.Buffer) 176 | tarWriter := tar.NewWriter(tarballBuffer) 177 | manifestBytes, _ := json.Marshal(bundle.manifest) 178 | _ = bundle.AddFrom(bytes.NewReader(manifestBytes), "gatecheck-manifest.json", nil) 179 | 180 | for label, data := range bundle.content { 181 | // Using bytes.Buffer 
so IO errors are unlikely 182 | _ = tarWriter.WriteHeader(&tar.Header{Name: label, Size: int64(len(data)), Mode: int64(os.FileMode(0o666))}) 183 | _, _ = bytes.NewReader(data).WriteTo(tarWriter) 184 | } 185 | if err := tarWriter.Close(); err != nil { 186 | return 0, err 187 | } 188 | 189 | gzipWriter := gzip.NewWriter(dst) 190 | n, err := tarballBuffer.WriteTo(gzipWriter) 191 | if err != nil { 192 | return n, err 193 | } 194 | err = gzipWriter.Close() 195 | 196 | return n, err 197 | } 198 | 199 | func UntarGzipBundle(src io.Reader, bundle *Bundle) error { 200 | gzipReader, err := gzip.NewReader(src) 201 | if err != nil { 202 | slog.Error("failed to create new gzip reader") 203 | return err 204 | } 205 | tarReader := tar.NewReader(gzipReader) 206 | 207 | bundle.content = make(map[string][]byte) 208 | for { 209 | header, err := tarReader.Next() 210 | if err == io.EOF { 211 | break 212 | } 213 | if err != nil { 214 | return err 215 | } 216 | 217 | if header.Typeflag != tar.TypeReg { 218 | return errors.New("gatecheck bundle only supports regular files in a flat directory structure") 219 | } 220 | fileBytes, _ := io.ReadAll(tarReader) 221 | bundle.content[header.Name] = fileBytes 222 | } 223 | manifest := new(Manifest) 224 | manifestBytes, ok := bundle.content[ManifestFilename] 225 | if !ok { 226 | return errors.New("gatecheck bundle manifest not found") 227 | } 228 | if err := json.Unmarshal(manifestBytes, manifest); err != nil { 229 | return fmt.Errorf("gatecheck manifest decoding: %w", err) 230 | } 231 | bundle.manifest = *manifest 232 | 233 | return nil 234 | } 235 | -------------------------------------------------------------------------------- /pkg/archive/bundle_test.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "os" 7 | "strings" 8 | "testing" 9 | ) 10 | 11 | func TestBundle_WriteFileTo(t *testing.T) { 12 | bundle := NewBundle() 13 | _ = 
bundle.AddFrom(strings.NewReader("ABCDEF"), "file-1.txt", nil) 14 | _ = bundle.AddFrom(strings.NewReader("GHIJKL"), "file-2.txt", nil) 15 | _ = bundle.AddFrom(strings.NewReader("MNOPQR"), "file-3.txt", nil) 16 | outputBuf := new(bytes.Buffer) 17 | _, err := bundle.WriteFileTo(outputBuf, "file-1.txt") 18 | if err != nil { 19 | t.Fatal(err) 20 | } 21 | if outputBuf.String() != "ABCDEF" { 22 | t.Fatalf("want: 'ABCDEF' got: '%s'", outputBuf.String()) 23 | } 24 | if bundle.FileSize("file-1.txt") != outputBuf.Len() { 25 | t.Fatalf("%d is not equal to %d", bundle.FileSize("file-1.txt"), outputBuf.Len()) 26 | } 27 | 28 | t.Run("not-found", func(t *testing.T) { 29 | _, err := bundle.WriteFileTo(outputBuf, "file-999.txt") 30 | t.Log(err) 31 | if err == nil { 32 | t.Fatal("want error got nil") 33 | } 34 | if bundle.FileSize("file-999.txt") != 0 { 35 | t.Fatal() 36 | } 37 | }) 38 | 39 | t.Run("bad-writer", func(t *testing.T) { 40 | _, err := bundle.WriteFileTo(&badWriter{}, "file-1.txt") 41 | if err == nil { 42 | t.Fatal("want: badreader error got: nil") 43 | } 44 | }) 45 | } 46 | 47 | type badWriter struct{} 48 | 49 | func (r *badWriter) Write(_ []byte) (int, error) { 50 | return 0, errors.New("mock reader error") 51 | } 52 | 53 | func MustOpen(filename string, t *testing.T) *os.File { 54 | f, err := os.Open(filename) 55 | if err != nil { 56 | t.Fatal(err) 57 | } 58 | return f 59 | } 60 | -------------------------------------------------------------------------------- /pkg/artifacts/cyclonedx.go: -------------------------------------------------------------------------------- 1 | package artifacts 2 | 3 | import ( 4 | "cmp" 5 | "fmt" 6 | "slices" 7 | "strings" 8 | ) 9 | 10 | // CyclonedxReportMin is a minimum representation of an Cyclonedx scan report 11 | // 12 | // It contains only the necessary fields for validation and listing 13 | type CyclonedxReportMin struct { 14 | Components []CyclonedxComponent `json:"components"` 15 | Vulnerabilities []CyclonedxVulnerability 
`json:"vulnerabilities"` 16 | } 17 | 18 | type CyclonedxComponent struct { 19 | BOMRef string `json:"bom-ref"` 20 | Name string `json:"name"` 21 | Version string `json:"version"` 22 | } 23 | 24 | type CyclonedxVulnerability struct { 25 | ID string `json:"id"` 26 | Advisories []CyclonedxAdvisory `json:"advisories"` 27 | Affects []CyclondexAffectedPackage `json:"affects"` 28 | Ratings []CyclonedxRating `json:"ratings"` 29 | } 30 | 31 | type CyclondexAffectedPackage struct { 32 | Ref string `json:"ref"` 33 | } 34 | 35 | type CyclonedxAdvisory struct { 36 | URL string `json:"url"` 37 | } 38 | 39 | type CyclonedxRating struct { 40 | Source CyclonedxSource `json:"source"` 41 | Severity string `json:"severity"` 42 | } 43 | 44 | type CyclonedxSource struct { 45 | Name string `json:"name"` 46 | } 47 | 48 | func (r *CyclonedxReportMin) SelectBySeverity(severity string) []CyclonedxVulnerability { 49 | vulnerabilities := []CyclonedxVulnerability{} 50 | 51 | for _, vulnerability := range r.Vulnerabilities { 52 | if strings.EqualFold(vulnerability.HighestSeverity(), severity) { 53 | vulnerabilities = append(vulnerabilities, vulnerability) 54 | } 55 | } 56 | return vulnerabilities 57 | } 58 | 59 | func (r *CyclonedxVulnerability) HighestSeverity() string { 60 | order := map[string]int{"none": 0, "low": 1, "medium": 2, "high": 3, "critical": 4} 61 | rating := slices.MaxFunc(r.Ratings, func(a, b CyclonedxRating) int { 62 | return cmp.Compare(order[a.Severity], order[b.Severity]) 63 | }) 64 | return rating.Severity 65 | } 66 | 67 | func (r CyclonedxReportMin) AffectedPackages(vulnerabilityIndex int) string { 68 | refs := []string{} 69 | 70 | for _, affected := range r.Vulnerabilities[vulnerabilityIndex].Affects { 71 | refs = append(refs, affected.Ref) 72 | } 73 | 74 | pkgs := []string{} 75 | // The components in the sbom are linked to affected vulnerabilities 76 | for _, ref := range refs { 77 | for _, component := range r.Components { 78 | if ref == component.BOMRef { 79 | pkgs = 
append(pkgs, fmt.Sprintf("%s [%s]", component.Name, component.Version)) 80 | } 81 | } 82 | } 83 | 84 | return strings.Join(pkgs, ", ") 85 | } 86 | -------------------------------------------------------------------------------- /pkg/artifacts/gitleaks.go: -------------------------------------------------------------------------------- 1 | package artifacts 2 | 3 | import ( 4 | "github.com/gatecheckdev/gatecheck/pkg/format" 5 | ) 6 | 7 | type GitLeaksReportMin []GitleaksFinding 8 | 9 | func (r *GitLeaksReportMin) Count() int { 10 | n := 0 11 | for range *r { 12 | n++ 13 | } 14 | return n 15 | } 16 | 17 | type GitleaksFinding struct { 18 | RuleID string `json:"RuleID"` 19 | File string `json:"File"` 20 | Commit string `json:"Commit"` 21 | StartLine int `json:"StartLine"` 22 | } 23 | 24 | func (f *GitleaksFinding) FileShort() string { 25 | return format.Summarize(f.File, 50, format.ClipMiddle) 26 | } 27 | 28 | func (f *GitleaksFinding) CommitShort() string { 29 | return f.Commit[:8] 30 | } 31 | -------------------------------------------------------------------------------- /pkg/artifacts/grype.go: -------------------------------------------------------------------------------- 1 | package artifacts 2 | 3 | import "strings" 4 | 5 | // GrypeReportMin is a minimum representation of an Anchore Grype scan report 6 | // 7 | // It contains only the necessary fields for validation and listing 8 | type GrypeReportMin struct { 9 | Descriptor GrypeDescriptor `json:"descriptor"` 10 | Matches []GrypeMatch `json:"matches"` 11 | } 12 | 13 | type GrypeMatch struct { 14 | Artifact GrypeArtifact `json:"artifact"` 15 | Vulnerability GrypeVulnerability `json:"vulnerability"` 16 | } 17 | 18 | type GrypeDescriptor struct { 19 | Name string `json:"name"` 20 | Version string `json:"version"` 21 | } 22 | 23 | type GrypeArtifact struct { 24 | Name string `json:"name"` 25 | Version string `json:"version"` 26 | } 27 | 28 | type GrypeVulnerability struct { 29 | ID string `json:"id"` 30 | 
Severity string `json:"severity"` 31 | DataSource string `json:"dataSource"` 32 | } 33 | 34 | func (g *GrypeReportMin) SelectBySeverity(severity string) []GrypeMatch { 35 | matches := []GrypeMatch{} 36 | for _, match := range g.Matches { 37 | if strings.ToLower(match.Vulnerability.Severity) == severity { 38 | matches = append(matches, match) 39 | } 40 | } 41 | 42 | return matches 43 | } 44 | -------------------------------------------------------------------------------- /pkg/artifacts/lcov.go: -------------------------------------------------------------------------------- 1 | package artifacts 2 | 3 | import ( 4 | "errors" 5 | "log/slog" 6 | "strings" 7 | 8 | "github.com/easy-up/go-coverage" 9 | ) 10 | 11 | func IsCoverageReport(inputFilename string) bool { 12 | return strings.Contains(inputFilename, "lcov") || 13 | strings.HasSuffix(inputFilename, ".info") || 14 | strings.Contains(inputFilename, "clover") || 15 | strings.Contains(inputFilename, "cobertura") || 16 | strings.Contains(inputFilename, "coverage") 17 | } 18 | 19 | func GetCoverageMode(inputFilename string) (coverage.CoverageMode, error) { 20 | var coverageFormat coverage.CoverageMode 21 | if strings.Contains(inputFilename, "lcov") || strings.HasSuffix(inputFilename, ".info") { 22 | coverageFormat = coverage.LCOV 23 | } else if strings.Contains(inputFilename, "clover") { 24 | coverageFormat = coverage.CLOVER 25 | } else if strings.HasSuffix(inputFilename, ".xml") { 26 | coverageFormat = coverage.COBERTURA 27 | } else { 28 | slog.Error("unsupported coverage file type, cannot be determined from filename", "filename", inputFilename) 29 | return "", errors.New("failed to list coverage content") 30 | } 31 | return coverageFormat, nil 32 | } 33 | -------------------------------------------------------------------------------- /pkg/artifacts/semgrep.go: -------------------------------------------------------------------------------- 1 | package artifacts 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | 
// SemgrepReportMin is a minimal representation of a semgrep SAST report,
// carrying only the fields needed for validation and listing.
type SemgrepReportMin struct {
	Version string           `json:"version"`
	Errors  []semgrepError   `json:"errors"`
	Results []SemgrepResults `json:"results"`
}

type semgrepError struct {
	Level   string `json:"level"`
	Message string `json:"message"`
	Path    string `json:"path"`
}

type SemgrepResults struct {
	Extra   SemgrepExtra `json:"extra"`
	CheckID string       `json:"check_id"`
}

type SemgrepExtra struct {
	Severity string          `json:"severity"`
	Metadata SemgrepMetadata `json:"metadata"`
	Message  string          `json:"message"`
}

type SemgrepMetadata struct {
	Category   string `json:"category"`
	Confidence string `json:"confidence"`
	CWE        any    `json:"cwe"`
	Impact     string `json:"impact"`
	Likelihood string `json:"likelihood"`
	Shortlink  string `json:"shortlink"`
	Owasp      any    `json:"owasp"`
}

// SelectBySeverity returns every result whose severity matches the given
// severity, compared case-insensitively.
func (s *SemgrepReportMin) SelectBySeverity(severity string) []SemgrepResults {
	selected := make([]SemgrepResults, 0)
	for i := range s.Results {
		if strings.EqualFold(s.Results[i].Extra.Severity, severity) {
			selected = append(selected, s.Results[i])
		}
	}
	return selected
}

// ShortMessage returns only the first line of a potentially multi-line
// error message, or "-" when no line is available.
func (s *semgrepError) ShortMessage() string {
	// strings.Split always yields at least one element; the guard is kept
	// for defensive parity with the original implementation.
	if parts := strings.Split(s.Message, "\n"); len(parts) > 0 {
		return parts[0]
	}
	return "-"
}

// ShortCheckID abbreviates long dotted check IDs to "first...last";
// IDs with three or fewer segments are returned unchanged.
func (s *SemgrepResults) ShortCheckID() string {
	parts := strings.Split(s.CheckID, ".")
	if len(parts) == 0 {
		return "-"
	}
	if len(parts) <= 3 {
		return s.CheckID
	}
	return fmt.Sprintf("%s...%s", parts[0], parts[len(parts)-1])
}

// OwaspIDs renders the loosely-typed Owasp field as a display string:
// a plain string passes through, a list is joined with ", ", and any
// other shape renders as "-".
func (s *SemgrepMetadata) OwaspIDs() string {
	if single, ok := s.Owasp.(string); ok {
		return single
	}
	if items, ok := s.Owasp.([]interface{}); ok {
		ids := make([]string, 0, len(items))
		for _, item := range items {
			ids = append(ids, fmt.Sprintf("%v", item))
		}
		return strings.Join(ids, ", ")
	}
	return "-"
}
type FetchOptions struct { 80 | Client *http.Client 81 | URL string 82 | } 83 | 84 | // DefaultFetchOptions use the default client and url for today's scores 85 | func DefaultFetchOptions() *FetchOptions { 86 | return &FetchOptions{ 87 | Client: http.DefaultClient, 88 | URL: defaultEPSSURL, 89 | } 90 | } 91 | 92 | func DownloadData(w io.Writer, optionFuncs ...fetchOptionFunc) error { 93 | options := DefaultFetchOptions() 94 | for _, optionFunc := range optionFuncs { 95 | optionFunc(options) 96 | } 97 | 98 | logger := slog.Default().With("method", "GET", "url", options.URL) 99 | defer func(started time.Time) { 100 | logger.Debug("epss csv fetch done", "elapsed", time.Since(started)) 101 | }(time.Now()) 102 | 103 | logger.Debug("request epss data from api") 104 | res, err := options.Client.Get(options.URL) 105 | 106 | switch { 107 | case err != nil: 108 | logger.Error("epss api request failed during fetch data", "error", err) 109 | return errors.New("failed to get EPSS Scores. see log for details") 110 | case res.StatusCode != http.StatusOK: 111 | logger.Error("epss api bad status code", "res_status", res.Status) 112 | return errors.New("failed to get EPSS Scores. see log for details") 113 | } 114 | 115 | gunzipReader, err := gzip.NewReader(res.Body) 116 | if err != nil { 117 | logger.Error("gzip reader", "error", err) 118 | return errors.New("failed to parse EPSS Scores. see log for details") 119 | } 120 | 121 | n, err := io.Copy(w, gunzipReader) 122 | if err != nil { 123 | logger.Error("io copy to writer from gzip reader", "error", err) 124 | return errors.New("failed to get EPSS Scores. 
see log for details") 125 | } 126 | 127 | size := humanize.Bytes(uint64(n)) 128 | 129 | slog.Debug("successfully downloaded and decompressed epss data", "decompressed_size", size) 130 | return nil 131 | } 132 | 133 | // FetchData do a GET request and gunzip on the CSV 134 | func FetchData(destData *Data, optionFuncs ...fetchOptionFunc) error { 135 | buf := new(bytes.Buffer) 136 | if err := DownloadData(buf, optionFuncs...); err != nil { 137 | return err 138 | } 139 | 140 | return ParseEPSSDataCSV(buf, destData) 141 | } 142 | 143 | // ParseEPSSDataCSV custom CSV parsing function 144 | func ParseEPSSDataCSV(r io.Reader, data *Data) error { 145 | // Debug the total elapsed time 146 | defer func(started time.Time) { 147 | slog.Debug("csv parse done", "elapsed", time.Since(started)) 148 | }(time.Now()) 149 | 150 | scanner := bufio.NewScanner(r) 151 | scanner.Scan() 152 | if err := scanner.Err(); err != nil { 153 | return err 154 | } 155 | data.CVEs = make(map[string]CVE) 156 | slog.Debug("parse csv metadata header") 157 | parts := strings.Split(scanner.Text(), ",") 158 | if len(parts) != 2 { 159 | return fmt.Errorf("failed to parse EPSS CSV, malformed metadata header: '%s'", scanner.Text()) 160 | } 161 | 162 | data.ModelVersion = strings.ReplaceAll(parts[0], "#model_version:", "") 163 | 164 | if data.ModelVersion != dataModel { 165 | slog.Warn("data model does not match supported model", "want", dataModel, "got", data.ModelVersion) 166 | } 167 | 168 | scoreDate, err := time.Parse(modelDateLayout, strings.ReplaceAll(parts[1], "score_date:", "")) 169 | if err != nil { 170 | return fmt.Errorf("failed to parse EPSS CSV, invalid date format in metadata header '%s'", scanner.Text()) 171 | } 172 | 173 | data.ScoreDate = scoreDate 174 | 175 | // Next Line should be header 176 | scanner.Scan() 177 | if scanner.Text() != "cve,epss,percentile" { 178 | return fmt.Errorf("failed to parse EPSS CSV, invalid header '%s'", scanner.Text()) 179 | } 180 | 181 | slog.Debug("parse csv rows") 
182 | 183 | for scanner.Scan() { 184 | line := scanner.Text() 185 | // Add the newline back in so it would make a full file hash 186 | values := strings.Split(line, ",") 187 | 188 | if len(values) != 3 { 189 | return fmt.Errorf("failed to parse EPSS CSV, unexpected number of items '%s'", line) 190 | } 191 | 192 | data.CVEs[values[0]] = CVE{EPSS: values[1], Percentile: values[2]} 193 | } 194 | 195 | return nil 196 | } 197 | -------------------------------------------------------------------------------- /pkg/format/matrix.go: -------------------------------------------------------------------------------- 1 | package format 2 | 3 | import ( 4 | "github.com/olekukonko/tablewriter" 5 | ) 6 | 7 | type SortableMatrix struct { 8 | data [][]string 9 | selectedColumn int 10 | lessFunc func(a, b string) bool 11 | } 12 | 13 | func NewSortableMatrix(data [][]string, sortColIdx int, sortFunc func(a, b string) bool) *SortableMatrix { 14 | return &SortableMatrix{ 15 | data: data, 16 | selectedColumn: sortColIdx, 17 | lessFunc: sortFunc, 18 | } 19 | } 20 | 21 | func (m *SortableMatrix) Append(row []string) { 22 | m.data = append(m.data, row) 23 | } 24 | 25 | func (m *SortableMatrix) Matrix() [][]string { 26 | return m.data 27 | } 28 | 29 | func (m *SortableMatrix) Table(table *tablewriter.Table) { 30 | table.AppendBulk(m.data) 31 | } 32 | 33 | func (m *SortableMatrix) Len() int { 34 | return len(m.data) 35 | } 36 | 37 | func (m *SortableMatrix) Swap(i, j int) { 38 | m.data[i], m.data[j] = m.data[j], m.data[i] 39 | } 40 | 41 | func (m *SortableMatrix) Less(i, j int) bool { 42 | return m.lessFunc(m.data[i][m.selectedColumn], m.data[j][m.selectedColumn]) 43 | } 44 | 45 | func AlphabeticLess(a, b string) bool { 46 | return a < b 47 | } 48 | 49 | func NewCatagoricLess(categories []string) func(a, b string) bool { 50 | return func(a, b string) bool { 51 | aIndex, bIndex := 0, 0 52 | for i, category := range categories { 53 | if a == category { 54 | aIndex = i 55 | } 56 | if b == 
// ClipDirection selects which part of a string Summarize replaces with
// an ellipsis.
type ClipDirection int

const (
	ClipLeft ClipDirection = iota
	ClipRight
	ClipMiddle
)

// Summarize shortens content to at most length characters, marking the
// removed region with "..." on the side indicated by clip.
//
// Content that already fits is returned unchanged. When length <= 3 there
// is no room for an ellipsis, so the content is hard truncated instead
// (ClipLeft keeps the leading characters, matching the existing tests);
// non-positive lengths return the empty string rather than panicking.
//
// NOTE(review): operates on bytes, not runes — multi-byte UTF-8 content may
// be split mid-character; confirm callers only pass ASCII-safe strings.
func Summarize(content string, length int, clip ClipDirection) string {
	if len(content) <= length {
		return content
	}

	if length <= 0 {
		// Previously, ClipMiddle with a non-positive length produced a
		// negative slice index and panicked.
		return ""
	}

	if length <= 3 {
		switch clip {
		case ClipRight:
			return content[len(content)-length:]
		default: // ClipLeft, ClipMiddle
			return content[:length]
		}
	}

	switch clip {
	case ClipLeft:
		return "..." + content[len(content)-(length-3):]
	case ClipRight:
		return content[:length-3] + "..."
	default: // ClipMiddle
		// Keep half of the budget on each side; total output length is
		// 2*half+3 <= length, as in the original implementation.
		half := (length - 3) / 2
		return content[:half] + "..." + content[len(content)-half:]
	}
}

// PrettyPrintMap renders a map as "(k1: v1, k2: v2)".
//
// NOTE(review): Go map iteration order is unspecified, so the entry order
// varies between calls; callers must not rely on a stable rendering.
func PrettyPrintMap[K comparable, V any](m map[K]V) string {
	s := make([]string, 0, len(m))
	for k, v := range m {
		s = append(s, fmt.Sprintf("%v: %v", k, v))
	}
	return fmt.Sprintf("(%s)", strings.Join(s, ", "))
}

// PrettyPrintMapOrdered renders the entries of m as "(k1: v1, k2: v2)" in
// the order given by orderedKeys; keys absent from m render zero values.
func PrettyPrintMapOrdered[K comparable, V any](m map[K]V, orderedKeys []K) string {
	s := make([]string, 0, len(orderedKeys))
	for _, key := range orderedKeys {
		s = append(s, fmt.Sprintf("%v: %v", key, m[key]))
	}
	return fmt.Sprintf("(%s)", strings.Join(s, ", "))
}
map[string]string{"Key A": "Value A", "Key B": "Value B"} 35 | t.Log(PrettyPrintMap(obj)) 36 | } 37 | 38 | func TestPrettyPrintMapOrdered(t *testing.T) { 39 | obj := map[string]string{"Key A": "Value A", "Key B": "Value B"} 40 | t.Log(PrettyPrintMapOrdered(obj, []string{"Key B", "Key A"})) 41 | } 42 | -------------------------------------------------------------------------------- /pkg/gatecheck/bundle.go: -------------------------------------------------------------------------------- 1 | package gatecheck 2 | 3 | import ( 4 | "io" 5 | "log/slog" 6 | 7 | "github.com/gatecheckdev/gatecheck/pkg/archive" 8 | ) 9 | 10 | // CreateBundle create a new bundle with a file 11 | // 12 | // If the bundle already exist, use CreateBundle. 13 | // this function will completely overwrite an existing bundle 14 | func CreateBundle(dstBundle io.Writer, src io.Reader, label string, tags []string) error { 15 | slog.Debug("add to source file content to bundle", "label", label, "tags", tags) 16 | srcContent, err := io.ReadAll(src) 17 | if err != nil { 18 | return err 19 | } 20 | 21 | bundle := archive.NewBundle() 22 | bundle.Add(srcContent, label, tags) 23 | 24 | slog.Debug("write bundle") 25 | n, err := archive.TarGzipBundle(dstBundle, bundle) 26 | if err != nil { 27 | return err 28 | } 29 | 30 | slog.Info("bundle write success", "bytes_written", n, "label", label, "tags", tags) 31 | 32 | return nil 33 | } 34 | 35 | // AppendToBundle adds a file to an existing bundle 36 | // 37 | // If the bundle doesn't exist, use CreateBundle 38 | func AppendToBundle(bundleRWS io.ReadWriteSeeker, src io.Reader, label string, tags []string) error { 39 | slog.Debug("load bundle") 40 | bundle := archive.NewBundle() 41 | if err := archive.UntarGzipBundle(bundleRWS, bundle); err != nil { 42 | return err 43 | } 44 | 45 | slog.Debug("load source file") 46 | srcContent, err := io.ReadAll(src) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | slog.Debug("add to source file content to bundle", "label", 
label, "tags", tags) 52 | bundle.Add(srcContent, label, tags) 53 | 54 | // Seek errors are unlikely so just capture for edge cases 55 | _, seekErr := bundleRWS.Seek(0, io.SeekStart) 56 | 57 | slog.Debug("write bundle", "seek_err", seekErr) 58 | n, err := archive.TarGzipBundle(bundleRWS, bundle) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | slog.Info("bundle write success", "bytes_written", n, "label", label, "tags", tags) 64 | 65 | return nil 66 | } 67 | 68 | // RemoveFromBundle removes a file from an existing bundle 69 | func RemoveFromBundle(bundleRWS io.ReadWriteSeeker, label string) error { 70 | slog.Debug("load bundle") 71 | bundle := archive.NewBundle() 72 | if err := archive.UntarGzipBundle(bundleRWS, bundle); err != nil { 73 | return err 74 | } 75 | bundle.Remove(label) 76 | // Seek errors are unlikely so just capture for edge cases 77 | _, seekErr := bundleRWS.Seek(0, io.SeekStart) 78 | 79 | slog.Debug("write bundle", "seek_err", seekErr) 80 | n, err := archive.TarGzipBundle(bundleRWS, bundle) 81 | if err != nil { 82 | return err 83 | } 84 | 85 | slog.Info("bundle write after remove success", "bytes_written", n, "label", label) 86 | return nil 87 | } 88 | -------------------------------------------------------------------------------- /pkg/gatecheck/config.go: -------------------------------------------------------------------------------- 1 | package gatecheck 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "log/slog" 10 | "os" 11 | "path" 12 | 13 | "github.com/olekukonko/tablewriter" 14 | "github.com/pelletier/go-toml/v2" 15 | "github.com/spf13/viper" 16 | "gopkg.in/yaml.v3" 17 | ) 18 | 19 | // Config is used to set limits and allowances during validation 20 | // 21 | // The report can be encoded/decoded into json, yaml, or toml 22 | // Metadata fields are intended for arbitrary data and shouldn't 23 | // conflict with rule validation 24 | type Config struct { 25 | Version string `json:"version" toml:"version" 
yaml:"version"` 26 | Metadata configMetadata `json:"metadata" toml:"metadata" yaml:"metadata"` 27 | Grype reportWithCVEs `json:"grype" toml:"grype" yaml:"grype"` 28 | Cyclonedx reportWithCVEs `json:"cyclonedx" toml:"cyclonedx" yaml:"cyclonedx"` 29 | Semgrep configSemgrepReport `json:"semgrep" toml:"semgrep" yaml:"semgrep"` 30 | Gitleaks configGitleaksReport `json:"gitleaks" toml:"gitleaks" yaml:"gitleaks"` 31 | Coverage configCoverageReport `json:"coverage" toml:"coverage" yaml:"coverage"` 32 | } 33 | 34 | func (c *Config) String() string { 35 | buf := new(bytes.Buffer) 36 | _ = json.NewEncoder(buf).Encode(c) 37 | 38 | v := viper.New() 39 | v.SetConfigType("json") 40 | _ = v.ReadConfig(buf) 41 | 42 | contentBuf := new(bytes.Buffer) 43 | table := tablewriter.NewWriter(contentBuf) 44 | table.SetHeader([]string{"config key", "value"}) 45 | 46 | for _, key := range v.AllKeys() { 47 | table.Append([]string{key, fmt.Sprintf("%v", v.Get(key))}) 48 | } 49 | return contentBuf.String() 50 | } 51 | 52 | type configCoverageReport struct { 53 | LineThreshold float32 `json:"lineThreshold" toml:"lineThreshold" yaml:"lineThreshold"` 54 | FunctionThreshold float32 `json:"functionThreshold" toml:"functionThreshold" yaml:"functionThreshold"` 55 | BranchThreshold float32 `json:"branchThreshold" toml:"branchThreshold" yaml:"branchThreshold"` 56 | } 57 | 58 | type configGitleaksReport struct { 59 | LimitEnabled bool `json:"limitEnabled" toml:"limitEnabled" yaml:"limitEnabled"` 60 | } 61 | 62 | type configSemgrepReport struct { 63 | SeverityLimit configSemgrepSeverityLimit `json:"severityLimit" toml:"severityLimit" yaml:"severityLimit"` 64 | ImpactRiskAcceptance configSemgrepImpactRiskAcceptance `json:"impactRiskAcceptance" toml:"impactRiskAcceptance" yaml:"impactRiskAcceptance"` 65 | } 66 | 67 | type configSemgrepSeverityLimit struct { 68 | Error configLimit `json:"error" toml:"error" yaml:"error"` 69 | Warning configLimit `json:"warning" toml:"warning" yaml:"warning"` 70 | Info 
// configSemgrepImpactRiskAcceptance toggles automatic risk acceptance of
// semgrep findings by their reported impact level.
type configSemgrepImpactRiskAcceptance struct {
	Enabled bool `json:"enabled" toml:"enabled" yaml:"enabled"`
	High    bool `json:"high" toml:"high" yaml:"high"`
	Medium  bool `json:"medium" toml:"medium" yaml:"medium"`
	Low     bool `json:"low" toml:"low" yaml:"low"`
}

// configMetadata carries arbitrary user-supplied data (tags) that does not
// participate in rule validation.
type configMetadata struct {
	Tags []string `json:"tags" toml:"tags" yaml:"tags"`
}

// reportWithCVEs holds the validation settings shared by CVE-based report
// types (grype and cyclonedx): severity/EPSS/KEV/CVE limits plus
// risk-acceptance toggles.
type reportWithCVEs struct {
	SeverityLimit      configServerityLimit     `json:"severityLimit" toml:"severityLimit" yaml:"severityLimit"`
	EPSSLimit          configEPSSLimit          `json:"epssLimit" toml:"epssLimit" yaml:"epssLimit"`
	KEVLimitEnabled    bool                     `json:"kevLimitEnabled" toml:"kevLimitEnabled" yaml:"kevLimitEnabled"`
	CVELimit           configCVELimit           `json:"cveLimit" toml:"cveLimit" yaml:"cveLimit"`
	EPSSRiskAcceptance configEPSSRiskAcceptance `json:"epssRiskAcceptance" toml:"epssRiskAcceptance" yaml:"epssRiskAcceptance"`
	CVERiskAcceptance  configCVERiskAcceptance  `json:"cveRiskAcceptance" toml:"cveRiskAcceptance" yaml:"cveRiskAcceptance"`
}

// configEPSSRiskAcceptance auto-accepts findings whose EPSS score falls
// below Score when Enabled is true.
type configEPSSRiskAcceptance struct {
	Enabled bool    `json:"enabled" toml:"enabled" yaml:"enabled"`
	Score   float64 `json:"score" toml:"score" yaml:"score"`
}

// configCVERiskAcceptance auto-accepts the explicitly listed CVE IDs when
// Enabled is true.
type configCVERiskAcceptance struct {
	Enabled bool        `json:"enabled" toml:"enabled" yaml:"enabled"`
	CVEs    []configCVE `json:"cves" toml:"cves" yaml:"cves"`
}

// configServerityLimit holds one count limit per severity level.
// NOTE(review): the name contains a typo ("Serverity"); renaming would
// require touching all references, so it is documented rather than changed.
type configServerityLimit struct {
	Critical configLimit `json:"critical" toml:"critical" yaml:"critical"`
	High     configLimit `json:"high" toml:"high" yaml:"high"`
	Medium   configLimit `json:"medium" toml:"medium" yaml:"medium"`
	Low      configLimit `json:"low" toml:"low" yaml:"low"`
}

// configEPSSLimit fails validation for findings whose EPSS score is at or
// above Score when Enabled is true (threshold semantics enforced by the
// validation rules, not here).
type configEPSSLimit struct {
	Enabled bool    `json:"enabled" toml:"enabled" yaml:"enabled"`
	Score   float64 `json:"score" toml:"score" yaml:"score"`
}
struct { 114 | Enabled bool `json:"enabled" toml:"enabled" yaml:"enabled"` 115 | CVEs []configCVE `json:"cves" toml:"cves" yaml:"cves"` 116 | } 117 | 118 | type configCVE struct { 119 | ID string `json:"id" toml:"id" yaml:"id"` 120 | Metadata struct { 121 | Tags []string `json:"tags" toml:"tags" yaml:"tags"` 122 | } 123 | } 124 | 125 | type configLimit struct { 126 | Enabled bool `json:"enabled" toml:"enabled" yaml:"enabled"` 127 | Limit uint `json:"limit" toml:"limit" yaml:"limit"` 128 | } 129 | 130 | func NewDefaultConfig() *Config { 131 | return &Config{ 132 | Version: "1", 133 | Metadata: configMetadata{ 134 | Tags: []string{}, 135 | }, 136 | Semgrep: configSemgrepReport{ 137 | SeverityLimit: configSemgrepSeverityLimit{ 138 | Error: configLimit{ 139 | Enabled: false, 140 | Limit: 0, 141 | }, 142 | Warning: configLimit{ 143 | Enabled: false, 144 | Limit: 0, 145 | }, 146 | Info: configLimit{ 147 | Enabled: false, 148 | Limit: 0, 149 | }, 150 | }, 151 | ImpactRiskAcceptance: configSemgrepImpactRiskAcceptance{ 152 | Enabled: false, 153 | High: false, 154 | Medium: false, 155 | Low: false, 156 | }, 157 | }, 158 | Grype: reportWithCVEs{ 159 | SeverityLimit: configServerityLimit{ 160 | Critical: configLimit{ 161 | Enabled: false, 162 | Limit: 0, 163 | }, 164 | High: configLimit{ 165 | Enabled: false, 166 | Limit: 0, 167 | }, 168 | Medium: configLimit{ 169 | Enabled: false, 170 | Limit: 0, 171 | }, 172 | Low: configLimit{ 173 | Enabled: false, 174 | Limit: 0, 175 | }, 176 | }, 177 | EPSSLimit: configEPSSLimit{ 178 | Enabled: false, 179 | Score: 0, 180 | }, 181 | KEVLimitEnabled: false, 182 | CVELimit: configCVELimit{ 183 | Enabled: false, 184 | CVEs: make([]configCVE, 0), 185 | }, 186 | EPSSRiskAcceptance: configEPSSRiskAcceptance{ 187 | Enabled: false, 188 | Score: 0, 189 | }, 190 | CVERiskAcceptance: configCVERiskAcceptance{ 191 | Enabled: false, 192 | CVEs: make([]configCVE, 0), 193 | }, 194 | }, 195 | Cyclonedx: reportWithCVEs{ 196 | SeverityLimit: 
configServerityLimit{ 197 | Critical: configLimit{ 198 | Enabled: false, 199 | Limit: 0, 200 | }, 201 | High: configLimit{ 202 | Enabled: false, 203 | Limit: 0, 204 | }, 205 | Medium: configLimit{ 206 | Enabled: false, 207 | Limit: 0, 208 | }, 209 | Low: configLimit{ 210 | Enabled: false, 211 | Limit: 0, 212 | }, 213 | }, 214 | EPSSLimit: configEPSSLimit{ 215 | Enabled: false, 216 | Score: 0, 217 | }, 218 | KEVLimitEnabled: false, 219 | CVELimit: configCVELimit{ 220 | Enabled: false, 221 | CVEs: make([]configCVE, 0), 222 | }, 223 | EPSSRiskAcceptance: configEPSSRiskAcceptance{ 224 | Enabled: false, 225 | Score: 0, 226 | }, 227 | CVERiskAcceptance: configCVERiskAcceptance{ 228 | Enabled: false, 229 | CVEs: make([]configCVE, 0), 230 | }, 231 | }, 232 | Gitleaks: configGitleaksReport{ 233 | LimitEnabled: false, 234 | }, 235 | Coverage: configCoverageReport{ 236 | LineThreshold: 0, 237 | FunctionThreshold: 0, 238 | BranchThreshold: 0, 239 | }, 240 | } 241 | } 242 | 243 | func WriteDefaultConfig(w io.Writer, format string) error { 244 | config := NewDefaultConfig() 245 | config.Metadata.Tags = append(config.Metadata.Tags, "auto generated from CLI") 246 | return EncodeConfigTo(w, config, format) 247 | } 248 | 249 | func EncodeConfigTo(w io.Writer, config *Config, format string) error { 250 | var encoder interface { 251 | Encode(any) error 252 | } 253 | 254 | slog.Debug("encode config file", "format", format) 255 | switch format { 256 | case "json": 257 | enc := json.NewEncoder(w) 258 | enc.SetIndent("", " ") 259 | encoder = enc 260 | case "yaml", "yml": 261 | enc := yaml.NewEncoder(w) 262 | enc.SetIndent(2) 263 | encoder = enc 264 | case "toml": 265 | encoder = toml.NewEncoder(w) 266 | default: 267 | return fmt.Errorf("unsupported format '%s'", format) 268 | } 269 | 270 | return encoder.Encode(config) 271 | } 272 | 273 | type ConfigEncoder struct { 274 | writer io.Writer 275 | ext string 276 | } 277 | 278 | func NewConfigEncoder(w io.Writer, ext string) *ConfigEncoder { 
279 | return &ConfigEncoder{ 280 | writer: w, 281 | ext: ext, 282 | } 283 | } 284 | 285 | func (e *ConfigEncoder) Encode(config *Config) error { 286 | var encoder interface { 287 | Encode(any) error 288 | } 289 | 290 | switch e.ext { 291 | case ".json": 292 | encoder = json.NewEncoder(e.writer) 293 | case ".toml": 294 | encoder = toml.NewEncoder(e.writer) 295 | case ".yaml", ".yml": 296 | encoder = yaml.NewEncoder(e.writer) 297 | default: 298 | return errors.New("invalid file extension, only json, toml, yaml or yml supported") 299 | } 300 | 301 | return encoder.Encode(config) 302 | 303 | } 304 | 305 | type ConfigDecoder struct { 306 | filename string 307 | } 308 | 309 | func NewConfigDecoder(filename string) *ConfigDecoder { 310 | return &ConfigDecoder{ 311 | filename: filename, 312 | } 313 | } 314 | 315 | func (d *ConfigDecoder) Decode(config *Config) error { 316 | ext := path.Ext(d.filename) 317 | 318 | slog.Debug("decode", "filename", d.filename, "extension", ext) 319 | f, err := os.Open(d.filename) 320 | if err != nil { 321 | return err 322 | } 323 | 324 | var decoder interface { 325 | Decode(any) error 326 | } 327 | 328 | switch ext { 329 | case ".json": 330 | decoder = json.NewDecoder(f) 331 | case ".toml": 332 | decoder = toml.NewDecoder(f) 333 | case ".yaml", ".yml": 334 | decoder = yaml.NewDecoder(f) 335 | default: 336 | return errors.New("invalid file extension, only json, toml, yaml or yml supported") 337 | } 338 | 339 | return decoder.Decode(config) 340 | } 341 | -------------------------------------------------------------------------------- /pkg/gatecheck/download.go: -------------------------------------------------------------------------------- 1 | package gatecheck 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "os" 7 | 8 | "github.com/gatecheckdev/gatecheck/pkg/epss" 9 | "github.com/gatecheckdev/gatecheck/pkg/kev" 10 | ) 11 | 12 | type fetchOptions struct { 13 | epssClient *http.Client 14 | epssURL string 15 | 16 | kevClient *http.Client 17 | 
kevURL string 18 | 19 | epssFile *os.File 20 | kevFile *os.File 21 | } 22 | 23 | func defaultOptions() *fetchOptions { 24 | epssDefault := epss.DefaultFetchOptions() 25 | kevDefault := kev.DefaultFetchOptions() 26 | return &fetchOptions{ 27 | epssClient: epssDefault.Client, 28 | epssURL: epssDefault.URL, 29 | kevClient: kevDefault.Client, 30 | kevURL: kevDefault.URL, 31 | } 32 | } 33 | 34 | // WithEPSSURL optionFunc that sets the fetch URL for EPSS data 35 | // 36 | // Will use the default option if "" is passed 37 | func WithEPSSURL(url string) optionFunc { 38 | if url == "" { 39 | return func(_ *fetchOptions) {} 40 | } 41 | 42 | return func(o *fetchOptions) { 43 | o.epssURL = url 44 | } 45 | } 46 | 47 | // WithKEVURL optionFunc that sets the fetch URL for KEV data 48 | // 49 | // Will use the default option if "" is passed 50 | func WithKEVURL(url string) optionFunc { 51 | if url == "" { 52 | return func(_ *fetchOptions) {} 53 | } 54 | 55 | return func(o *fetchOptions) { 56 | o.kevURL = url 57 | } 58 | } 59 | 60 | func WithEPSSFile(epssFile *os.File) optionFunc { 61 | return func(o *fetchOptions) { 62 | o.epssFile = epssFile 63 | } 64 | } 65 | 66 | func WithKEVFile(kevFile *os.File) optionFunc { 67 | return func(o *fetchOptions) { 68 | o.kevFile = kevFile 69 | } 70 | } 71 | 72 | type optionFunc func(*fetchOptions) 73 | 74 | func DownloadEPSS(w io.Writer, optionFuncs ...optionFunc) error { 75 | options := defaultOptions() 76 | for _, f := range optionFuncs { 77 | f(options) 78 | } 79 | 80 | return epss.DownloadData(w, epss.WithClient(options.epssClient), epss.WithURL(options.epssURL)) 81 | } 82 | 83 | func DownloadKEV(w io.Writer, optionFuncs ...optionFunc) error { 84 | options := defaultOptions() 85 | for _, f := range optionFuncs { 86 | f(options) 87 | } 88 | 89 | return kev.DownloadData(w, kev.WithClient(options.kevClient), kev.WithURL(options.kevURL)) 90 | } 91 | -------------------------------------------------------------------------------- 
// List decodes the report read from src and renders it as a table (or plain
// listing, for bundles) to dst.
//
// The report type is inferred from substrings of inputFilename ("grype",
// "cyclonedx", "semgrep", "gitleaks", "syft", "bundle"/"gatecheck") or by
// artifacts.IsCoverageReport; case order matters, so e.g. a name containing
// both "grype" and "bundle" is treated as grype. Options control the display
// format and optional EPSS enrichment for grype/cyclonedx reports.
//
// Returns an error for unsupported file types, syft reports (not yet
// implemented), or any decode failure.
func List(dst io.Writer, src io.Reader, inputFilename string, options ...ListOptionFunc) error {
	table := tablewriter.NewWriter(dst)
	var err error
	o := &listOptions{}
	for _, f := range options {
		f(o)
	}

	switch {
	case strings.Contains(inputFilename, "grype"):
		slog.Debug("list", "filename", inputFilename, "filetype", "grype")
		// EPSS data, when supplied, adds score/percentile columns.
		if o.epssData != nil {
			err = listGrypeWithEPSS(table, src, o.epssData)
		} else {
			err = ListGrypeReport(table, src)
		}

	case strings.Contains(inputFilename, "cyclonedx"):
		slog.Debug("list", "filename", inputFilename, "filetype", "cyclonedx")
		if o.epssData != nil {
			err = listCyclonedxWithEPSS(table, src, o.epssData)
		} else {
			err = ListCyclonedx(table, src)
		}

	case strings.Contains(inputFilename, "semgrep"):
		slog.Debug("list", "filename", inputFilename, "filetype", "semgrep")
		err = ListSemgrep(table, src)

	case strings.Contains(inputFilename, "gitleaks"):
		slog.Debug("list", "filename", inputFilename, "filetype", "gitleaks")
		err = listGitleaks(table, src)

	case strings.Contains(inputFilename, "syft"):
		slog.Debug("list", "filename", inputFilename, "filetype", "syft")
		slog.Warn("syft decoder is not supported yet")
		return errors.New("syft not implemented yet")

	case strings.Contains(inputFilename, "bundle") || strings.Contains(inputFilename, "gatecheck"):
		slog.Debug("list", "filename", inputFilename, "filetype", "bundle")
		// Bundles are printed as their own content listing, not a table,
		// so this case returns early and skips table rendering below.
		bundle := archive.NewBundle()
		if err = archive.UntarGzipBundle(src, bundle); err != nil {
			return err
		}
		_, err = fmt.Fprintln(dst, bundle.Content())
		return err

	case artifacts.IsCoverageReport(inputFilename):
		slog.Debug("list", "filename", inputFilename, "filetype", "coverage")

		err = listCoverage(table, inputFilename, src)
	default:
		slog.Error("unsupported file type, cannot be determined from filename", "filename", inputFilename)
		return errors.New("failed to list artifact content")
	}

	if err != nil {
		return err
	}

	// Optional markdown-compatible table styling; any other display format
	// (including "") keeps the tablewriter defaults.
	switch strings.ToLower(strings.TrimSpace(o.displayFormat)) {
	case "markdown", "md":
		table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
		table.SetCenterSeparator("|")
		table.SetAutoWrapText(false)
	}

	table.Render()

	return nil
}
coverage.New(coverageFormat) 130 | report, err := parser.ParseReader(src) 131 | if err != nil { 132 | return err 133 | } 134 | 135 | header := []string{"Lines Covered", "Functions Covered", "Branches Covered"} 136 | table.SetHeader(header) 137 | table.Append([]string{strconv.Itoa(report.CoveredLines), strconv.Itoa(report.CoveredFunctions), strconv.Itoa(report.CoveredBranches)}) 138 | 139 | lineCoverageStr := fmt.Sprintf("%0.2f%%", (float32(report.CoveredLines)/float32(report.TotalLines))*100) 140 | funcCoverageStr := fmt.Sprintf("%0.2f%%", (float32(report.CoveredFunctions)/float32(report.TotalFunctions))*100) 141 | branchCoverageStr := fmt.Sprintf("%0.2f%%", (float32(report.CoveredBranches)/float32(report.TotalBranches))*100) 142 | table.SetFooter([]string{lineCoverageStr, funcCoverageStr, branchCoverageStr}) 143 | 144 | return nil 145 | } 146 | 147 | func ListGrypeReport(table *tablewriter.Table, src io.Reader) error { 148 | report := &artifacts.GrypeReportMin{} 149 | slog.Debug("decode grype report", "format", "json") 150 | if err := json.NewDecoder(src).Decode(&report); err != nil { 151 | return err 152 | } 153 | 154 | catLess := format.NewCatagoricLess([]string{"Critical", "High", "Medium", "Low", "Negligible", "Unknown"}) 155 | matrix := format.NewSortableMatrix(make([][]string, 0), 0, catLess) 156 | 157 | for _, item := range report.Matches { 158 | row := []string{item.Vulnerability.Severity, item.Artifact.Name, item.Artifact.Version, item.Vulnerability.DataSource} 159 | matrix.Append(row) 160 | } 161 | sort.Sort(matrix) 162 | 163 | header := []string{"Grype Severity", "Package", "Version", "Link"} 164 | 165 | table.SetHeader(header) 166 | matrix.Table(table) 167 | 168 | if len(report.Matches) == 0 { 169 | footer := make([]string, len(header)) 170 | footer[len(header)-1] = "No Grype Vulnerabilities" 171 | table.SetFooter(footer) 172 | table.SetBorder(false) 173 | } 174 | 175 | return nil 176 | } 177 | 178 | func listGrypeWithEPSS(table *tablewriter.Table, src 
io.Reader, epssData *epss.Data) error { 179 | report := &artifacts.GrypeReportMin{} 180 | slog.Debug("decode grype report", "format", "json") 181 | if err := json.NewDecoder(src).Decode(&report); err != nil { 182 | return err 183 | } 184 | 185 | catLess := format.NewCatagoricLess([]string{"Critical", "High", "Medium", "Low", "Negligible", "Unknown"}) 186 | matrix := format.NewSortableMatrix(make([][]string, 0), 1, catLess) 187 | 188 | for _, item := range report.Matches { 189 | cve, ok := epssData.CVEs[item.Vulnerability.ID] 190 | score := "-" 191 | prctl := "-" 192 | if ok { 193 | score = cve.EPSS 194 | prctl = cve.Percentile 195 | } 196 | 197 | row := []string{ 198 | item.Vulnerability.ID, 199 | item.Vulnerability.Severity, 200 | score, 201 | prctl, 202 | item.Artifact.Name, 203 | item.Artifact.Version, 204 | item.Vulnerability.DataSource, 205 | } 206 | matrix.Append(row) 207 | } 208 | 209 | header := []string{ 210 | "Grype CVE ID", 211 | "Severity", 212 | "EPSS Score", 213 | "EPSS Prctl", 214 | "Package", 215 | "Version", 216 | "Link", 217 | } 218 | 219 | sort.Sort(matrix) 220 | 221 | table.SetHeader(header) 222 | matrix.Table(table) 223 | 224 | if len(report.Matches) == 0 { 225 | footer := make([]string, len(header)) 226 | footer[len(header)-1] = "No Grype Vulnerabilities" 227 | table.SetFooter(footer) 228 | table.SetBorder(false) 229 | } 230 | 231 | return nil 232 | } 233 | 234 | func ListCyclonedx(table *tablewriter.Table, src io.Reader) error { 235 | report := &artifacts.CyclonedxReportMin{} 236 | slog.Debug("decode cyclonedx report", "format", "json") 237 | if err := json.NewDecoder(src).Decode(&report); err != nil { 238 | return err 239 | } 240 | 241 | catLess := format.NewCatagoricLess([]string{"critical", "high", "medium", "low", "none"}) 242 | matrix := format.NewSortableMatrix(make([][]string, 0), 1, catLess) 243 | 244 | link := "-" 245 | for idx, vul := range report.Vulnerabilities { 246 | severity := vul.HighestSeverity() 247 | pkgs := 
report.AffectedPackages(idx) 248 | if len(vul.Advisories) > 0 { 249 | link = vul.Advisories[0].URL 250 | } 251 | // get the affected vulnerability 252 | matrix.Append([]string{vul.ID, severity, pkgs, link}) 253 | } 254 | 255 | sort.Sort(matrix) 256 | 257 | header := []string{"Cyclonedx CVE ID", "Severity", "Package", "Link"} 258 | table.SetHeader(header) 259 | matrix.Table(table) 260 | 261 | if len(report.Vulnerabilities) == 0 { 262 | footer := make([]string, len(header)) 263 | footer[len(header)-1] = "No Cyclonedx Vulnerabilities" 264 | table.SetFooter(footer) 265 | table.SetBorder(false) 266 | } 267 | 268 | return nil 269 | } 270 | 271 | func listCyclonedxWithEPSS(table *tablewriter.Table, src io.Reader, epssData *epss.Data) error { 272 | report := &artifacts.CyclonedxReportMin{} 273 | slog.Debug("decode grype report", "format", "json") 274 | if err := json.NewDecoder(src).Decode(&report); err != nil { 275 | return err 276 | } 277 | 278 | catLess := format.NewCatagoricLess([]string{"critical", "high", "medium", "low", "info", "none", "unknown"}) 279 | matrix := format.NewSortableMatrix(make([][]string, 0), 1, catLess) 280 | 281 | for idx, item := range report.Vulnerabilities { 282 | cve, ok := epssData.CVEs[item.ID] 283 | score := "-" 284 | prctl := "-" 285 | if ok { 286 | score = cve.EPSS 287 | prctl = cve.Percentile 288 | } 289 | link := "-" 290 | if len(item.Advisories) > 0 { 291 | link = item.Advisories[0].URL 292 | } 293 | row := []string{ 294 | item.ID, 295 | item.HighestSeverity(), 296 | score, 297 | prctl, 298 | report.AffectedPackages(idx), 299 | link, 300 | } 301 | matrix.Append(row) 302 | } 303 | 304 | sort.Sort(matrix) 305 | 306 | header := []string{"Cyclonedx CVE ID", "Severity", "EPSS Score", "EPSS Prctl", "affected Packages", "Link"} 307 | table.SetHeader(header) 308 | matrix.Table(table) 309 | 310 | if len(report.Vulnerabilities) == 0 { 311 | footer := make([]string, len(header)) 312 | footer[len(header)-1] = "No Cyclonedx Vulnerabilities" 313 | 
table.SetFooter(footer) 314 | table.SetBorder(false) 315 | } 316 | 317 | return nil 318 | } 319 | 320 | func ListSemgrep(table *tablewriter.Table, src io.Reader) error { 321 | report := &artifacts.SemgrepReportMin{} 322 | 323 | if err := json.NewDecoder(src).Decode(report); err != nil { 324 | return err 325 | } 326 | 327 | for _, semgrepError := range report.Errors { 328 | slog.Warn("semgrep runtime error", 329 | "level", semgrepError.Level, 330 | "message", semgrepError.ShortMessage(), 331 | "path", semgrepError.Path, 332 | ) 333 | } 334 | 335 | catLess := format.NewCatagoricLess([]string{"ERROR", "WARNING", "INFO"}) 336 | 337 | matrix := format.NewSortableMatrix(make([][]string, 0), 1, catLess) 338 | 339 | for _, result := range report.Results { 340 | row := []string{ 341 | result.ShortCheckID(), 342 | result.Extra.Metadata.OwaspIDs(), 343 | result.Extra.Severity, 344 | result.Extra.Metadata.Impact, 345 | result.Extra.Metadata.Shortlink, 346 | } 347 | matrix.Append(row) 348 | } 349 | 350 | sort.Sort(matrix) 351 | 352 | header := []string{"Semgrep Check ID", "Owasp IDs", "Severity", "Impact", "link"} 353 | table.SetHeader(header) 354 | matrix.Table(table) 355 | 356 | if len(report.Results) == 0 { 357 | footer := make([]string, len(header)) 358 | footer[len(header)-1] = "No Semgrep Findings" 359 | table.SetFooter(footer) 360 | table.SetBorder(false) 361 | } 362 | 363 | return nil 364 | } 365 | 366 | func listGitleaks(table *tablewriter.Table, src io.Reader) error { 367 | report := artifacts.GitLeaksReportMin{} 368 | if err := json.NewDecoder(src).Decode(&report); err != nil { 369 | return err 370 | } 371 | 372 | header := []string{"Gitleaks Rule ID", "File", "Commit", "Start Line"} 373 | table.SetHeader(header) 374 | for _, finding := range report { 375 | row := []string{ 376 | finding.RuleID, 377 | finding.FileShort(), 378 | finding.CommitShort(), 379 | fmt.Sprintf("%d", finding.StartLine), 380 | } 381 | table.Append(row) 382 | } 383 | 384 | if report.Count() == 0 
{ 385 | footer := make([]string, len(header)) 386 | footer[len(header)-1] = "No Gitleaks Findings" 387 | table.SetFooter(footer) 388 | table.SetBorder(false) 389 | } 390 | 391 | return nil 392 | } 393 | -------------------------------------------------------------------------------- /pkg/gatecheck/logo.go: -------------------------------------------------------------------------------- 1 | package gatecheck 2 | 3 | const gatecheckLogo string = ` 4 | ______ _ _________ ________ ______ ____ ____ ________ ______ ___ ____ 5 | .' ___ | / \ | _ _ ||_ __ | .' ___ ||_ || _||_ __ | .' ___ ||_ ||_ _| 6 | / .' \_| / _ \ |_/ | | \_| | |_ \_|/ .' \_| | |__| | | |_ \_|/ .' \_| | |_/ / 7 | | | ____ / ___ \ | | | _| _ | | | __ | | _| _ | | | __'. 8 | \ ..___] |_/ / \ \_ _| |_ _| |__/ |\ ..___.'\ _| | | |_ _| |__/ |\ ..___.'\ _| | \ \_ 9 | .._____.'|____| |____||_____| |________| ..____ .'|____||____||________| ..____ .'|____||____| 10 | ` 11 | -------------------------------------------------------------------------------- /pkg/gatecheck/metadata.go: -------------------------------------------------------------------------------- 1 | package gatecheck 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | ) 7 | 8 | // ApplicationMetadata ... 
9 | type ApplicationMetadata struct { 10 | CLIVersion string 11 | GitCommit string 12 | BuildDate string 13 | GitDescription string 14 | Platform string 15 | GoVersion string 16 | Compiler string 17 | } 18 | 19 | func (m ApplicationMetadata) String() string { 20 | return fmt.Sprintf(`CLIVersion: %s 21 | GitCommit: %s 22 | Build Date: %s 23 | GitDescription: %s 24 | Platform: %s 25 | GoVersion: %s 26 | Compiler: %s 27 | `, 28 | m.CLIVersion, m.GitCommit, m.BuildDate, m.GitDescription, 29 | m.Platform, m.GoVersion, m.Compiler) 30 | } 31 | 32 | func (m ApplicationMetadata) WriteTo(w io.Writer) (int64, error) { 33 | n, err := fmt.Fprintf(w, "%s\n\n%s", gatecheckLogo, m) 34 | return int64(n), err 35 | } 36 | -------------------------------------------------------------------------------- /pkg/gatecheck/validate.go: -------------------------------------------------------------------------------- 1 | package gatecheck 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "log/slog" 10 | "slices" 11 | "sort" 12 | "strings" 13 | 14 | "github.com/easy-up/go-coverage" 15 | 16 | "github.com/gatecheckdev/gatecheck/pkg/archive" 17 | "github.com/gatecheckdev/gatecheck/pkg/artifacts" 18 | "github.com/gatecheckdev/gatecheck/pkg/epss" 19 | "github.com/gatecheckdev/gatecheck/pkg/kev" 20 | ) 21 | 22 | var ErrValidationFailure = errors.New("validation failure") 23 | 24 | func newValidationErr(details string) error { 25 | return fmt.Errorf("%w: %s", ErrValidationFailure, details) 26 | } 27 | 28 | // Validate against config thresholds 29 | func Validate(config *Config, reportSrc io.Reader, targetFilename string, optionFuncs ...optionFunc) error { 30 | options := defaultOptions() 31 | for _, f := range optionFuncs { 32 | f(options) 33 | } 34 | 35 | switch { 36 | case strings.Contains(targetFilename, "grype"): 37 | slog.Debug("validate grype report", "filename", targetFilename) 38 | return validateGrypeReportWithFetch(reportSrc, config, options) 39 | 40 | 
// removeIgnoredSeverityCVEs drops grype matches whose severity has no
// enabled count limit, so subsequent rules only see matches that can
// actually fail validation. report.Matches is mutated in place.
//
// For each severity without an enabled limit:
//   - If the EPSS limit is enabled, a match of that severity is removed only
//     when it has no EPSS entry or its EPSS score is below the configured
//     EPSS limit; matches at or above the threshold are kept (presumably so
//     the EPSS rule can still flag them — confirm against the caller).
//   - Otherwise every match of that severity is removed.
//
// "unknown" and "negligible" can never have limits, so they are always
// removal candidates.
func removeIgnoredSeverityCVEs(config *Config, report *artifacts.GrypeReportMin, data *epss.Data) {
	hasLimits := map[string]bool{
		"critical":   config.Grype.SeverityLimit.Critical.Enabled,
		"high":       config.Grype.SeverityLimit.High.Enabled,
		"medium":     config.Grype.SeverityLimit.Medium.Enabled,
		"low":        config.Grype.SeverityLimit.Low.Enabled,
		"unknown":    false,
		"negligible": false,
	}

	for severity, hasLimit := range hasLimits {
		if hasLimit {
			// This severity has an explicit count limit; keep its matches.
			continue
		}

		if config.Grype.EPSSLimit.Enabled {
			report.Matches = slices.DeleteFunc(report.Matches, func(match artifacts.GrypeMatch) bool {
				epssCVE, ok := data.CVEs[match.Vulnerability.ID]
				return strings.ToLower(match.Vulnerability.Severity) == severity && (!ok || epssCVE.EPSSValue() < config.Grype.EPSSLimit.Score)
			})
		} else {
			report.Matches = slices.DeleteFunc(report.Matches, func(match artifacts.GrypeMatch) bool {
				return strings.ToLower(match.Vulnerability.Severity) == severity
			})
		}
	}
}
map[string]configLimit{
		"critical": config.Cyclonedx.SeverityLimit.Critical,
		"high":     config.Cyclonedx.SeverityLimit.High,
		"medium":   config.Cyclonedx.SeverityLimit.Medium,
		"low":      config.Cyclonedx.SeverityLimit.Low,
	}

	for _, severity := range []string{"critical", "high", "medium", "low"} {

		configuredLimit := limits[severity]
		vulnerabilities := report.SelectBySeverity(severity)
		matchCount := len(vulnerabilities)
		if !configuredLimit.Enabled {
			slog.Debug("severity limit not enabled", "artifact", "cyclonedx", "severity", severity, "reported", matchCount)
			continue
		}
		if matchCount > int(configuredLimit.Limit) {
			slog.Error("severity limit exceeded", "artifact", "cyclonedx", "severity", severity, "report", matchCount, "limit", configuredLimit.Limit)
			validationPass = false
			// Keep checking the remaining severities so every violation is logged.
			continue
		}
		slog.Info("severity limit valid", "artifact", "cyclonedx", "severity", severity, "reported", matchCount, "limit", configuredLimit.Limit)
	}

	return validationPass
}

// ruleGrypeCVEDeny returns false when any CVE on the configured deny list
// appears in the Grype report. IDs are compared case-insensitively.
// Returns true when the deny list is disabled.
func ruleGrypeCVEDeny(config *Config, report *artifacts.GrypeReportMin) bool {
	if !config.Grype.CVELimit.Enabled {
		slog.Debug("cve id limits not enabled", "artifact", "grype", "count_denied", len(config.Grype.CVELimit.CVEs))
		return true
	}
	for _, cve := range config.Grype.CVELimit.CVEs {
		contains := slices.ContainsFunc(report.Matches, func(match artifacts.GrypeMatch) bool {
			return strings.EqualFold(match.Vulnerability.ID, cve.ID)
		})

		if contains {
			// Fail fast on the first denied CVE found in the report.
			slog.Error("cve matched to Deny List", "artifact", "grype", "id", cve.ID, "metadata", fmt.Sprintf("%+v", cve))
			return false
		}
	}
	return true
}

// ruleCyclonedxCVEDeny returns false when any CVE on the configured deny
// list appears in the CycloneDx report. Same contract as ruleGrypeCVEDeny.
func ruleCyclonedxCVEDeny(config *Config, report *artifacts.CyclonedxReportMin) bool {
	if !config.Cyclonedx.CVELimit.Enabled {
		slog.Debug("cve id limits not enabled", "artifact",
			"cyclonedx", "count_denied", len(config.Cyclonedx.CVELimit.CVEs))
		return true
	}
	for _, cve := range config.Cyclonedx.CVELimit.CVEs {
		contains := slices.ContainsFunc(report.Vulnerabilities, func(vulerability artifacts.CyclonedxVulnerability) bool {
			return strings.EqualFold(vulerability.ID, cve.ID)
		})

		if contains {
			slog.Error("cve matched to Deny List", "artifact", "cyclonedx", "id", cve.ID, "metadata", fmt.Sprintf("%+v", cve))
			return false
		}
	}
	return true
}

// ruleGrypeCVEAllow removes explicitly risk-accepted CVEs from the Grype
// report in place so later rules never see them. IDs are compared
// case-insensitively. No-op when risk acceptance is disabled.
func ruleGrypeCVEAllow(config *Config, report *artifacts.GrypeReportMin) {
	slog.Debug("cve id risk acceptance rule", "artifact", "grype",
		"enabled", config.Grype.CVERiskAcceptance.Enabled,
		"risk_accepted_cves", len(config.Grype.CVERiskAcceptance.CVEs),
	)

	if !config.Grype.CVERiskAcceptance.Enabled {
		return
	}
	matches := slices.DeleteFunc(report.Matches, func(match artifacts.GrypeMatch) bool {
		allowed := slices.ContainsFunc(config.Grype.CVERiskAcceptance.CVEs, func(cve configCVE) bool {
			return strings.EqualFold(cve.ID, match.Vulnerability.ID)
		})
		if allowed {
			slog.Info("CVE explicitly allowed, removing from subsequent rules",
				"id", match.Vulnerability.ID, "severity", match.Vulnerability.Severity)
		}
		return allowed
	})

	report.Matches = matches
}

// ruleCyclonedxCVEAllow removes explicitly risk-accepted CVEs from the
// CycloneDx report in place. Mirrors ruleGrypeCVEAllow.
func ruleCyclonedxCVEAllow(config *Config, report *artifacts.CyclonedxReportMin) {
	slog.Debug(
		"cve id risk acceptance rule", "artifact", "cyclonedx",
		"enabled", config.Cyclonedx.CVERiskAcceptance.Enabled,
		"risk_accepted_cves", len(config.Cyclonedx.CVERiskAcceptance.CVEs),
	)

	if !config.Cyclonedx.CVERiskAcceptance.Enabled {
		return
	}

	vulnerabilities := slices.DeleteFunc(report.Vulnerabilities, func(vulnerability artifacts.CyclonedxVulnerability) bool {
		allowed :=
slices.ContainsFunc(config.Cyclonedx.CVERiskAcceptance.CVEs, func(cve configCVE) bool { 233 | return strings.EqualFold(cve.ID, vulnerability.ID) 234 | }) 235 | if allowed { 236 | slog.Info("CVE explicitly allowed, removing from subsequent rules", 237 | "id", vulnerability.ID, "severity", vulnerability.HighestSeverity()) 238 | } 239 | return allowed 240 | }) 241 | 242 | report.Vulnerabilities = vulnerabilities 243 | } 244 | 245 | func ruleGrypeKEVLimit(config *Config, report *artifacts.GrypeReportMin, catalog *kev.Catalog) bool { 246 | if !config.Grype.KEVLimitEnabled { 247 | slog.Debug("kev limit not enabled", "artifact", "grype") 248 | return true 249 | } 250 | if catalog == nil { 251 | slog.Error("kev limit enabled but no catalog data exists") 252 | return false 253 | } 254 | badCVEs := make([]string, 0) 255 | // Check if vulnerability is in the KEV Catalog 256 | for _, vulnerability := range report.Matches { 257 | inKEVCatalog := slices.ContainsFunc(catalog.Vulnerabilities, func(kevVul kev.Vulnerability) bool { 258 | return kevVul.CveID == vulnerability.Vulnerability.ID 259 | }) 260 | if inKEVCatalog { 261 | badCVEs = append(badCVEs, vulnerability.Vulnerability.ID) 262 | slog.Warn("cve found in kev catalog", 263 | "cve_id", vulnerability.Vulnerability.ID) 264 | } 265 | } 266 | if len(badCVEs) > 0 { 267 | slog.Error("cve(s) found in kev catalog", 268 | "vulnerabilities", len(badCVEs), "kev_catalog_count", len(catalog.Vulnerabilities)) 269 | return false 270 | } 271 | slog.Info("kev limit validated, no cves in catalog", 272 | "vulnerabilities", len(report.Matches), "kev_catalog_count", len(catalog.Vulnerabilities)) 273 | return true 274 | } 275 | 276 | func ruleCyclonedxKEVLimit(config *Config, report *artifacts.CyclonedxReportMin, catalog *kev.Catalog) bool { 277 | if !config.Cyclonedx.KEVLimitEnabled { 278 | slog.Debug("kev limit not enabled", "artifact", "cyclonedx") 279 | return true 280 | } 281 | if catalog == nil { 282 | slog.Error("kev limit enabled but no 
catalog data exists", "artifact", "cyclonedx") 283 | return false 284 | } 285 | badCVEs := make([]string, 0) 286 | // Check if vulnerability is in the KEV Catalog 287 | for _, vulnerability := range report.Vulnerabilities { 288 | inKEVCatalog := slices.ContainsFunc(catalog.Vulnerabilities, func(kevVul kev.Vulnerability) bool { 289 | return strings.EqualFold(kevVul.CveID, vulnerability.ID) 290 | }) 291 | 292 | if inKEVCatalog { 293 | badCVEs = append(badCVEs, vulnerability.ID) 294 | slog.Warn("cve found in kev catalog", 295 | "cve_id", vulnerability.ID) 296 | } 297 | } 298 | if len(badCVEs) > 0 { 299 | slog.Error("cve(s) found in kev catalog", 300 | "vulnerabilities", len(badCVEs), "kev_catalog_count", len(catalog.Vulnerabilities)) 301 | return false 302 | } 303 | slog.Info("kev limit validated, no cves in catalog", 304 | "vulnerabilities", len(report.Vulnerabilities), "kev_catalog_count", len(catalog.Vulnerabilities)) 305 | return true 306 | } 307 | 308 | func ruleGrypeEPSSAllow(config *Config, report *artifacts.GrypeReportMin, data *epss.Data) { 309 | if !config.Grype.EPSSRiskAcceptance.Enabled { 310 | slog.Debug("epss risk acceptance not enabled", "artifact", "grype") 311 | return 312 | } 313 | if data == nil { 314 | slog.Error("epss allowance enabled but no data exists") 315 | return 316 | } 317 | slog.Debug("run epss risk acceptance filter", 318 | "artifact", "grype", 319 | "vulnerabilities", len(report.Matches), 320 | "epss_risk_acceptance_score", config.Cyclonedx.EPSSRiskAcceptance.Score, 321 | ) 322 | matches := slices.DeleteFunc(report.Matches, func(match artifacts.GrypeMatch) bool { 323 | epssCVE, ok := data.CVEs[match.Vulnerability.ID] 324 | if !ok { 325 | slog.Debug("no epss score", "cve_id", match.Vulnerability.ID, "severity", match.Vulnerability.Severity) 326 | return false 327 | } 328 | riskAccepted := config.Grype.EPSSRiskAcceptance.Score > epssCVE.EPSSValue() 329 | if riskAccepted { 330 | slog.Info( 331 | "risk accepted reason: epss score", 332 | 
"cve_id", match.Vulnerability.ID, 333 | "severity", match.Vulnerability.Severity, 334 | "epss_score", epssCVE.EPSS, 335 | ) 336 | return true 337 | } 338 | return false 339 | }) 340 | 341 | report.Matches = matches 342 | } 343 | 344 | func ruleCyclonedxEPSSAllow(config *Config, report *artifacts.CyclonedxReportMin, data *epss.Data) { 345 | if !config.Cyclonedx.EPSSRiskAcceptance.Enabled { 346 | slog.Debug("epss risk acceptance not enabled", "artifact", "cyclonedx") 347 | return 348 | } 349 | if data == nil { 350 | slog.Error("epss allowance enabled but no data exists", "artifact", "cyclonedx") 351 | return 352 | } 353 | slog.Debug("run epss risk acceptance filter", 354 | "artifact", "cyclonedx", 355 | "vulnerabilities", len(report.Vulnerabilities), 356 | "epss_risk_acceptance_score", config.Cyclonedx.EPSSRiskAcceptance.Score, 357 | ) 358 | vulnerabilities := slices.DeleteFunc(report.Vulnerabilities, func(vulnerability artifacts.CyclonedxVulnerability) bool { 359 | epssCVE, ok := data.CVEs[vulnerability.ID] 360 | if !ok { 361 | slog.Debug("no epss score", "cve_id", vulnerability.ID, "severity", vulnerability.HighestSeverity()) 362 | return false 363 | } 364 | riskAccepted := config.Cyclonedx.EPSSRiskAcceptance.Score > epssCVE.EPSSValue() 365 | if riskAccepted { 366 | slog.Info( 367 | "risk accepted reason: epss score", 368 | "cve_id", vulnerability.ID, 369 | "severity", vulnerability.HighestSeverity(), 370 | "epss_score", epssCVE.EPSS, 371 | ) 372 | return true 373 | } 374 | return false 375 | }) 376 | 377 | report.Vulnerabilities = vulnerabilities 378 | } 379 | 380 | func ruleGrypeEPSSLimit(config *Config, report *artifacts.GrypeReportMin, data *epss.Data) bool { 381 | if !config.Grype.EPSSLimit.Enabled { 382 | slog.Debug("epss limit not enabled", "artifact", "grype") 383 | return true 384 | } 385 | if data == nil { 386 | slog.Error("epss allowance enabled but no data exists") 387 | return false 388 | } 389 | 390 | badCVEs := make([]epss.CVE, 0) 391 | 392 | 
slog.Debug("run epss limit rule", 393 | "artifact", "grype", 394 | "vulnerabilities", len(report.Matches), 395 | "epss_limit_score", config.Grype.EPSSLimit.Score, 396 | ) 397 | for _, match := range report.Matches { 398 | epssCVE, ok := data.CVEs[match.Vulnerability.ID] 399 | if !ok { 400 | continue 401 | } 402 | // add to badCVEs if the score is higher than the limit 403 | if epssCVE.EPSSValue() > config.Grype.EPSSLimit.Score { 404 | badCVEs = append(badCVEs, epssCVE) 405 | slog.Warn( 406 | "epss score limit violation", 407 | "cve_id", match.Vulnerability.ID, 408 | "severity", match.Vulnerability.Severity, 409 | "epss_score", epssCVE.EPSS, 410 | ) 411 | } 412 | } 413 | if len(badCVEs) > 0 { 414 | slog.Error("cve(s) with epss scores over limit", 415 | "over_limit_cves", len(badCVEs), 416 | "epss_limit_score", config.Grype.EPSSLimit.Score, 417 | ) 418 | return false 419 | } 420 | return true 421 | } 422 | 423 | func ruleCyclonedxEPSSLimit(config *Config, report *artifacts.CyclonedxReportMin, data *epss.Data) bool { 424 | if !config.Cyclonedx.EPSSLimit.Enabled { 425 | slog.Debug("epss limit not enabled", "artifact", "cyclonedx") 426 | return true 427 | } 428 | if data == nil { 429 | slog.Error("epss allowance enabled but no data exists") 430 | return false 431 | } 432 | 433 | badCVEs := make([]epss.CVE, 0) 434 | 435 | slog.Debug("run epss limit rule", 436 | "artifact", "cyclonedx", 437 | "vulnerabilities", len(report.Vulnerabilities), 438 | "epss_limit_score", config.Cyclonedx.EPSSLimit.Score, 439 | ) 440 | 441 | for _, vulnerability := range report.Vulnerabilities { 442 | epssCVE, ok := data.CVEs[vulnerability.ID] 443 | if !ok { 444 | continue 445 | } 446 | // add to badCVEs if the score is higher than the limit 447 | if epssCVE.EPSSValue() > config.Cyclonedx.EPSSLimit.Score { 448 | badCVEs = append(badCVEs, epssCVE) 449 | slog.Warn( 450 | "epss score limit violation", 451 | "cve_id", vulnerability.ID, 452 | "severity", vulnerability.HighestSeverity(), 453 | 
"epss_score", epssCVE.EPSS, 454 | ) 455 | } 456 | } 457 | if len(badCVEs) > 0 { 458 | slog.Error("cve(s) with epss scores over limit", 459 | "over_limit_cves", len(badCVEs), 460 | "epss_limit_score", config.Cyclonedx.EPSSLimit.Score, 461 | ) 462 | return false 463 | } 464 | return true 465 | } 466 | 467 | func removeIgnoredSemgrepIssues(config *Config, report *artifacts.SemgrepReportMin) { 468 | hasLimits := map[string]bool{ 469 | "error": config.Semgrep.SeverityLimit.Error.Enabled, 470 | "warning": config.Semgrep.SeverityLimit.Warning.Enabled, 471 | "info": config.Semgrep.SeverityLimit.Info.Enabled, 472 | } 473 | 474 | for severity, hasLimit := range hasLimits { 475 | if hasLimit { 476 | continue 477 | } 478 | 479 | report.Results = slices.DeleteFunc(report.Results, func(result artifacts.SemgrepResults) bool { 480 | return strings.EqualFold(result.Extra.Severity, severity) 481 | }) 482 | } 483 | } 484 | 485 | func ruleSemgrepSeverityLimit(config *Config, report *artifacts.SemgrepReportMin) bool { 486 | slog.Debug( 487 | "severity limit rule", "artifact", "semgrep", 488 | "error_enabled", config.Semgrep.SeverityLimit.Error.Enabled, 489 | "info_enabled", config.Semgrep.SeverityLimit.Info.Enabled, 490 | "warning_enabled", config.Semgrep.SeverityLimit.Warning.Enabled, 491 | ) 492 | 493 | validationPass := true 494 | 495 | limits := map[string]configLimit{ 496 | "error": config.Semgrep.SeverityLimit.Error, 497 | "warning": config.Semgrep.SeverityLimit.Warning, 498 | "info": config.Semgrep.SeverityLimit.Info, 499 | } 500 | 501 | for _, severity := range []string{"error", "warning", "info"} { 502 | 503 | configuredLimit := limits[severity] 504 | matches := report.SelectBySeverity(severity) 505 | matchCount := len(matches) 506 | if !configuredLimit.Enabled { 507 | slog.Debug("severity limit not enabled", "artifact", "semgrep", "severity", severity, "reported", matchCount) 508 | continue 509 | } 510 | if matchCount > int(configuredLimit.Limit) { 511 | slog.Error("severity 
limit exceeded", "artifact", "semgrep", "severity", severity, "report", matchCount, "limit", configuredLimit.Limit) 512 | for _, match := range matches { 513 | slog.Info("Potential issue detected", "severity", match.Extra.Severity, "check_id", match.CheckID, "message", match.Extra.Message) 514 | } 515 | validationPass = false 516 | continue 517 | } 518 | slog.Info("severity limit valid", "artifact", "semgrep", "severity", severity, "reported", matchCount, "limit", configuredLimit.Limit) 519 | } 520 | 521 | return validationPass 522 | } 523 | 524 | func ruleSemgrepImpactRiskAccept(config *Config, report *artifacts.SemgrepReportMin) { 525 | slog.Debug( 526 | "impact risk accept rule", "artifact", "semgrep", 527 | "enabled", config.Semgrep.ImpactRiskAcceptance.Enabled, 528 | "high", config.Semgrep.ImpactRiskAcceptance.High, 529 | "medium", config.Semgrep.ImpactRiskAcceptance.Medium, 530 | "low", config.Semgrep.ImpactRiskAcceptance.Low, 531 | ) 532 | 533 | if !config.Semgrep.ImpactRiskAcceptance.Enabled { 534 | slog.Debug("impact risk acceptance not enabled", "artifact", "semgrep") 535 | return 536 | } 537 | 538 | results := slices.DeleteFunc(report.Results, func(result artifacts.SemgrepResults) bool { 539 | riskAccepted := false 540 | // TODO: make the configuration for risk acceptance less dumb (what would you accept high medium impact and not accept low impact) 541 | switch { 542 | case config.Semgrep.ImpactRiskAcceptance.High && strings.EqualFold(result.Extra.Metadata.Impact, "high"): 543 | riskAccepted = true 544 | case config.Semgrep.ImpactRiskAcceptance.Medium && strings.EqualFold(result.Extra.Metadata.Impact, "medium"): 545 | riskAccepted = true 546 | case config.Semgrep.ImpactRiskAcceptance.Low && strings.EqualFold(result.Extra.Metadata.Impact, "low"): 547 | riskAccepted = true 548 | } 549 | 550 | if riskAccepted { 551 | slog.Info( 552 | "risk accepted: Semgrep issue impact is below acceptance threshold", 553 | "check_id", result.CheckID, 554 | "severity", 
result.Extra.Severity,
				"impact", result.Extra.Metadata.Impact,
			)
			return true
		}
		return false
	})

	report.Results = results
}

// ruleGitLeaksLimit returns false when the gitleaks secrets limit is
// enabled and the report contains any detected secrets.
func ruleGitLeaksLimit(config *Config, report *artifacts.GitLeaksReportMin) bool {
	if !config.Gitleaks.LimitEnabled {
		slog.Debug("secrets limit not enabled", "artifact", "gitleaks")
		return true
	}
	detectedSecrets := report.Count()
	if detectedSecrets > 0 {
		// Key fixed from "artifacts" to "artifact" for consistency with
		// every other log statement in this file.
		slog.Error("committed secrets violation", "artifact", "gitleaks", "secrets_detected", detectedSecrets)
		return false
	}
	return true
}

// loadCatalogFromFileOrAPI loads the KEV catalog from the configured file
// when one is provided, otherwise fetches it from the API.
func loadCatalogFromFileOrAPI(catalog *kev.Catalog, options *fetchOptions) error {
	if options.kevFile != nil {
		slog.Debug("load kev catalog from file", "filename", options.kevFile)
		err := kev.DecodeData(options.kevFile, catalog)
		return err
	}

	slog.Debug("load kev catalog from API")
	err := kev.FetchData(catalog, kev.WithClient(options.kevClient), kev.WithURL(options.kevURL))
	return err
}

// loadDataFromFileOrAPI loads EPSS data from the configured CSV file when
// one is provided, otherwise fetches it from the API.
func loadDataFromFileOrAPI(epssData *epss.Data, options *fetchOptions) error {
	if options.epssFile != nil {
		// Debug log added for parity with loadCatalogFromFileOrAPI.
		slog.Debug("load epss data from file")
		err := epss.ParseEPSSDataCSV(options.epssFile, epssData)
		return err
	}

	slog.Debug("load epss data from API")
	err := epss.FetchData(epssData, epss.WithClient(options.epssClient), epss.WithURL(options.epssURL))

	return err
}

// LoadCatalogAndData conditionally populates the KEV catalog and EPSS data
// based on which rules are enabled in config: the KEV catalog is loaded
// only when a KEV limit is enabled, and EPSS data only when an EPSS limit
// or EPSS risk acceptance is enabled for either artifact type.
func LoadCatalogAndData(config *Config, catalog *kev.Catalog, epssData *epss.Data, options *fetchOptions) error {
	if config.Grype.KEVLimitEnabled || config.Cyclonedx.KEVLimitEnabled {
		if err := loadCatalogFromFileOrAPI(catalog, options); err != nil {
			return err
		}
	}

	grypeEPSSNeeded := config.Grype.EPSSLimit.Enabled || config.Grype.EPSSRiskAcceptance.Enabled
	cyclonedxEPSSNeeded := config.Cyclonedx.EPSSLimit.Enabled || config.Cyclonedx.EPSSRiskAcceptance.Enabled

	if grypeEPSSNeeded || cyclonedxEPSSNeeded {
		if err := loadDataFromFileOrAPI(epssData, options); err != nil {
			return err
		}
	}
	return nil
}

// Validate Reports

// validateGrypeReportWithFetch loads any external data required by the
// enabled rules (KEV catalog, EPSS scores) and then validates the Grype
// report read from r.
func validateGrypeReportWithFetch(r io.Reader, config *Config, options *fetchOptions) error {
	catalog := kev.NewCatalog()
	epssData := new(epss.Data)

	if err := LoadCatalogAndData(config, catalog, epssData, options); err != nil {
		slog.Error("validate grype report: load epss data from file or api", "error", err)
		return errors.New("cannot run Grype validation: Cannot load external validation data, see log for details")
	}

	return validateGrypeFrom(r, config, catalog, epssData)
}

// validateGrypeFrom decodes a Grype JSON report from r and runs the Grype
// rule set against it with the supplied catalog and EPSS data.
func validateGrypeFrom(r io.Reader, config *Config, catalog *kev.Catalog, epssData *epss.Data) error {
	slog.Debug("validate grype report")
	report := &artifacts.GrypeReportMin{}
	if err := json.NewDecoder(r).Decode(report); err != nil {
		slog.Error("decode grype report for validation", "error", err)
		return errors.New("cannot run Grype validation: Report decoding failed, See log for details")
	}

	return validateGrypeRules(config, report, catalog, epssData)
}

// validateCyclonedxReportWithFetch loads any external data required by the
// enabled rules and then validates the CycloneDx report read from r.
func validateCyclonedxReportWithFetch(r io.Reader, config *Config, options *fetchOptions) error {
	slog.Debug("validate cyclonedx report")

	catalog := kev.NewCatalog()
	epssData := new(epss.Data)

	if err := LoadCatalogAndData(config, catalog, epssData, options); err != nil {
		slog.Error("validate cyclonedx report: load epss data from file or api", "error", err)
		return errors.New("cannot run Cyclonedx validation: Cannot load external validation data, See log for details")
	}
	return validateCyclonedxFrom(r, config, catalog, epssData)
}

// validateCyclonedxFrom decodes a CycloneDx JSON report from r and runs the
// CycloneDx rule set against it.
func validateCyclonedxFrom(r io.Reader,
config *Config, catalog *kev.Catalog, epssData *epss.Data) error {
	report := &artifacts.CyclonedxReportMin{}
	if err := json.NewDecoder(r).Decode(report); err != nil {
		slog.Error("decode cyclonedx report for validation", "error", err)
		return errors.New("cannot run Cyclonedx validation: Report decoding failed, See log for details")
	}

	return validateCyclonedxRules(config, report, catalog, epssData)
}

// validateSemgrepReport decodes a semgrep JSON report from r and runs the
// semgrep rule set against it.
func validateSemgrepReport(r io.Reader, config *Config) error {
	slog.Debug("validate semgrep report")
	report := &artifacts.SemgrepReportMin{}
	if err := json.NewDecoder(r).Decode(report); err != nil {
		slog.Error("decode semgrep report for validation", "error", err)
		return errors.New("cannot run Semgrep report validation: Report decoding failed, See log for details")
	}

	return validateSemgrepRules(config, report)
}

// validateGitleaksReport decodes a gitleaks JSON report from r and runs the
// gitleaks rule set against it.
func validateGitleaksReport(r io.Reader, config *Config) error {
	slog.Debug("validate gitleaks report")
	report := &artifacts.GitLeaksReportMin{}
	if err := json.NewDecoder(r).Decode(report); err != nil {
		slog.Error("decode gitleaks report for validation", "error", err)
		// BUG FIX: message previously said "Semgrep report validation",
		// copy-pasted from validateSemgrepReport.
		return errors.New("cannot run Gitleaks report validation: Report decoding failed, See log for details")
	}
	return validateGitleaksRules(config, report)
}

// validateCoverage parses the coverage report named by targetFilename from
// src and compares line, function, and branch coverage ratios against the
// configured thresholds. Returns a joined error listing every threshold
// that was not met, or nil when all pass.
func validateCoverage(src io.Reader, targetFilename string, config *Config) error {
	coverageFormat, err := artifacts.GetCoverageMode(targetFilename)
	if err != nil {
		return err
	}

	parser := coverage.New(coverageFormat)
	report, err := parser.ParseReader(src)
	if err != nil {
		return err
	}

	// NOTE(review): if any Total* count is 0 the ratio is NaN, and every
	// `NaN < threshold` comparison below is false, so an empty report
	// passes validation — confirm this is the intended behavior.
	lineCoverage := float32(report.CoveredLines) / float32(report.TotalLines)
	functionCoverage := float32(report.CoveredFunctions) / float32(report.TotalFunctions)
	branchCoverage := float32(report.CoveredBranches) / float32(report.TotalBranches)

	slog.Info(
		"validate coverage",
		"line_coverage", lineCoverage,
		"function_coverage", functionCoverage,
		"branch_coverage", branchCoverage,
	)

	var errs error

	if lineCoverage < config.Coverage.LineThreshold {
		slog.Error("line coverage below threshold", "line_coverage", lineCoverage, "threshold", config.Coverage.LineThreshold)
		coverageErr := newValidationErr("Coverage: Line coverage below threshold")
		errs = errors.Join(errs, coverageErr)
	}

	if functionCoverage < config.Coverage.FunctionThreshold {
		slog.Error("function coverage below threshold", "function_coverage", functionCoverage, "threshold", config.Coverage.FunctionThreshold)
		coverageErr := newValidationErr("Coverage: Function coverage below threshold")
		errs = errors.Join(errs, coverageErr)
	}

	if branchCoverage < config.Coverage.BranchThreshold {
		slog.Error("branch coverage below threshold", "branch_coverage", branchCoverage, "threshold", config.Coverage.BranchThreshold)
		coverageErr := newValidationErr("Coverage: Branch coverage below threshold")
		errs = errors.Join(errs, coverageErr)
	}

	return errs
}

// validateBundle decodes a gzipped tar gatecheck bundle from r and
// validates each contained file, dispatching on the file label the same
// way top-level validation dispatches on filename.
func validateBundle(r io.Reader, config *Config, options *fetchOptions) error {
	slog.Debug("validate gatecheck bundle")
	bundle := archive.NewBundle()
	if err := archive.UntarGzipBundle(r, bundle); err != nil {
		slog.Error("decode gatecheck bundle")
		return errors.New("cannot run Gatecheck Bundle validation: Bundle decoding failed, See log for details")
	}

	catalog := kev.NewCatalog()
	epssData := new(epss.Data)

	if err := LoadCatalogAndData(config, catalog, epssData, options); err != nil {
		// BUG FIX: was logged as "validate cyclonedx report" — copy-pasted
		// from the cyclonedx path; this is the bundle path.
		slog.Error("validate bundle: load external validation data from file or api", "error", err)
		return errors.New("cannot
run Gatecheck Bundle validation: Cannot load external validation data, See log for details")
	}

	var errs error
	// Dispatch each bundled file to its validator based on the file label,
	// accumulating (rather than short-circuiting on) failures.
	for fileLabel, descriptor := range bundle.Manifest().Files {
		slog.Info("gatecheck bundle validation", "file_label", fileLabel, "digest", descriptor.Digest)
		switch {
		case strings.Contains(fileLabel, "grype"):
			err := validateGrypeFrom(bytes.NewBuffer(bundle.FileBytes(fileLabel)), config, catalog, epssData)
			errs = errors.Join(errs, err)
		case strings.Contains(fileLabel, "cyclonedx"):
			err := validateCyclonedxFrom(bytes.NewBuffer(bundle.FileBytes(fileLabel)), config, catalog, epssData)
			errs = errors.Join(errs, err)
		case strings.Contains(fileLabel, "semgrep"):
			err := validateSemgrepReport(bytes.NewBuffer(bundle.FileBytes(fileLabel)), config)
			errs = errors.Join(errs, err)
		case strings.Contains(fileLabel, "gitleaks"):
			err := validateGitleaksReport(bytes.NewBuffer(bundle.FileBytes(fileLabel)), config)
			errs = errors.Join(errs, err)
		case artifacts.IsCoverageReport(fileLabel):
			err := validateCoverage(bytes.NewBuffer(bundle.FileBytes(fileLabel)), fileLabel, config)
			errs = errors.Join(errs, err)
		}
	}
	if errs != nil {
		return errors.Join(newValidationErr("Gatecheck Bundle"), errs)
	}
	return nil
}

// Validate Rules

// validateGrypeRules sorts the Grype matches (by severity rank, then by
// EPSS score descending) and applies the Grype rules in order: deny list,
// severity-based pruning, risk acceptance, KEV limit, EPSS acceptance,
// EPSS limit, and severity limits. Returns the first rule violation as an
// error, or nil when all rules pass.
func validateGrypeRules(config *Config, report *artifacts.GrypeReportMin, catalog *kev.Catalog, data *epss.Data) error {
	severityRank := []string{
		"critical",
		"high",
		"medium",
		"low",
		"negligible",
		"unknown",
	}
	sort.Slice(report.Matches, func(i, j int) bool {
		if report.Matches[i].Vulnerability.Severity == report.Matches[j].Vulnerability.Severity {
			epssi, oki := data.CVEs[report.Matches[i].Vulnerability.ID]
			epssj, okj := data.CVEs[report.Matches[j].Vulnerability.ID]

			// Sort EPSS from highest to lowest; matches with no EPSS entry
			// sort after those that have one. BUG FIX: the original
			// expression `!okj || oki && ...` reported i < j AND j < i when
			// neither match had a score, violating the strict weak ordering
			// sort.Slice requires of its less function.
			if !oki {
				return false
			}
			if !okj {
				return true
			}
			return epssi.EPSSValue() > epssj.EPSSValue()
		}
		ranki := slices.Index(severityRank, strings.ToLower(report.Matches[i].Vulnerability.Severity))
		rankj := slices.Index(severityRank, strings.ToLower(report.Matches[j].Vulnerability.Severity))
		return ranki < rankj
	})
	// 1. Deny List - Fail Matching
	if !ruleGrypeCVEDeny(config, report) {
		return newValidationErr("Grype: CVE explicitly denied")
	}

	// Ignore any CVEs that don't meet the vulnerability threshold or the EPPS threshold
	removeIgnoredSeverityCVEs(config, report, data)

	// 2. CVE Allowance - remove from matches
	ruleGrypeCVEAllow(config, report)

	// 3. KEV Catalog Limit - fail matching
	if !ruleGrypeKEVLimit(config, report, catalog) {
		return newValidationErr("Grype: CVE matched to KEV Catalog")
	}

	// 4. EPSS Allowance - remove from matches
	ruleGrypeEPSSAllow(config, report, data)

	// 5. EPSS Limit - Fail Exceeding
	if !ruleGrypeEPSSLimit(config, report, data) {
		return newValidationErr("Grype: EPSS Limit Exceeded")
	}

	// 6. Severity Count Limit
	if !ruleGrypeSeverityLimit(config, report) {
		return newValidationErr("Grype: Severity Limit Exceeded")
	}

	return nil
}

// validateCyclonedxRules applies the CycloneDx rules in the same order as
// validateGrypeRules (minus the pre-sort and severity pruning).
func validateCyclonedxRules(config *Config, report *artifacts.CyclonedxReportMin, catalog *kev.Catalog, data *epss.Data) error {
	// 1. Deny List - Fail Matching
	if !ruleCyclonedxCVEDeny(config, report) {
		return newValidationErr("CycloneDx: CVE explicitly denied")
	}

	// 2. CVE Allowance - remove from matches
	ruleCyclonedxCVEAllow(config, report)

	// 3. KEV Catalog Limit - fail matching
	if !ruleCyclonedxKEVLimit(config, report, catalog) {
		return newValidationErr("CycloneDx: CVE Matched to KEV Catalog")
	}

	// 4. EPSS Allowance - remove from matches
	ruleCyclonedxEPSSAllow(config, report, data)

	// 5. EPSS Limit - Fail Exceeding
	if !ruleCyclonedxEPSSLimit(config, report, data) {
		return newValidationErr("CycloneDx: EPSS Limit Exceeded")
	}

	// 6. Severity Count Limit
	if !ruleCyclonedxSeverityLimit(config, report) {
		return newValidationErr("CycloneDx: Severity Limit Exceeded")
	}

	return nil
}

// validateSemgrepRules prunes ignored severities, applies impact risk
// acceptance, then enforces severity count limits.
func validateSemgrepRules(config *Config, report *artifacts.SemgrepReportMin) error {
	slog.Info("validating semgrep rules", "findings", len(report.Results))
	// Ignore issues for which there is no severity limit
	removeIgnoredSemgrepIssues(config, report)

	// 1. Impact Allowance - remove result
	ruleSemgrepImpactRiskAccept(config, report)

	// 2. Severity Count Limit
	if !ruleSemgrepSeverityLimit(config, report) {
		return newValidationErr("Semgrep: Severity Limit Exceeded")
	}

	return nil
}

// validateGitleaksRules applies the gitleaks rule set.
func validateGitleaksRules(config *Config, report *artifacts.GitLeaksReportMin) error {
	// 1.
// Limit Secrets - fail
	if !ruleGitLeaksLimit(config, report) {
		return newValidationErr("Gitleaks: Secrets Detected")
	}
	return nil
}
--------------------------------------------------------------------------------
/pkg/gatecheck/validate_test.go:
--------------------------------------------------------------------------------
package gatecheck

import (
	"log/slog"
	"os"
	"testing"
	"time"

	"github.com/gatecheckdev/gatecheck/pkg/artifacts"
	"github.com/lmittmann/tint"
)

// TestMain installs a tint (colorized) slog handler at debug level so the
// rule functions' log output is visible while the tests run.
func TestMain(m *testing.M) {
	h := tint.NewHandler(os.Stderr, &tint.Options{
		AddSource:  true,
		Level:      slog.LevelDebug,
		TimeFormat: time.TimeOnly,
	})
	slog.SetDefault(slog.New(h))
	os.Exit(m.Run())
}

func Test_ruleGrypeSeverityLimit(t *testing.T) {
	// No limits configured and no matches: rule passes trivially.
	t.Run("empty-report-empty-config", func(t *testing.T) {
		config := new(Config)
		report := new(artifacts.GrypeReportMin)

		want := true
		got := ruleGrypeSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// A zero limit with zero matches still passes (limit is not exceeded).
	t.Run("empty-report-limit-0", func(t *testing.T) {
		config := new(Config)
		config.Grype.SeverityLimit.Critical.Enabled = true
		config.Grype.SeverityLimit.Critical.Limit = 0
		report := new(artifacts.GrypeReportMin)

		want := true
		got := ruleGrypeSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// One critical match against a zero critical limit fails.
	t.Run("violate-limit", func(t *testing.T) {
		config := new(Config)
		config.Grype.SeverityLimit.Critical.Enabled = true
		config.Grype.SeverityLimit.Critical.Limit = 0
		report := new(artifacts.GrypeReportMin)
		report.Matches = []artifacts.GrypeMatch{
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "critical"}},
		}

		want := false
		got := ruleGrypeSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// Every severity exceeds its zero limit: fails.
	t.Run("violate-limit-all-severities-1", func(t *testing.T) {
		config := new(Config)
		config.Grype.SeverityLimit.Critical.Enabled = true
		config.Grype.SeverityLimit.Critical.Limit = 0
		config.Grype.SeverityLimit.High.Enabled = true
		config.Grype.SeverityLimit.High.Limit = 0
		config.Grype.SeverityLimit.Medium.Enabled = true
		config.Grype.SeverityLimit.Medium.Limit = 0
		config.Grype.SeverityLimit.Low.Enabled = true
		config.Grype.SeverityLimit.Low.Limit = 0

		report := new(artifacts.GrypeReportMin)
		report.Matches = []artifacts.GrypeMatch{
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "critical"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "high"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "medium"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "low"}},
		}

		want := false
		got := ruleGrypeSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// Every severity is exactly at its limit of 1: passes.
	t.Run("violate-limit-all-severities-2", func(t *testing.T) {
		config := new(Config)
		config.Grype.SeverityLimit.Critical.Enabled = true
		config.Grype.SeverityLimit.Critical.Limit = 1
		config.Grype.SeverityLimit.High.Enabled = true
		config.Grype.SeverityLimit.High.Limit = 1
		config.Grype.SeverityLimit.Medium.Enabled = true
		config.Grype.SeverityLimit.Medium.Limit = 1
		config.Grype.SeverityLimit.Low.Enabled = true
		config.Grype.SeverityLimit.Low.Limit = 1

		report := new(artifacts.GrypeReportMin)
		report.Matches = []artifacts.GrypeMatch{
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "critical"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "high"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "medium"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "low"}},
		}

		want := true
		got := ruleGrypeSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// Mixed limits: high and low are exceeded, so the rule fails overall.
	t.Run("violate-limit-all-severities-3", func(t *testing.T) {
		config := new(Config)
		config.Grype.SeverityLimit.Critical.Enabled = true
		config.Grype.SeverityLimit.Critical.Limit = 1
		config.Grype.SeverityLimit.High.Enabled = true
		config.Grype.SeverityLimit.High.Limit = 0
		config.Grype.SeverityLimit.Medium.Enabled = true
		config.Grype.SeverityLimit.Medium.Limit = 1
		config.Grype.SeverityLimit.Low.Enabled = true
		config.Grype.SeverityLimit.Low.Limit = 0

		report := new(artifacts.GrypeReportMin)
		report.Matches = []artifacts.GrypeMatch{
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "critical"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "high"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "medium"}},
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "low"}},
		}

		want := false
		got := ruleGrypeSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// End-to-end through validateGrypeRules: a critical match over the limit
	// is removed by CVE risk acceptance before the limit rule runs, so
	// validation passes.
	t.Run("violate-limit-accepted", func(t *testing.T) {
		config := new(Config)
		config.Grype.SeverityLimit.Critical.Enabled = true
		config.Grype.SeverityLimit.Critical.Limit = 0
		config.Grype.CVERiskAcceptance.Enabled = true
		config.Grype.CVERiskAcceptance.CVEs = []configCVE{{ID: "cve-1"}}
		report := new(artifacts.GrypeReportMin)
		report.Matches = []artifacts.GrypeMatch{
			{Vulnerability: artifacts.GrypeVulnerability{Severity: "critical", ID: "cve-1"}},
		}

		want := true
		got := false
		err := validateGrypeRules(config, report, nil, nil)
		if err == nil {
			got = true
		}

		if want != got {
			t.Fatalf("want: %t got: %t error: %v", want, got, err)
		}
	})
}

func Test_ruleCyclonedxSeverityLimit(t *testing.T) {
	// No limits configured and no vulnerabilities: rule passes trivially.
	t.Run("empty-report-empty-config", func(t *testing.T) {
		config := new(Config)
		report := new(artifacts.CyclonedxReportMin)

		want := true
		got := ruleCyclonedxSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// A zero limit with zero vulnerabilities still passes.
	t.Run("empty-report-limit-0", func(t *testing.T) {
		config := new(Config)
		config.Cyclonedx.SeverityLimit.Critical.Enabled = true
		config.Cyclonedx.SeverityLimit.Critical.Limit = 0
		report := new(artifacts.CyclonedxReportMin)

		want := true
		got := ruleCyclonedxSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// One critical-rated vulnerability against a zero critical limit fails.
	t.Run("violate-limit", func(t *testing.T) {
		config := new(Config)
		config.Cyclonedx.SeverityLimit.Critical.Enabled = true
		config.Cyclonedx.SeverityLimit.Critical.Limit = 0
		report := new(artifacts.CyclonedxReportMin)
		report.Vulnerabilities = []artifacts.CyclonedxVulnerability{
			{Ratings: []artifacts.CyclonedxRating{{Severity: "critical"}}},
		}

		want := false
		got := ruleCyclonedxSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	t.Run("violate-limit-all-severities-1", func(t *testing.T) {
		config := new(Config)
		config.Cyclonedx.SeverityLimit.Critical.Enabled = true
		config.Cyclonedx.SeverityLimit.Critical.Limit = 0
		config.Cyclonedx.SeverityLimit.High.Enabled = true
		config.Cyclonedx.SeverityLimit.High.Limit = 0
		config.Cyclonedx.SeverityLimit.Medium.Enabled = true
		config.Cyclonedx.SeverityLimit.Medium.Limit = 0
		config.Cyclonedx.SeverityLimit.Low.Enabled = true
config.Cyclonedx.SeverityLimit.Low.Limit = 0 226 | 227 | report := new(artifacts.CyclonedxReportMin) 228 | report.Vulnerabilities = []artifacts.CyclonedxVulnerability{ 229 | {Ratings: []artifacts.CyclonedxRating{{Severity: "critical"}}}, 230 | {Ratings: []artifacts.CyclonedxRating{{Severity: "high"}}}, 231 | {Ratings: []artifacts.CyclonedxRating{{Severity: "medium"}}}, 232 | {Ratings: []artifacts.CyclonedxRating{{Severity: "low"}}}, 233 | } 234 | 235 | want := false 236 | got := ruleCyclonedxSeverityLimit(config, report) 237 | 238 | if want != got { 239 | t.Fatalf("want: %t got: %t", want, got) 240 | } 241 | }) 242 | 243 | t.Run("violate-limit-all-severities-2", func(t *testing.T) { 244 | config := new(Config) 245 | config.Cyclonedx.SeverityLimit.Critical.Enabled = true 246 | config.Cyclonedx.SeverityLimit.Critical.Limit = 1 247 | config.Cyclonedx.SeverityLimit.High.Enabled = true 248 | config.Cyclonedx.SeverityLimit.High.Limit = 1 249 | config.Cyclonedx.SeverityLimit.Medium.Enabled = true 250 | config.Cyclonedx.SeverityLimit.Medium.Limit = 1 251 | config.Cyclonedx.SeverityLimit.Low.Enabled = true 252 | config.Cyclonedx.SeverityLimit.Low.Limit = 1 253 | 254 | report := new(artifacts.CyclonedxReportMin) 255 | report.Vulnerabilities = []artifacts.CyclonedxVulnerability{ 256 | {Ratings: []artifacts.CyclonedxRating{{Severity: "critical"}}}, 257 | {Ratings: []artifacts.CyclonedxRating{{Severity: "high"}}}, 258 | {Ratings: []artifacts.CyclonedxRating{{Severity: "medium"}}}, 259 | {Ratings: []artifacts.CyclonedxRating{{Severity: "low"}}}, 260 | } 261 | 262 | want := true 263 | got := ruleCyclonedxSeverityLimit(config, report) 264 | 265 | if want != got { 266 | t.Fatalf("want: %t got: %t", want, got) 267 | } 268 | }) 269 | 270 | t.Run("violate-limit-all-severities-3", func(t *testing.T) { 271 | config := new(Config) 272 | config.Cyclonedx.SeverityLimit.Critical.Enabled = true 273 | config.Cyclonedx.SeverityLimit.Critical.Limit = 1 274 | 
config.Cyclonedx.SeverityLimit.High.Enabled = true 275 | config.Cyclonedx.SeverityLimit.High.Limit = 0 276 | config.Cyclonedx.SeverityLimit.Medium.Enabled = true 277 | config.Cyclonedx.SeverityLimit.Medium.Limit = 1 278 | config.Cyclonedx.SeverityLimit.Low.Enabled = true 279 | config.Cyclonedx.SeverityLimit.Low.Limit = 0 280 | 281 | report := new(artifacts.CyclonedxReportMin) 282 | report.Vulnerabilities = []artifacts.CyclonedxVulnerability{ 283 | {Ratings: []artifacts.CyclonedxRating{{Severity: "critical"}}}, 284 | {Ratings: []artifacts.CyclonedxRating{{Severity: "high"}}}, 285 | {Ratings: []artifacts.CyclonedxRating{{Severity: "medium"}}}, 286 | {Ratings: []artifacts.CyclonedxRating{{Severity: "low"}}}, 287 | } 288 | 289 | want := false 290 | got := ruleCyclonedxSeverityLimit(config, report) 291 | 292 | if want != got { 293 | t.Fatalf("want: %t got: %t", want, got) 294 | } 295 | }) 296 | 297 | t.Run("violate-limit-accepted", func(t *testing.T) { 298 | config := new(Config) 299 | config.Cyclonedx.SeverityLimit.Critical.Enabled = true 300 | config.Cyclonedx.SeverityLimit.Critical.Limit = 0 301 | config.Cyclonedx.CVERiskAcceptance.Enabled = true 302 | config.Cyclonedx.CVERiskAcceptance.CVEs = []configCVE{{ID: "cve-1"}} 303 | report := new(artifacts.CyclonedxReportMin) 304 | report.Vulnerabilities = []artifacts.CyclonedxVulnerability{ 305 | { 306 | Ratings: []artifacts.CyclonedxRating{{Severity: "critical"}}, 307 | ID: "cve-1", 308 | }, 309 | } 310 | 311 | want := true 312 | got := false 313 | err := validateCyclonedxRules(config, report, nil, nil) 314 | if err == nil { 315 | got = true 316 | } 317 | 318 | if want != got { 319 | t.Fatalf("want: %t got: %t error: %v", got, err, err) 320 | } 321 | }) 322 | } 323 | 324 | func Test_ruleSemgrepSeverityLimit(t *testing.T) { 325 | t.Run("empty-report-empty-config", func(t *testing.T) { 326 | config := new(Config) 327 | report := new(artifacts.SemgrepReportMin) 328 | 329 | want := true 330 | 331 | got := 
ruleSemgrepSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// A limit of zero with no results in the report should still pass.
	t.Run("empty-report-limit-0", func(t *testing.T) {
		config := new(Config)
		config.Semgrep.SeverityLimit.Error.Enabled = true
		config.Semgrep.SeverityLimit.Error.Limit = 0
		report := new(artifacts.SemgrepReportMin)

		want := true

		got := ruleSemgrepSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// Two errors against an error limit of 0 must fail, even though the
	// warning (1 of 10) and info (3 of... limit 2) counts are mixed.
	t.Run("violate-limit-all", func(t *testing.T) {
		config := new(Config)
		config.Semgrep.SeverityLimit.Error.Enabled = true
		config.Semgrep.SeverityLimit.Error.Limit = 0
		config.Semgrep.SeverityLimit.Warning.Enabled = true
		config.Semgrep.SeverityLimit.Warning.Limit = 10
		config.Semgrep.SeverityLimit.Info.Enabled = true
		config.Semgrep.SeverityLimit.Info.Limit = 2
		report := new(artifacts.SemgrepReportMin)

		report.Results = []artifacts.SemgrepResults{
			{Extra: artifacts.SemgrepExtra{Severity: "error"}},
			{Extra: artifacts.SemgrepExtra{Severity: "error"}},
			{Extra: artifacts.SemgrepExtra{Severity: "warning"}},
			{Extra: artifacts.SemgrepExtra{Severity: "info"}},
			{Extra: artifacts.SemgrepExtra{Severity: "info"}},
			{Extra: artifacts.SemgrepExtra{Severity: "info"}},
		}

		want := false

		got := ruleSemgrepSeverityLimit(config, report)

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// Every error is low-impact and low impact is risk-accepted, so the
	// full rule set should pass despite the error limit of 0.
	t.Run("impact-risk-acceptance-1", func(t *testing.T) {
		config := new(Config)
		config.Semgrep.SeverityLimit.Error.Enabled = true
		config.Semgrep.SeverityLimit.Error.Limit = 0
		config.Semgrep.ImpactRiskAcceptance.Enabled = true
		config.Semgrep.ImpactRiskAcceptance.Low = true
		report := new(artifacts.SemgrepReportMin)

		report.Results = []artifacts.SemgrepResults{
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "low"}}},
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "low"}}},
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "low"}}},
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "low"}}},
		}

		want := true
		got := true

		err := validateSemgrepRules(config, report)
		if err != nil {
			got = false
		}

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})

	// One medium-impact error is NOT covered by the low-impact acceptance,
	// so the full rule set must fail.
	t.Run("impact-risk-acceptance-2", func(t *testing.T) {
		config := new(Config)
		config.Semgrep.SeverityLimit.Error.Enabled = true
		config.Semgrep.SeverityLimit.Error.Limit = 0
		config.Semgrep.ImpactRiskAcceptance.Enabled = true
		config.Semgrep.ImpactRiskAcceptance.Low = true
		report := new(artifacts.SemgrepReportMin)

		report.Results = []artifacts.SemgrepResults{
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "low"}}},
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "low"}}},
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "low"}}},
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "low"}}},
			{Extra: artifacts.SemgrepExtra{Severity: "error", Metadata: artifacts.SemgrepMetadata{Impact: "medium"}}},
		}

		want := false
		got := true

		err := validateSemgrepRules(config, report)
		if err != nil {
			got = false
		}

		if want != got {
			t.Fatalf("want: %t got: %t", want, got)
		}
	})
}
-------------------------------------------------------------------------------- /pkg/kev/kev.go: -------------------------------------------------------------------------------- 1 | package kev 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "io" 8 | "log/slog" 9 | "net/http" 10 | "time" 11 | 12 | "github.com/dustin/go-humanize" 13 | ) 14 | 15 | const DefaultBaseURL = "https://www.cisa.gov/sites/default/files/feeds/known_exploited_vulnerabilities.json" 16 | 17 | // Catalog data model for KEVs 18 | type Catalog struct { 19 | Title string `json:"title"` 20 | CatalogVersion string `json:"catalogVersion"` 21 | DateReleased time.Time `json:"dateReleased"` 22 | Count int `json:"count"` 23 | Vulnerabilities []Vulnerability `json:"vulnerabilities"` 24 | } 25 | 26 | // Vulnerability data model for a single record 27 | type Vulnerability struct { 28 | CveID string `json:"cveID"` 29 | VendorProject string `json:"vendorProject"` 30 | Product string `json:"product"` 31 | VulnerabilityName string `json:"vulnerabilityName"` 32 | DateAdded string `json:"dateAdded"` 33 | ShortDescription string `json:"shortDescription"` 34 | RequiredAction string `json:"requiredAction"` 35 | DueDate string `json:"dueDate"` 36 | Notes string `json:"notes"` 37 | } 38 | 39 | func NewCatalog() *Catalog { 40 | return &Catalog{ 41 | Vulnerabilities: make([]Vulnerability, 0), 42 | } 43 | } 44 | 45 | type FetchOptions struct { 46 | Client *http.Client 47 | URL string 48 | } 49 | 50 | type fetchOptionFunc func(*FetchOptions) 51 | 52 | func WithURL(url string) fetchOptionFunc { 53 | return func(o *FetchOptions) { 54 | o.URL = url 55 | } 56 | } 57 | 58 | func WithClient(client *http.Client) fetchOptionFunc { 59 | return func(o *FetchOptions) { 60 | o.Client = client 61 | } 62 | } 63 | 64 | func DefaultFetchOptions() *FetchOptions { 65 | return &FetchOptions{ 66 | Client: http.DefaultClient, 67 | URL: DefaultBaseURL, 68 | } 69 | } 70 | 71 | func DownloadData(w io.Writer, optionFuncs 
...fetchOptionFunc) error { 72 | options := DefaultFetchOptions() 73 | for _, optionFunc := range optionFuncs { 74 | optionFunc(options) 75 | } 76 | logger := slog.Default().With("method", "GET", "url", options.URL) 77 | 78 | defer func(started time.Time) { 79 | logger.Debug("kev json fetch done", "elapsed", time.Since(started)) 80 | }(time.Now()) 81 | 82 | logger.Debug("request kev data from api") 83 | res, err := options.Client.Get(options.URL) 84 | 85 | switch { 86 | case err != nil: 87 | logger.Error("kev api request failed during fetch data", "error", err) 88 | return errors.New("failed to get KEV Catalog. see log for details") 89 | case res.StatusCode != http.StatusOK: 90 | logger.Error("kev api bad status code", "res_status", res.Status) 91 | return errors.New("failed to get KEV Catalog. see log for details") 92 | } 93 | 94 | n, err := io.Copy(w, res.Body) 95 | size := humanize.Bytes(uint64(n)) 96 | if err != nil { 97 | logger.Error("io copy to writer from res body", "error", err) 98 | return errors.New("failed to get EPSS Scores. see log for details") 99 | } 100 | slog.Debug("successfully downloaded and decompressed epss data", "decompressed_size", size) 101 | return err 102 | } 103 | 104 | func FetchData(catalog *Catalog, optionFuncs ...fetchOptionFunc) error { 105 | buf := new(bytes.Buffer) 106 | if err := DownloadData(buf, optionFuncs...); err != nil { 107 | return err 108 | } 109 | 110 | return DecodeData(buf, catalog) 111 | } 112 | 113 | func DecodeData(r io.Reader, catalog *Catalog) error { 114 | if err := json.NewDecoder(r).Decode(catalog); err != nil { 115 | slog.Error("kev decoding failure", "error", err) 116 | return errors.New("failed to get KEV Catalog. 
see log for details") 117 | 118 | } 119 | return nil 120 | } 121 | -------------------------------------------------------------------------------- /pkg/validate/validate.go: -------------------------------------------------------------------------------- 1 | // Package validate provides a generic implementation for any object type using validation rules 2 | package validate 3 | 4 | import ( 5 | "errors" 6 | "fmt" 7 | "io" 8 | "log/slog" 9 | "slices" 10 | 11 | "gopkg.in/yaml.v3" 12 | ) 13 | 14 | // ErrFailedRule return this error if an object fails a validation rule 15 | var ErrFailedRule = errors.New("failed validation rule") 16 | 17 | // ErrConfig return this error if the configuration file is invalid 18 | var ErrConfig = errors.New("cannot validate, invalid configuration") 19 | 20 | // NewFailedRuleError convenience function for error wrapping 21 | func NewFailedRuleError(rule string, id string) error { 22 | return fmt.Errorf("%w: %v: %s", ErrFailedRule, rule, id) 23 | } 24 | 25 | // DenyFunc generic execution of a check function over a slice of objects 26 | func DenyFunc[S ~[]E, E any](target S, check func(E) error) error { 27 | var errs error 28 | for _, element := range target { 29 | errs = errors.Join(errs, check(element)) 30 | } 31 | return errs 32 | } 33 | 34 | // Validator generic validation runner 35 | type Validator[ObjectT any, ConfigT any] struct { 36 | validationRules []func([]ObjectT, ConfigT) error 37 | allowListRules []func(ObjectT, ConfigT) bool 38 | } 39 | 40 | // WithValidationRules define the fail validation rules, all must pass 41 | func (v Validator[ObjectT, ConfigT]) WithValidationRules(rules ...func([]ObjectT, ConfigT) error) Validator[ObjectT, ConfigT] { 42 | v.validationRules = append(v.validationRules, rules...) 
43 | return v 44 | } 45 | 46 | // WithAllowRules define the allow rules which will skip validation 47 | func (v Validator[ObjectT, ConfigT]) WithAllowRules(rules ...func(ObjectT, ConfigT) bool) Validator[ObjectT, ConfigT] { 48 | v.allowListRules = append(v.allowListRules, rules...) 49 | return v 50 | } 51 | 52 | // NewValidator used to create specific implementations of a validator 53 | func NewValidator[ObjectT any, ConfigT any]() Validator[ObjectT, ConfigT] { 54 | return Validator[ObjectT, ConfigT]{} 55 | } 56 | 57 | // Validate run validation rules on a slice of objects 58 | func (v Validator[ObjectT, ConfigT]) Validate(objects []ObjectT, config ConfigT) error { 59 | var errs error 60 | filteredObjects := slices.DeleteFunc(objects, func(obj ObjectT) bool { 61 | for _, allow := range v.allowListRules { 62 | if allow(obj, config) { 63 | return true 64 | } 65 | } 66 | return false 67 | }) 68 | 69 | oCount := len(objects) - len(filteredObjects) 70 | filteredCount := len(filteredObjects) 71 | slog.Debug("validation", "object_count", oCount, "allowed_count", filteredCount) 72 | for _, validate := range v.validationRules { 73 | errs = errors.Join(errs, validate(filteredObjects, config)) 74 | } 75 | 76 | return errs 77 | } 78 | 79 | // ReadConfigAndValidate validate after decoding the configuration object 80 | func (v Validator[ObjectT, ConfigT]) ReadConfigAndValidate(objects []ObjectT, configReader io.Reader, field string) error { 81 | config, err := ConfigByField[ConfigT](configReader, field) 82 | if err != nil { 83 | return err 84 | } 85 | return v.Validate(objects, config) 86 | } 87 | 88 | // ConfigByField get the config field name after decoding 89 | func ConfigByField[T any](configReader io.Reader, fieldname string) (T, error) { 90 | configMap := make(map[string]T) 91 | nilObj := *new(T) 92 | 93 | if err := yaml.NewDecoder(configReader).Decode(configMap); err != nil { 94 | return nilObj, fmt.Errorf("%w: %v", ErrConfig, err) 95 | } 96 | 97 | c, ok := 
configMap[fieldname] 98 | if !ok { 99 | return nilObj, fmt.Errorf("%w: No configuration provided for field '%s'", ErrConfig, fieldname) 100 | } 101 | return c, nil 102 | } 103 | -------------------------------------------------------------------------------- /pkg/validate/validate_test.go: -------------------------------------------------------------------------------- 1 | package validate 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "strings" 8 | "testing" 9 | 10 | "gopkg.in/yaml.v3" 11 | ) 12 | 13 | func TestValidateFunc(t *testing.T) { 14 | sample := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} 15 | err := DenyFunc(sample, func(value int) error { 16 | isEven := value%2 == 0 17 | if isEven { 18 | return nil 19 | } 20 | return NewFailedRuleError("must be even", fmt.Sprint(value)) 21 | }) 22 | t.Log(err) 23 | if !errors.Is(err, ErrFailedRule) { 24 | t.Fatalf("want: %v got: %v", ErrFailedRule, err) 25 | } 26 | } 27 | 28 | type mockConfig struct { 29 | Enabled bool `yaml:"enabled"` 30 | } 31 | 32 | func isEven(values []int, config mockConfig) error { 33 | if !config.Enabled { 34 | return nil 35 | } 36 | return DenyFunc(values, func(value int) error { 37 | if value%2 == 0 { 38 | return nil 39 | } 40 | return NewFailedRuleError("must be even", fmt.Sprint(value)) 41 | }) 42 | } 43 | 44 | func underFive(values []int, config mockConfig) error { 45 | if !config.Enabled { 46 | return nil 47 | } 48 | return DenyFunc(values, func(value int) error { 49 | if value < 5 { 50 | return nil 51 | } 52 | return NewFailedRuleError("must be less than 5", fmt.Sprint(value)) 53 | }) 54 | } 55 | 56 | func TestValidator(t *testing.T) { 57 | sample := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} 58 | 59 | validator := NewValidator[int, mockConfig]() 60 | validator = validator.WithValidationRules(isEven, underFive) 61 | validator = validator.WithAllowRules(func(i int, _ mockConfig) bool { return i == 5 }) 62 | 63 | t.Run("success", func(t *testing.T) { 64 | configBuf := new(bytes.Buffer) 65 | _ = 
yaml.NewEncoder(configBuf).Encode(map[string]any{"config": mockConfig{Enabled: true}}) 66 | err := validator.ReadConfigAndValidate(sample, configBuf, "config") 67 | t.Log(err) 68 | }) 69 | t.Run("bad-config", func(t *testing.T) { 70 | configBuf := new(bytes.Buffer) 71 | _ = yaml.NewEncoder(configBuf).Encode(map[string]any{"config": mockConfig{Enabled: true}}) 72 | err := validator.ReadConfigAndValidate(sample, configBuf, "someotherfield") 73 | t.Log(err) 74 | if !errors.Is(err, ErrConfig) { 75 | t.Fatalf("want: %v got: %v", ErrConfig, err) 76 | } 77 | }) 78 | t.Run("bad-config-encoding", func(t *testing.T) { 79 | configBuf := strings.NewReader("{{{") 80 | err := validator.ReadConfigAndValidate(sample, configBuf, "someotherfield") 81 | t.Log(err) 82 | if !errors.Is(err, ErrConfig) { 83 | t.Fatalf("want: %v got: %v", ErrConfig, err) 84 | } 85 | }) 86 | } 87 | -------------------------------------------------------------------------------- /static/gatecheck-logo-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gatecheckdev/gatecheck/222be345966b3f3ab30d6f2c0e8003cc7ed67e86/static/gatecheck-logo-dark.png -------------------------------------------------------------------------------- /static/gatecheck-logo-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gatecheckdev/gatecheck/222be345966b3f3ab30d6f2c0e8003cc7ed67e86/static/gatecheck-logo-light.png -------------------------------------------------------------------------------- /static/gatecheck-logo-splash-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gatecheckdev/gatecheck/222be345966b3f3ab30d6f2c0e8003cc7ed67e86/static/gatecheck-logo-splash-dark.png -------------------------------------------------------------------------------- /static/gatecheck-logo-splash-light.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/gatecheckdev/gatecheck/222be345966b3f3ab30d6f2c0e8003cc7ed67e86/static/gatecheck-logo-splash-light.png -------------------------------------------------------------------------------- /test/gatecheck.yaml: -------------------------------------------------------------------------------- 1 | version: "1" 2 | metadata: 3 | tags: 4 | - auto generated from CLI 5 | grype: 6 | severityLimit: 7 | critical: 8 | enabled: false 9 | limit: 0 10 | high: 11 | enabled: false 12 | limit: 0 13 | medium: 14 | enabled: false 15 | limit: 0 16 | low: 17 | enabled: false 18 | limit: 0 19 | epssLimit: 20 | enabled: false 21 | score: 0 22 | kevLimitEnabled: false 23 | cveLimit: 24 | enabled: false 25 | cves: [] 26 | epssRiskAcceptance: 27 | enabled: false 28 | score: 0 29 | cveRiskAcceptance: 30 | enabled: false 31 | cves: [] 32 | cyclonedx: 33 | severityLimit: 34 | critical: 35 | enabled: false 36 | limit: 0 37 | high: 38 | enabled: false 39 | limit: 0 40 | medium: 41 | enabled: false 42 | limit: 0 43 | low: 44 | enabled: false 45 | limit: 0 46 | epssLimit: 47 | enabled: false 48 | score: 0 49 | kevLimitEnabled: false 50 | cveLimit: 51 | enabled: false 52 | cves: [] 53 | epssRiskAcceptance: 54 | enabled: false 55 | score: 0 56 | cveRiskAcceptance: 57 | enabled: false 58 | cves: [] 59 | semgrep: 60 | severityLimit: 61 | error: 62 | enabled: false 63 | limit: 0 64 | warning: 65 | enabled: false 66 | limit: 0 67 | info: 68 | enabled: false 69 | limit: 0 70 | impactRiskAcceptance: 71 | enabled: false 72 | high: false 73 | medium: false 74 | low: false 75 | gitleaks: 76 | limitEnabled: false 77 | --------------------------------------------------------------------------------