├── .github └── workflows │ ├── CI.yml │ └── Lint.yml ├── .gitignore ├── .lintrunner.toml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── cliff.toml ├── do_release.sh ├── examples ├── config_example.toml ├── flake8_linter.py └── rustfmt_linter.py ├── pyproject.toml ├── rustfmt.toml ├── src ├── git.rs ├── init.rs ├── lib.rs ├── lint_config.rs ├── lint_message.rs ├── linter.rs ├── log_utils.rs ├── main.rs ├── path.rs ├── persistent_data.rs ├── rage.rs ├── render.rs ├── sapling.rs ├── testing.rs └── version_control.rs ├── tests ├── fixtures │ └── fake_source_file.rs ├── integration_test.rs └── snapshots │ ├── integration_test__changed_init_causes_warning_1.snap │ ├── integration_test__changed_init_causes_warning_2.snap │ ├── integration_test__duplicate_code_fails.snap │ ├── integration_test__empty_command_fails.snap │ ├── integration_test__excluding_dryrun_fails.snap │ ├── integration_test__format_command_doesnt_use_nonformat_linter.snap │ ├── integration_test__git_head_files.snap │ ├── integration_test__git_no_changes.snap │ ├── integration_test__init_suppresses_warning.snap │ ├── integration_test__invalid_args.snap │ ├── integration_test__invalid_config_fails.snap │ ├── integration_test__invalid_paths_cmd_and_from.snap │ ├── integration_test__invalid_paths_cmd_and_specified_paths.snap │ ├── integration_test__lint_with_no_linters.snap │ ├── integration_test__linter_hard_failure_is_caught.snap │ ├── integration_test__linter_nonexistent_command.snap │ ├── integration_test__linter_providing_nonexistent_path_degrades_gracefully.snap │ ├── integration_test__linter_replacement_trailing_newlines.snap │ ├── integration_test__no_op_config_succeeds.snap │ ├── integration_test__rage_command_output.snap │ ├── integration_test__simple_linter.snap │ ├── integration_test__simple_linter_fails_on_nonexistent_file.snap │ ├── integration_test__simple_linter_fake_second_config.snap │ ├── integration_test__simple_linter_oneline.snap │ ├── 
integration_test__simple_linter_only_under_dir.snap │ ├── integration_test__simple_linter_replacement_message.snap │ ├── integration_test__simple_linter_two_configs.snap │ ├── integration_test__skip_nonexistent_linter.snap │ ├── integration_test__take_nonexistent_linter.snap │ ├── integration_test__tee_json.snap │ └── integration_test__unknown_config_fails.snap └── tools ├── convert_to_sarif.py └── convert_to_sarif_test.py /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by maturin v1.4.0 2 | # To update, run 3 | # 4 | # maturin generate-ci github 5 | # 6 | name: CI 7 | 8 | on: 9 | push: 10 | branches: 11 | - main 12 | tags: 13 | - 'v*' 14 | pull_request: 15 | workflow_dispatch: 16 | 17 | concurrency: 18 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} 19 | cancel-in-progress: true 20 | 21 | permissions: 22 | contents: read 23 | 24 | jobs: 25 | test: 26 | strategy: 27 | fail-fast: false 28 | matrix: 29 | os: [ubuntu-latest, windows-latest, macos-latest] 30 | 31 | runs-on: ${{ matrix.os }} 32 | steps: 33 | - uses: actions/checkout@v4 34 | with: 35 | fetch-depth: 0 36 | - uses: actions-rs/toolchain@v1 37 | with: 38 | toolchain: stable 39 | - name: Initialize git config for testing 40 | run: | 41 | git config --global user.email "fake@email.com" 42 | git config --global user.name "mr fake" 43 | git config --global init.defaultBranch main 44 | - name: Install and initialize sapling on macos 45 | if: matrix.os == 'macos-latest' 46 | run: | 47 | brew install sapling 48 | sl config --user ui.username "mr fake " 49 | - name: Run cargo test 50 | uses: nick-fields/retry@v2.8.2 51 | env: 52 | TMPDIR: ${{ runner.temp }} 53 | with: 54 | timeout_minutes: 10 55 | max_attempts: 10 56 | retry_wait_seconds: 90 57 | command: | 58 | cargo test 59 | 60 | linux: 61 | runs-on: ubuntu-latest 62 | needs: test 63 | 
strategy: 64 | matrix: 65 | target: [x86_64, aarch64] 66 | steps: 67 | - uses: actions/checkout@v4 68 | - name: Build wheels 69 | uses: PyO3/maturin-action@v1 70 | with: 71 | target: ${{ matrix.target }} 72 | args: --release --out dist 73 | sccache: 'true' 74 | manylinux: auto 75 | - name: Upload wheels 76 | uses: actions/upload-artifact@v3 77 | with: 78 | name: wheels 79 | path: dist 80 | 81 | windows: 82 | runs-on: windows-latest 83 | needs: test 84 | strategy: 85 | matrix: 86 | target: [x86, x86_64] 87 | steps: 88 | - uses: actions/checkout@v4 89 | - name: Build wheels 90 | uses: PyO3/maturin-action@v1 91 | with: 92 | target: ${{ matrix.target }} 93 | args: --release --out dist 94 | sccache: 'true' 95 | - name: Upload wheels 96 | uses: actions/upload-artifact@v3 97 | with: 98 | name: wheels 99 | path: dist 100 | 101 | macos: 102 | runs-on: macos-latest 103 | needs: test 104 | strategy: 105 | matrix: 106 | target: [x86_64, aarch64] 107 | steps: 108 | - uses: actions/checkout@v4 109 | - name: Build wheels 110 | uses: PyO3/maturin-action@v1 111 | with: 112 | target: ${{ matrix.target }} 113 | args: --release --out dist 114 | sccache: 'true' 115 | - name: Upload wheels 116 | uses: actions/upload-artifact@v3 117 | with: 118 | name: wheels 119 | path: dist 120 | 121 | sdist: 122 | runs-on: ubuntu-latest 123 | needs: test 124 | steps: 125 | - uses: actions/checkout@v4 126 | - name: Build sdist 127 | uses: PyO3/maturin-action@v1 128 | with: 129 | command: sdist 130 | args: --out dist 131 | - name: Upload sdist 132 | uses: actions/upload-artifact@v3 133 | with: 134 | name: wheels 135 | path: dist 136 | 137 | release: 138 | name: Release 139 | runs-on: ubuntu-latest 140 | if: "startsWith(github.ref, 'refs/tags/')" 141 | needs: [linux, windows, macos, sdist] 142 | steps: 143 | - uses: actions/download-artifact@v3 144 | with: 145 | name: wheels 146 | - name: Publish to PyPI 147 | uses: PyO3/maturin-action@v1 148 | env: 149 | MATURIN_PYPI_TOKEN: ${{ secrets.PYPI_API_TOKEN }} 
150 | with: 151 | command: upload 152 | args: --non-interactive --skip-existing * 153 | -------------------------------------------------------------------------------- /.github/workflows/Lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | tags: 8 | - v* 9 | pull_request: 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | lintrunner: 17 | name: lintrunner 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | os: [ubuntu-latest, windows-latest] 22 | python_version: ["3.11"] 23 | 24 | runs-on: ${{ matrix.os }} 25 | steps: 26 | - uses: actions/checkout@v3 27 | - name: Setup Python 28 | uses: actions/setup-python@v4 29 | with: 30 | python-version: ${{ matrix.python_version }} 31 | - name: Install Lintrunner 32 | run: | 33 | pip install . 34 | lintrunner init 35 | - name: Run lintrunner on all files - Linux 36 | if: matrix.os != 'windows-latest' 37 | run: | 38 | set +e 39 | if ! 
lintrunner -v --force-color --all-files --tee-json=lint.json; then 40 | echo "" 41 | echo -e "\e[1m\e[36mYou can reproduce these results locally by using \`lintrunner -m main\`.\e[0m" 42 | exit 1 43 | fi 44 | - name: Run lintrunner on all files - Windows 45 | if: matrix.os == 'windows-latest' 46 | run: lintrunner -v --force-color --all-files 47 | - name: Produce SARIF 48 | if: always() && matrix.os == 'ubuntu-latest' 49 | run: | 50 | python tools/convert_to_sarif.py --input lint.json --output lintrunner.sarif 51 | - name: Upload SARIF file 52 | if: always() && matrix.os == 'ubuntu-latest' 53 | continue-on-error: true 54 | uses: github/codeql-action/upload-sarif@v2 55 | with: 56 | # Path to SARIF file relative to the root of the repository 57 | sarif_file: lintrunner.sarif 58 | category: lintrunner 59 | checkout_path: ${{ github.workspace }} 60 | - name: Run Clippy 61 | run: cargo clippy -- -D warnings --verbose 62 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | # Ignore new insta snapshots 13 | tests/snapshots/*.new 14 | 15 | 16 | # Generated by Maturin 17 | # Byte-compiled / optimized / DLL files 18 | __pycache__/ 19 | .pytest_cache/ 20 | *.py[cod] 21 | 22 | # C extensions 23 | *.so 24 | 25 | # Distribution / packaging 26 | .Python 27 | .venv/ 28 | env/ 29 | bin/ 30 | build/ 31 | develop-eggs/ 32 | dist/ 33 | eggs/ 34 | lib/ 35 | lib64/ 36 | parts/ 37 | sdist/ 38 | var/ 39 | include/ 40 | man/ 41 | venv/ 42 | *.egg-info/ 43 | .installed.cfg 44 | *.egg 45 | 46 | # Installer 
logs 47 | pip-log.txt 48 | pip-delete-this-directory.txt 49 | pip-selfcheck.json 50 | 51 | # Unit test / coverage reports 52 | htmlcov/ 53 | .tox/ 54 | .coverage 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | 59 | # Translations 60 | *.mo 61 | 62 | # Mr Developer 63 | .mr.developer.cfg 64 | .project 65 | .pydevproject 66 | 67 | # Rope 68 | .ropeproject 69 | 70 | # Django stuff: 71 | *.log 72 | *.pot 73 | 74 | .DS_Store 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyCharm 80 | .idea/ 81 | 82 | # VSCode 83 | .vscode/ 84 | 85 | # Pyenv 86 | .python-version 87 | -------------------------------------------------------------------------------- /.lintrunner.toml: -------------------------------------------------------------------------------- 1 | merge_base_with="main" 2 | 3 | [[linter]] 4 | code = 'RUSTFMT' 5 | include_patterns = ['**/*.rs'] 6 | command = [ 7 | 'python', 8 | 'examples/rustfmt_linter.py', 9 | '--binary=rustfmt', 10 | '--config-path=rustfmt.toml', 11 | '--', 12 | '@{{PATHSFILE}}' 13 | ] 14 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 
4 | ## [0.12.7] - 2024-12-05 5 | 6 | ### Bug Fixes 7 | 8 | - Build x86\_64 wheels for Windows ([a4d6b74](https://github.com/suo/lintrunner/commit/a4d6b7469307acae7228d95ee08a4764b1e655f2)) 9 | - Fix [Clippy](https://doc.rust-lang.org/clippy/) violatoins ([05ff643](https://github.com/suo/lintrunner/commit/05ff6431bbeab7359e7b4e13e22cbb9e85c5c433)) 10 | - Fetch all commit history to fix MacOS builds ([3770be6](https://github.com/suo/lintrunner/commit/3770be65ee7b1186b83cf3873d47a439b275369d)) 11 | 12 | 13 | ## [0.12.5] - 2024-04-09 14 | 15 | ### Features 16 | 17 | - Add gist/pastry upload to lintrunner rage ([bd224ae](https://github.com/suo/lintrunner/commit/bd224aefdc5144cc85276695cd4fabf170f183a0)) 18 | 19 | ## [0.12.1] - 2024-02-10 20 | 21 | ### Bug Fixes 22 | 23 | - Properly filter ignored files in sapling all-files ([34dd8b2](https://github.com/suo/lintrunner/commit/34dd8b295c58b1055916f18dd55272fffc64e705)) 24 | - Make rage command robust to missing data ([a96ceec](https://github.com/suo/lintrunner/commit/a96ceec6ce3f8ae798f08d34c5f6de133814363b)) 25 | 26 | ## [0.12.0] - 2024-02-10 27 | 28 | ### Bug Fixes 29 | 30 | - Properly fail if --get\_paths cmd fails ([53c4961](https://github.com/suo/lintrunner/commit/53c496184eac9545e83d6a2714adf0bc1457316c)) 31 | - Dont panic when no linters are specified ([0527cf0](https://github.com/suo/lintrunner/commit/0527cf0792956a88a5fb2454688c975d0a8c8baf)) 32 | - Make --all-files work with sl ([2df4572](https://github.com/suo/lintrunner/commit/2df4572ddf1a630889a0150ac8e6bf9d63340839)) 33 | 34 | ### Features 35 | 36 | - Add a list subcommand to show available linters ([0765237](https://github.com/suo/lintrunner/commit/0765237900aaa7e0ecb3491227b073fa72216b36)) 37 | 38 | ### Testing 39 | 40 | - Fix snapshot test ([7c345da](https://github.com/suo/lintrunner/commit/7c345da7342aca42645cab8551ef232581083f11)) 41 | 42 | ## [0.11.1] - 2024-01-30 43 | 44 | ### Bug Fixes 45 | 46 | - Log relative-to argument 
([2bbe82f](https://github.com/suo/lintrunner/commit/2bbe82f0d652d59a04ab010be16cf0e85d525d50)) 47 | 48 | ### Documentation 49 | 50 | - Update README to include options and link to github action ([#55](https://github.com/suo/lintrunner/issues/55)) ([9efb969](https://github.com/suo/lintrunner/commit/9efb969e27f5775e619a3bbf51576a175557e3f8)) 51 | - Remove redirect to pytorch/test-infra ([f065574](https://github.com/suo/lintrunner/commit/f065574253f003bd7604c5ef67956628add3269c)) 52 | 53 | ### Testing 54 | 55 | - Guard sl tests with mutex ([a9feaea](https://github.com/suo/lintrunner/commit/a9feaea47735c98b32685b65e9fe85f0672874f0)) 56 | 57 | ## [0.10.7] - 2023-03-02 58 | 59 | ### Bug Fixes 60 | 61 | - Run clippy and rustfmt; fix issues ([#34](https://github.com/suo/lintrunner/issues/34)) ([b0e8be2](https://github.com/suo/lintrunner/commit/b0e8be295e5a0e959f36ea740b95780a9abe7400)) 62 | - Fix and enable rustfmt linter ([#35](https://github.com/suo/lintrunner/issues/35)) ([507d273](https://github.com/suo/lintrunner/commit/507d27314283fd5c6acede4e75800766921e358d)) 63 | 64 | ### Features 65 | 66 | - Enable setting default --merge-base-with values ([75ea9c0](https://github.com/suo/lintrunner/commit/75ea9c09cd6904e6e53170af0661fd3dcb39c9e9)) 67 | 68 | ## [0.10.5] - 2023-01-19 69 | 70 | ### Bug Fixes 71 | 72 | - Add a space to the severity on oneline format ([#30](https://github.com/suo/lintrunner/issues/30)) ([5120786](https://github.com/suo/lintrunner/commit/5120786d3a61bf9013563a126f61f9cb5727be1a)) 73 | 74 | ## [0.10.2] - 2023-01-13 75 | 76 | ### Features 77 | 78 | - Update the message format produced by `convert_to_sarif.py` ([#28](https://github.com/suo/lintrunner/issues/28)) ([b3370bf](https://github.com/suo/lintrunner/commit/b3370bff64ee5bdaad7faef89b4127c2d3b4f357)) 79 | 80 | ## [0.10.1] - 2023-01-13 81 | 82 | ### Bug Fixes 83 | 84 | - Allow --paths-cmd to run on Windows ([#23](https://github.com/suo/lintrunner/issues/23)) 
([a1c4191](https://github.com/suo/lintrunner/commit/a1c4191575959974ce5b17269f624b17e93951a0)) 85 | 86 | ## [0.10.0] - 2022-11-28 87 | 88 | ### Bug Fixes 89 | 90 | - Typo in init_command doc ([#17](https://github.com/suo/lintrunner/issues/17)) ([fa8d7b3](https://github.com/suo/lintrunner/commit/fa8d7b32641e58c041e9f3bf15a4b26e1afff915)) 91 | - Path construction errors on Windows ([#19](https://github.com/suo/lintrunner/issues/19)) ([032bea6](https://github.com/suo/lintrunner/commit/032bea69f31f6ccfab5cb6670edfb5adb22f1840)) 92 | 93 | ### Features 94 | 95 | - A tool to convert json output to SARIF format ([#16](https://github.com/suo/lintrunner/issues/16)) ([1c991af](https://github.com/suo/lintrunner/commit/1c991affb15edac2bb67080e49bf0e5037b47e92)) 96 | - Add lint_message.name to oneline output ([#21](https://github.com/suo/lintrunner/issues/21)) ([84f3d34](https://github.com/suo/lintrunner/commit/84f3d34c6db340bdbbe63a4d192004f17769758b)) 97 | 98 | ### Testing 99 | 100 | - Fix linux ci ([c443387](https://github.com/suo/lintrunner/commit/c443387ff9a42a6f8c9b0e8add04220d2fea46a1)) 101 | 102 | ## [0.9.3] - 2022-09-23 103 | 104 | ### Bug Fixes 105 | 106 | - Don't check files that were deleted/moved in working tree ([0fbb2f3](https://github.com/suo/lintrunner/commit/0fbb2f3d01a08088606ee6650e98d9db9b0b7b3a)) 107 | 108 | ### Testing 109 | 110 | - Add unit test for trailing whitespace ([bbbcffd](https://github.com/suo/lintrunner/commit/bbbcffd7d095b16fc831fe48c163b4805e6a9aa0)) 111 | - Add missing snapshot ([9fda576](https://github.com/suo/lintrunner/commit/9fda576f330392c244527defb6e80250663744c6)) 112 | 113 | ## [0.9.2] - 2022-05-11 114 | 115 | ### Bug Fixes 116 | 117 | - Add more runtime info to logs ([80e78de](https://github.com/suo/lintrunner/commit/80e78dee128f834f4f696c652bcec32a4f0e0d1c)) 118 | 119 | ### Features 120 | 121 | - Add --all-files command ([3d64ad3](https://github.com/suo/lintrunner/commit/3d64ad33ca94172ee27830fb772c35d469b41028)) 122 | 123 | ## 
[0.9.1] - 2022-05-11 124 | 125 | ### Features 126 | 127 | - Add --tee-json option ([5978ec0](https://github.com/suo/lintrunner/commit/5978ec0e47f38bd0252c3f5afa02d27314edd875)) 128 | 129 | ## [0.9.0] - 2022-05-10 130 | 131 | ### Bug Fixes 132 | 133 | - Add --version command-line arg ([7932c44](https://github.com/suo/lintrunner/commit/7932c44d80279e54b67e02d256b356104ba4bcc2)) 134 | - Escape command-line args in log ([1018103](https://github.com/suo/lintrunner/commit/10181032e2093bcf0cb233300b982da459a71975)) 135 | - Error if duplicate linters found ([89064c1](https://github.com/suo/lintrunner/commit/89064c1f808d7e76ecc183c182b9c1ac4d765704)) 136 | - Escape linter initializer in logs ([0a0f0ec](https://github.com/suo/lintrunner/commit/0a0f0ec1d86b02f77a680ad8e4560ed80219b849)) 137 | - Properly ignore current run on `rage -i` ([#6](https://github.com/suo/lintrunner/issues/6)) ([e4989eb](https://github.com/suo/lintrunner/commit/e4989ebe598d7268d4ae715484ec21a57aadd426)) 138 | - Show milliseconds in rage run timestamp ([9780a2b](https://github.com/suo/lintrunner/commit/9780a2b8774b3c6e52b29414435a038840a3aabf)) 139 | 140 | ### Documentation 141 | 142 | - Update changelog ([82c3335](https://github.com/suo/lintrunner/commit/82c33359f0cde758e7153d4ba450751afbc6c6c8)) 143 | 144 | ### Features 145 | 146 | - Add rage command for bug reporting ([bb80fef](https://github.com/suo/lintrunner/commit/bb80fef49fabad5558e77786e157b4ea822d0f23)) 147 | 148 | ## [0.8.0] - 2022-05-02 149 | 150 | ### Bug Fixes 151 | 152 | - Add severity to oneline message ([14495be](https://github.com/suo/lintrunner/commit/14495be590d1b8c223a07f59ccdb6600d22e92c4)) 153 | - Unify output controlling commands into --output ([8b95e7b](https://github.com/suo/lintrunner/commit/8b95e7b76c65dc4187b17b9851ce902aebc58944)) 154 | 155 | ### Documentation 156 | 157 | - Improve help message ([0630560](https://github.com/suo/lintrunner/commit/06305606f9d840610487a9b7dff9a159a05fb8d1)) 158 | 159 | ### Features 160 | 161 
| - Warn if init seems out of date ([4050dd7](https://github.com/suo/lintrunner/commit/4050dd7fe883c419e0af110a7d2c6887b6ba08f0)) 162 | - Format command ([bf7925d](https://github.com/suo/lintrunner/commit/bf7925df7b1aac0265e3bf88ef8ca05d720e0560)) 163 | 164 | ### Testing 165 | 166 | - Add integration test for init warnings ([9c75f29](https://github.com/suo/lintrunner/commit/9c75f293cdccbd662f922548861b277c70f9d14d)) 167 | - Add integration test for dryrun error on init config ([88738ca](https://github.com/suo/lintrunner/commit/88738ca299179588e9abae6b8265c8287270edb6)) 168 | 169 | ### Build 170 | 171 | - Run cargo upgrade ([0241c01](https://github.com/suo/lintrunner/commit/0241c01630187ce3817ee1964f858ebc7b85d10a)) 172 | 173 | ## [0.7.0] - 2022-04-15 174 | 175 | ### Features 176 | 177 | - Add --oneline arg for compact lint rendering ([a0a9e87](https://github.com/suo/lintrunner/commit/a0a9e878781a2ead70ff7bfc94064275eeb79020)) 178 | 179 | ## [0.6.2] - 2022-04-15 180 | 181 | ### Bug Fixes 182 | 183 | - Do not allow * to match across path segments ([382413a](https://github.com/suo/lintrunner/commit/382413aa40edf2dead74fd9f25fdd01bac00bd80)) 184 | 185 | ### Testing 186 | 187 | - Add test for deleted files with --revision specified ([19c6fee](https://github.com/suo/lintrunner/commit/19c6fee0d11096c4ba7e7182fd3178b170cddb10)) 188 | 189 | ## [0.6.1] - 2022-04-15 190 | 191 | ### Bug Fixes 192 | 193 | - Correct order of arguments while gathering files to lint ([9c2093d](https://github.com/suo/lintrunner/commit/9c2093d4dace6e3570cad9bc5b363e0b3fc50b3c)) 194 | 195 | ### Documentation 196 | 197 | - Update install instructions ([a3095fd](https://github.com/suo/lintrunner/commit/a3095fde2edacb0dba93250cfca35f2000c4c009)) 198 | - Add --merge-base-with to readme ([8d51a11](https://github.com/suo/lintrunner/commit/8d51a117e833211ef275355d27c64eacab40cbce)) 199 | 200 | 201 | -------------------------------------------------------------------------------- /Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "lintrunner" 3 | version = "0.12.7" 4 | authors = ["Michael Suo "] 5 | edition = "2021" 6 | description = "A lint running tool and framework." 7 | license = "BSD-3-Clause" 8 | 9 | [dependencies] 10 | clap = { version = "3.2.25", features = ["derive"] } 11 | anyhow = "1.0.79" 12 | glob = "0.3.1" 13 | tempfile = "3.10.0" 14 | serde = { version = "1.0.196", features = ["derive"] } 15 | serde_json = "1.0.113" 16 | textwrap = { version = "0.15.2", features = ["terminal_size"] } 17 | similar = { version = "2.4.0", features = ["inline"] } 18 | console = "0.15.8" 19 | toml = "0.5.11" 20 | log = "0.4.20" 21 | indicatif = "0.16.2" 22 | regex = "1.10.3" 23 | itertools = "0.10.5" 24 | directories = "4.0.1" 25 | blake3 = "1.5.0" 26 | fern = { version = "0.6.2", features = ["colored"] } 27 | chrono = "0.4.33" 28 | dialoguer = "0.10.4" 29 | shell-words = "1.1.0" 30 | figment = { version = "0.10", features = ["toml", "env"] } 31 | 32 | [dev-dependencies] 33 | assert_cmd = "2.0.13" 34 | insta = { version = "1.34.0", features = ["redactions", "yaml"] } 35 | once_cell = "1.19.0" 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Michael Suo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial 
portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Lintrunner 2 | 3 | ## Overview 4 | `lintrunner` is a tool that runs linters. It is responsible for: 5 | - Deciding which files need to be linted. 6 | - Invoking linters according to a common protocol. 7 | - Gathering results and presenting them to users. 8 | 9 | The intention is to provide a universal way to configure and invoke linters, 10 | which is useful on large polyglot projects. 11 | 12 | The design of `lintrunner` is heavily inspired by `linttool`, a project that exists internally at Meta. 13 | 14 | ## Installation 15 | ``` 16 | pip install lintrunner 17 | ``` 18 | 19 | ## Usage 20 | First, you need to add a configuration file to your repo. See the [Linter 21 | configuration](#linter-configuration) section for more info. 22 | 23 | Then, simply run `lintrunner` to lint your changes! 24 | 25 | ## How to control what paths to lint `lintrunner` 26 | When run with no arguments, `lintrunner` will check: 27 | - The files changed in the `HEAD` commit. 28 | - The files changed in the user’s working tree. 29 | 30 | It does *not* check: 31 | - Any files not tracked by `git`; `git add` them to lint them. 
32 | 33 | There are multiple ways to customize how paths are checked: 34 | 35 | ### Pass paths as positional arguments 36 | For example: 37 | ``` 38 | lintrunner foo.py bar.cpp 39 | ``` 40 | 41 | This naturally composes with `xargs`, for example the canonical way to check 42 | every path in the repo is: 43 | ``` 44 | git grep -Il . | xargs lintrunner 45 | ``` 46 | 47 | ### `--configs`/ `--config` 48 | "Comma-separated paths to lintrunner configuration files. 49 | Multiple files are merged, with later definitions overriding earlier ones. 50 | ONLY THE FIRST is required to be present on your machine. 51 | Defaults to `lintrunner.toml, lintrunner.private.toml`. Extra configs like `lintrunner.private.toml` 52 | are useful for combining project-wide and local configs." 53 | 54 | ### `--paths-cmd` 55 | Some ways to invoke `xargs` will cause multiple `lintrunner` processes to be 56 | run, increasing lint time (especially on huge path sets). As an alternative that 57 | gives `lintrunner` control of parallelization, you can use `--paths-cmd`. If 58 | `--paths-cmd` is specified `lintrunner` will execute that command and consider 59 | each line of its `stdout` to be a file to lint. 60 | 61 | For example, the same command above would be: 62 | ``` 63 | lintrunner --paths-cmd='git grep -Il .' 64 | ``` 65 | 66 | ### `--paths-file` 67 | If this is specified, `lintrunner` will read paths from the given file, one per 68 | line, and check those. This can be useful if you have some really complex logic 69 | to determine which paths to check. 70 | 71 | ### `--revision` 72 | This value can be any `` accepted by `git diff-tree`, like a commit 73 | hash or revspec. If this is specified, `lintrunner` will check: 74 | - All paths changed from `` to `HEAD` 75 | - All paths changed in the user's working tree. 76 | 77 | ### `--merge-base-with` 78 | Like `--revision`, except the revision is determined by computing the merge-base 79 | of `HEAD` and the provided ``. 
This is useful for linting all commits 80 | in a specific pull request. For example, for a pull request targeting master, 81 | you can run: 82 | ``` 83 | lintrunner -m master 84 | ``` 85 | 86 | ### `--all-files` 87 | This will run lint on all files specified in `.lintrunner.toml`. 88 | 89 | ### `--only-lint-under-config-dir` 90 | If set, will only lint files under the directory where the configuration file is located and its subdirectories. 91 | 92 | ## Linter configuration 93 | `lintrunner` knows which linters to run and how by looking at a configuration 94 | file, conventionally named `.lintrunner.toml`. 95 | 96 | Here is an example linter configuration: 97 | 98 | ```toml 99 | merge_base_with = 'main' 100 | 101 | [[linter]] 102 | name = 'FLAKE8' 103 | include_patterns = [ 104 | 'src/**/*.py', # unix-style globs supported 105 | 'test/**/*.py', 106 | ] 107 | exclude_patterns = ['src/my_bad_file.py'] 108 | command = [ 109 | 'python3', 110 | 'flake8_linter.py', 111 | '—-', 112 | # {{PATHSFILE}} gets rewritten to a tmpfile containing all paths to lint 113 | '@{{PATHSFILE}}', 114 | ] 115 | ``` 116 | 117 | A complete description of the configuration schema can be found 118 | [here](https://docs.rs/lintrunner/latest/lintrunner/lint_config/struct.LintConfig.html). 119 | 120 | ## Linter protocol 121 | Most linters have their own output format and arguments. In order to impose 122 | consistency on linter invocation and outputs, `lintrunner` implements a protocol 123 | that it expects linters to fulfill. In most cases, a small script (called a 124 | *linter adapter*) is required to implement the protocol for a given external 125 | linter. You can see some example adapters in `examples/` . 126 | 127 | ### Invocation 128 | Linters will be invoked according to the `command` specified by their 129 | configuration. They will be called once per lint run. 130 | 131 | If a linter needs to know which paths to run on, it should take a 132 | `{{PATHSFILE}}` argument. 
During invocation, the string `{{PATHSFILE}}` will be 133 | replaced with the name of a temporary file containing which paths the linter 134 | should run on, one path per line. 135 | 136 | A common way to implement this in a linter adapter is to use `argparse`’s 137 | [`fromfile_prefix_chars`](https://docs.python.org/3/library/argparse.html#fromfile-prefix-chars) 138 | feature. In the Flake8 example above, we use `@` as the `fromfile_prefix_chars` 139 | argument, so `argparse` will automatically read the `{{PATHSFILE}}` and supply 140 | its contents as a list of arguments. 141 | 142 | ### Output 143 | Any lint messages a linter would like to communicate the user must be 144 | represented as a `LintMessage`. The linter, must print `LintMessage`s as [JSON 145 | Lines](https://jsonlines.org/) to `stdout`, one message per line. Output to 146 | `stderr` will be ignored. 147 | 148 | A complete description of the LintMessage schema can be found 149 | [here](https://docs.rs/lintrunner/latest/lintrunner/lint_message/struct.LintMessage.html). 150 | 151 | ### Exiting 152 | Linters **should always exit with code 0**. This is true even if lint errors are 153 | reported; `lintrunner` itself will determine how to exit based on what linters 154 | report. 155 | 156 | To signal a general linter failure (which should ideally never happen!), linters 157 | can return a `LintMessage` with `path = None`. 158 | 159 | In the event a linter exits non-zero, it will be caught by `lintrunner`and 160 | presented as a “general linter failure” with stdout/stderr shown to the user. 161 | This should be considered a bug in the linter’s implementation of this protocol. 162 | 163 | ## Tips for adopting `lintrunner` in a new project 164 | 165 | When adopting lintrunner in a previously un-linted project, it may generate a lot 166 | of lint messages. You can use the `--output oneline` option to make 167 | `lintrunner` display each lint message in its separate line to quickly navigate 168 | through them. 
169 | 170 | Additionally, you can selectively run specific linters with the `--take` option, 171 | like `--take RUFF,CLANGFORMAT`, to focus on resolving specific lint errors, or 172 | use `--skip` to skip a long running linter like `MYPY`. 173 | 174 | ## GitHub Action 175 | 176 | To use `lintrunner` in a GitHub workflow, you can consider [`lintrunner-action`](https://github.com/justinchuby/lintrunner-action). 177 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | # configuration file for git-cliff (0.1.0) 2 | 3 | [changelog] 4 | # changelog header 5 | header = """ 6 | # Changelog\n 7 | All notable changes to this project will be documented in this file.\n 8 | """ 9 | # template for the changelog body 10 | # https://tera.netlify.app/docs/#introduction 11 | body = """ 12 | {% if version %}\ 13 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 14 | {% else %}\ 15 | ## [unreleased] 16 | {% endif %}\ 17 | {% for group, commits in commits | group_by(attribute="group") %} 18 | ### {{ group | upper_first }} 19 | {% for commit in commits %} 20 | - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end=\"\") }}](https://github.com/suo/lintrunner/commit/{{ commit.id }}))\ 21 | {% endfor %} 22 | {% endfor %}\n 23 | """ 24 | # remove the leading and trailing whitespace from the template 25 | trim = true 26 | # changelog footer 27 | footer = """ 28 | 29 | """ 30 | 31 | [git] 32 | # parse the commits based on https://www.conventionalcommits.org 33 | conventional_commits = true 34 | # filter out the commits that are not conventional 35 | filter_unconventional = true 36 | # regex for preprocessing the commit messages 37 | commit_preprocessors = [ 38 | { pattern = '\((\w+\s)?#([0-9]+)\)', replace = 
#!/usr/bin/env bash
# Release helper: regenerate the changelog, bump the crate version, then
# commit, tag, and push.
#
# Usage: ./do_release.sh <version>   (e.g. ./do_release.sh 1.2.3)
#
# The script uses `[[ ... ]]`, a bash-ism, so it must run under bash — the
# shebang above makes that explicit instead of relying on the caller.
set -eux

# Fail with a usage message when no version argument is given (set -u would
# abort anyway, but with an opaque error).
version="${1:?usage: do_release.sh <version>}"
OS="$(uname)"

git cliff --tag "$version" > CHANGELOG.md

# BSD sed (macOS) requires an explicit empty backup suffix for -i.
if [[ "$OS" == "Linux" ]]; then
    sed -i "s/^version.*/version = \"$version\"/" Cargo.toml
elif [[ "$OS" == "Darwin" ]]; then
    sed -i '' "s/^version.*/version = \"$version\"/" Cargo.toml
fi

git commit -am "chore(release): prep for $version"
git tag "v$version"
git push
git push origin "v$version"
import argparse
import concurrent.futures
import json
import logging
import os
import re
import subprocess
import sys
import time
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Optional, Set, Pattern


# True when running under Windows; used to normalize path separators so that
# lint messages are stable across platforms.
IS_WINDOWS: bool = os.name == "nt"


def eprint(*args: Any, **kwargs: Any) -> None:
    """Print to stderr, flushing immediately.

    stdout is reserved for JSON-line lint messages, so all human-readable
    output must go to stderr.
    """
    print(*args, file=sys.stderr, flush=True, **kwargs)


class LintSeverity(str, Enum):
    """Severity levels understood by lintrunner."""

    ERROR = "error"
    WARNING = "warning"
    ADVICE = "advice"
    DISABLED = "disabled"


class LintMessage(NamedTuple):
    """A single lint finding, serialized to stdout as one JSON line."""

    path: str
    line: Optional[int]
    char: Optional[int]
    code: str
    severity: LintSeverity
    name: str
    original: Optional[str]
    replacement: Optional[str]
    description: Optional[str]


def as_posix(name: str) -> str:
    """Return ``name`` with forward slashes on Windows; unchanged elsewhere."""
    return name.replace("\\", "/") if IS_WINDOWS else name


# fmt: off
# Codes documented at https://www.flake8rules.com/
DOCUMENTED_IN_FLAKE8RULES: Set[str] = {
    "E101", "E111", "E112", "E113", "E114", "E115", "E116", "E117",
    "E121", "E122", "E123", "E124", "E125", "E126", "E127", "E128", "E129",
    "E131", "E133",
    "E201", "E202", "E203",
    "E211",
    "E221", "E222", "E223", "E224", "E225", "E226", "E227", "E228",
    "E231",
    "E241", "E242",
    "E251",
    "E261", "E262", "E265", "E266",
    "E271", "E272", "E273", "E274", "E275",
    "E301", "E302", "E303", "E304", "E305", "E306",
    "E401", "E402",
    "E501", "E502",
    "E701", "E702", "E703", "E704",
    "E711", "E712", "E713", "E714",
    "E721", "E722",
    "E731",
    "E741", "E742", "E743",
    "E901", "E902", "E999",
    "W191",
    "W291", "W292", "W293",
    "W391",
    "W503", "W504",
    "W601", "W602", "W603", "W604", "W605",
    "F401", "F402", "F403", "F404", "F405",
    "F811", "F812",
    "F821", "F822", "F823",
    "F831",
    "F841",
    "F901",
    "C901",
}

# Codes documented at https://pypi.org/project/flake8-comprehensions/#rules
# (a duplicate "C413" literal was removed; the resulting set is unchanged)
DOCUMENTED_IN_FLAKE8COMPREHENSIONS: Set[str] = {
    "C400", "C401", "C402", "C403", "C404", "C405", "C406", "C407", "C408", "C409",
    "C410",
    "C411", "C412", "C413", "C414", "C415", "C416",
}

# Codes documented at https://github.com/PyCQA/flake8-bugbear#list-of-warnings
DOCUMENTED_IN_BUGBEAR: Set[str] = {
    "B001", "B002", "B003", "B004", "B005", "B006", "B007", "B008", "B009", "B010",
    "B011", "B012", "B013", "B014", "B015",
    "B301", "B302", "B303", "B304", "B305", "B306",
    "B901", "B902", "B903", "B950",
}
# fmt: on


# Example flake8 output lines this pattern must parse:
#   stdin:2: W802 undefined name 'foo'
#   stdin:3:6: T484 Name 'foo' is not defined
#   stdin:3:-100: W605 invalid escape sequence '\/'
#   stdin:3:1: E302 expected 2 blank lines, found 1
# The column is optional and may be negative; the code's trailing colon (some
# plugins emit one) is consumed but not captured.
RESULTS_RE: Pattern[str] = re.compile(
    r"""(?mx)
    ^
    (?P<file>.*?):
    (?P<line>\d+):
    (?:(?P<column>-?\d+):)?
    \s(?P<code>\S+?):?
    \s(?P<message>.*)
    $
    """
)


def _test_results_re() -> None:
    """
    >>> def t(s): return RESULTS_RE.search(s).groupdict()

    >>> t(r"file.py:80:1: E302 expected 2 blank lines, found 1")
    ... # doctest: +NORMALIZE_WHITESPACE
    {'file': 'file.py', 'line': '80', 'column': '1', 'code': 'E302',
     'message': 'expected 2 blank lines, found 1'}

    >>> t(r"file.py:7:1: P201: Resource `stdout` is acquired but not always released.")
    ... # doctest: +NORMALIZE_WHITESPACE
    {'file': 'file.py', 'line': '7', 'column': '1', 'code': 'P201',
     'message': 'Resource `stdout` is acquired but not always released.'}

    >>> t(r"file.py:8:-10: W605 invalid escape sequence '/'")
    ... # doctest: +NORMALIZE_WHITESPACE
    {'file': 'file.py', 'line': '8', 'column': '-10', 'code': 'W605',
     'message': "invalid escape sequence '/'"}
    """
    pass
def _run_command(
    args: List[str],
    *,
    extra_env: Optional[Dict[str, str]],
) -> "subprocess.CompletedProcess[str]":
    """Run ``args`` once, capturing decoded stdout/stderr, and log timing.

    Raises subprocess.CalledProcessError on a non-zero exit (check=True).

    BUGFIX: ``extra_env`` was previously only included in the debug log line
    and never applied to the child process, so variables such as
    FLAKE8_PLUGINS_PATH were silently ignored. It is now merged into the
    inherited environment.
    """
    logging.debug(
        "$ %s",
        " ".join(
            ([f"{k}={v}" for (k, v) in extra_env.items()] if extra_env else []) + args
        ),
    )
    # env=None inherits the parent environment unchanged; otherwise merge the
    # overrides on top of it.
    env = {**os.environ, **extra_env} if extra_env else None
    start_time = time.monotonic()
    try:
        return subprocess.run(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            check=True,
            encoding="utf-8",
            env=env,
        )
    finally:
        end_time = time.monotonic()
        logging.debug("took %dms", (end_time - start_time) * 1000)


def run_command(
    args: List[str],
    *,
    extra_env: Optional[Dict[str, str]],
    retries: int,
) -> "subprocess.CompletedProcess[str]":
    """Run ``args``, retrying up to ``retries`` times on flake8 timeouts.

    Only failures whose stdout matches flake8's "linting ... timed out"
    marker are retried (with a 1s pause); any other failure is re-raised
    immediately.
    """
    remaining_retries = retries
    while True:
        try:
            return _run_command(args, extra_env=extra_env)
        except subprocess.CalledProcessError as err:
            if remaining_retries == 0 or not re.match(
                r"^ERROR:1:1: X000 linting with .+ timed out after \d+ seconds",
                err.stdout,
            ):
                raise err
            remaining_retries -= 1
            logging.warning(
                "(%s/%s) Retrying because command failed with: %r",
                retries - remaining_retries,
                retries,
                err,
            )
            time.sleep(1)
def get_issue_severity(code: str) -> LintSeverity:
    """Map a flake8 code to the severity lintrunner should report.

    Stylistic/cosmetic families are downgraded to ADVICE, near-certain bugs
    (undefined names, syntax errors) are promoted to ERROR, and everything
    else defaults to WARNING.
    """
    # Low-stakes families:
    #   "B9":   bugbear opinionated warnings (B901/B902/B903/B950)
    #   "C4":   flake8-comprehensions
    #   "C9":   cyclomatic complexity
    #   "E2":   PEP8 horizontal whitespace
    #   "E3":   PEP8 blank lines
    #   "E5":   PEP8 line length
    #   "F401": name imported but unused
    #   "F403": star import used
    #   "F405": name possibly from star import
    #   "T400": type checking notes
    #   "T49":  internal type checker errors or unmatched messages
    advice_prefixes = (
        "B9",
        "C4",
        "C9",
        "E2",
        "E3",
        "E5",
        "F401",
        "F403",
        "F405",
        "T400",
        "T49",
    )
    if code.startswith(advice_prefixes):
        return LintSeverity.ADVICE

    # "F821" (undefined name) and "E999" (syntax error) are almost always
    # real bugs.
    if code.startswith(("F821", "E999")):
        return LintSeverity.ERROR

    # Everything else: PyFlakes ("F"), bugbear ("B"), PEP8 errors/warnings
    # ("E"/"W"), and possibly other plugins.
    return LintSeverity.WARNING


def get_issue_documentation_url(code: str) -> str:
    """Return a documentation URL for ``code``, or "" when none is known."""
    if code in DOCUMENTED_IN_FLAKE8RULES:
        return f"https://www.flake8rules.com/rules/{code}.html"
    if code in DOCUMENTED_IN_FLAKE8COMPREHENSIONS:
        return "https://pypi.org/project/flake8-comprehensions/#rules"
    if code in DOCUMENTED_IN_BUGBEAR:
        return "https://github.com/PyCQA/flake8-bugbear#list-of-warnings"
    return ""


def check_file(
    filename: str,
    binary: str,
    flake8_plugins_path: Optional[str],
    severities: Dict[str, LintSeverity],
    retries: int,
) -> List[LintMessage]:
    """Run flake8 on one file and convert its output to LintMessages.

    A failure to run flake8 at all is reported as a single "command-failed"
    message instead of being raised, so one bad file cannot abort the run.
    """
    extra_env = (
        {"FLAKE8_PLUGINS_PATH": flake8_plugins_path} if flake8_plugins_path else None
    )
    try:
        proc = run_command(
            [binary, "--exit-zero", filename],
            extra_env=extra_env,
            retries=retries,
        )
    except (OSError, subprocess.CalledProcessError) as err:
        if isinstance(err, subprocess.CalledProcessError):
            description = (
                "COMMAND (exit code {returncode})\n"
                "{command}\n\n"
                "STDERR\n{stderr}\n\n"
                "STDOUT\n{stdout}"
            ).format(
                returncode=err.returncode,
                command=" ".join(as_posix(x) for x in err.cmd),
                stderr=err.stderr.strip() or "(empty)",
                stdout=err.stdout.strip() or "(empty)",
            )
        else:
            description = f"Failed due to {err.__class__.__name__}:\n{err}"
        return [
            LintMessage(
                path=filename,
                line=None,
                char=None,
                code="FLAKE8",
                severity=LintSeverity.ERROR,
                name="command-failed",
                original=None,
                replacement=None,
                description=description,
            )
        ]

    messages = []
    for match in RESULTS_RE.finditer(proc.stdout):
        code = match["code"]
        column = match["column"]
        # flake8 occasionally emits negative column numbers; treat those as
        # "no column".
        char = (
            int(column) if column is not None and not column.startswith("-") else None
        )
        messages.append(
            LintMessage(
                path=match["file"],
                name=code,
                description="{}\nSee {}".format(
                    match["message"],
                    get_issue_documentation_url(code),
                ),
                line=int(match["line"]),
                char=char,
                code="FLAKE8",
                severity=severities.get(code) or get_issue_severity(code),
                original=None,
                replacement=None,
            )
        )
    return messages


def main() -> None:
    """CLI entry point: parse args, lint all files concurrently, print JSON lines."""
    parser = argparse.ArgumentParser(
        description="Flake8 wrapper linter.",
        fromfile_prefix_chars="@",
    )
    parser.add_argument(
        "--binary",
        required=True,
        help="flake8 binary path",
    )
    parser.add_argument(
        "--flake8-plugins-path",
        help="FLAKE8_PLUGINS_PATH env value",
    )
    parser.add_argument(
        "--severity",
        action="append",
        help="map code to severity (e.g. `B950:advice`)",
    )
    parser.add_argument(
        "--retries",
        default=3,
        type=int,
        help="times to retry timed out flake8",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="verbose logging",
    )
    parser.add_argument(
        "filenames",
        nargs="+",
        help="paths to lint",
    )
    args = parser.parse_args()

    # Verbose => log everything; otherwise DEBUG, degrading to INFO for very
    # large file sets to keep the log readable.
    if args.verbose:
        log_level = logging.NOTSET
    elif len(args.filenames) < 1000:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(
        format="<%(threadName)s:%(levelname)s> %(message)s",
        level=log_level,
        stream=sys.stderr,
    )

    flake8_plugins_path = (
        None
        if args.flake8_plugins_path is None
        else os.path.realpath(args.flake8_plugins_path)
    )

    # Parse "CODE:severity" overrides supplied on the command line.
    severities: Dict[str, LintSeverity] = {}
    for severity in args.severity or []:
        parts = severity.split(":", 1)
        assert len(parts) == 2, f"invalid severity `{severity}`"
        severities[parts[0]] = LintSeverity(parts[1])

    with concurrent.futures.ThreadPoolExecutor(
        max_workers=os.cpu_count(),
        thread_name_prefix="Thread",
    ) as executor:
        futures = {
            executor.submit(
                check_file,
                filename,
                args.binary,
                flake8_plugins_path,
                severities,
                args.retries,
            ): filename
            for filename in args.filenames
        }
        for future in concurrent.futures.as_completed(futures):
            try:
                for lint_message in future.result():
                    print(json.dumps(lint_message._asdict()), flush=True)
            except Exception:
                logging.critical('Failed at "%s".', futures[future])
                raise


if __name__ == "__main__":
    main()
annotations 2 | 3 | import argparse 4 | import concurrent.futures 5 | import json 6 | import logging 7 | import os 8 | import re 9 | import subprocess 10 | import sys 11 | import time 12 | from enum import Enum 13 | from typing import Any, BinaryIO, List, NamedTuple, Optional, Pattern 14 | 15 | 16 | IS_WINDOWS: bool = os.name == "nt" 17 | 18 | 19 | def eprint(*args: Any, **kwargs: Any) -> None: 20 | print(*args, file=sys.stderr, flush=True, **kwargs) 21 | 22 | 23 | class LintSeverity(str, Enum): 24 | ERROR = "error" 25 | WARNING = "warning" 26 | ADVICE = "advice" 27 | DISABLED = "disabled" 28 | 29 | 30 | class LintMessage(NamedTuple): 31 | path: str 32 | line: Optional[int] 33 | char: Optional[int] 34 | code: str 35 | severity: LintSeverity 36 | name: str 37 | original: Optional[str] 38 | replacement: Optional[str] 39 | description: Optional[str] 40 | 41 | 42 | def as_posix(name: str) -> str: 43 | return name.replace("\\", "/") if IS_WINDOWS else name 44 | 45 | 46 | SYNTAX_ERROR_ARROW_RE: Pattern[str] = re.compile( 47 | r"(?m)^( +--> )(.+)(:(?P\d+):(?P\d+))\n" 48 | ) 49 | 50 | SYNTAX_ERROR_PARSE_RE: Pattern[str] = re.compile(r"(?m)^failed to parse .*\n") 51 | 52 | 53 | def strip_path_from_error(error: str) -> str: 54 | # Remove full paths from the description to have deterministic messages. 
def run_command(
    args: list[str],
    *,
    stdin: BinaryIO | None = None,
    check: bool = False,
) -> subprocess.CompletedProcess[bytes]:
    """Run a subprocess, capturing stdout/stderr as raw bytes.

    Logs the command line and elapsed milliseconds at DEBUG level; timing is
    logged even when the command raises (check=True failure).
    """
    logging.debug("$ %s", " ".join(args))
    t_start = time.monotonic()
    try:
        return subprocess.run(
            args,
            capture_output=True,
            shell=False,
            stdin=stdin,
            check=check,
        )
    finally:
        t_end = time.monotonic()
        logging.debug("took %dms", (t_end - t_start) * 1000)


def check_file(
    filename: str,
    binary: str,
    config_path: str,
) -> List[LintMessage]:
    """Format ``filename`` with rustfmt and report a replacement suggestion.

    Returns [] when the file is already formatted; parse failures and other
    rustfmt errors are reported as LintMessages rather than raised.
    """
    try:
        with open(filename, "rb") as f:
            original = f.read()
        with open(filename, "rb") as f:
            proc = run_command(
                [
                    binary,
                    "--config-path",
                    config_path,
                    "--emit=stdout",
                    "--quiet",
                ],
                stdin=f,
                check=True,
            )
    except (OSError, subprocess.CalledProcessError) as err:
        # https://github.com/rust-lang/rustfmt#running
        # TODO: Fix the syntax error regexp to handle multiple issues and
        # to handle the empty result case.
        is_parse_failure = (
            isinstance(err, subprocess.CalledProcessError)
            and err.returncode == 1
            and err.stderr
        )
        if is_parse_failure:
            line = None
            char = None
            description = err.stderr.decode("utf-8")
            match = SYNTAX_ERROR_ARROW_RE.search(description)
            if match:
                line = int(match["line"])
                char = int(match["column"])
            return [
                LintMessage(
                    path=filename,
                    line=line,
                    char=char,
                    code="RUSTFMT",
                    severity=LintSeverity.ERROR,
                    name="parsing-error",
                    original=None,
                    replacement=None,
                    description=strip_path_from_error(description),
                )
            ]

        if isinstance(err, subprocess.CalledProcessError):
            description = (
                "COMMAND (exit code {returncode})\n"
                "{command}\n\n"
                "STDERR\n{stderr}\n\n"
                "STDOUT\n{stdout}"
            ).format(
                returncode=err.returncode,
                command=" ".join(as_posix(x) for x in err.cmd),
                stderr=err.stderr.decode("utf-8").strip() or "(empty)",
                stdout=err.stdout.decode("utf-8").strip() or "(empty)",
            )
        else:
            description = f"Failed due to {err.__class__.__name__}:\n{err}"
        return [
            LintMessage(
                path=filename,
                line=None,
                char=None,
                code="RUSTFMT",
                severity=LintSeverity.ERROR,
                name="command-failed",
                original=None,
                replacement=None,
                description=description,
            )
        ]

    formatted = proc.stdout
    if original == formatted:
        # Already formatted; nothing to report.
        return []

    if proc.stderr.startswith(b"error: "):
        # rustfmt exited 0 but still printed an error — surface it as a
        # probable rustfmt bug instead of a formatting suggestion.
        clean_err = strip_path_from_error(proc.stderr.decode("utf-8")).strip()
        return [
            LintMessage(
                path=filename,
                line=None,
                char=None,
                code="RUSTFMT",
                severity=LintSeverity.WARNING,
                name="rustfmt-bug",
                original=None,
                replacement=None,
                description=(
                    "Possible rustfmt bug. "
                    "rustfmt returned error output but didn't fail:\n{}"
                ).format(clean_err),
            )
        ]

    return [
        LintMessage(
            path=filename,
            line=1,
            char=1,
            code="RUSTFMT",
            severity=LintSeverity.WARNING,
            name="format",
            original=original.decode("utf-8"),
            replacement=formatted.decode("utf-8"),
            description="See https://github.com/rust-lang/rustfmt#tips",
        )
    ]


def main() -> None:
    """CLI entry point: parse args, format all files concurrently, print JSON lines."""
    parser = argparse.ArgumentParser(
        description="Format rust files with rustfmt.",
        fromfile_prefix_chars="@",
    )
    parser.add_argument(
        "--binary",
        required=True,
        help="rustfmt binary path",
    )
    parser.add_argument(
        "--config-path",
        required=True,
        help="rustfmt config path",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="verbose logging",
    )
    parser.add_argument(
        "filenames",
        nargs="+",
        help="paths to lint",
    )
    args = parser.parse_args()

    # Verbose => log everything; otherwise DEBUG, degrading to INFO for very
    # large file sets.
    if args.verbose:
        log_level = logging.NOTSET
    elif len(args.filenames) < 1000:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(
        format="<%(threadName)s:%(levelname)s> %(message)s",
        level=log_level,
        stream=sys.stderr,
    )

    with concurrent.futures.ThreadPoolExecutor(
        max_workers=os.cpu_count(),
        thread_name_prefix="Thread",
    ) as executor:
        futures = {
            executor.submit(check_file, x, args.binary, args.config_path): x
            for x in args.filenames
        }
        for future in concurrent.futures.as_completed(futures):
            try:
                for lint_message in future.result():
                    print(json.dumps(lint_message._asdict()), flush=True)
            except Exception:
                logging.critical('Failed at "%s".', futures[future])
                raise


if __name__ == "__main__":
    main()
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["maturin>=0.12,<0.13"] 3 | build-backend = "maturin" 4 | 5 | [project] 6 | name = "lintrunner" 7 | requires-python = ">=3.6" 8 | classifiers = [ 9 | "Programming Language :: Rust", 10 | "Programming Language :: Python :: Implementation :: CPython", 11 | "Programming Language :: Python :: Implementation :: PyPy", 12 | ] 13 | 14 | [project.urls] 15 | repository = "https://github.com/suo/lintrunner" 16 | 17 | [tool.maturin] 18 | bindings = "bin" 19 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Get help on options with `rustfmt --help=config` 2 | # Please keep these in alphabetical order. 3 | edition = "2018" 4 | merge_derives = false 5 | newline_style = "Native" 6 | use_field_init_shorthand = true 7 | -------------------------------------------------------------------------------- /src/git.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, convert::TryFrom, process::Command}; 2 | 3 | use crate::{ 4 | log_utils::{ensure_output, log_files}, 5 | path::AbsPath, 6 | version_control::VersionControl, 7 | }; 8 | use anyhow::{ensure, Context, Result}; 9 | use log::debug; 10 | use regex::Regex; 11 | 12 | pub struct Repo { 13 | root: AbsPath, 14 | } 15 | 16 | impl VersionControl for Repo { 17 | fn new() -> Result { 18 | // Retrieve the git root based on the current working directory. 
19 | let output = Command::new("git") 20 | .arg("rev-parse") 21 | .arg("--show-toplevel") 22 | .output()?; 23 | ensure!(output.status.success(), "Failed to determine git root"); 24 | let root = std::str::from_utf8(&output.stdout)?.trim(); 25 | Ok(Repo { 26 | root: AbsPath::try_from(root)?, 27 | }) 28 | } 29 | 30 | fn get_head(&self) -> Result { 31 | let output = Command::new("git").arg("rev-parse").arg("HEAD").output()?; 32 | ensure_output("git rev-parse", &output)?; 33 | let head = std::str::from_utf8(&output.stdout)?.trim(); 34 | Ok(head.to_string()) 35 | } 36 | 37 | fn get_merge_base_with(&self, merge_base_with: &str) -> Result { 38 | let output = Command::new("git") 39 | .arg("merge-base") 40 | .arg("HEAD") 41 | .arg(merge_base_with) 42 | .current_dir(&self.root) 43 | .output()?; 44 | 45 | ensure!( 46 | output.status.success(), 47 | format!("Failed to get merge-base between HEAD and {merge_base_with}") 48 | ); 49 | let merge_base = std::str::from_utf8(&output.stdout)?.trim(); 50 | Ok(merge_base.to_string()) 51 | } 52 | 53 | fn get_changed_files(&self, relative_to: Option<&str>) -> Result> { 54 | // Output of --name-status looks like: 55 | // D src/lib.rs 56 | // M foo/bar.baz 57 | let re = Regex::new(r"^[A-Z]\s+")?; 58 | 59 | // Retrieve changed files in current commit. 60 | let mut args = vec![ 61 | "diff-tree", 62 | "--ignore-submodules", 63 | "--no-commit-id", 64 | "--name-status", 65 | "-r", 66 | ]; 67 | if let Some(relative_to) = relative_to { 68 | args.push(relative_to); 69 | } 70 | args.push("HEAD"); 71 | 72 | let output = Command::new("git") 73 | .args(&args) 74 | .current_dir(&self.root) 75 | .output()?; 76 | ensure_output("git diff-tree", &output)?; 77 | 78 | let commit_files_str = std::str::from_utf8(&output.stdout)?; 79 | 80 | let commit_files: HashSet = commit_files_str 81 | .split('\n') 82 | .map(|x| x.to_string()) 83 | // Filter out deleted files. 84 | .filter(|line| !line.starts_with('D')) 85 | // Strip the status prefix. 
86 | .map(|line| re.replace(&line, "").to_string()) 87 | .filter(|line| !line.is_empty()) 88 | .collect(); 89 | 90 | log_files("Linting commit diff files: ", &commit_files); 91 | 92 | // Retrieve changed files in the working tree 93 | let output = Command::new("git") 94 | .arg("diff-index") 95 | .arg("--ignore-submodules") 96 | .arg("--no-commit-id") 97 | .arg("--name-status") 98 | .arg("-r") 99 | .arg("HEAD") 100 | .current_dir(&self.root) 101 | .output()?; 102 | ensure_output("git diff-index", &output)?; 103 | 104 | let working_tree_files_str = std::str::from_utf8(&output.stdout)?; 105 | let working_tree_files: HashSet = working_tree_files_str 106 | .lines() 107 | .filter(|line| !line.is_empty()) 108 | // Filter out deleted files. 109 | .filter(|line| !line.starts_with('D')) 110 | // Strip the status prefix. 111 | .map(|line| re.replace(line, "").to_string()) 112 | .collect(); 113 | 114 | log_files("Linting working tree diff files: ", &working_tree_files); 115 | 116 | let deleted_working_tree_files: HashSet = working_tree_files_str 117 | .lines() 118 | .filter(|line| !line.is_empty()) 119 | // Filter IN deleted files. 120 | .filter(|line| line.starts_with('D')) 121 | // Strip the status prefix. 
122 | .map(|line| re.replace(line, "").to_string()) 123 | .collect(); 124 | 125 | log_files( 126 | "These files were deleted in the working tree and won't be checked: ", 127 | &working_tree_files, 128 | ); 129 | 130 | let all_files = working_tree_files 131 | .union(&commit_files) 132 | .map(|s| s.to_string()) 133 | .collect::>(); 134 | 135 | let filtered_files = all_files 136 | .difference(&deleted_working_tree_files) 137 | .map(|f| format!("{}", self.root.join(f).display())) 138 | .filter_map(|f| match AbsPath::try_from(&f) { 139 | Ok(abs_path) => Some(abs_path), 140 | Err(_) => { 141 | eprintln!("Failed to find file while gathering files to lint: {}", f); 142 | None 143 | } 144 | }) 145 | .collect::>(); 146 | Ok(filtered_files) 147 | } 148 | 149 | fn get_all_files(&self, _under: Option<&AbsPath>) -> Result> { 150 | let output = Command::new("git") 151 | .arg("grep") 152 | .arg("-Il") 153 | .arg(".") 154 | .current_dir(&self.root) 155 | .output()?; 156 | 157 | ensure_output("git grep -Il", &output)?; 158 | 159 | let files = 160 | std::str::from_utf8(&output.stdout).context("failed to parse paths_cmd output")?; 161 | let files = files 162 | .lines() 163 | .map(|s| s.to_string()) 164 | .collect::>(); 165 | let mut files = files.into_iter().collect::>(); 166 | files.sort(); 167 | files 168 | .into_iter() 169 | .map(AbsPath::try_from) 170 | .collect::>() 171 | } 172 | } 173 | 174 | pub fn get_paths_from_cmd(paths_cmd: &str) -> Result> { 175 | debug!("Running paths_cmd: {}", paths_cmd); 176 | if paths_cmd.is_empty() { 177 | return Err(anyhow::Error::msg( 178 | "paths_cmd is empty. 
Please provide an executable command.", 179 | )); 180 | } 181 | let argv = shell_words::split(paths_cmd).context("failed to split paths_cmd")?; 182 | debug!("Parsed paths_cmd: {:?}", argv); 183 | 184 | let output = Command::new(&argv[0]) 185 | .args(&argv[1..]) 186 | .output() 187 | .context("failed to run provided paths_cmd")?; 188 | 189 | ensure!( 190 | output.status.success(), 191 | format!("Failed to run provided paths_cmd: '{}'", paths_cmd) 192 | ); 193 | 194 | let files = std::str::from_utf8(&output.stdout).context("failed to parse paths_cmd output")?; 195 | let files = files 196 | .lines() 197 | .map(|s| s.to_string()) 198 | .collect::>(); 199 | let mut files = files.into_iter().collect::>(); 200 | files.sort(); 201 | files 202 | .into_iter() 203 | .map(AbsPath::try_from) 204 | .collect::>() 205 | } 206 | 207 | #[cfg(test)] 208 | mod tests { 209 | use super::*; 210 | use crate::testing::GitCheckout; 211 | 212 | // Should properly detect changes in the commit (and not check other files) 213 | #[test] 214 | fn doesnt_detect_unchanged() -> Result<()> { 215 | let git = GitCheckout::new()?; 216 | git.write_file("test_1.txt", "Initial commit")?; 217 | git.write_file("test_2.txt", "Initial commit")?; 218 | git.write_file("test_3.txt", "Initial commit")?; 219 | 220 | git.add(".")?; 221 | git.commit("commit 1")?; 222 | 223 | // Don't write anthing to file 2 for this! 224 | git.write_file("test_1.txt", "commit 2")?; 225 | 226 | git.add(".")?; 227 | git.commit("commit 2")?; 228 | 229 | // Add some uncomitted changes to the working tree 230 | git.write_file("test_3.txt", "commit 2")?; 231 | 232 | let files = git.changed_files(None)?; 233 | assert_eq!(files.len(), 2); 234 | assert!(files.contains(&"test_1.txt".to_string())); 235 | assert!(files.contains(&"test_3.txt".to_string())); 236 | Ok(()) 237 | } 238 | 239 | // Files that were deleted in the commit should not be checked, since 240 | // obviously they are gone. 
241 | #[test] 242 | fn deleted_files_in_commit() -> Result<()> { 243 | let git = GitCheckout::new()?; 244 | git.write_file("test_1.txt", "Initial commit")?; 245 | git.write_file("test_2.txt", "Initial commit")?; 246 | git.write_file("test_3.txt", "Initial commit")?; 247 | 248 | git.add(".")?; 249 | git.commit("commit 1")?; 250 | 251 | git.rm_file("test_1.txt")?; 252 | 253 | let files = git.changed_files(None)?; 254 | assert_eq!(files.len(), 2); 255 | 256 | git.add(".")?; 257 | git.commit("removal commit")?; 258 | 259 | // Remove a file in the working tree as well. 260 | git.rm_file("test_2.txt")?; 261 | 262 | let files = git.changed_files(None)?; 263 | assert_eq!(files.len(), 0); 264 | Ok(()) 265 | } 266 | 267 | // Files that were deleted/moved in the working tree should not be checked, 268 | // since obviously they are gone. 269 | #[test] 270 | fn moved_files_working_tree() -> Result<()> { 271 | let git = GitCheckout::new()?; 272 | git.write_file("test_1.txt", "Initial commit")?; 273 | git.add(".")?; 274 | git.commit("commit 1")?; 275 | 276 | git.write_file("test_2.txt", "foo")?; 277 | git.add(".")?; 278 | git.commit("commit 2")?; 279 | 280 | let output = git.run("mv").arg("test_2.txt").arg("new.txt").output()?; 281 | assert!(output.status.success()); 282 | 283 | let files = git.changed_files(None)?; 284 | assert!(files.contains(&"new.txt".to_string())); 285 | Ok(()) 286 | } 287 | 288 | #[test] 289 | fn relative_revision() -> Result<()> { 290 | let git = GitCheckout::new()?; 291 | git.write_file("test_1.txt", "Initial commit")?; 292 | git.write_file("test_2.txt", "Initial commit")?; 293 | git.write_file("test_3.txt", "Initial commit")?; 294 | 295 | git.add(".")?; 296 | git.commit("I am HEAD~2")?; 297 | 298 | git.write_file("test_1.txt", "foo")?; 299 | 300 | git.add(".")?; 301 | git.commit("I am HEAD~1")?; 302 | 303 | git.write_file("test_2.txt", "foo")?; 304 | 305 | git.add(".")?; 306 | git.commit("I am HEAD")?; 307 | 308 | // Add some uncomitted changes to the 
working tree 309 | git.write_file("test_3.txt", "commit 2")?; 310 | 311 | { 312 | // Relative to the HEAD commit, only the working tree changes should 313 | // be checked. 314 | let files = git.changed_files(Some("HEAD"))?; 315 | assert_eq!(files.len(), 1); 316 | assert!(files.contains(&"test_3.txt".to_string())); 317 | } 318 | { 319 | let files = git.changed_files(Some("HEAD~1"))?; 320 | assert_eq!(files.len(), 2); 321 | assert!(files.contains(&"test_2.txt".to_string())); 322 | assert!(files.contains(&"test_3.txt".to_string())); 323 | } 324 | { 325 | let files = git.changed_files(Some("HEAD~2"))?; 326 | assert_eq!(files.len(), 3); 327 | assert!(files.contains(&"test_1.txt".to_string())); 328 | assert!(files.contains(&"test_2.txt".to_string())); 329 | assert!(files.contains(&"test_3.txt".to_string())); 330 | } 331 | Ok(()) 332 | } 333 | 334 | // File deletions should work correctly even if a relative revision is 335 | // specified. 336 | #[test] 337 | fn deleted_files_relative_revision() -> Result<()> { 338 | let git = GitCheckout::new()?; 339 | git.write_file("test_1.txt", "Initial commit")?; 340 | git.write_file("test_2.txt", "Initial commit")?; 341 | git.write_file("test_3.txt", "Initial commit")?; 342 | 343 | git.add(".")?; 344 | git.commit("commit 1")?; 345 | 346 | git.rm_file("test_1.txt")?; 347 | 348 | let files = git.changed_files(None)?; 349 | assert_eq!(files.len(), 2); 350 | 351 | git.add(".")?; 352 | git.commit("removal commit")?; 353 | 354 | git.write_file("test_2.txt", "Initial commit")?; 355 | git.add(".")?; 356 | git.commit("another commit")?; 357 | 358 | let files = git.changed_files(Some("HEAD~2"))?; 359 | assert_eq!(files.len(), 1); 360 | Ok(()) 361 | } 362 | 363 | #[test] 364 | fn invalid_get_paths_from_cmd_fails() { 365 | assert!(get_paths_from_cmd("asoidjfoaisdjf").is_err()); 366 | assert!(get_paths_from_cmd("false").is_err()); 367 | } 368 | 369 | #[test] 370 | fn merge_base_with() -> Result<()> { 371 | let git = GitCheckout::new()?; 372 | 
git.write_file("test_1.txt", "Initial commit")?; 373 | git.write_file("test_2.txt", "Initial commit")?; 374 | git.write_file("test_3.txt", "Initial commit")?; 375 | git.write_file("test_4.txt", "Initial commit")?; 376 | 377 | git.add(".")?; 378 | git.commit("I am main")?; 379 | 380 | git.checkout_new_branch("branch1")?; 381 | git.write_file("test_1.txt", "foo")?; 382 | git.add(".")?; 383 | git.commit("I am on branch1")?; 384 | 385 | git.checkout_new_branch("branch2")?; 386 | git.write_file("test_2.txt", "foo")?; 387 | git.add(".")?; 388 | git.commit("I am branch2")?; 389 | 390 | git.checkout_new_branch("branch3")?; 391 | git.write_file("test_3.txt", "blah")?; 392 | git.add(".")?; 393 | git.commit("I am branch3")?; 394 | 395 | // Add some uncomitted changes to the working tree 396 | git.write_file("test_4.txt", "blahblah")?; 397 | 398 | { 399 | let merge_base = Some(git.merge_base_with("branch2")?); 400 | let files = git.changed_files(merge_base.as_deref())?; 401 | assert_eq!(files.len(), 2); 402 | assert!(files.contains(&"test_4.txt".to_string())); 403 | assert!(files.contains(&"test_3.txt".to_string())); 404 | } 405 | { 406 | let merge_base = Some(git.merge_base_with("branch1")?); 407 | let files = git.changed_files(merge_base.as_deref())?; 408 | assert_eq!(files.len(), 3); 409 | assert!(files.contains(&"test_4.txt".to_string())); 410 | assert!(files.contains(&"test_3.txt".to_string())); 411 | assert!(files.contains(&"test_2.txt".to_string())); 412 | } 413 | Ok(()) 414 | } 415 | } 416 | -------------------------------------------------------------------------------- /src/init.rs: -------------------------------------------------------------------------------- 1 | use crate::{lint_config::LintRunnerConfig, persistent_data::PersistentDataStore}; 2 | use anyhow::Result; 3 | use console::{style, Term}; 4 | 5 | // Check whether or not the currently configured init commands are different 6 | // from the last time we ran `init`, and warn the user if so. 
7 | pub fn check_init_changed( 8 | persistent_data_store: &PersistentDataStore, 9 | current_config: &LintRunnerConfig, 10 | ) -> Result<()> { 11 | let stderr = Term::stderr(); 12 | 13 | let last_init = persistent_data_store.last_init()?; 14 | if last_init.is_none() { 15 | stderr.write_line(&format!( 16 | "{}", 17 | style( 18 | "WARNING: No previous init data found. If this is the first time you're \ 19 | running lintrunner, you should run `lintrunner init`.", 20 | ) 21 | .bold() 22 | .yellow(), 23 | ))?; 24 | return Ok(()); 25 | } 26 | let last_init = last_init.unwrap(); 27 | let old_config: LintRunnerConfig = serde_json::from_str(&last_init)?; 28 | 29 | let old_init_commands: Vec<_> = old_config.linters.iter().map(|l| &l.init_command).collect(); 30 | let current_init_commands: Vec<_> = current_config 31 | .linters 32 | .iter() 33 | .map(|l| &l.init_command) 34 | .collect(); 35 | 36 | if old_init_commands != current_init_commands { 37 | stderr.write_line(&format!( 38 | "{}", 39 | style( 40 | "WARNING: The init commands have changed since you last ran lintrunner. 
\ 41 | You may need to run `lintrunner init`.", 42 | ) 43 | .bold() 44 | .yellow(), 45 | ))?; 46 | } 47 | 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Context, Result}; 2 | use clap::ArgEnum; 3 | use console::{style, Term}; 4 | use indicatif::{MultiProgress, ProgressBar}; 5 | use linter::Linter; 6 | use log::debug; 7 | use path::AbsPath; 8 | use persistent_data::PersistentDataStore; 9 | use render::{render_lint_messages, render_lint_messages_json}; 10 | use std::collections::HashMap; 11 | use std::collections::HashSet; 12 | use std::convert::TryFrom; 13 | use std::fs::OpenOptions; 14 | use std::sync::{Arc, Mutex}; 15 | use std::thread; 16 | use version_control::VersionControl; 17 | 18 | pub mod git; 19 | pub mod init; 20 | pub mod lint_config; 21 | pub mod lint_message; 22 | pub mod linter; 23 | pub mod log_utils; 24 | pub mod path; 25 | pub mod persistent_data; 26 | pub mod rage; 27 | pub mod render; 28 | pub mod sapling; 29 | pub mod version_control; 30 | 31 | #[cfg(test)] 32 | pub mod testing; 33 | 34 | use git::get_paths_from_cmd; 35 | use lint_message::LintMessage; 36 | use render::PrintedLintErrors; 37 | 38 | use crate::render::render_lint_messages_oneline; 39 | 40 | fn group_lints_by_file( 41 | all_lints: &mut HashMap, Vec>, 42 | lints: Vec, 43 | ) { 44 | lints.into_iter().fold(all_lints, |acc, lint| { 45 | acc.entry(lint.path.clone()).or_default().push(lint); 46 | acc 47 | }); 48 | } 49 | 50 | fn apply_patches(lint_messages: &[LintMessage]) -> Result<()> { 51 | let mut patched_paths = HashSet::new(); 52 | for lint_message in lint_messages { 53 | if let (Some(replacement), Some(path)) = (&lint_message.replacement, &lint_message.path) { 54 | let path = AbsPath::try_from(path)?; 55 | if patched_paths.contains(&path) { 56 | bail!( 57 | "Two different linters proposed changes for the same 
file: 58 | {}.\n This is not yet supported, file an issue if you want it.", 59 | path.display() 60 | ); 61 | } 62 | patched_paths.insert(path.clone()); 63 | 64 | std::fs::write(&path, replacement).context(format!( 65 | "Failed to write apply patch to file: '{}'", 66 | path.display() 67 | ))?; 68 | } 69 | } 70 | Ok(()) 71 | } 72 | 73 | pub fn do_init( 74 | linters: Vec, 75 | dry_run: bool, 76 | persistent_data_store: &PersistentDataStore, 77 | config_paths: &Vec, 78 | ) -> Result { 79 | debug!( 80 | "Initializing linters: {:?}", 81 | linters.iter().map(|l| &l.code).collect::>() 82 | ); 83 | 84 | for linter in linters { 85 | linter.init(dry_run)?; 86 | } 87 | persistent_data_store.update_last_init(config_paths)?; 88 | Ok(0) 89 | } 90 | 91 | fn remove_patchable_lints(lints: Vec) -> Vec { 92 | lints 93 | .into_iter() 94 | .filter(|lint| lint.replacement.is_none()) 95 | .collect() 96 | } 97 | 98 | fn get_paths_from_input(paths: Vec) -> Result> { 99 | let mut ret = Vec::new(); 100 | for path in &paths { 101 | let path = AbsPath::try_from(path) 102 | .with_context(|| format!("Failed to find provided file: '{}'", path))?; 103 | ret.push(path); 104 | } 105 | Ok(ret) 106 | } 107 | 108 | fn get_paths_from_file(file: AbsPath) -> Result> { 109 | let file = std::fs::read_to_string(&file).with_context(|| { 110 | format!( 111 | "Failed to read file specified in `--paths-from`: '{}'", 112 | file.display() 113 | ) 114 | })?; 115 | let files = file 116 | .trim() 117 | .lines() 118 | .map(|l| l.to_string()) 119 | .collect::>(); 120 | get_paths_from_input(files) 121 | } 122 | 123 | /// Represents the set of paths the user wants to lint. 124 | pub enum PathsOpt { 125 | /// The user didn't specify any paths, so we'll automatically determine 126 | /// which paths to check. 
127 | Auto, 128 | AllFiles, 129 | PathsFile(AbsPath), 130 | PathsCmd(String), 131 | Paths(Vec), 132 | } 133 | 134 | /// Represents the scope of revisions that the auto paths finder will look at to 135 | /// determine which paths to lint. 136 | pub enum RevisionOpt { 137 | /// Look at changes in HEAD and changes in the working tree. 138 | Head, 139 | /// Look at changes from revision..HEAD and changes in the working tree. 140 | Revision(String), 141 | /// Look at changes from merge_base(revision, HEAD)..HEAD and changes in the working tree. 142 | MergeBaseWith(String), 143 | } 144 | 145 | #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ArgEnum)] 146 | pub enum RenderOpt { 147 | Default, 148 | Json, 149 | Oneline, 150 | } 151 | 152 | pub fn get_version_control() -> Result> { 153 | let repo = git::Repo::new(); 154 | if let Ok(repo) = repo { 155 | return Ok(Box::new(repo)); 156 | } 157 | 158 | Ok(Box::new(sapling::Repo::new()?)) 159 | } 160 | 161 | #[allow(clippy::too_many_arguments)] 162 | pub fn do_lint( 163 | linters: Vec, 164 | paths_opt: PathsOpt, 165 | should_apply_patches: bool, 166 | render_opt: RenderOpt, 167 | enable_spinners: bool, 168 | revision_opt: RevisionOpt, 169 | tee_json: Option, 170 | only_lint_under_config_dir: bool, 171 | ) -> Result { 172 | debug!( 173 | "Running linters: {:?}", 174 | linters.iter().map(|l| &l.code).collect::>() 175 | ); 176 | let repo = get_version_control()?; 177 | let mut stdout = Term::stdout(); 178 | if linters.is_empty() { 179 | stdout.write_line("No linters ran.")?; 180 | return Ok(0); 181 | } 182 | 183 | let config_dir = if only_lint_under_config_dir { 184 | Some(AbsPath::try_from(linters[0].get_config_dir())?) 
185 | } else { 186 | None 187 | }; 188 | 189 | let mut files = match paths_opt { 190 | PathsOpt::Auto => { 191 | let relative_to = match revision_opt { 192 | RevisionOpt::Head => None, 193 | RevisionOpt::Revision(revision) => Some(revision), 194 | RevisionOpt::MergeBaseWith(merge_base_with) => { 195 | Some(repo.get_merge_base_with(&merge_base_with)?) 196 | } 197 | }; 198 | debug!("Relative to: {:?}", relative_to); 199 | repo.get_changed_files(relative_to.as_deref())? 200 | } 201 | PathsOpt::PathsCmd(paths_cmd) => get_paths_from_cmd(&paths_cmd)?, 202 | PathsOpt::Paths(paths) => get_paths_from_input(paths)?, 203 | PathsOpt::PathsFile(file) => get_paths_from_file(file)?, 204 | PathsOpt::AllFiles => repo.get_all_files(config_dir.as_ref())?, 205 | }; 206 | 207 | // Sort and unique the files so we pass a consistent ordering to linters 208 | if let Some(config_dir) = config_dir { 209 | files.retain(|path| path.starts_with(&config_dir)); 210 | } 211 | files.sort(); 212 | files.dedup(); 213 | 214 | let files = Arc::new(files); 215 | 216 | log_utils::log_files("Linting files: ", &files); 217 | 218 | let mut thread_handles = Vec::new(); 219 | let spinners = Arc::new(MultiProgress::new()); 220 | 221 | // Too lazy to learn rust's fancy concurrent programming stuff, just spawn a thread per linter and join them. 
222 | let all_lints = Arc::new(Mutex::new(HashMap::new())); 223 | 224 | for linter in linters { 225 | let all_lints = Arc::clone(&all_lints); 226 | let files = Arc::clone(&files); 227 | let spinners = Arc::clone(&spinners); 228 | 229 | let handle = thread::spawn(move || -> Result<()> { 230 | let mut spinner = None; 231 | if enable_spinners { 232 | let _spinner = spinners.add(ProgressBar::new_spinner()); 233 | _spinner.set_message(format!("{} running...", linter.code)); 234 | _spinner.enable_steady_tick(100); 235 | spinner = Some(_spinner); 236 | } 237 | 238 | let lints = linter.run(&files); 239 | 240 | // If we're applying patches later, don't consider lints that would 241 | // be fixed by that. 242 | let lints = if should_apply_patches { 243 | apply_patches(&lints)?; 244 | remove_patchable_lints(lints) 245 | } else { 246 | lints 247 | }; 248 | 249 | let mut all_lints = all_lints.lock().unwrap(); 250 | let is_success = lints.is_empty(); 251 | 252 | group_lints_by_file(&mut all_lints, lints); 253 | 254 | let spinner_message = if is_success { 255 | format!("{} {}", linter.code, style("success!").green()) 256 | } else { 257 | format!("{} {}", linter.code, style("failure").red()) 258 | }; 259 | 260 | if enable_spinners { 261 | spinner.unwrap().finish_with_message(spinner_message); 262 | } 263 | Ok(()) 264 | }); 265 | thread_handles.push(handle); 266 | } 267 | 268 | spinners.join()?; 269 | for handle in thread_handles { 270 | handle.join().unwrap()?; 271 | } 272 | 273 | // Unwrap is fine because all other owners hsould have been joined. 274 | let all_lints = all_lints.lock().unwrap(); 275 | 276 | // Flush the logger before rendering results. 
277 | log::logger().flush(); 278 | 279 | let did_print = match render_opt { 280 | RenderOpt::Default => render_lint_messages(&mut stdout, &all_lints)?, 281 | RenderOpt::Json => render_lint_messages_json(&mut stdout, &all_lints)?, 282 | RenderOpt::Oneline => render_lint_messages_oneline(&mut stdout, &all_lints)?, 283 | }; 284 | 285 | if let Some(tee_json) = tee_json { 286 | let mut file = OpenOptions::new() 287 | .write(true) 288 | .create_new(true) 289 | .open(tee_json) 290 | .context("Couldn't open file for --tee-json")?; 291 | render_lint_messages_json(&mut file, &all_lints)?; 292 | } 293 | 294 | if should_apply_patches { 295 | stdout.write_line("Successfully applied all patches.")?; 296 | } 297 | 298 | match did_print { 299 | PrintedLintErrors::No => Ok(0), 300 | PrintedLintErrors::Yes => Ok(1), 301 | } 302 | } 303 | 304 | #[cfg(test)] 305 | mod tests { 306 | use super::*; 307 | use std::{convert::TryFrom, io::Write}; 308 | use tempfile::NamedTempFile; 309 | 310 | #[test] 311 | fn test_paths_file() -> Result<()> { 312 | let file1 = NamedTempFile::new()?; 313 | let file2 = NamedTempFile::new()?; 314 | 315 | let mut paths_file = NamedTempFile::new()?; 316 | 317 | writeln!(paths_file, "{}", file1.path().display())?; 318 | writeln!(paths_file, "{}", file2.path().display())?; 319 | 320 | let paths_file = AbsPath::try_from(paths_file.path())?; 321 | let paths = get_paths_from_file(paths_file)?; 322 | 323 | let file1_abspath = AbsPath::try_from(file1.path())?; 324 | let file2_abspath = AbsPath::try_from(file2.path())?; 325 | 326 | assert!(paths.contains(&file1_abspath)); 327 | assert!(paths.contains(&file2_abspath)); 328 | 329 | Ok(()) 330 | } 331 | } 332 | -------------------------------------------------------------------------------- /src/lint_config.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, fs}; 2 | 3 | use crate::{linter::Linter, path::AbsPath}; 4 | use anyhow::{bail, ensure, Context, 
Result}; 5 | use figment::{ 6 | providers::{Format, Toml}, 7 | Figment, 8 | }; 9 | use glob::Pattern; 10 | use log::debug; 11 | use serde::{Deserialize, Serialize}; 12 | 13 | #[derive(Serialize, Deserialize)] 14 | pub struct LintRunnerConfig { 15 | #[serde(rename = "linter")] 16 | pub linters: Vec, 17 | 18 | /// The default value for the `merge_base_with` parameter. 19 | /// Recommend setting this is set to your default branch, e.g. `main` 20 | #[serde()] 21 | pub merge_base_with: Option, 22 | 23 | /// If set, will only lint files under the directory where the configuration file is located and its subdirectories. 24 | /// Supercedes command line argument. 25 | #[serde()] 26 | pub only_lint_under_config_dir: Option, 27 | } 28 | 29 | fn is_false(b: &bool) -> bool { 30 | !(*b) 31 | } 32 | 33 | /// Represents a single linter, along with all the information necessary to invoke it. 34 | /// 35 | /// This goes in the linter configuration TOML file. 36 | /// 37 | /// # Examples: 38 | /// 39 | /// ```toml 40 | /// [[linter]] 41 | /// code = 'NOQA' 42 | /// include_patterns = ['**/*.py', '**/*.pyi'] 43 | /// exclude_patterns = ['caffe2/**'] 44 | /// command = [ 45 | /// 'python3', 46 | /// 'linters/check_noqa.py', 47 | /// '--', 48 | /// '@{{PATHSFILE}}' 49 | /// ] 50 | /// ``` 51 | #[derive(Serialize, Deserialize, Clone)] 52 | pub struct LintConfig { 53 | /// The name of the linter, conventionally capitals and numbers, no spaces, 54 | /// dashes, or underscores 55 | /// 56 | /// # Examples 57 | /// - `'FLAKE8'` 58 | /// - `'CLANGFORMAT'` 59 | pub code: String, 60 | 61 | /// A list of UNIX-style glob patterns. Paths matching any of these patterns 62 | /// will be linted. Patterns should be specified relative to the location 63 | /// of the config file. 
64 | /// 65 | /// # Examples 66 | /// - Matching against everything: 67 | /// ```toml 68 | /// include_patterns = ['**'] 69 | /// ``` 70 | /// - Matching against a specific file extension: 71 | /// ```toml 72 | /// include_patterns = ['include/**/*.h', 'src/**/*.cpp'] 73 | /// ``` 74 | /// - Match a specific file: 75 | /// ```toml 76 | /// include_patterns = ['include/caffe2/caffe2_operators.h', 'torch/csrc/jit/script_type.h'] 77 | /// ``` 78 | pub include_patterns: Vec, 79 | 80 | /// A list of UNIX-style glob patterns. Paths matching any of these patterns 81 | /// will be never be linted, even if they match an include pattern. 82 | /// 83 | /// For examples, see: [`LintConfig::include_patterns`] 84 | #[serde(skip_serializing_if = "Option::is_none")] 85 | pub exclude_patterns: Option>, 86 | 87 | /// A list of arguments describing how the linter will be called. lintrunner 88 | /// will create a subprocess and invoke this command. 89 | /// 90 | /// If the string `{{PATHSFILE}}` is present in the list, it will be 91 | /// replaced by the location of a file containing a list of paths to lint, 92 | /// one per line. 93 | /// 94 | /// The paths in `{{PATHSFILE}}` will always be canoncalized (e.g. they are 95 | /// absolute paths with symlinks resolved). 96 | /// 97 | /// Commands are run with the current working directory set to the parent 98 | /// directory of the config file. 99 | /// 100 | /// # Examples 101 | /// - Calling a Python script: 102 | /// ```toml 103 | /// command = ['python3', 'my_linter.py', '--', '@{{PATHSFILE}}'] 104 | /// ``` 105 | pub command: Vec, 106 | 107 | /// A list of arguments describing how to set up the right dependencies for 108 | /// this linter. This command will be run when `lintrunner init` is called. 109 | /// 110 | /// The string `{{DRYRUN}}` must be present in the arguments provided. It 111 | /// will be 1 if `lintrunner init --dry-run` is called, 0 otherwise. 
112 | /// 113 | /// If `{{DRYRUN}}` is set, this command is expected to not make any changes 114 | /// to the user's environment, instead it should only print what it will do. 115 | /// 116 | /// Commands are run with the current working directory set to the parent 117 | /// directory of the config file. 118 | /// 119 | /// # Examples 120 | /// - Calling a Python script: 121 | /// ```toml 122 | /// init_command = ['python3', 'my_linter_init.py', '--dry-run={{DRYRUN}}'] 123 | /// ``` 124 | pub init_command: Option>, 125 | 126 | /// If true, this linter will be considered a formatter, and will invoked by 127 | /// `lintrunner format`. Formatters should be *safe*: people should be able 128 | /// to blindly accept the output without worrying that it will change the 129 | /// meaning of their code. 130 | #[serde(skip_serializing_if = "is_false", default = "bool::default")] 131 | pub is_formatter: bool, 132 | } 133 | 134 | /// Given options specified by the user, return a list of linters to run. 135 | pub fn get_linters_from_configs( 136 | linter_configs: &[LintConfig], 137 | skipped_linters: Option>, 138 | taken_linters: Option>, 139 | primary_config_path: &AbsPath, 140 | ) -> Result> { 141 | let mut linters = Vec::new(); 142 | let mut all_linters: HashSet = HashSet::new(); 143 | 144 | for lint_config in linter_configs { 145 | if all_linters.contains(&lint_config.code) { 146 | bail!( 147 | "Invalid linter configuration: linter '{}' is defined multiple times.", 148 | lint_config.code 149 | ); 150 | } 151 | all_linters.insert(lint_config.code.clone()); 152 | 153 | let include_patterns = patterns_from_strs(&lint_config.include_patterns)?; 154 | let exclude_patterns = if let Some(exclude_patterns) = &lint_config.exclude_patterns { 155 | patterns_from_strs(exclude_patterns)? 
156 | } else { 157 | Vec::new() 158 | }; 159 | 160 | ensure!( 161 | !lint_config.command.is_empty(), 162 | "Invalid linter configuration: '{}' has an empty command list.", 163 | lint_config.code 164 | ); 165 | 166 | linters.push(Linter { 167 | code: lint_config.code.clone(), 168 | include_patterns, 169 | exclude_patterns, 170 | commands: lint_config.command.clone(), 171 | init_commands: lint_config.init_command.clone(), 172 | primary_config_path: primary_config_path.clone(), 173 | }); 174 | } 175 | 176 | debug!("Found linters: {:?}", all_linters); 177 | 178 | // Apply --take 179 | if let Some(taken_linters) = taken_linters { 180 | debug!("Taking linters: {:?}", taken_linters); 181 | for linter in &taken_linters { 182 | ensure!( 183 | all_linters.contains(linter), 184 | "Unknown linter specified in --take: {}. These linters are available: {:?}", 185 | linter, 186 | all_linters, 187 | ); 188 | } 189 | 190 | linters.retain(|linter| taken_linters.contains(&linter.code)); 191 | } 192 | 193 | // Apply --skip 194 | if let Some(skipped_linters) = skipped_linters { 195 | debug!("Skipping linters: {:?}", skipped_linters); 196 | for linter in &skipped_linters { 197 | ensure!( 198 | all_linters.contains(linter), 199 | "Unknown linter specified in --skip: {}. 
These linters are available: {:?}", 200 | linter, 201 | all_linters, 202 | ); 203 | } 204 | linters.retain(|linter| !skipped_linters.contains(&linter.code)); 205 | } 206 | Ok(linters) 207 | } 208 | 209 | impl LintRunnerConfig { 210 | pub fn new(paths: &Vec) -> Result { 211 | let mut config = Figment::new(); 212 | for path in paths { 213 | let config_str = fs::read_to_string(path) 214 | .context(format!("Could not read config file at {}", path))?; 215 | 216 | // schema check 217 | let _test_str = toml::from_str::(&config_str) 218 | .context(format!("Config file at {} had invalid schema", path))?; 219 | 220 | config = config.merge(Toml::file(path)); 221 | } 222 | 223 | let config = config 224 | .extract::() 225 | .context("Config file had invalid schema")?; 226 | 227 | for linter in &config.linters { 228 | if let Some(init_args) = &linter.init_command { 229 | if init_args.iter().all(|arg| !arg.contains("{{DRYRUN}}")) { 230 | bail!( 231 | "Config for linter {} defines init args \ 232 | but does not take a {{{{DRYRUN}}}} argument.", 233 | linter.code 234 | ); 235 | } 236 | } 237 | } 238 | Ok(config) 239 | } 240 | } 241 | 242 | fn patterns_from_strs(pattern_strs: &[String]) -> Result> { 243 | pattern_strs 244 | .iter() 245 | .map(|pattern_str| { 246 | Pattern::new(pattern_str).map_err(|err| { 247 | anyhow::Error::msg(err) 248 | .context("Could not parse pattern from linter configuration.") 249 | }) 250 | }) 251 | .collect() 252 | } 253 | -------------------------------------------------------------------------------- /src/lint_message.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Deserialize, Clone, Serialize, Copy)] 4 | #[serde(rename_all = "lowercase")] 5 | pub enum LintSeverity { 6 | Error, 7 | Warning, 8 | Advice, 9 | Disabled, 10 | } 11 | 12 | impl LintSeverity { 13 | pub fn label(self) -> &'static str { 14 | match self { 15 | Self::Error => "Error", 16 | 
Self::Warning => "Warning", 17 | Self::Advice => "Advice", 18 | Self::Disabled => "Disabled", 19 | } 20 | } 21 | } 22 | 23 | /// Represents a single lint message. This version of the struct is used as the 24 | /// canonical protocol representation, intended to be serialized directly into JSON. 25 | #[derive(Debug, Deserialize, Clone, Serialize)] 26 | pub struct LintMessage { 27 | /// Path to the file this lint message pertains to. 28 | /// 29 | /// This can either be an absolute path, or relative to the current working 30 | /// directory when `lintrunner` was invoked. 31 | /// 32 | /// When the path is None, this message will be displayed as a general 33 | /// linter error. 34 | pub path: Option, 35 | 36 | /// The line number that the lint message pertains to. 37 | pub line: Option, 38 | 39 | /// The column number that the lint message pertains to. 40 | pub char: Option, 41 | 42 | /// Linter code (e.g. `FLAKE8`). Must match the code specified in the linter config. 43 | pub code: String, 44 | 45 | /// The severity of the lint message. 46 | pub severity: LintSeverity, 47 | 48 | /// The name of the type of lint message, e.g. "syntax error" 49 | pub name: String, 50 | 51 | /// A more substantive description of the lint message. This can include 52 | /// suggestions for remediation, links to further documentation, etc. 53 | pub description: Option, 54 | 55 | /// The original text of the entire file, encoded as a utf-8 string. 56 | #[serde(skip_serializing_if = "Option::is_none")] 57 | pub original: Option, 58 | 59 | /// If a fix was suggested, this is the replacement text of the entire file, 60 | /// encoded as a utf-8 string. 
61 | #[serde(skip_serializing_if = "Option::is_none")] 62 | pub replacement: Option, 63 | } 64 | -------------------------------------------------------------------------------- /src/linter.rs: -------------------------------------------------------------------------------- 1 | use std::io::Write; 2 | use std::path::Path; 3 | use std::process::Command; 4 | 5 | use crate::{ 6 | lint_message::LintMessage, 7 | log_utils::{ensure_output, log_files}, 8 | path::{path_relative_from, AbsPath}, 9 | }; 10 | use anyhow::{anyhow, bail, ensure, Context, Result}; 11 | use glob::{MatchOptions, Pattern}; 12 | use log::{debug, info}; 13 | 14 | pub struct Linter { 15 | pub code: String, 16 | pub include_patterns: Vec, 17 | pub exclude_patterns: Vec, 18 | pub commands: Vec, 19 | pub init_commands: Option>, 20 | pub primary_config_path: AbsPath, 21 | } 22 | 23 | fn matches_relative_path(base: &Path, from: &Path, pattern: &Pattern) -> bool { 24 | // Unwrap ok because we already checked that both paths are absolute. 25 | let relative_path = path_relative_from(from, base).unwrap(); 26 | pattern.matches_with( 27 | relative_path.to_str().unwrap(), 28 | MatchOptions { 29 | case_sensitive: true, 30 | // Explicitly set this option to true. Most unix implementations do 31 | // not allow `*` to match across path segments, so the default 32 | // (false) behavior is unexpected for people. 
33 | require_literal_separator: true, 34 | require_literal_leading_dot: false, 35 | }, 36 | ) 37 | } 38 | 39 | impl Linter { 40 | pub fn get_config_dir(&self) -> &Path { 41 | // Unwrap is fine here because we know this path is absolute and won't be `/` 42 | self.primary_config_path.parent().unwrap() 43 | } 44 | 45 | fn get_matches(&self, files: &[AbsPath]) -> Vec { 46 | let config_dir = self.get_config_dir(); 47 | files 48 | .iter() 49 | .filter(|name| { 50 | self.include_patterns 51 | .iter() 52 | .any(|pattern| matches_relative_path(config_dir, name, pattern)) 53 | }) 54 | .filter(|name| { 55 | !self 56 | .exclude_patterns 57 | .iter() 58 | .any(|pattern| matches_relative_path(config_dir, name, pattern)) 59 | }) 60 | .cloned() 61 | .collect() 62 | } 63 | 64 | fn run_command(&self, matched_files: Vec) -> Result> { 65 | let tmp_file = tempfile::NamedTempFile::new()?; 66 | for matched_file in &matched_files { 67 | let name = matched_file 68 | .to_str() 69 | .ok_or_else(|| anyhow!("Could not convert path to string."))?; 70 | writeln!(&tmp_file, "{}", name)?; 71 | } 72 | 73 | let file_path = tmp_file 74 | .path() 75 | .to_str() 76 | .ok_or_else(|| anyhow!("tempfile corrupted"))?; 77 | 78 | let (program, arguments) = self.commands.split_at(1); 79 | let arguments: Vec = arguments 80 | .iter() 81 | .map(|arg| arg.replace("{{PATHSFILE}}", file_path)) 82 | .collect(); 83 | 84 | debug!( 85 | "Running linter {}: {} {}", 86 | self.code, 87 | program[0], 88 | arguments 89 | .iter() 90 | .map(|x| format!("'{x}'")) 91 | .collect::>() 92 | .join(" ") 93 | ); 94 | 95 | let start = std::time::Instant::now(); 96 | let command = Command::new(&program[0]) 97 | .args(&arguments) 98 | .current_dir(self.get_config_dir()) 99 | .output() 100 | .with_context(|| { 101 | format!( 102 | "Failed to execute linter command {} with args: {:?}", 103 | program[0], arguments 104 | ) 105 | })?; 106 | debug!("Linter {} took: {:?}", self.code, start.elapsed()); 107 | ensure_output("Linter command", 
&command)?; 108 | 109 | if !&command.status.success() { 110 | let stderr = std::str::from_utf8(&command.stderr)?; 111 | let stdout = std::str::from_utf8(&command.stdout)?; 112 | bail!( 113 | "Linter command failed with non-zero exit code.\n\ 114 | STDERR:\n{}\n\nSTDOUT:{}\n", 115 | stderr, 116 | stdout, 117 | ); 118 | } 119 | let stdout_str = std::str::from_utf8(&command.stdout)?; 120 | let mut messages = Vec::new(); 121 | for line in stdout_str.lines() { 122 | if line.is_empty() { 123 | continue; 124 | } 125 | let msg = serde_json::from_str(line).with_context(|| { 126 | format!( 127 | "Failed to deserialize output for lint adapter, line: {}", 128 | line 129 | ) 130 | })?; 131 | messages.push(msg); 132 | } 133 | Ok(messages) 134 | } 135 | 136 | pub fn run(&self, files: &[AbsPath]) -> Vec { 137 | let matches = self.get_matches(files); 138 | log_files(&format!("Linter '{}' matched files: ", self.code), &matches); 139 | if matches.is_empty() { 140 | return Vec::new(); 141 | } 142 | // Wrap the command in a Result to ensure uniform error handling. 143 | // This way, linters are guaranteed to exit cleanly, and any issue will 144 | // be reported using the same mechanism that we use to report regular 145 | // lint errors. 146 | match self.run_command(matches) { 147 | Err(e) => { 148 | let err_lint = LintMessage { 149 | path: None, 150 | line: None, 151 | char: None, 152 | code: self.code.clone(), 153 | severity: crate::lint_message::LintSeverity::Error, 154 | name: "Linter failed".to_string(), 155 | description: Some(format!( 156 | "Linter failed. 
This a bug, please file an issue against \ 157 | the linter maintainer.\n\nCONTEXT:\n{}", 158 | e 159 | )), 160 | original: None, 161 | replacement: None, 162 | }; 163 | vec![err_lint] 164 | } 165 | Ok(messages) => messages, 166 | } 167 | } 168 | 169 | pub fn init(&self, dry_run: bool) -> Result<()> { 170 | match &self.init_commands { 171 | Some(init_commands) => { 172 | info!("Initializing linter: '{}'", self.code); 173 | if init_commands.is_empty() { 174 | return Ok(()); 175 | } 176 | 177 | let dry_run = if dry_run { "1" } else { "0" }; 178 | 179 | let init_commands: Vec = init_commands 180 | .iter() 181 | .map(|arg| arg.replace("{{DRYRUN}}", dry_run)) 182 | .collect(); 183 | info!("the init commands are {:?}", init_commands); 184 | let (program, arguments) = init_commands.split_at(1); 185 | debug!( 186 | "Running: {} {}", 187 | program[0], 188 | arguments 189 | .iter() 190 | .map(|i| format!("'{i}'")) 191 | .collect::>() 192 | .join(" ") 193 | ); 194 | let status = Command::new(&program[0]) 195 | .args(arguments) 196 | .current_dir(self.get_config_dir()) 197 | .status()?; 198 | info!("the status is {:?}", status); 199 | ensure!( 200 | status.success(), 201 | "lint initializer for '{}' failed with non-zero exit code", 202 | self.code 203 | ); 204 | Ok(()) 205 | } 206 | None => Ok(()), 207 | } 208 | } 209 | } 210 | 211 | #[cfg(test)] 212 | mod tests { 213 | use std::path::PathBuf; 214 | 215 | use super::*; 216 | 217 | // Check that `*` does not match across path segments. 
218 | #[test] 219 | fn test_glob_with_separator() -> Result<()> { 220 | assert!(!matches_relative_path( 221 | &PathBuf::from(""), 222 | &PathBuf::from("foo/bar/baz"), 223 | &Pattern::new("foo/b*")?, 224 | )); 225 | Ok(()) 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /src/log_utils.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use console::{style, Term}; 3 | use fern::colors::{Color, ColoredLevelConfig}; 4 | use std::path::Path; 5 | use std::process::Output; 6 | 7 | use log::Level::Trace; 8 | use log::{debug, log_enabled, trace, LevelFilter}; 9 | 10 | pub fn log_files(message: &str, files: &T) 11 | where 12 | T: std::fmt::Debug, 13 | { 14 | if !log_enabled!(Trace) { 15 | debug!("{} ", message); 16 | } 17 | 18 | trace!("{}{:?}", message, files); 19 | } 20 | 21 | pub fn ensure_output(program_name: &str, output: &Output) -> Result<()> { 22 | if !output.status.success() { 23 | let stderr = std::str::from_utf8(&output.stderr)?; 24 | let stdout = std::str::from_utf8(&output.stdout)?; 25 | bail!( 26 | "{} failed with non-zero exit code.\n\ 27 | STDERR:\n{}\n\nSTDOUT:{}\n", 28 | program_name, 29 | stderr, 30 | stdout, 31 | ); 32 | } 33 | Ok(()) 34 | } 35 | 36 | pub fn setup_logger(log_level: LevelFilter, log_file: &Path, force_color: bool) -> Result<()> { 37 | let builder = fern::Dispatch::new(); 38 | 39 | let isatty = Term::stderr().features().is_attended(); 40 | if isatty || force_color { 41 | // Use colors in our terminal output if we're on a tty 42 | let log_colors = ColoredLevelConfig::new() 43 | .trace(Color::Cyan) 44 | .debug(Color::Blue) 45 | .info(Color::Green) 46 | .warn(Color::Yellow) 47 | .error(Color::Red); 48 | builder 49 | .chain( 50 | fern::Dispatch::new() 51 | .format(move |out, message, record| { 52 | out.finish(format_args!( 53 | "{}{} {} {}{} {}", 54 | style("[").dim(), 55 | 
chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true), 56 | log_colors.color(record.level()), 57 | record.target(), 58 | style("]").dim(), 59 | message 60 | )) 61 | }) 62 | .level(log_level) 63 | .chain(std::io::stderr()), 64 | ) 65 | .chain( 66 | fern::Dispatch::new() 67 | .format(move |out, message, record| { 68 | out.finish(format_args!( 69 | "[{} {} {}] {}", 70 | chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true), 71 | record.level(), 72 | record.target(), 73 | message 74 | )) 75 | }) 76 | .level(LevelFilter::Trace) 77 | .chain(fern::log_file(log_file)?), 78 | ) 79 | .apply()?; 80 | } else { 81 | builder 82 | .format(move |out, message, record| { 83 | out.finish(format_args!( 84 | "[{} {} {}] {}", 85 | chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true), 86 | record.level(), 87 | record.target(), 88 | message 89 | )) 90 | }) 91 | .chain( 92 | fern::Dispatch::new() 93 | .level(log_level) 94 | .chain(std::io::stderr()), 95 | ) 96 | .chain( 97 | fern::Dispatch::new() 98 | .level(LevelFilter::Trace) 99 | .chain(fern::log_file(log_file)?), 100 | ) 101 | .apply()?; 102 | } 103 | Ok(()) 104 | } 105 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, convert::TryFrom, io::Write, path::Path}; 2 | 3 | use anyhow::{Context, Result}; 4 | use chrono::SecondsFormat; 5 | use clap::Parser; 6 | 7 | use itertools::Itertools; 8 | use lintrunner::{ 9 | do_init, do_lint, 10 | init::check_init_changed, 11 | lint_config::{get_linters_from_configs, LintRunnerConfig}, 12 | log_utils::setup_logger, 13 | path::AbsPath, 14 | persistent_data::{ExitInfo, PersistentDataStore, RunInfo}, 15 | rage::do_rage, 16 | render::print_error, 17 | PathsOpt, RenderOpt, RevisionOpt, 18 | }; 19 | use log::debug; 20 | 21 | const VERSION: &str = env!("CARGO_PKG_VERSION"); 22 | 23 | #[derive(Debug, 
Parser)]
#[clap(version, name = "lintrunner", infer_subcommands(true))]
struct Args {
    /// Verbose mode (-v, or -vv to show full list of paths being linted)
    #[clap(short, long, parse(from_occurrences), global = true)]
    verbose: u8,

    /// Paths to TOML files specifying linters. Configs are merged, with later files overriding earlier ones.
    /// Except for the first, all files are optional, with missing ones triggering a warning.
    /// Relative paths are interpreted with respect to the first config file.
    #[clap(
        long,
        global = true,
        alias = "config",
        multiple = true,
        default_value = ".lintrunner.toml, .lintrunner.private.toml"
    )]
    configs: String,

    /// If set, any suggested patches will be applied
    #[clap(short, long, global = true)]
    apply_patches: bool,

    /// Shell command that returns new-line separated paths to lint
    ///
    /// Example: To run on all files in the repo, use `--paths-cmd='git grep -Il .'`.
    #[clap(long, conflicts_with = "paths-from", global = true)]
    paths_cmd: Option<String>,

    /// File with new-line separated paths to lint
    #[clap(long, global = true)]
    paths_from: Option<String>,

    /// Lint all files that differ between the working directory and the
    /// specified revision. This argument can be any <tree-ish> that is accepted
    /// by `git diff-tree`
    #[clap(long, short, conflicts_with_all = &["paths", "paths-cmd", "paths-from"], global = true)]
    revision: Option<String>,

    /// Lint all files that differ between the merge base of HEAD with the
    /// specified revision and HEAD. This argument can be any <tree-ish> that is
    /// accepted by `git diff-tree`
    ///
    /// Example: lintrunner -m master
    #[clap(long, short, conflicts_with_all = &["paths", "paths-cmd", "paths-from", "revision"], global = true)]
    merge_base_with: Option<String>,

    /// Comma-separated list of linters to skip (e.g. --skip CLANGFORMAT,NOQA).
    ///
    /// You can run: `lintrunner list` to see available linters.
    #[clap(long, global = true)]
    skip: Option<String>,

    /// Comma-separated list of linters to run (opposite of --skip).
    ///
    /// You can run: `lintrunner list` to see available linters.
    #[clap(long, global = true)]
    take: Option<String>,

    /// With 'default' show lint issues in human-readable format, for interactive use.
    /// With 'json', show lint issues as machine-readable JSON (one per line)
    /// With 'oneline', show lint issues in compact format (one per line)
    #[clap(long, arg_enum, default_value_t = RenderOpt::Default, global = true)]
    output: RenderOpt,

    #[clap(subcommand)]
    cmd: Option<SubCommand>,

    /// Paths to lint. lintrunner will still respect the inclusions and
    /// exclusions defined in .lintrunner.toml; manually specifying a path will
    /// not override them.
    #[clap(conflicts_with_all = &["paths-cmd", "paths-from"], global = true)]
    paths: Vec<String>,

    /// If set, always output with ANSI colors, even if we detect the output is
    /// not a user-attended terminal.
    #[clap(long, global = true)]
    force_color: bool,

    /// If set, use the provided path to store any metadata generated by
    /// lintrunner. By default, this is a platform-specific location for
    /// application data (e.g. $XDG_DATA_HOME for UNIX systems.)
    #[clap(long, global = true)]
    data_path: Option<String>,

    /// If set, output json to the provided path as well as the terminal.
    #[clap(long, global = true)]
    tee_json: Option<String>,

    /// Run lintrunner on all files in the repo. This could take a while!
    #[clap(long, conflicts_with_all = &["paths", "paths-cmd", "paths-from", "revision", "merge-base-with"], global = true)]
    all_files: bool,

    /// If set, will only lint files under the directory where the configuration file is located and its subdirectories.
    #[clap(long, global = true)]
    only_lint_under_config_dir: bool,
}

#[derive(Debug, Parser)]
enum SubCommand {
    /// Perform first-time setup for linters
    Init {
        /// If set, do not actually execute initialization commands, just print them
        #[clap(long, short)]
        dry_run: bool,
    },
    /// Run and accept changes for formatting linters only. Equivalent to
    /// `lintrunner --apply-patches --take <formatters>`.
    Format,

    /// Run linters. This is the default if no subcommand is provided.
    Lint,

    /// Show the list of available linters, based on this repo's .lintrunner.toml.
    List,

    /// Create a bug report for a past invocation of lintrunner.
    Rage {
        /// Choose a specific invocation to report on. 0 is the most recent run.
        #[clap(long, short)]
        invocation: Option<usize>,
        /// Set to upload the report to github gist (if available)
        #[clap(long, short, action)]
        gist: bool,
        /// Set to upload the report to pastry (if available)
        #[clap(long, short, action)]
        pastry: bool,
    },
}

fn do_main() -> Result<i32> {
    let args = Args::parse();

    if args.force_color {
        console::set_colors_enabled(true);
        console::set_colors_enabled_stderr(true);
    }
    // Map (-v count, machine-readable output?) to a log level. Explicit
    // verbosity always wins over the quieting that json/oneline implies.
    let log_level = match (args.verbose, args.output != RenderOpt::Default) {
        // Default
        (0, false) => log::LevelFilter::Info,
        // If just json is asked for, suppress most output except hard errors.
        (0, true) => log::LevelFilter::Error,

        // Verbose overrides json.
        (1, false) => log::LevelFilter::Debug,
        (1, true) => log::LevelFilter::Debug,

        // Any higher verbosity goes to trace.
171 | (_, _) => log::LevelFilter::Trace, 172 | }; 173 | 174 | let run_info = RunInfo { 175 | args: std::env::args().collect(), 176 | timestamp: chrono::Local::now().to_rfc3339_opts(SecondsFormat::Millis, true), 177 | }; 178 | // clone split by commas and trim whitespace 179 | let config_paths: Vec = args 180 | .configs 181 | .split(',') 182 | .map(|path| path.trim().to_string()) 183 | .collect_vec(); 184 | // check if first config path exists 185 | let primary_config_path = AbsPath::try_from(config_paths[0].clone()) 186 | .with_context(|| format!("Could not read lintrunner config at: '{}'", config_paths[0]))?; 187 | 188 | let persistent_data_store = PersistentDataStore::new(&primary_config_path, run_info)?; 189 | 190 | setup_logger( 191 | log_level, 192 | &persistent_data_store.log_file(), 193 | args.force_color, 194 | )?; 195 | 196 | debug!("Version: {VERSION}"); 197 | debug!("Passed args: {:?}", std::env::args()); 198 | debug!("Computed args: {:?}", args); 199 | 200 | // report config paths which do not exist 201 | for path in &config_paths { 202 | match AbsPath::try_from(path) { 203 | Ok(_) => {}, // do nothing on success 204 | Err(_) => eprintln!("Warning: Could not find a lintrunner config at: '{}'. 
Continuing without using configuration file.", path), 205 | } 206 | } 207 | 208 | let config_paths: Vec = config_paths 209 | .into_iter() 210 | .filter(|path| Path::new(&path).exists()) 211 | .collect(); 212 | let cmd = args.cmd.unwrap_or(SubCommand::Lint); 213 | let lint_runner_config = LintRunnerConfig::new(&config_paths)?; 214 | let skipped_linters = args.skip.map(|linters| { 215 | linters 216 | .split(',') 217 | .map(|linter_name| linter_name.to_string()) 218 | .collect::>() 219 | }); 220 | let taken_linters = args.take.map(|linters| { 221 | linters 222 | .split(',') 223 | .map(|linter_name| linter_name.to_string()) 224 | .collect::>() 225 | }); 226 | 227 | // If we are formatting, the universe of linters to select from should be 228 | // restricted to only formatters. 229 | // (NOTE: we pay an allocation for `placeholder` even in cases where we are 230 | // just passing through a reference in the else-branch. This doesn't matter, 231 | // but if we want to fix it we should impl Cow for LintConfig and use that 232 | // instead.). 233 | let mut placeholder = Vec::new(); 234 | let all_linters = if let SubCommand::Format = &cmd { 235 | let iter = lint_runner_config 236 | .linters 237 | .iter() 238 | .filter(|l| l.is_formatter) 239 | .cloned(); 240 | placeholder.extend(iter); 241 | &placeholder 242 | } else { 243 | // If we're not formatting, all linters defined in the config are 244 | // eligible to run. 
245 | &lint_runner_config.linters 246 | }; 247 | 248 | let linters = get_linters_from_configs( 249 | all_linters, 250 | skipped_linters, 251 | taken_linters, 252 | &primary_config_path, 253 | )?; 254 | 255 | let enable_spinners = args.verbose == 0 && args.output == RenderOpt::Default; 256 | 257 | let revision_opt = if let Some(revision) = args.revision { 258 | RevisionOpt::Revision(revision) 259 | } else if let Some(merge_base_with) = args.merge_base_with { 260 | RevisionOpt::MergeBaseWith(merge_base_with) 261 | } else if lint_runner_config.merge_base_with.is_some() { 262 | RevisionOpt::MergeBaseWith( 263 | lint_runner_config 264 | .merge_base_with 265 | .clone() 266 | .expect("Merge base should be defined"), 267 | ) 268 | } else { 269 | RevisionOpt::Head 270 | }; 271 | 272 | let only_lint_under_config_dir = if lint_runner_config.only_lint_under_config_dir.is_some() { 273 | lint_runner_config.only_lint_under_config_dir.unwrap() 274 | } else { 275 | args.only_lint_under_config_dir 276 | }; 277 | 278 | let paths_opt = if let Some(paths_file) = args.paths_from { 279 | let path_file = AbsPath::try_from(&paths_file) 280 | .with_context(|| format!("Failed to find `--paths-from` file '{}'", paths_file))?; 281 | PathsOpt::PathsFile(path_file) 282 | } else if let Some(paths_cmd) = args.paths_cmd { 283 | PathsOpt::PathsCmd(paths_cmd) 284 | } else if !args.paths.is_empty() { 285 | PathsOpt::Paths(args.paths) 286 | } else if args.all_files { 287 | PathsOpt::AllFiles 288 | } else { 289 | PathsOpt::Auto 290 | }; 291 | 292 | let res = match cmd { 293 | SubCommand::Init { dry_run } => { 294 | // Just run initialization commands, don't actually lint. 
295 | do_init(linters, dry_run, &persistent_data_store, &config_paths) 296 | } 297 | SubCommand::Format => { 298 | check_init_changed(&persistent_data_store, &lint_runner_config)?; 299 | do_lint( 300 | linters, 301 | paths_opt, 302 | true, // always apply patches when we use the format command 303 | args.output, 304 | enable_spinners, 305 | revision_opt, 306 | args.tee_json, 307 | only_lint_under_config_dir, 308 | ) 309 | } 310 | SubCommand::Lint => { 311 | // Default command is to just lint. 312 | check_init_changed(&persistent_data_store, &lint_runner_config)?; 313 | do_lint( 314 | linters, 315 | paths_opt, 316 | args.apply_patches, 317 | args.output, 318 | enable_spinners, 319 | revision_opt, 320 | args.tee_json, 321 | only_lint_under_config_dir, 322 | ) 323 | } 324 | SubCommand::Rage { 325 | invocation, 326 | gist, 327 | pastry, 328 | } => do_rage(&persistent_data_store, invocation, gist, pastry), 329 | SubCommand::List => { 330 | println!("Available linters:"); 331 | for linter in &lint_runner_config.linters { 332 | println!(" {}", linter.code); 333 | } 334 | Ok(0) 335 | } 336 | }; 337 | 338 | let exit_info = match &res { 339 | Ok(code) => ExitInfo { 340 | code: *code, 341 | err: None, 342 | }, 343 | Err(err) => ExitInfo { 344 | code: 1, 345 | err: Some(err.to_string()), 346 | }, 347 | }; 348 | 349 | // Write data related to this run out to the persistent data store. 350 | persistent_data_store.write_run_info(exit_info)?; 351 | 352 | res 353 | } 354 | 355 | fn main() { 356 | let code = match do_main() { 357 | Ok(code) => code, 358 | Err(err) => { 359 | print_error(&err) 360 | .context("failed to print exit error") 361 | .unwrap(); 362 | 1 363 | } 364 | }; 365 | 366 | // Flush the output before exiting, in case there is anything left in the buffers. 367 | drop(std::io::stdout().flush()); 368 | drop(std::io::stderr().flush()); 369 | 370 | // exit() abruptly ends the process while running no destructors. 
We should 371 | // make sure that nothing is alive before running this. 372 | std::process::exit(code); 373 | } 374 | -------------------------------------------------------------------------------- /src/path.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use std::{ 3 | convert::TryFrom, 4 | fmt, 5 | ops::Deref, 6 | path::{Path, PathBuf}, 7 | }; 8 | 9 | /// Represents a canonicalized path to a file or directory. 10 | #[derive(PartialOrd, Ord, Eq, PartialEq, Hash, Clone)] 11 | pub struct AbsPath { 12 | inner: PathBuf, 13 | } 14 | 15 | impl fmt::Debug for AbsPath { 16 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 17 | write!(f, "{}", self.inner.display()) 18 | } 19 | } 20 | 21 | // Ideally we could could create a generic TryFrom implementation for anything 22 | // that implements Into, but apparently this is not possible? 23 | // https://github.com/rust-lang/rust/issues/50133 24 | impl TryFrom for AbsPath { 25 | type Error = anyhow::Error; 26 | fn try_from(p: PathBuf) -> Result { 27 | Ok(AbsPath { 28 | inner: p.canonicalize()?, 29 | }) 30 | } 31 | } 32 | 33 | impl TryFrom<&Path> for AbsPath { 34 | type Error = anyhow::Error; 35 | fn try_from(p: &Path) -> Result { 36 | Ok(AbsPath { 37 | inner: PathBuf::from(p).canonicalize()?, 38 | }) 39 | } 40 | } 41 | 42 | impl TryFrom<&String> for AbsPath { 43 | type Error = anyhow::Error; 44 | fn try_from(p: &String) -> Result { 45 | Ok(AbsPath { 46 | inner: PathBuf::from(p).canonicalize()?, 47 | }) 48 | } 49 | } 50 | impl TryFrom for AbsPath { 51 | type Error = anyhow::Error; 52 | fn try_from(p: String) -> Result { 53 | Ok(AbsPath { 54 | inner: PathBuf::from(p).canonicalize()?, 55 | }) 56 | } 57 | } 58 | 59 | impl TryFrom<&str> for AbsPath { 60 | type Error = anyhow::Error; 61 | fn try_from(p: &str) -> Result { 62 | Ok(AbsPath { 63 | inner: PathBuf::from(p).canonicalize()?, 64 | }) 65 | } 66 | } 67 | 68 | impl Deref for AbsPath { 69 | type Target = 
Path; 70 | 71 | fn deref(&self) -> &Self::Target { 72 | self.inner.as_path() 73 | } 74 | } 75 | 76 | impl AsRef for AbsPath { 77 | fn as_ref(&self) -> &Path { 78 | self.inner.as_path() 79 | } 80 | } 81 | 82 | // This routine is adapted from the *old* Path's `path_relative_from` 83 | // function, which works differently from the new `relative_from` function. 84 | // In particular, this handles the case on unix where both paths are 85 | // absolute but with only the root as the common directory. 86 | // From: https://stackoverflow.com/questions/39340924/given-two-absolute-paths-how-can-i-express-one-of-the-paths-relative-to-the-oth 87 | // 88 | // path_relative_from(/foo/bar, /foo) -> bar 89 | pub fn path_relative_from(path: &Path, base: &Path) -> Option { 90 | use std::path::Component; 91 | 92 | if path.is_absolute() != base.is_absolute() { 93 | if path.is_absolute() { 94 | Some(PathBuf::from(path)) 95 | } else { 96 | None 97 | } 98 | } else { 99 | let mut ita = path.components(); 100 | let mut itb = base.components(); 101 | let mut comps: Vec = vec![]; 102 | loop { 103 | match (ita.next(), itb.next()) { 104 | (None, None) => break, 105 | (Some(a), None) => { 106 | comps.push(a); 107 | comps.extend(ita.by_ref()); 108 | break; 109 | } 110 | (None, _) => comps.push(Component::ParentDir), 111 | (Some(a), Some(b)) if comps.is_empty() && a == b => (), 112 | (Some(a), Some(Component::CurDir)) => comps.push(a), 113 | (Some(_), Some(Component::ParentDir)) => return None, 114 | (Some(a), Some(_)) => { 115 | comps.push(Component::ParentDir); 116 | for _ in itb { 117 | comps.push(Component::ParentDir); 118 | } 119 | comps.push(a); 120 | comps.extend(ita.by_ref()); 121 | break; 122 | } 123 | } 124 | } 125 | Some(comps.iter().map(|c| c.as_os_str()).collect()) 126 | } 127 | } 128 | 129 | // 130 | pub fn get_display_path(path: &str, current_dir: &Path) -> String { 131 | let abs_path = AbsPath::try_from(path); 132 | match abs_path { 133 | Ok(abs_path) => { 134 | // unwrap will 
never panic because we know `abs_path` is absolute. 135 | let relative_path = path_relative_from(&abs_path, current_dir).unwrap(); 136 | 137 | relative_path.display().to_string() 138 | } 139 | // If we can't relativize for some reason, just return the path as 140 | // reported by the linter. 141 | Err(_) => path.to_string(), 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /src/persistent_data.rs: -------------------------------------------------------------------------------- 1 | //! Utilities to handle data that we want to persist across invocations of 2 | //! lintrunner. 3 | //! 4 | //! This data will be placed in a platform specific location (unless overridden 5 | //! by the user). To distinguish between different `.lintrunner.toml` configs, 6 | //! we hash the absolute path to the config and include that as part of the 7 | //! directory structure for persistent data. 8 | 9 | use anyhow::{anyhow, bail, Context, Result}; 10 | use directories::ProjectDirs; 11 | use figment::providers::{Format, Toml}; 12 | use log::debug; 13 | use serde::{Deserialize, Serialize}; 14 | use std::{ 15 | fmt::Write, 16 | path::{Path, PathBuf}, 17 | }; 18 | 19 | use crate::{lint_config::LintRunnerConfig, path::AbsPath}; 20 | 21 | const CONFIG_DATA_NAME: &str = ".lintrunner.toml"; 22 | const RUNS_DIR_NAME: &str = "runs"; 23 | const MAX_RUNS_TO_STORE: usize = 10; 24 | 25 | /// Single way to interact with persistent data for a given run of lintrunner. 26 | /// This is scoped to a single .lintrunner.toml config. 
27 | pub struct PersistentDataStore { 28 | data_dir: PathBuf, 29 | runs_dir: PathBuf, 30 | cur_run_info: RunInfo, 31 | } 32 | 33 | /// Encapsulates information about a specific run of `lintrunner` 34 | #[derive(Serialize, Deserialize)] 35 | pub struct RunInfo { 36 | pub args: Vec, 37 | pub timestamp: String, 38 | } 39 | 40 | #[derive(Serialize, Deserialize)] 41 | pub struct ExitInfo { 42 | pub code: i32, 43 | pub err: Option, 44 | } 45 | 46 | impl RunInfo { 47 | // Get the directory (relative to the runs dir) that stores data specific to 48 | // this run. 49 | fn dir_name(&self) -> String { 50 | let args = blake3::hash(self.args.join("_").as_bytes()).to_string(); 51 | self.timestamp.clone().replace(':', "-").replace('+', "_") + "_" + &args 52 | } 53 | } 54 | 55 | impl PersistentDataStore { 56 | pub fn new( 57 | primary_config_path: &AbsPath, 58 | cur_run_info: RunInfo, 59 | ) -> Result { 60 | // Retrieve the lintrunner-wide data directory. 61 | let project_dirs = ProjectDirs::from("", "", "lintrunner"); 62 | let project_dirs = 63 | project_dirs.ok_or_else(|| anyhow!("Could not find project directories"))?; 64 | let project_data_dir = project_dirs.data_dir(); 65 | 66 | // Now compute one specific to this lintrunner config. 67 | let config_path_hash = 68 | blake3::hash(primary_config_path.to_string_lossy().as_bytes()).to_string(); 69 | let config_data_dir = project_data_dir.join(config_path_hash); 70 | 71 | // Create the runs dir as well. 72 | let runs_dir = config_data_dir.join(RUNS_DIR_NAME); 73 | let cur_run_dir = runs_dir.join(cur_run_info.dir_name()); 74 | 75 | std::fs::create_dir_all(cur_run_dir)?; 76 | 77 | PersistentDataStore::clean_old_runs(&runs_dir)?; 78 | 79 | Ok(PersistentDataStore { 80 | data_dir: config_data_dir, 81 | runs_dir, 82 | cur_run_info, 83 | }) 84 | } 85 | 86 | fn clean_old_runs(runs_dir: &Path) -> Result<()> { 87 | let mut entries = std::fs::read_dir(runs_dir)? 
88 | .map(|res| res.map(|e| e.path())) 89 | .collect::, std::io::Error>>()?; 90 | 91 | if entries.len() >= MAX_RUNS_TO_STORE { 92 | debug!("Found more than {MAX_RUNS_TO_STORE} runs, cleaning some up"); 93 | 94 | entries.sort_unstable(); 95 | 96 | let num_to_delete = entries.len() - MAX_RUNS_TO_STORE; 97 | for dir in entries.iter().take(num_to_delete) { 98 | debug!("Deleting old run: {}", dir.display()); 99 | std::fs::remove_dir_all(dir)?; 100 | } 101 | } 102 | Ok(()) 103 | } 104 | 105 | pub fn log_file(&self) -> PathBuf { 106 | self.runs_dir 107 | .join(self.cur_run_info.dir_name()) 108 | .join("log.txt") 109 | } 110 | 111 | pub fn write_run_info(&self, exit_info: ExitInfo) -> Result<()> { 112 | let run_path = self.runs_dir.join(self.cur_run_info.dir_name()); 113 | debug!("Writing run info to {}", run_path.display()); 114 | 115 | if !run_path.exists() { 116 | std::fs::create_dir(&run_path)?; 117 | } 118 | let run_info = serde_json::to_string_pretty(&self.cur_run_info)?; 119 | std::fs::write(run_path.join("run_info.json"), run_info)?; 120 | 121 | let exit_info = serde_json::to_string_pretty(&exit_info)?; 122 | std::fs::write(run_path.join("exit_info.json"), exit_info)?; 123 | Ok(()) 124 | } 125 | 126 | pub fn get_run_report(&self, run_info: &RunInfo) -> Result { 127 | let run_path = self.runs_dir.join(run_info.dir_name()); 128 | debug!("Generating run report from {}", run_path.display()); 129 | 130 | let log = 131 | std::fs::read_to_string(run_path.join("log.txt")).context("retrieving log file")?; 132 | 133 | let mut ret = String::new(); 134 | 135 | write!( 136 | ret, 137 | "lintrunner rage report:\n\ 138 | timestamp: {}\n\ 139 | args: {}\n", 140 | run_info.timestamp, 141 | run_info 142 | .args 143 | .iter() 144 | .map(|x| format!("'{x}'")) 145 | .collect::>() 146 | .join(" "), 147 | )?; 148 | 149 | let exit_info_path = run_path.join("exit_info.json"); 150 | if exit_info_path.exists() { 151 | let exit_info = 152 | 
std::fs::read_to_string(exit_info_path).context("retrieving exit info json")?; 153 | let exit_info: ExitInfo = 154 | serde_json::from_str(&exit_info).context("deserializing exit info")?; 155 | write!( 156 | ret, 157 | "exit code: {}\n\ 158 | err msg: {:?}\n\n", 159 | exit_info.code, exit_info.err, 160 | )?; 161 | } else { 162 | writeln!(ret, "EXIT INFO MISSING")?; 163 | } 164 | writeln!(ret, "========= BEGIN LOGS =========")?; 165 | ret.write_str(&log)?; 166 | 167 | Ok(ret) 168 | } 169 | 170 | fn past_run_dirs(&self) -> Result> { 171 | debug!("Reading past runs from {}", self.runs_dir.display()); 172 | 173 | let mut run_dirs = std::fs::read_dir(&self.runs_dir)? 174 | .map(|res| res.map(|e| e.path())) 175 | .collect::, std::io::Error>>()?; 176 | 177 | run_dirs.sort_unstable(); 178 | run_dirs.pop(); // pop most recent job as it won't have a report yet. 179 | run_dirs.reverse(); 180 | 181 | debug!("Found past runs: {:?}", run_dirs); 182 | Ok(run_dirs) 183 | } 184 | 185 | pub fn past_run(&self, invocation: usize) -> Result { 186 | let run_dirs = self.past_run_dirs()?; 187 | 188 | let dir = run_dirs.get(invocation); 189 | match dir { 190 | Some(dir) => { 191 | debug!("Reading run info from {}", dir.display()); 192 | let run_info = std::fs::read_to_string(dir.join("run_info.json")) 193 | .context("couldn't read run info json")?; 194 | let run_info: RunInfo = 195 | serde_json::from_str(&run_info).context("couldn't deserialize run info")?; 196 | Ok(run_info) 197 | } 198 | None => { 199 | bail!( 200 | "Tried to request run #{invocation}, but didn't find it. 
\ 201 | (lintrunner only stores the last {MAX_RUNS_TO_STORE} runs)" 202 | ); 203 | } 204 | } 205 | } 206 | 207 | pub fn past_runs(&self) -> Result> { 208 | let run_dirs = self.past_run_dirs()?; 209 | 210 | let mut ret = Vec::new(); 211 | 212 | for dir in run_dirs.into_iter() { 213 | debug!("Reading run info from {}", dir.display()); 214 | let run_data = std::fs::read_to_string(dir.join("run_info.json")); 215 | let exit_data = std::fs::read_to_string(dir.join("exit_info.json")); 216 | if run_data.is_err() || exit_data.is_err() { 217 | // If we couldn't find one of the runfiles, just skip it. We can 218 | // fail to write it for a variety of reasons, including a simple 219 | // sigterm. 220 | continue; 221 | } 222 | 223 | let run_info: RunInfo = serde_json::from_str(&run_data?)?; 224 | let exit_info: ExitInfo = serde_json::from_str(&exit_data?)?; 225 | ret.push((run_info, exit_info)); 226 | } 227 | Ok(ret) 228 | } 229 | 230 | pub fn last_init(&self) -> Result> { 231 | debug!( 232 | "Checking data file '{}/{}' to see if config has changed", 233 | self.data_dir.display(), 234 | CONFIG_DATA_NAME 235 | ); 236 | let init_path = self.relative_path(CONFIG_DATA_NAME); 237 | if !init_path.exists() { 238 | return Ok(None); 239 | } 240 | 241 | Ok(Some(std::fs::read_to_string(init_path)?)) 242 | } 243 | 244 | pub fn update_last_init(&self, config_paths: &Vec) -> Result<()> { 245 | debug!( 246 | "Writing used config(s) to {}/{}", 247 | self.data_dir.display(), 248 | CONFIG_DATA_NAME 249 | ); 250 | 251 | let mut config_contents = figment::Figment::new(); 252 | 253 | for path in config_paths { 254 | config_contents = config_contents.join(Toml::file(path)); 255 | } 256 | 257 | let config_contents = config_contents.extract::()?; 258 | let path = self.relative_path(CONFIG_DATA_NAME); 259 | let serialized_contents = serde_json::to_string_pretty(&config_contents)?; 260 | std::fs::write(path, serialized_contents)?; 261 | Ok(()) 262 | } 263 | 264 | fn relative_path(&self, path: impl 
AsRef) -> PathBuf { 265 | self.data_dir.join(path) 266 | } 267 | } 268 | 269 | #[cfg(test)] 270 | mod tests { 271 | use super::*; 272 | use tempfile::NamedTempFile; 273 | 274 | #[test] 275 | fn basic_data_doesnt_fail() { 276 | let f = NamedTempFile::new().unwrap(); 277 | let config = AbsPath::try_from(f.path()).unwrap(); 278 | 279 | let run_info = RunInfo { 280 | timestamp: "0".to_string(), 281 | args: vec!["foo".to_string(), "bar".to_string()], 282 | }; 283 | let store = PersistentDataStore::new(&config, run_info).unwrap(); 284 | // Try to cleanup 285 | std::fs::remove_dir_all(store.data_dir).unwrap(); 286 | } 287 | 288 | #[test] 289 | fn old_run_cleanup() { 290 | let f = NamedTempFile::new().unwrap(); 291 | let config = AbsPath::try_from(f.path()).unwrap(); 292 | 293 | let run_info = RunInfo { 294 | timestamp: "0".to_string(), 295 | args: vec!["foo".to_string(), "bar".to_string()], 296 | }; 297 | let store = PersistentDataStore::new(&config, run_info).unwrap(); 298 | 299 | // Simulate some more runs. 300 | for i in 1..20 { 301 | let run_info = RunInfo { 302 | timestamp: i.to_string(), 303 | args: vec!["foo".to_string(), "bar".to_string()], 304 | }; 305 | let store = PersistentDataStore::new(&config, run_info).unwrap(); 306 | store 307 | .write_run_info(ExitInfo { code: 0, err: None }) 308 | .unwrap() 309 | } 310 | 311 | // We should have 10 runs, since old ones should have been collected. 
312 | let num_entries = std::fs::read_dir(store.runs_dir).unwrap().count(); 313 | assert_eq!(num_entries, MAX_RUNS_TO_STORE); 314 | 315 | // Try to clean up 316 | std::fs::remove_dir_all(store.data_dir).unwrap(); 317 | } 318 | } 319 | -------------------------------------------------------------------------------- /src/rage.rs: -------------------------------------------------------------------------------- 1 | use crate::persistent_data::{PersistentDataStore, RunInfo}; 2 | use anyhow::{Context, Result}; 3 | use console::style; 4 | use dialoguer::{theme::ColorfulTheme, Select}; 5 | use std::io::Write; 6 | use std::process::Command; 7 | use std::process::Stdio; 8 | 9 | fn select_past_runs(persistent_data_store: &PersistentDataStore) -> Result> { 10 | let runs = persistent_data_store.past_runs()?; 11 | if runs.is_empty() { 12 | return Ok(None); 13 | } 14 | let items: Vec = runs 15 | .iter() 16 | .map(|(run_info, exit_info)| { 17 | let starting_glyph = if exit_info.code == 0 { 18 | style("✓").green() 19 | } else { 20 | style("✕").red() 21 | }; 22 | format!( 23 | "{} {}: {}", 24 | starting_glyph, 25 | run_info.timestamp, 26 | run_info.args.join(" "), 27 | ) 28 | }) 29 | .collect(); 30 | 31 | let selection = Select::with_theme(&ColorfulTheme::default()) 32 | .with_prompt("Select a past invocation to report") 33 | .items(&items) 34 | .default(0) 35 | .interact_opt()?; 36 | 37 | Ok(selection.map(|i| runs.into_iter().nth(i).unwrap().0)) 38 | } 39 | 40 | fn upload(report: String, cmd: &mut Command) -> Result<()> { 41 | let mut child = cmd.stdin(Stdio::piped()).spawn()?; 42 | 43 | if let Some(mut stdin) = child.stdin.take() { 44 | stdin.write_all(report.as_bytes())?; 45 | } 46 | 47 | child.wait()?; 48 | Ok(()) 49 | } 50 | 51 | pub fn do_rage( 52 | persistent_data_store: &PersistentDataStore, 53 | invocation: Option, 54 | gist: bool, 55 | pastry: bool, 56 | ) -> Result { 57 | let run = match invocation { 58 | Some(invocation) => 
Some(persistent_data_store.past_run(invocation)?), 59 | None => select_past_runs(persistent_data_store)?, 60 | }; 61 | 62 | match run { 63 | Some(run) => { 64 | let report = persistent_data_store 65 | .get_run_report(&run) 66 | .context("getting selected run report")?; 67 | if gist { 68 | upload( 69 | report.clone(), 70 | Command::new("gh").args(["gist", "create", "-"]), 71 | )?; 72 | } else if pastry { 73 | upload(report.clone(), &mut Command::new("pastry"))?; 74 | } else { 75 | print!("{}", report); 76 | } 77 | } 78 | None => { 79 | println!("{}", style("Nothing selected, exiting.").yellow()); 80 | } 81 | } 82 | Ok(0) 83 | } 84 | -------------------------------------------------------------------------------- /src/render.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::io::Write; 3 | use std::{cmp, collections::HashMap, fs}; 4 | 5 | use anyhow::{anyhow, Result}; 6 | use console::{style, Style, Term}; 7 | use itertools::Itertools; 8 | use similar::{ChangeTag, DiffableStr, TextDiff}; 9 | use textwrap::indent; 10 | 11 | use crate::lint_message::{LintMessage, LintSeverity}; 12 | use crate::path::get_display_path; 13 | 14 | static CONTEXT_LINES: usize = 3; 15 | 16 | pub enum PrintedLintErrors { 17 | Yes, 18 | No, 19 | } 20 | 21 | pub fn render_lint_messages_oneline( 22 | stdout: &mut impl Write, 23 | lint_messages: &HashMap, Vec>, 24 | ) -> Result { 25 | let mut printed = false; 26 | let current_dir = std::env::current_dir()?; 27 | 28 | for lint_message in lint_messages.values().flatten() { 29 | printed = true; 30 | let display_path = match &lint_message.path { 31 | None => "[General linter failure]".to_string(), 32 | Some(path) => { 33 | // Try to render the path relative to user's current working directory. 34 | // But if we fail to relativize the path, just print what the linter 35 | // gave us directly. 
36 | get_display_path(path, ¤t_dir) 37 | } 38 | }; 39 | let line_number = match lint_message.line { 40 | None => "".to_string(), 41 | Some(line) => format!("{}", line), 42 | }; 43 | let column = match lint_message.char { 44 | None => "".to_string(), 45 | Some(char) => format!("{}", char), 46 | }; 47 | let description = match &lint_message.description { 48 | None => "", 49 | Some(desc) => desc.as_str(), 50 | }; 51 | let description = description.lines().join(" "); 52 | let severity = lint_message.severity.label(); 53 | 54 | writeln!( 55 | stdout, 56 | "{}:{}:{} :{} {} [{}/{}]", 57 | display_path, 58 | line_number, 59 | column, 60 | severity, 61 | description, 62 | lint_message.code, 63 | lint_message.name 64 | )?; 65 | } 66 | 67 | if printed { 68 | Ok(PrintedLintErrors::Yes) 69 | } else { 70 | Ok(PrintedLintErrors::No) 71 | } 72 | } 73 | 74 | pub fn render_lint_messages_json( 75 | stdout: &mut impl Write, 76 | lint_messages: &HashMap, Vec>, 77 | ) -> Result { 78 | let mut printed = false; 79 | for lint_message in lint_messages.values().flatten() { 80 | printed = true; 81 | writeln!(stdout, "{}", serde_json::to_string(lint_message)?)?; 82 | } 83 | 84 | if printed { 85 | Ok(PrintedLintErrors::Yes) 86 | } else { 87 | Ok(PrintedLintErrors::No) 88 | } 89 | } 90 | 91 | pub fn render_lint_messages( 92 | stdout: &mut impl Write, 93 | lint_messages: &HashMap, Vec>, 94 | ) -> Result { 95 | if lint_messages.is_empty() { 96 | writeln!(stdout, "{} No lint issues.", style("ok").green())?; 97 | 98 | return Ok(PrintedLintErrors::No); 99 | } 100 | 101 | let wrap_78_indent_4 = textwrap::Options::new(78) 102 | .initial_indent(spaces(4)) 103 | .subsequent_indent(spaces(4)); 104 | 105 | // Always render messages in sorted order. 
106 | let mut paths: Vec<&Option> = lint_messages.keys().collect(); 107 | paths.sort(); 108 | 109 | let current_dir = std::env::current_dir()?; 110 | for path in paths { 111 | let lint_messages = lint_messages.get(path).unwrap(); 112 | 113 | stdout.write_all(b"\n\n")?; 114 | 115 | match path { 116 | None => write!(stdout, ">>> General linter failure:\n\n")?, 117 | Some(path) => { 118 | // Try to render the path relative to user's current working directory. 119 | // But if we fail to relativize the path, just print what the linter 120 | // gave us directly. 121 | let path_to_print = get_display_path(path, ¤t_dir); 122 | 123 | write!( 124 | stdout, 125 | "{} Lint for {}:\n\n", 126 | style(">>>").bold(), 127 | style(path_to_print).underlined() 128 | )?; 129 | } 130 | } 131 | 132 | for lint_message in lint_messages { 133 | write_summary_line(stdout, lint_message)?; 134 | 135 | // Write the description. 136 | if let Some(description) = &lint_message.description { 137 | for line in textwrap::wrap(description, &wrap_78_indent_4) { 138 | writeln!(stdout, "{}", line)?; 139 | } 140 | } 141 | 142 | // If we have original and replacement, show the diff. 143 | if let (Some(original), Some(replacement)) = 144 | (&lint_message.original, &lint_message.replacement) 145 | { 146 | write_context_diff(stdout, original, replacement)?; 147 | } else if let (Some(highlight_line), Some(path)) = (&lint_message.line, path) { 148 | // Otherwise, write the context code snippet. 
149 | write_context(stdout, path, highlight_line)?; 150 | } 151 | } 152 | } 153 | 154 | Ok(PrintedLintErrors::Yes) 155 | } 156 | 157 | // Write formatted context lines, with an styled indicator for which line the lint is about 158 | fn write_context(stdout: &mut impl Write, path: &str, highlight_line: &usize) -> Result<()> { 159 | stdout.write_all(b"\n")?; 160 | let file = fs::read_to_string(path); 161 | match file { 162 | Ok(file) => { 163 | let lines = file.tokenize_lines(); 164 | 165 | let highlight_idx = highlight_line.saturating_sub(1); 166 | 167 | let max_idx = lines.len().saturating_sub(1); 168 | let start_idx = highlight_idx.saturating_sub(CONTEXT_LINES); 169 | let end_idx = cmp::min(max_idx, highlight_idx + CONTEXT_LINES); 170 | 171 | for cur_idx in start_idx..=end_idx { 172 | let line = lines 173 | .get(cur_idx) 174 | .ok_or_else(|| anyhow!("TODO line mismatch"))?; 175 | let line_number = cur_idx + 1; 176 | 177 | let max_line_number = max_idx + 1; 178 | let max_pad = max_line_number.to_string().len(); 179 | 180 | // Write `123 | my failing line content 181 | if cur_idx == highlight_idx { 182 | // Highlight the actually failing line with a chevron + different color 183 | write!( 184 | stdout, 185 | " >>> {:>width$} |{}", 186 | style(line_number).dim(), 187 | style(line).yellow(), 188 | width = max_pad 189 | )?; 190 | } else { 191 | write!( 192 | stdout, 193 | " {:>width$} |{}", 194 | style(line_number).dim(), 195 | line, 196 | width = max_pad 197 | )?; 198 | } 199 | } 200 | } 201 | Err(e) => { 202 | let msg = textwrap::indent( 203 | &format!( 204 | "Could not retrieve source context: {}\n\ 205 | This is typically a linter bug.", 206 | e 207 | ), 208 | spaces(8), 209 | ); 210 | write!(stdout, "{}", style(msg).red())?; 211 | } 212 | } 213 | stdout.write_all(b"\n")?; 214 | Ok(()) 215 | } 216 | 217 | // Write the context, computing and styling a diff from the original to the suggested replacement. 
218 | fn write_context_diff(stdout: &mut impl Write, original: &str, replacement: &str) -> Result<()> { 219 | writeln!( 220 | stdout, 221 | "\n {}", 222 | style("You can run `lintrunner -a` to apply this patch.").cyan() 223 | )?; 224 | stdout.write_all(b"\n")?; 225 | let diff = TextDiff::from_lines(original, replacement); 226 | 227 | let mut max_line_number = 1; 228 | for group in diff.grouped_ops(3).iter() { 229 | for op in group { 230 | for change in diff.iter_inline_changes(op) { 231 | let old_line = change.old_index().unwrap_or(0) + 1; 232 | let new_line = change.new_index().unwrap_or(0) + 1; 233 | max_line_number = cmp::max(max_line_number, old_line); 234 | max_line_number = cmp::max(max_line_number, new_line); 235 | } 236 | } 237 | } 238 | let max_pad = max_line_number.to_string().len(); 239 | 240 | for (idx, group) in diff.grouped_ops(3).iter().enumerate() { 241 | if idx > 0 { 242 | writeln!(stdout, "{:-^1$}", "-", 80)?; 243 | } 244 | for op in group { 245 | for change in diff.iter_inline_changes(op) { 246 | let (sign, s) = match change.tag() { 247 | ChangeTag::Delete => ("-", Style::new().red()), 248 | ChangeTag::Insert => ("+", Style::new().green()), 249 | ChangeTag::Equal => (" ", Style::new().dim()), 250 | }; 251 | let changeset = Changeset { 252 | max_pad, 253 | old: change.old_index(), 254 | new: change.new_index(), 255 | }; 256 | write!( 257 | stdout, 258 | " {} |{}", 259 | style(changeset).dim(), 260 | s.apply_to(sign).bold() 261 | )?; 262 | for (emphasized, value) in change.iter_strings_lossy() { 263 | if emphasized { 264 | write!(stdout, "{}", s.apply_to(value).underlined().on_black())?; 265 | } else { 266 | write!(stdout, "{}", s.apply_to(value))?; 267 | } 268 | } 269 | if change.missing_newline() { 270 | stdout.write_all(b"\n")?; 271 | } 272 | } 273 | } 274 | } 275 | stdout.write_all(b"\n")?; 276 | Ok(()) 277 | } 278 | 279 | // Write: ` Error (LINTER) prefer-using-this-over-that\n` 280 | fn write_summary_line(stdout: &mut impl Write, 
lint_message: &LintMessage) -> Result<()> { 281 | let error_style = match lint_message.severity { 282 | LintSeverity::Error => Style::new().on_red().bold(), 283 | LintSeverity::Warning | LintSeverity::Advice | LintSeverity::Disabled => { 284 | Style::new().on_yellow().bold() 285 | } 286 | }; 287 | writeln!( 288 | stdout, 289 | " {} ({}) {}", 290 | error_style.apply_to(lint_message.severity.label()), 291 | lint_message.code, 292 | style(&lint_message.name).underlined(), 293 | )?; 294 | Ok(()) 295 | } 296 | 297 | fn bspaces(len: u8) -> &'static [u8] { 298 | const SPACES: [u8; 255] = [b' '; 255]; 299 | &SPACES[0..len as usize] 300 | } 301 | 302 | /// Short 'static strs of spaces. 303 | fn spaces(len: u8) -> &'static str { 304 | // SAFETY: `SPACES` is valid UTF-8 since it is all spaces. 305 | unsafe { std::str::from_utf8_unchecked(bspaces(len)) } 306 | } 307 | 308 | struct Changeset { 309 | // The length of the largest line number we'll be printing. 310 | max_pad: usize, 311 | old: Option, 312 | new: Option, 313 | } 314 | impl fmt::Display for Changeset { 315 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 316 | // We want things to get formatted like: 317 | // 1234 1235 318 | // ^^ two spaces 319 | match (self.old, self.new) { 320 | (Some(old), Some(new)) => { 321 | // +1 because we want to print the line number, not the vector index. 
322 | let old = old + 1; 323 | let new = new + 1; 324 | write!( 325 | f, 326 | "{:>left_pad$} {:>right_pad$}", 327 | old, 328 | new, 329 | left_pad = self.max_pad, 330 | right_pad = self.max_pad, 331 | ) 332 | } 333 | // In cases where old/new are missing, do an approximation: 334 | // '1234 ' 335 | // ^^^^ length of '1234' mirrored to the other side 336 | // ^^ two spaces still 337 | (Some(old), None) => write!(f, "{:>width$} {:width$}", old, " ", width = self.max_pad), 338 | (None, Some(new)) => { 339 | let new = new + 1; 340 | write!(f, "{:width$} {:>width$}", " ", new, width = self.max_pad) 341 | } 342 | (None, None) => unreachable!(), 343 | } 344 | } 345 | } 346 | 347 | pub fn print_error(err: &anyhow::Error) -> std::io::Result<()> { 348 | let mut stderr = Term::stderr(); 349 | let mut chain = err.chain(); 350 | 351 | if let Some(error) = chain.next() { 352 | write!(stderr, "{} ", style("error:").red().bold())?; 353 | let indented = indent(&format!("{}", error), spaces(7)); 354 | writeln!(stderr, "{}", indented)?; 355 | 356 | for cause in chain { 357 | write!(stderr, "{} ", style("caused_by:").red().bold())?; 358 | write!(stderr, " ")?; 359 | let indented = indent(&format!("{}", cause), spaces(11)); 360 | writeln!(stderr, "{}", indented)?; 361 | } 362 | } 363 | 364 | Ok(()) 365 | } 366 | -------------------------------------------------------------------------------- /src/sapling.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | log_utils, 3 | path::{self, AbsPath}, 4 | version_control::VersionControl, 5 | }; 6 | 7 | use anyhow; 8 | 9 | pub struct Repo { 10 | root: path::AbsPath, 11 | } 12 | 13 | impl VersionControl for Repo { 14 | fn new() -> anyhow::Result { 15 | let output = std::process::Command::new("sl").arg("root").output()?; 16 | anyhow::ensure!(output.status.success(), "Failed to determine Sapling root"); 17 | let root = std::str::from_utf8(&output.stdout)?.trim(); 18 | Ok(Repo { 19 | root: 
path::AbsPath::try_from(root)?, 20 | }) 21 | } 22 | 23 | fn get_head(&self) -> anyhow::Result { 24 | let mut cmd = std::process::Command::new("sl"); 25 | cmd.arg("whereami"); 26 | let output = cmd.current_dir(&self.root).output()?; 27 | log_utils::ensure_output(&format!("{:?}", cmd), &output)?; 28 | let head = std::str::from_utf8(&output.stdout)?.trim(); 29 | Ok(head.to_string()) 30 | } 31 | 32 | fn get_merge_base_with(&self, merge_base_with: &str) -> anyhow::Result { 33 | let output = std::process::Command::new("sl") 34 | .arg("log") 35 | .arg(format!("--rev=ancestor(., {})", merge_base_with)) 36 | .arg("--template={node}") 37 | .current_dir(&self.root) 38 | .output()?; 39 | 40 | anyhow::ensure!( 41 | output.status.success(), 42 | format!("Failed to get most recent common ancestor between . and {merge_base_with}") 43 | ); 44 | let merge_base = std::str::from_utf8(&output.stdout)?.trim(); 45 | Ok(merge_base.to_string()) 46 | } 47 | 48 | fn get_all_files(&self, under: Option<&AbsPath>) -> anyhow::Result> { 49 | // Output of sl status looks like: 50 | // D src/lib.rs 51 | // M foo/bar.baz 52 | let re = regex::Regex::new(r"^[A-Z?]\s+")?; 53 | 54 | let mut cmd = std::process::Command::new("sl"); 55 | cmd.arg("status").arg("--all"); 56 | if let Some(under) = under { 57 | cmd.arg(under.as_os_str()); 58 | } 59 | cmd.current_dir(&self.root); 60 | let output = cmd.output()?; 61 | log_utils::ensure_output(&format!("{:?}", cmd), &output)?; 62 | let all_files_str = std::str::from_utf8(&output.stdout)?; 63 | let all_files: std::collections::HashSet = all_files_str 64 | .split('\n') 65 | .map(|x| x.to_string()) 66 | .filter(|line| !line.starts_with('I')) 67 | .map(|line| re.replace(&line, "").to_string()) 68 | .filter(|line| !line.is_empty()) 69 | .collect(); 70 | 71 | let filtered_all_files = all_files 72 | .into_iter() 73 | .map(|f| format!("{}", self.root.join(f).display())) 74 | .filter_map(|f| match path::AbsPath::try_from(&f) { 75 | Ok(abs_path) => Some(abs_path), 76 | 
Err(_) => { 77 | eprintln!("Failed to find file while gathering files to lint: {}", f); 78 | None 79 | } 80 | }) 81 | .collect::>(); 82 | 83 | Ok(filtered_all_files) 84 | } 85 | 86 | fn get_changed_files(&self, relative_to: Option<&str>) -> anyhow::Result> { 87 | // Output of sl status looks like: 88 | // D src/lib.rs 89 | // M foo/bar.baz 90 | let re = regex::Regex::new(r"^[A-Z?]\s+")?; 91 | 92 | // Retrieve changed files in current commit. 93 | let mut cmd = std::process::Command::new("sl"); 94 | cmd.arg("status"); 95 | cmd.arg(format!("--rev={}", relative_to.unwrap_or(".^"))); 96 | cmd.current_dir(&self.root); 97 | let output = cmd.output()?; 98 | log_utils::ensure_output(&format!("{:?}", cmd), &output)?; 99 | 100 | let commit_files_str = std::str::from_utf8(&output.stdout)?; 101 | 102 | let commit_files: std::collections::HashSet = commit_files_str 103 | .split('\n') 104 | .map(|x| x.to_string()) 105 | // Filter out deleted files. 106 | .filter(|line| !line.starts_with('R')) 107 | .filter(|line| !line.starts_with('!')) 108 | // Strip the status prefix. 
109 | .map(|line| re.replace(&line, "").to_string()) 110 | .filter(|line| !line.is_empty()) 111 | .collect(); 112 | 113 | log_utils::log_files("Linting commit diff files: ", &commit_files); 114 | 115 | let filtered_commit_files = commit_files 116 | .into_iter() 117 | .map(|f| format!("{}", self.root.join(f).display())) 118 | .filter_map(|f| match path::AbsPath::try_from(&f) { 119 | Ok(abs_path) => Some(abs_path), 120 | Err(_) => { 121 | eprintln!("Failed to find file while gathering files to lint: {}", f); 122 | None 123 | } 124 | }) 125 | .collect::>(); 126 | 127 | Ok(filtered_commit_files) 128 | } 129 | } 130 | 131 | #[cfg(test)] 132 | mod tests { 133 | use once_cell::sync::Lazy; 134 | use std::{fs::OpenOptions, io::Write, sync::Mutex}; // 1.4.0 135 | 136 | static SL_GLOBAL_MUTEX: Lazy> = Lazy::new(Mutex::default); 137 | use crate::testing; 138 | 139 | use super::*; 140 | use anyhow::Result; 141 | use tempfile::TempDir; 142 | 143 | struct SaplingClone { 144 | _temp_dir: TempDir, 145 | root: std::path::PathBuf, 146 | } 147 | 148 | impl SaplingClone { 149 | fn new(git_repo: &testing::GitCheckout) -> Result { 150 | let _shared = SL_GLOBAL_MUTEX.lock().unwrap(); 151 | let temp_dir = TempDir::new()?; 152 | assert_eq!( 153 | std::process::Command::new("sl") 154 | .arg("clone") 155 | .arg("--git") 156 | .arg(git_repo.root()) 157 | .current_dir(temp_dir.path()) 158 | .status()? 
159 | .code(), 160 | Some(0) 161 | ); 162 | let root = temp_dir.path().join(git_repo.root().file_name().unwrap()); 163 | let sl = SaplingClone { 164 | _temp_dir: temp_dir, 165 | root, 166 | }; 167 | Ok(sl) 168 | } 169 | 170 | fn run(&self, subcommand: &str) -> std::process::Command { 171 | let _shared = SL_GLOBAL_MUTEX.lock().unwrap(); 172 | let mut cmd = std::process::Command::new("sl"); 173 | cmd.current_dir(&self.root); 174 | cmd.arg(subcommand); 175 | cmd 176 | } 177 | 178 | fn rm_file(&self, name: &str) -> Result<()> { 179 | let path = self.root.join(name); 180 | std::fs::remove_file(path)?; 181 | Ok(()) 182 | } 183 | 184 | fn write_file(&self, name: &str, contents: &str) -> Result<()> { 185 | let path = self.root.join(name); 186 | let mut file = OpenOptions::new() 187 | .read(true) 188 | .append(true) 189 | .create(true) 190 | .open(path)?; 191 | 192 | writeln!(file, "{}", contents)?; 193 | Ok(()) 194 | } 195 | 196 | fn add(&self, pathspec: &str) -> Result<()> { 197 | assert_eq!(self.run("add").arg(pathspec).status()?.code(), Some(0)); 198 | Ok(()) 199 | } 200 | 201 | fn rm(&self, pathspec: &str) -> Result<()> { 202 | assert_eq!(self.run("rm").arg(pathspec).status()?.code(), Some(0)); 203 | Ok(()) 204 | } 205 | 206 | fn commit(&self, message: &str) -> Result<()> { 207 | assert_eq!( 208 | self.run("commit") 209 | .arg(format!("--message={}", message)) 210 | .status()? 
211 | .code(), 212 | Some(0) 213 | ); 214 | Ok(()) 215 | } 216 | 217 | fn changed_files(&self, relative_to: Option<&str>) -> Result> { 218 | let _shared = SL_GLOBAL_MUTEX.lock().unwrap(); 219 | std::env::set_current_dir(&self.root)?; 220 | let repo = Repo::new()?; 221 | let files = repo.get_changed_files(relative_to)?; 222 | let files = files 223 | .into_iter() 224 | .map(|abs_path| abs_path.file_name().unwrap().to_string_lossy().to_string()) 225 | .collect::>(); 226 | Ok(files) 227 | } 228 | 229 | fn merge_base_with(&self, merge_base_with: &str) -> Result { 230 | let _shared = SL_GLOBAL_MUTEX.lock().unwrap(); 231 | std::env::set_current_dir(&self.root)?; 232 | let repo = Repo::new()?; 233 | repo.get_merge_base_with(merge_base_with) 234 | } 235 | 236 | fn get_all_files(&self) -> Result> { 237 | let _shared = SL_GLOBAL_MUTEX.lock().unwrap(); 238 | std::env::set_current_dir(&self.root)?; 239 | let repo = Repo::new()?; 240 | repo.get_all_files(None) 241 | } 242 | } 243 | 244 | // Should properly detect changes in the commit (and not check other files) 245 | #[test] 246 | #[cfg_attr(target_os = "windows", ignore)] // remove when sapling installation is better 247 | #[cfg_attr(target_os = "linux", ignore)] // remove when sapling installation is better 248 | fn doesnt_detect_unchanged() -> Result<()> { 249 | let git = testing::GitCheckout::new()?; 250 | git.write_file("test_1.txt", "Initial commit")?; 251 | git.write_file("test_2.txt", "Initial commit")?; 252 | git.write_file("test_3.txt", "Initial commit")?; 253 | 254 | git.add(".")?; 255 | git.commit("commit 1")?; 256 | 257 | // Don't write anthing to file 2 for this! 
258 | git.write_file("test_1.txt", "commit 2")?; 259 | 260 | git.add(".")?; 261 | git.commit("commit 2")?; 262 | 263 | let sl = SaplingClone::new(&git)?; 264 | 265 | // Add some uncomitted changes to the working tree 266 | sl.write_file("test_3.txt", "commit 2")?; 267 | 268 | let files = sl.changed_files(None)?; 269 | assert_eq!(files.len(), 2); 270 | assert!(files.contains(&"test_1.txt".to_string())); 271 | assert!(files.contains(&"test_3.txt".to_string())); 272 | Ok(()) 273 | } 274 | 275 | // Files that were deleted in the commit should not be checked, since 276 | // obviously they are gone. 277 | #[test] 278 | #[cfg_attr(target_os = "windows", ignore)] // remove when sapling installation is better 279 | #[cfg_attr(target_os = "linux", ignore)] // remove when sapling installation is better 280 | fn deleted_files_in_commit() -> Result<()> { 281 | let git = testing::GitCheckout::new()?; 282 | git.write_file("test_1.txt", "Initial commit")?; 283 | git.write_file("test_2.txt", "Initial commit")?; 284 | git.write_file("test_3.txt", "Initial commit")?; 285 | 286 | git.add(".")?; 287 | git.commit("commit 1")?; 288 | 289 | let sl = SaplingClone::new(&git)?; 290 | 291 | sl.rm_file("test_1.txt")?; 292 | 293 | let files = sl.changed_files(None)?; // still looks at the parent commit 294 | assert_eq!(files.len(), 2); 295 | 296 | sl.rm("test_1.txt")?; 297 | 298 | sl.commit("removal commit")?; 299 | 300 | // Remove a file in the working tree as well. 301 | sl.rm("test_2.txt")?; 302 | 303 | let files = sl.changed_files(None)?; 304 | assert_eq!(files.len(), 0); 305 | Ok(()) 306 | } 307 | 308 | // Files that were deleted/moved in the working tree should not be checked, 309 | // since obviously they are gone. 
310 | #[test] 311 | #[cfg_attr(target_os = "windows", ignore)] // remove when sapling installation is better 312 | #[cfg_attr(target_os = "linux", ignore)] // remove when sapling installation is better 313 | fn moved_files_working_tree() -> Result<()> { 314 | let git = testing::GitCheckout::new()?; 315 | git.write_file("test_1.txt", "Initial commit")?; 316 | git.add(".")?; 317 | git.commit("commit 1")?; 318 | 319 | git.write_file("test_2.txt", "foo")?; 320 | git.add(".")?; 321 | git.commit("commit 2")?; 322 | 323 | let sl = SaplingClone::new(&git)?; 324 | 325 | assert_eq!( 326 | sl.run("move") 327 | .arg("test_2.txt") 328 | .arg("new.txt") 329 | .status()? 330 | .code(), 331 | Some(0) 332 | ); 333 | 334 | let files = sl.changed_files(None)?; 335 | assert!(files.contains(&"new.txt".to_string())); 336 | Ok(()) 337 | } 338 | 339 | #[test] 340 | #[cfg_attr(target_os = "windows", ignore)] // remove when sapling installation is better 341 | #[cfg_attr(target_os = "linux", ignore)] // remove when sapling installation is better 342 | fn relative_revision() -> Result<()> { 343 | let git = testing::GitCheckout::new()?; 344 | git.write_file("test_1.txt", "Initial commit")?; 345 | git.write_file("test_2.txt", "Initial commit")?; 346 | git.write_file("test_3.txt", "Initial commit")?; 347 | 348 | git.add(".")?; 349 | git.commit("I am HEAD~2")?; 350 | 351 | git.write_file("test_1.txt", "foo")?; 352 | 353 | git.add(".")?; 354 | git.commit("I am HEAD~1")?; 355 | 356 | git.write_file("test_2.txt", "foo")?; 357 | 358 | git.add(".")?; 359 | git.commit("I am HEAD")?; 360 | 361 | let sl = SaplingClone::new(&git)?; 362 | 363 | // Add some uncomitted changes to the working tree 364 | sl.write_file("test_3.txt", "commit 2")?; 365 | 366 | { 367 | // Relative to the HEAD commit, only the working tree changes should 368 | // be checked. 
369 | let files = sl.changed_files(Some("."))?; 370 | assert_eq!(files.len(), 1); 371 | assert!(files.contains(&"test_3.txt".to_string())); 372 | } 373 | { 374 | let files = sl.changed_files(Some(".^"))?; 375 | assert_eq!(files.len(), 2); 376 | assert!(files.contains(&"test_2.txt".to_string())); 377 | assert!(files.contains(&"test_3.txt".to_string())); 378 | } 379 | { 380 | let files = sl.changed_files(Some(".^^"))?; 381 | assert_eq!(files.len(), 3); 382 | assert!(files.contains(&"test_1.txt".to_string())); 383 | assert!(files.contains(&"test_2.txt".to_string())); 384 | assert!(files.contains(&"test_3.txt".to_string())); 385 | } 386 | Ok(()) 387 | } 388 | 389 | // File deletions should work correctly even if a relative revision is 390 | // specified. 391 | #[test] 392 | #[cfg_attr(target_os = "windows", ignore)] // remove when sapling installation is better 393 | #[cfg_attr(target_os = "linux", ignore)] // remove when sapling installation is better 394 | fn deleted_files_relative_revision() -> Result<()> { 395 | let git = testing::GitCheckout::new()?; 396 | git.write_file("test_1.txt", "Initial commit")?; 397 | git.write_file("test_2.txt", "Initial commit")?; 398 | git.write_file("test_3.txt", "Initial commit")?; 399 | 400 | git.add(".")?; 401 | git.commit("commit 1")?; 402 | 403 | let sl = SaplingClone::new(&git)?; 404 | 405 | sl.rm_file("test_1.txt")?; 406 | 407 | let files = sl.changed_files(None)?; 408 | assert_eq!(files.len(), 2); 409 | 410 | sl.rm("test_1.txt")?; 411 | sl.commit("removal commit")?; 412 | 413 | sl.write_file("test_2.txt", "Initial commit")?; 414 | sl.add(".")?; 415 | sl.commit("another commit")?; 416 | 417 | assert_eq!(sl.run("sl").status()?.code(), Some(0)); 418 | 419 | let files = sl.changed_files(Some(".^^"))?; 420 | assert_eq!(files.len(), 1); 421 | Ok(()) 422 | } 423 | 424 | #[test] 425 | #[cfg_attr(target_os = "windows", ignore)] // remove when sapling installation is better 426 | #[cfg_attr(target_os = "linux", ignore)] // remove when 
sapling installation is better 427 | fn get_all_files() -> Result<()> { 428 | let git = testing::GitCheckout::new()?; 429 | git.write_file("test_1.txt", "Initial commit")?; 430 | git.write_file("test_2.txt", "Initial commit")?; 431 | git.write_file("test_3.txt", "Initial commit")?; 432 | git.write_file("test_4.txt", "Initial commit")?; 433 | 434 | git.add(".")?; 435 | git.commit("I am main")?; 436 | let sl = SaplingClone::new(&git)?; 437 | let mut all_files = sl.get_all_files()?; 438 | all_files.sort(); 439 | assert_eq!( 440 | all_files, 441 | vec!( 442 | AbsPath::try_from("README")?, 443 | AbsPath::try_from("test_1.txt")?, 444 | AbsPath::try_from("test_2.txt")?, 445 | AbsPath::try_from("test_3.txt")?, 446 | AbsPath::try_from("test_4.txt")? 447 | ) 448 | ); 449 | Ok(()) 450 | } 451 | 452 | #[test] 453 | #[cfg_attr(target_os = "windows", ignore)] // remove when sapling installation is better 454 | #[cfg_attr(target_os = "linux", ignore)] // remove when sapling installation is better 455 | fn merge_base_with() -> Result<()> { 456 | let git = testing::GitCheckout::new()?; 457 | git.write_file("test_1.txt", "Initial commit")?; 458 | git.write_file("test_2.txt", "Initial commit")?; 459 | git.write_file("test_3.txt", "Initial commit")?; 460 | git.write_file("test_4.txt", "Initial commit")?; 461 | 462 | git.add(".")?; 463 | git.commit("I am main")?; 464 | 465 | git.checkout_new_branch("branch1")?; 466 | git.write_file("test_1.txt", "foo")?; 467 | git.add(".")?; 468 | git.commit("I am on branch1")?; 469 | 470 | git.checkout_new_branch("branch2")?; 471 | git.write_file("test_2.txt", "foo")?; 472 | git.add(".")?; 473 | git.commit("I am branch2")?; 474 | 475 | git.checkout_new_branch("branch3")?; 476 | git.write_file("test_3.txt", "blah")?; 477 | git.add(".")?; 478 | git.commit("I am branch3")?; 479 | 480 | let sl = SaplingClone::new(&git)?; 481 | 482 | // Add some uncomitted changes to the working tree 483 | sl.write_file("test_4.txt", "blahblah")?; 484 | 485 | assert_eq!( 
486 | sl.run("pull") 487 | .arg("--bookmark=branch1") 488 | .arg("--bookmark=branch2") 489 | .status()? 490 | .code(), 491 | Some(0) 492 | ); 493 | 494 | { 495 | let merge_base = Some(sl.merge_base_with("branch2")?); 496 | let files = sl.changed_files(merge_base.as_deref())?; 497 | assert_eq!(files.len(), 2); 498 | assert!(files.contains(&"test_4.txt".to_string())); 499 | assert!(files.contains(&"test_3.txt".to_string())); 500 | } 501 | { 502 | let merge_base = Some(sl.merge_base_with("branch1")?); 503 | let files = sl.changed_files(merge_base.as_deref())?; 504 | assert_eq!(files.len(), 3); 505 | assert!(files.contains(&"test_4.txt".to_string())); 506 | assert!(files.contains(&"test_3.txt".to_string())); 507 | assert!(files.contains(&"test_2.txt".to_string())); 508 | } 509 | Ok(()) 510 | } 511 | } 512 | -------------------------------------------------------------------------------- /src/testing.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::OpenOptions, io::Write, process::Command, sync::Mutex}; 2 | 3 | use crate::get_version_control; 4 | 5 | use anyhow::Result; 6 | use once_cell::sync::Lazy; 7 | use tempfile::TempDir; 8 | 9 | // Global mutex to prevent race conditions when changing current directory 10 | // This is the same pattern used in Sapling tests 11 | static GIT_GLOBAL_MUTEX: Lazy> = Lazy::new(Mutex::default); 12 | 13 | pub struct GitCheckout { 14 | root: TempDir, 15 | } 16 | 17 | impl GitCheckout { 18 | pub fn new() -> Result { 19 | let git = GitCheckout { 20 | root: TempDir::new()?, 21 | }; 22 | 23 | assert_eq!(git.run("init").status()?.code(), Some(0)); 24 | 25 | // We add an initial commit because git diff-tree behaves 26 | // differently when HEAD is the only commit in the 27 | // repository. 
In actual production uses, our git 28 | // diff-tree invocation will show the files modified in 29 | // the HEAD commit compared to HEAD~, but if HEAD~ doesn't 30 | // exist, it returns an empty list of files. 31 | git.write_file("README", "or don't")?; 32 | git.add("README")?; 33 | git.commit("initial commit")?; 34 | 35 | Ok(git) 36 | } 37 | 38 | // Gets the root directory of the git clone. 39 | pub fn root(&self) -> &std::path::Path { 40 | self.root.path() 41 | } 42 | 43 | pub fn rm_file(&self, name: &str) -> Result<()> { 44 | let path = self.root().join(name); 45 | std::fs::remove_file(path)?; 46 | Ok(()) 47 | } 48 | 49 | pub fn write_file(&self, name: &str, contents: &str) -> Result<()> { 50 | let path = self.root().join(name); 51 | let mut file = OpenOptions::new() 52 | .read(true) 53 | .append(true) 54 | .create(true) 55 | .open(path)?; 56 | 57 | writeln!(file, "{}", contents)?; 58 | Ok(()) 59 | } 60 | 61 | pub fn checkout_new_branch(&self, branch_name: &str) -> Result<()> { 62 | let output = Command::new("git") 63 | .args(["checkout", "-b", branch_name]) 64 | .current_dir(self.root()) 65 | .output()?; 66 | assert!(output.status.success()); 67 | Ok(()) 68 | } 69 | 70 | pub fn add(&self, pathspec: &str) -> Result<()> { 71 | let output = Command::new("git") 72 | .args(["add", pathspec]) 73 | .current_dir(self.root()) 74 | .output()?; 75 | assert!(output.status.success()); 76 | Ok(()) 77 | } 78 | 79 | pub fn commit(&self, message: &str) -> Result<()> { 80 | let output = Command::new("git") 81 | .args(["commit", "-m", message]) 82 | .current_dir(self.root()) 83 | .output()?; 84 | assert!(output.status.success()); 85 | Ok(()) 86 | } 87 | 88 | pub fn changed_files(&self, relative_to: Option<&str>) -> Result> { 89 | let _shared = GIT_GLOBAL_MUTEX.lock().unwrap(); 90 | std::env::set_current_dir(self.root())?; 91 | let repo = get_version_control()?; 92 | let files = repo.get_changed_files(relative_to)?; 93 | let files = files 94 | .into_iter() 95 | .map(|abs_path| 
abs_path.file_name().unwrap().to_string_lossy().to_string()) 96 | .collect::>(); 97 | Ok(files) 98 | } 99 | 100 | pub fn merge_base_with(&self, merge_base_with: &str) -> Result { 101 | let _shared = GIT_GLOBAL_MUTEX.lock().unwrap(); 102 | std::env::set_current_dir(self.root())?; 103 | let repo = get_version_control()?; 104 | repo.get_merge_base_with(merge_base_with) 105 | } 106 | 107 | // Returns a Command to run the subcommand in the clone. 108 | pub fn run(&self, subcommand: &str) -> std::process::Command { 109 | let mut cmd = std::process::Command::new("git"); 110 | cmd.arg(subcommand); 111 | cmd.current_dir(self.root()); 112 | cmd 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/version_control.rs: -------------------------------------------------------------------------------- 1 | use anyhow; 2 | 3 | use crate::path::AbsPath; 4 | 5 | // Trait describing the operations we need in lintrunner for a version 6 | // control system. 7 | pub trait VersionControl { 8 | // Creates a new instance, trying the different implementations we 9 | // have available. 10 | fn new() -> anyhow::Result 11 | where 12 | Self: Sized; 13 | 14 | // Gets the tip of the repository. 15 | fn get_head(&self) -> anyhow::Result; 16 | 17 | // Gets the most recent common ancestor between the tip and the 18 | // given commit. 19 | fn get_merge_base_with(&self, merge_base_with: &str) -> anyhow::Result; 20 | 21 | // Gets the files that have changed relative to the given commit. 22 | fn get_changed_files(&self, relative_to: Option<&str>) -> anyhow::Result>; 23 | 24 | // Get all files in the repo. 
25 | fn get_all_files(&self, under: Option<&AbsPath>) -> anyhow::Result>; 26 | } 27 | -------------------------------------------------------------------------------- /tests/fixtures/fake_source_file.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use assert_cmd::Command; 3 | use insta::assert_yaml_snapshot; 4 | use lintrunner::lint_message::{LintMessage, LintSeverity}; 5 | use regex::Regex; 6 | use std::io::Write; 7 | 8 | fn assert_output_snapshot(cmd: &mut Command) -> Result<()> { 9 | let re = Regex::new("'.*test-lintrunner-config.*toml'").unwrap(); 10 | let output = cmd.output()?; 11 | 12 | let output_string = format!( 13 | "STDOUT:\n{}\n\nSTDERR:\n{}", 14 | std::str::from_utf8(&output.stdout)?, 15 | std::str::from_utf8(&output.stderr)?, 16 | ); 17 | let output_lines = output_string.lines().collect::>(); 18 | 19 | assert_yaml_snapshot!( 20 | output_lines, 21 | // Define a dynamic redaction on all lines. This will replace the config 22 | // name (which is a tempfile that changes from run to run) with a fixed value. 23 | // Everything else is passed through normally. 24 | { 25 | "[]" => insta::dynamic_redaction(move |value, _path| 26 | { 27 | re.replace(value.as_str().unwrap(), "").to_string() 28 | } 29 | ), 30 | } 31 | ); 32 | Ok(()) 33 | } 34 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__changed_init_causes_warning_1.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - ok No lint issues. 8 | - "" 9 | - "" 10 | - "STDERR:" 11 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 
12 | 13 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__changed_init_causes_warning_2.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - ok No lint issues. 8 | - "" 9 | - "" 10 | - "STDERR:" 11 | - "WARNING: The init commands have changed since you last ran lintrunner. You may need to run `lintrunner init`." 12 | 13 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__duplicate_code_fails.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - "" 7 | - "" 8 | - "STDERR:" 9 | - "error: Invalid linter configuration: linter 'DUPE' is defined multiple times." 10 | 11 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__empty_command_fails.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "error: Invalid linter configuration: 'TESTLINTER' has an empty command list." 11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__excluding_dryrun_fails.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "error: Config for linter TESTLINTER defines init args but does not take a {{DRYRUN}} argument." 
11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__format_command_doesnt_use_nonformat_linter.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - No linters ran. 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__git_head_files.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - ">>> Lint for test_file_1.txt:" 10 | - "" 11 | - " Error (ECHOLINTER) echo lint" 12 | - "" 13 | - " >>> 1 |Initial commit" 14 | - " 2 |content from commit2" 15 | - " 3 |content from commit2" 16 | - "" 17 | - "" 18 | - "" 19 | - "STDERR:" 20 | - "[2021-11-15T23:53:41Z DEBUG lintrunner::lint_config] Found linters: {\"ECHOLINTER\"}" 21 | - "[2021-11-15T23:53:41Z DEBUG lintrunner] Running linters: [\"ECHOLINTER\"]" 22 | - "[2021-11-15T23:53:41Z DEBUG lintrunner::git] Linting commit diff files: {}" 23 | - "[2021-11-15T23:53:41Z DEBUG lintrunner::git] Linting working tree diff files: {\"test_file_1.txt\"}" 24 | - "[2021-11-15T23:53:41Z DEBUG lintrunner] Linting files: [" 25 | - " AbsPath {" 26 | - " inner: \"/tmp/.tmpqSYc7w/test_file_1.txt\"," 27 | - " }," 28 | - " ]" 29 | - "[2021-11-15T23:53:41Z DEBUG lintrunner::linter] Linter 'ECHOLINTER' matched files: [" 30 | - " AbsPath {" 31 | - " inner: \"/tmp/.tmpqSYc7w/test_file_1.txt\"," 32 | - " }," 33 | - " ]" 34 | - "[2021-11-15T23:53:41Z DEBUG lintrunner::linter] Running linter ECHOLINTER: python3 
/raid/suo/lintrunner/tests/fixtures/echo_linter.py -- @/tmp/.tmpAvNutT" 35 | - "[2021-11-15T23:53:41Z DEBUG lintrunner::linter] Linter ECHOLINTER took: 36.505041ms" 36 | 37 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__git_no_changes.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - ">>> General linter failure:" 10 | - "" 11 | - " Error (TESTLINTER) Linter failed" 12 | - " Linter failed. This a bug, please file an issue against the linter" 13 | - " maintainer." 14 | - " " 15 | - " CONTEXT:" 16 | - " Failed to execute linter command wont_be_run with args: []" 17 | - "" 18 | - "" 19 | - "STDERR:" 20 | - "[2021-11-15T22:37:14Z DEBUG lintrunner::lint_config] Found linters: {\"TESTLINTER\"}" 21 | - "[2021-11-15T22:37:14Z DEBUG lintrunner] Running linters: [\"TESTLINTER\"]" 22 | - "[2021-11-15T22:37:14Z DEBUG lintrunner::git] Linting commit diff files: {}" 23 | - "[2021-11-15T22:37:14Z DEBUG lintrunner::git] Linting working tree diff files: {\"README.md\"}" 24 | - "[2021-11-15T22:37:14Z DEBUG lintrunner] Linting files: [" 25 | - " AbsPath {" 26 | - " inner: \"/tmp/.tmp85SiQW/README.md\"," 27 | - " }," 28 | - " ]" 29 | - "[2021-11-15T22:37:14Z DEBUG lintrunner::linter] Linter 'TESTLINTER' matched files: [" 30 | - " AbsPath {" 31 | - " inner: \"/tmp/.tmp85SiQW/README.md\"," 32 | - " }," 33 | - " ]" 34 | - "[2021-11-15T22:37:14Z DEBUG lintrunner::linter] Running linter TESTLINTER: wont_be_run " 35 | 36 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__init_suppresses_warning.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - ok No 
lint issues. 8 | - "" 9 | - "" 10 | - "STDERR:" 11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__invalid_args.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "error: The argument '--paths-cmd ' cannot be used with 'paths'" 11 | - "" 12 | - "USAGE:" 13 | - " lintrunner --config --paths-cmd " 14 | - "" 15 | - For more information try --help 16 | 17 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__invalid_config_fails.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - "" 7 | - "" 8 | - "STDERR:" 9 | - "error: Config file had invalid schema" 10 | - "caused_by: missing field `linter`" 11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__invalid_paths_cmd_and_from.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - "" 7 | - "" 8 | - "STDERR:" 9 | - "error: The argument '--paths-cmd ' cannot be used with '--paths-from '" 10 | - "" 11 | - "USAGE:" 12 | - " lintrunner --configs ... 
--paths-cmd " 13 | - "" 14 | - For more information try --help 15 | 16 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__invalid_paths_cmd_and_specified_paths.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - "" 7 | - "" 8 | - "STDERR:" 9 | - "error: The argument '--paths-cmd ' cannot be used with '...'" 10 | - "" 11 | - "USAGE:" 12 | - " lintrunner --configs ... --paths-cmd " 13 | - "" 14 | - For more information try --help 15 | 16 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__lint_with_no_linters.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - No linters ran. 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__linter_hard_failure_is_caught.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - ">>> General linter failure:" 10 | - "" 11 | - " Error (TESTLINTER) Linter failed" 12 | - " Linter failed. This a bug, please file an issue against the linter" 13 | - " maintainer." 14 | - " " 15 | - " CONTEXT:" 16 | - " Linter command failed with non-zero exit code." 17 | - " STDERR:" 18 | - " " 19 | - " " 20 | - " STDOUT:" 21 | - " " 22 | - "" 23 | - "" 24 | - "STDERR:" 25 | - "WARNING: No previous init data found. 
If this is the first time you're running lintrunner, you should run `lintrunner init`." 26 | 27 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__linter_nonexistent_command.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - ">>> General linter failure:" 10 | - "" 11 | - " Error (TESTLINTER) Linter failed" 12 | - " Linter failed. This a bug, please file an issue against the linter" 13 | - " maintainer." 14 | - " " 15 | - " CONTEXT:" 16 | - " Failed to execute linter command idonotexist with args: []" 17 | - "" 18 | - "" 19 | - "STDERR:" 20 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 21 | 22 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__linter_providing_nonexistent_path_degrades_gracefully.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - ">>> Lint for i_dont_exist_wow:" 10 | - "" 11 | - " Advice (DUMMY) dummy failure" 12 | - " A dummy linter failure" 13 | - "" 14 | - " Could not retrieve source context: No such file or directory (os error 2)" 15 | - " This is typically a linter bug." 16 | - "" 17 | - "" 18 | - "STDERR:" 19 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 
20 | 21 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__linter_replacement_trailing_newlines.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | assertion_line: 20 4 | expression: output_lines 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - "\u001b[1m>>>\u001b[0m Lint for \u001b[4mtests/fixtures/fake_source_file.rs\u001b[0m:" 10 | - "" 11 | - " \u001b[43m\u001b[1mAdvice\u001b[0m (DUMMY) \u001b[4mdummy failure\u001b[0m" 12 | - " A dummy linter failure" 13 | - "" 14 | - " \u001b[36mYou can run `lintrunner -a` to apply this patch.\u001b[0m" 15 | - "" 16 | - " \u001b[2m0 \u001b[0m |\u001b[31m\u001b[1m-\u001b[0m\u001b[31mfoo\u001b[0m\u001b[31m\u001b[40m\u001b[4m \u001b[0m\u001b[31m" 17 | - "\u001b[0m \u001b[2m1 \u001b[0m |\u001b[31m\u001b[1m-\u001b[0m\u001b[31mbar\u001b[0m\u001b[31m\u001b[40m\u001b[4m \u001b[0m\u001b[31m" 18 | - "\u001b[0m \u001b[2m2 \u001b[0m |\u001b[31m\u001b[1m-\u001b[0m\u001b[31mbaz\u001b[0m\u001b[31m\u001b[40m\u001b[4m \u001b[0m\u001b[31m" 19 | - "\u001b[0m \u001b[2m3 \u001b[0m |\u001b[31m\u001b[1m-\u001b[0m\u001b[31mfoo\u001b[0m\u001b[31m\u001b[40m\u001b[4m \u001b[0m\u001b[31m" 20 | - "\u001b[0m \u001b[2m 1\u001b[0m |\u001b[32m\u001b[1m+\u001b[0m\u001b[32mfoo\u001b[0m\u001b[32m" 21 | - "\u001b[0m \u001b[2m 2\u001b[0m |\u001b[32m\u001b[1m+\u001b[0m\u001b[32mbar\u001b[0m\u001b[32m" 22 | - "\u001b[0m \u001b[2m 3\u001b[0m |\u001b[32m\u001b[1m+\u001b[0m\u001b[32mbaz\u001b[0m\u001b[32m" 23 | - "\u001b[0m \u001b[2m 4\u001b[0m |\u001b[32m\u001b[1m+\u001b[0m\u001b[32mfoo\u001b[0m\u001b[32m" 24 | - "\u001b[0m" 25 | - "" 26 | - "" 27 | - "STDERR:" 28 | - "\u001b[33m\u001b[1mWARNING: No previous init data found. 
If this is the first time you're running lintrunner, you should run `lintrunner init`.\u001b[0m" 29 | 30 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__no_op_config_succeeds.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - ok No lint issues. 8 | - "" 9 | - "" 10 | - "STDERR:" 11 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 12 | 13 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__rage_command_output.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - "lintrunner rage report:" 7 | - "timestamp: 2022-05-09T23:43:21-07:00" 8 | - "args: '--data-path=/var/folders/by/9xrsv08s7ql0n9mzdf1dwlm80000gn/T/.tmp3uM2kH' 'README.md'" 9 | - "exit code: 1" 10 | - "err msg: None" 11 | - "" 12 | 13 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__simple_linter.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - ">>> Lint for tests/fixtures/fake_source_file.rs:" 10 | - "" 11 | - " Advice (DUMMY) dummy failure" 12 | - " A dummy linter failure" 13 | - "" 14 | - " 6 |use std::io::Write;" 15 | - " 7 |" 16 | - " 8 |fn assert_output_snapshot(cmd: &mut Command) -> Result<()> {" 17 | - " >>> 9 | let re = Regex::new(\"\").unwrap();" 18 | - " 10 | let output = cmd.output()?;" 19 | - " 11 |" 20 | - " 12 | let output_string = format!(" 21 | - "" 22 | - "" 23 | - "" 24 
| - "STDERR:" 25 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 26 | 27 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__simple_linter_fails_on_nonexistent_file.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 11 | - "error: Failed to find provided file: 'blahblahblah'" 12 | - "caused_by: No such file or directory (os error 2)" 13 | 14 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__simple_linter_fake_second_config.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - "" 7 | - "" 8 | - ">>> Lint for tests/fixtures/fake_source_file.rs:" 9 | - "" 10 | - " Advice (DUMMY) dummy failure" 11 | - " A dummy linter failure" 12 | - "" 13 | - " 6 |use std::io::Write;" 14 | - " 7 |" 15 | - " 8 |fn assert_output_snapshot(cmd: &mut Command) -> Result<()> {" 16 | - " >>> 9 | let re = Regex::new(\"\").unwrap();" 17 | - " 10 | let output = cmd.output()?;" 18 | - " 11 |" 19 | - " 12 | let output_string = format!(" 20 | - "" 21 | - "" 22 | - "" 23 | - "STDERR:" 24 | - "Warning: Could not find a lintrunner config at: 'NONEXISTENT_CONFIG'. Continuing without using configuration file." 25 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 
26 | 27 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__simple_linter_oneline.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "tests/fixtures/fake_source_file.rs:9:1 :Advice A dummy linter failure [DUMMY/dummy failure]" 8 | - "" 9 | - "" 10 | - "STDERR:" 11 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 12 | 13 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__simple_linter_only_under_dir.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - ok No lint issues. 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__simple_linter_replacement_message.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - ">>> Lint for tests/fixtures/fake_source_file.rs:" 10 | - "" 11 | - " Advice (DUMMY) dummy failure" 12 | - " A dummy linter failure" 13 | - "" 14 | - " You can run `lintrunner -a` to apply this patch." 
15 | - "" 16 | - " 1 1 | foo" 17 | - " 2 2 | bar" 18 | - " 2 |-baz" 19 | - " 3 |+bat" 20 | - " 4 4 | foo" 21 | - " 5 5 | bar" 22 | - " 5 |-baz" 23 | - " 6 |+bat" 24 | - " 7 7 | foo" 25 | - " 8 8 | bar" 26 | - " 8 |-baz" 27 | - " 9 |+bat" 28 | - " 10 10 | foo" 29 | - " 11 11 | bar" 30 | - " 11 |-baz" 31 | - " 12 |+bat" 32 | - "" 33 | - "" 34 | - "" 35 | - "STDERR:" 36 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 37 | 38 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__simple_linter_two_configs.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | --- 5 | - "STDOUT:" 6 | - "" 7 | - "" 8 | - ">>> Lint for tests/fixtures/fake_source_file.rs:" 9 | - "" 10 | - " Advice (DUMMY) real dummy failure" 11 | - " The real dummy linter failure" 12 | - "" 13 | - " 6 |use std::io::Write;" 14 | - " 7 |" 15 | - " 8 |fn assert_output_snapshot(cmd: &mut Command) -> Result<()> {" 16 | - " >>> 9 | let re = Regex::new(\"\").unwrap();" 17 | - " 10 | let output = cmd.output()?;" 18 | - " 11 |" 19 | - " 12 | let output_string = format!(" 20 | - "" 21 | - "" 22 | - "" 23 | - "STDERR:" 24 | - "WARNING: No previous init data found. If this is the first time you're running lintrunner, you should run `lintrunner init`." 25 | 26 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__skip_nonexistent_linter.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "error: Unknown linter specified in --skip: MENOEXIST. 
These linters are available: {\"TESTLINTER\"}" 11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__take_nonexistent_linter.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "error: Unknown linter specified in --take: MENOEXIST. These linters are available: {\"TESTLINTER\"}" 11 | 12 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__tee_json.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: tee_json 4 | --- 5 | {"path":"tests/fixtures/fake_source_file.rs","line":9,"char":1,"code":"DUMMY","severity":"advice","name":"dummy failure","description":"A dummy linter failure"} 6 | 7 | -------------------------------------------------------------------------------- /tests/snapshots/integration_test__unknown_config_fails.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/integration_test.rs 3 | expression: output_lines 4 | 5 | --- 6 | - "STDOUT:" 7 | - "" 8 | - "" 9 | - "STDERR:" 10 | - "error: Could not read lintrunner config at: 'asdfasdfasdf'" 11 | - "caused_by: No such file or directory (os error 2)" 12 | 13 | -------------------------------------------------------------------------------- /tools/convert_to_sarif.py: -------------------------------------------------------------------------------- 1 | """Convert the output of lintrunner json to SARIF.""" 2 | 3 | from __future__ import annotations 4 | 5 | import argparse 6 | import json 7 | import os 8 | from typing import Any, Iterable 9 | 10 | 11 | def format_rule_name(lintrunner_result: dict[str, Any]) -> str: 12 | return 
f"{lintrunner_result['code']}/{lintrunner_result['name']}" 13 | 14 | 15 | def severity_to_github_level(severity: str) -> str: 16 | if severity in {"advice", "disabled"}: 17 | return "note" 18 | return severity 19 | 20 | 21 | def parse_single_lintrunner_result( 22 | lintrunner_result: dict[str, Any] 23 | ) -> tuple[dict[str, Any], dict[str, Any]]: 24 | r"""Parse a single lintrunner result. 25 | 26 | A result looks like this: 27 | { 28 | "path":"/adapters/pytorch/grep_linter.py", 29 | "line":227, 30 | "char":80, 31 | "code":"FLAKE8", 32 | "severity":"advice", 33 | "name":"E501", 34 | "description":"line too long (81 > 79 characters)\nSee https://www.flake8rules.com/rules/E501.html" 35 | } 36 | """ 37 | if lintrunner_result["path"] is None: 38 | artifact_uri = None 39 | else: 40 | artifact_uri = ( 41 | ("file://" + lintrunner_result["path"]) 42 | if lintrunner_result["path"].startswith("/") 43 | else lintrunner_result["path"] 44 | ) 45 | result = { 46 | "ruleId": format_rule_name(lintrunner_result), 47 | "level": severity_to_github_level(lintrunner_result["severity"]), 48 | "message": { 49 | "text": lintrunner_result["description"], 50 | }, 51 | "locations": [ 52 | { 53 | "physicalLocation": { 54 | "artifactLocation": { 55 | "uri": artifact_uri, 56 | }, 57 | "region": { 58 | "startLine": lintrunner_result["line"] or 1, 59 | "startColumn": lintrunner_result["char"] or 1, 60 | }, 61 | }, 62 | }, 63 | ], 64 | } 65 | 66 | rule = { 67 | "id": format_rule_name(lintrunner_result), 68 | "rule": { 69 | "id": format_rule_name(lintrunner_result), 70 | "name": format_rule_name(lintrunner_result), 71 | "shortDescription": {"text": format_rule_name(lintrunner_result)}, 72 | "fullDescription": { 73 | "text": format_rule_name(lintrunner_result) 74 | + "\n" 75 | + lintrunner_result["description"], 76 | }, 77 | "defaultConfiguration": { 78 | "level": severity_to_github_level(lintrunner_result["severity"]), 79 | }, 80 | }, 81 | } 82 | 83 | return result, rule 84 | 85 | 86 | def 
def produce_sarif(lintrunner_results: Iterable[dict[str, Any]]) -> dict[str, Any]:
    """Convert the output of lintrunner json to SARIF.

    Args:
        lintrunner_results: Iterable of parsed lintrunner JSON records.

    Returns:
        A SARIF 2.1.0 document with a single run. Rules are deduplicated
        by id; a later record with the same rule id overwrites the earlier
        rule entry.
    """
    rules: dict[str, dict[str, Any]] = {}
    results = []
    for lintrunner_json in lintrunner_results:
        result, rule = parse_single_lintrunner_result(lintrunner_json)
        results.append(result)
        rules[rule["id"]] = rule["rule"]

    sarif = {
        "$schema": "https://json.schemastore.org/sarif-2.1.0.json",
        "version": "2.1.0",
        "runs": [
            {
                "tool": {
                    "driver": {
                        "name": "lintrunner",
                        "rules": list(rules.values()),
                    },
                },
                "results": results,
            },
        ],
    }

    return sarif


def main(args: Any) -> None:
    """Read lintrunner JSON-lines from args.input and write SARIF to args.output."""
    with open(args.input, "r", encoding="utf-8") as f:
        lintrunner_jsons = [json.loads(line) for line in f]

    sarif = produce_sarif(lintrunner_jsons)

    # Create the parent directory only when the output path has one
    # (a bare filename yields an empty dirname). Fix: reuse the already
    # computed output_dir instead of recomputing os.path.dirname().
    output_dir = os.path.dirname(args.output)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(sarif, f)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input", type=str, required=True, help="json file generated by lintrunner"
    )
    parser.add_argument("--output", type=str, required=True, help="output sarif file")
    args = parser.parse_args()
    main(args)
"path": "test.py", 13 | "line": 1, 14 | "char": 2, 15 | "code": "FLAKE8", 16 | "severity": "error", 17 | "description": "test description", 18 | "name": "test-code", 19 | }, 20 | { 21 | "path": "test.py", 22 | "line": 1, 23 | "char": 2, 24 | "code": "FLAKE8", 25 | "severity": "error", 26 | "description": "test description", 27 | "name": "test-code-2", 28 | }, 29 | { 30 | "path": "test2.py", 31 | "line": 3, 32 | "char": 4, 33 | "code": "FLAKE8", 34 | "severity": "advice", 35 | "description": "test description", 36 | "name": "test-code", 37 | }, 38 | ] 39 | actual = convert_to_sarif.produce_sarif(lintrunner_results) 40 | expected = { 41 | "$schema": "https://json.schemastore.org/sarif-2.1.0.json", 42 | "version": "2.1.0", 43 | "runs": [ 44 | { 45 | "tool": { 46 | "driver": { 47 | "name": "lintrunner", 48 | "rules": [ 49 | { 50 | "id": "FLAKE8/test-code", 51 | "name": "FLAKE8/test-code", 52 | "shortDescription": {"text": "FLAKE8/test-code"}, 53 | "fullDescription": { 54 | "text": "FLAKE8/test-code\ntest description" 55 | }, 56 | "defaultConfiguration": {"level": "note"}, 57 | }, 58 | { 59 | "id": "FLAKE8/test-code-2", 60 | "name": "FLAKE8/test-code-2", 61 | "shortDescription": {"text": "FLAKE8/test-code-2"}, 62 | "fullDescription": { 63 | "text": "FLAKE8/test-code-2\ntest description" 64 | }, 65 | "defaultConfiguration": {"level": "error"}, 66 | }, 67 | ], 68 | } 69 | }, 70 | "results": [ 71 | { 72 | "ruleId": "FLAKE8/test-code", 73 | "level": "error", 74 | "message": {"text": "test description"}, 75 | "locations": [ 76 | { 77 | "physicalLocation": { 78 | "artifactLocation": {"uri": "test.py"}, 79 | "region": {"startLine": 1, "startColumn": 2}, 80 | } 81 | } 82 | ], 83 | }, 84 | { 85 | "ruleId": "FLAKE8/test-code-2", 86 | "level": "error", 87 | "message": {"text": "test description"}, 88 | "locations": [ 89 | { 90 | "physicalLocation": { 91 | "artifactLocation": {"uri": "test.py"}, 92 | "region": {"startLine": 1, "startColumn": 2}, 93 | } 94 | } 95 | ], 96 | }, 97 | { 
98 | "ruleId": "FLAKE8/test-code", 99 | "level": "note", 100 | "message": {"text": "test description"}, 101 | "locations": [ 102 | { 103 | "physicalLocation": { 104 | "artifactLocation": {"uri": "test2.py"}, 105 | "region": {"startLine": 3, "startColumn": 4}, 106 | } 107 | } 108 | ], 109 | }, 110 | ], 111 | } 112 | ], 113 | } 114 | self.maxDiff = None 115 | self.assertEqual(actual, expected) 116 | 117 | def test_it_handles_relative_paths(self) -> None: 118 | lintrunner_results = [ 119 | { 120 | "path": "test.py", 121 | "line": 1, 122 | "char": 2, 123 | "code": "FLAKE8", 124 | "severity": "error", 125 | "description": "test description", 126 | "name": "test-code", 127 | }, 128 | ] 129 | actual = convert_to_sarif.produce_sarif(lintrunner_results) 130 | expected_results = [ 131 | { 132 | "ruleId": "FLAKE8/test-code", 133 | "level": "error", 134 | "message": {"text": "test description"}, 135 | "locations": [ 136 | { 137 | "physicalLocation": { 138 | "artifactLocation": {"uri": "test.py"}, 139 | "region": {"startLine": 1, "startColumn": 2}, 140 | } 141 | } 142 | ], 143 | }, 144 | ] 145 | self.assertEqual(actual["runs"][0]["results"], expected_results) 146 | 147 | def test_it_handles_absolute_paths(self) -> None: 148 | lintrunner_results = [ 149 | { 150 | "path": "/path/to/test.py", 151 | "line": 1, 152 | "char": 2, 153 | "code": "FLAKE8", 154 | "severity": "error", 155 | "description": "test description", 156 | "name": "test-code", 157 | }, 158 | ] 159 | actual = convert_to_sarif.produce_sarif(lintrunner_results) 160 | expected_results = [ 161 | { 162 | "ruleId": "FLAKE8/test-code", 163 | "level": "error", 164 | "message": {"text": "test description"}, 165 | "locations": [ 166 | { 167 | "physicalLocation": { 168 | "artifactLocation": {"uri": "file:///path/to/test.py"}, 169 | "region": {"startLine": 1, "startColumn": 2}, 170 | } 171 | } 172 | ], 173 | }, 174 | ] 175 | self.assertEqual(actual["runs"][0]["results"], expected_results) 176 | 177 | 178 | if __name__ == 
"__main__": 179 | unittest.main() 180 | --------------------------------------------------------------------------------