├── .editorconfig
├── .env.example
├── .github
│   ├── CODEOWNERS
│   └── workflows
│       └── github-issue-sync.yml
├── .gitignore
├── .gitlab-ci.yml
├── .rustfmt.toml
├── CODEOWNERS
├── Cargo.lock
├── Cargo.toml
├── Dockerfile
├── Dockerfile.README.md
├── LICENSE
├── README.md
├── deploy-stg.sh
├── helm
│   ├── Chart.yaml
│   ├── templates
│   │   ├── env-secrets.yaml
│   │   ├── processbot-ingress.yaml
│   │   ├── processbot-secret.yaml
│   │   └── processbot.yaml
│   ├── values-parity-prod.yaml
│   ├── values-staging.yaml
│   └── values.yaml
├── rust-toolchain
├── scripts
│   └── run_integration_tests.sh
├── src
│   ├── bot.rs
│   ├── companion.rs
│   ├── config.rs
│   ├── constants.rs
│   ├── core.rs
│   ├── error.rs
│   ├── git_ops.rs
│   ├── github
│   │   ├── client
│   │   │   ├── commit.rs
│   │   │   ├── file.rs
│   │   │   ├── issue.rs
│   │   │   ├── mod.rs
│   │   │   ├── org.rs
│   │   │   └── pull_request.rs
│   │   └── mod.rs
│   ├── gitlab.rs
│   ├── lib.rs
│   ├── logging
│   │   ├── gke.rs
│   │   └── mod.rs
│   ├── macros.rs
│   ├── main.rs
│   ├── merge_request.rs
│   ├── server.rs
│   ├── shell.rs
│   ├── types.rs
│   └── vanity_service.rs
└── tests
    ├── helpers
    │   ├── cmd.rs
    │   ├── constants.rs
    │   ├── mod.rs
    │   └── setup.rs
    ├── merge.rs
    └── snapshots
        └── merge__simple_merge_succeeds.snap
/.editorconfig: --------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*]
4 | end_of_line = lf
5 | insert_final_newline = true
6 | indent_style = tab
7 | trim_trailing_whitespace = true
8 |
9 | [*.{yml,yaml,md}]
10 | indent_style = space
11 | indent_size = 2
12 |
-------------------------------------------------------------------------------- /.env.example: --------------------------------------------------------------------------------
1 | # Set the appropriate values and copy this file to .env
2 |
3 | # --- REQUIRED VARIABLES ---
4 | # The port the server will listen on
5 | WEBHOOK_PORT=8080
6 |
7 | # The organization name or repository owner's username where the GitHub
8 | # App is installed
9 | INSTALLATION_LOGIN=placeholder
10 |
11 | # The path of the database directory. If it's not an absolute path, it will be
12 | # relative to this repository's root.
13 | DB_PATH=db
14 |
15 | # The directory where the repositories will be cloned to. If it's not an
16 | # absolute path, it will be relative to this repository's root.
17 | REPOSITORIES_PATH=repositories
18 |
19 | # The path of the private key. If it's not an absolute path, it will be relative
20 | # to this repository's root.
21 | PRIVATE_KEY_PATH=githubPrivateKey.pem
22 |
23 | # The webhook secret according to the GitHub App's settings.
24 | WEBHOOK_SECRET=placeholder
25 |
26 | # The GitHub App ID according to the GitHub App's settings.
27 | GITHUB_APP_ID=123
28 |
29 | # The GitLab URL from which failing jobs should be detected. You can leave this
30 | # with a placeholder value if you don't plan to use this feature while
31 | # developing.
32 | GITLAB_URL=placeholder
33 |
34 | # GitLab Access Token from $GITLAB_URL used to detect if a failing job has been
35 | # retried on GitLab. You can leave this with a placeholder value if you don't
36 | # plan to use this feature while developing.
37 | GITLAB_ACCESS_TOKEN=placeholder
38 |
39 | # --- OPTIONAL VARIABLES ---
40 | # If you set this variable, the application will receive events from Smee and a
41 | # local server will not be started
42 | # WEBHOOK_PROXY_URL=https://smee.io/parity-processbot
43 |
44 | # Disable organization checks for using the bot. Useful if you're using the bot
45 | # in your own account and not an organization.
46 | # DISABLE_ORG_CHECKS=true
47 |
48 | # Configure which prefix to use for detecting sources in dependencies
49 | # e.g. "ssh://git@github.com" if you're trying it on a private repository
50 | # GITHUB_SOURCE_PREFIX=https://github.com
51 |
52 | # Configure which suffix to use for detecting sources in dependencies
53 | # e.g. ".git" if you're using ssh
54 | # GITHUB_SOURCE_SUFFIX=
55 |
56 | # DEPENDENCY_UPDATE_CONFIGURATION defines which dependencies should be updated
57 | # before merging a pull request in a given repository. Its form is:
58 | # [repository]=[dependency]+...:[repository]=[dependency]+...
59 | # For example, suppose you want to
60 | # - Always update Substrate + Polkadot before merging Cumulus PRs
61 | # - Always update Substrate before merging Polkadot PRs
62 | # It would be written as follows:
63 | # cumulus=polkadot+substrate:polkadot=substrate
64 | # DEPENDENCY_UPDATE_CONFIGURATION=
65 |
-------------------------------------------------------------------------------- /.github/CODEOWNERS: --------------------------------------------------------------------------------
1 | # Lists some code owners.
2 | #
3 | # A codeowner just oversees some part of the codebase. If an owned file is changed then the
4 | # corresponding codeowner receives a review request. An approval of the codeowner might be
5 | # required for merging a PR (depends on repository settings).
6 | #
7 | # For details about syntax, see:
8 | # https://help.github.com/en/articles/about-code-owners
9 | # But here are some important notes:
10 | #
11 | # - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core`
12 | # which can be everywhere.
13 | # - Multiple owners are supported.
14 | # - Either a handle (e.g., @github_user or @github_org/team) or an email can be used. Keep in mind
15 | # that handles might work better because they are more recognizable on GitHub, and,
16 | # unlike an email, you can use them for mentioning.
17 | # - The latest matching rule, if multiple, takes precedence.
18 |
19 | # CI
20 | /.github/ @paritytech/ci
21 | /.gitlab-ci.yml @paritytech/ci @joao-paulo-parity
22 |
23 | # DevOps
24 | /helm/ @paritytech/devops
25 |
-------------------------------------------------------------------------------- /.github/workflows/github-issue-sync.yml: --------------------------------------------------------------------------------
1 | name: GitHub Issue Sync
2 |
3 | on:
4 |   issues:
5 |     types:
6 |       - opened
7 |   workflow_dispatch:
8 |     inputs:
9 |       excludeClosed:
10 |         description: 'Exclude closed issues in the sync.'
11 | type: boolean 12 | default: true 13 | 14 | jobs: 15 | sync: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Generate token 19 | id: generate_token 20 | uses: tibdex/github-app-token@v1 21 | with: 22 | app_id: ${{ secrets.PROJECT_APP_ID }} 23 | private_key: ${{ secrets.PROJECT_APP_KEY }} 24 | - name: Sync issues 25 | uses: paritytech/github-issue-sync@v0.3 26 | with: 27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 28 | PROJECT_TOKEN: ${{ steps.generate_token.outputs.token }} 29 | project: 16 30 | project_field: Tool 31 | project_value: parity-processbot 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/target/ 2 | *.swp 3 | .DS_Store 4 | .env 5 | *.pem 6 | db/ 7 | repositories/ 8 | .idea 9 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | variables: 2 | KUBE_NAMESPACE: "processbot" 3 | CI_REGISTRY: "paritytech" 4 | BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29" 5 | BUILDAH_COMMAND: "buildah --storage-driver overlay2" 6 | CI_IMAGE: "paritytech/ci-linux@sha256:0fe9d110a29ec77ac6fa6507e4af968ea6aced6f6b7ce4deb231696ffc19b715" # 1.70.0-bullseye 2023-06-20 7 | GIT_STRATEGY: fetch 8 | GIT_DEPTH: 3 9 | CARGO_TARGET_DIR: "/ci-cache/${CI_PROJECT_NAME}/targets/${CI_COMMIT_REF_NAME}/${CI_JOB_NAME}" 10 | GITLAB_URL: https://gitlab.parity.io 11 | 12 | default: 13 | image: $CI_IMAGE 14 | interruptible: true 15 | retry: 16 | max: 2 17 | when: 18 | - runner_system_failure 19 | - unknown_failure 20 | - api_failure 21 | tags: 22 | - linux-docker-vm-c2 23 | 24 | .test-refs: &test-refs 25 | rules: 26 | - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs 27 | 28 | .publish-refs: &publish-refs 29 | rules: 30 | - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 31 | - if: $CI_COMMIT_REF_NAME =~ /^stg-v[0-9]+\.[0-9]+.*$/ # i.e. stg-v1.0, stg-v2.1rc1 32 | - if: $CI_COMMIT_REF_NAME == "master" 33 | 34 | .common-refs: &common-refs 35 | rules: 36 | - !reference [.test-refs, rules] 37 | - !reference [.publish-refs, rules] 38 | 39 | .production-refs: &production-refs 40 | rules: 41 | - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 42 | - if: $CI_COMMIT_REF_NAME == "master" 43 | when: manual 44 | 45 | stages: 46 | - check 47 | - test 48 | - build 49 | - dockerize 50 | - deploy 51 | 52 | 53 | #### stage: check 54 | 55 | check: 56 | stage: check 57 | <<: *test-refs 58 | script: 59 | - cargo +nightly fmt --all -- --check 60 | - cargo check --all-targets --workspace 61 | - cargo clippy --all-targets --workspace -- -Dwarnings 62 | 63 | #### stage: test 64 | 65 | integration-tests: 66 | stage: test 67 | <<: *test-refs 68 | script: 69 | - ./scripts/run_integration_tests.sh 70 | 71 | tests: 72 | stage: test 73 | <<: *test-refs 74 | script: 75 | - cargo test --lib 76 | 77 | #### stage: build 78 | 79 | build: 80 | stage: build 81 | <<: *common-refs 82 | script: 83 | - cargo build --release 84 | - mkdir -p ./artifacts/ 85 | - cp ${CARGO_TARGET_DIR}/release/parity-processbot ./artifacts/ 86 | - cp ./Dockerfile ./artifacts/ 87 | artifacts: 88 | name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" 89 | when: on_success 90 | expire_in: 1 hour 91 | paths: 92 | - ./artifacts/ 93 | 94 | #### stage: dockerize 95 | 96 | # build only 97 | docker-build: 98 | stage: dockerize 99 | image: $BUILDAH_IMAGE 100 | <<: *test-refs 101 | variables: 102 | GIT_STRATEGY: none 103 | DOCKER_IMAGE: "${CI_REGISTRY}/${KUBE_NAMESPACE}" 104 | interruptible: true 105 | script: 106 | - cd ./artifacts 107 | - $BUILDAH_COMMAND build 108 | --format=docker 109 | --tag "$DOCKER_IMAGE:$CI_COMMIT_REF_NAME" . 110 | needs: 111 | - job: build 112 | artifacts: true 113 | tags: 114 | - kubernetes-parity-build 115 | 116 | docker-build-push: 117 | stage: dockerize 118 | image: $BUILDAH_IMAGE 119 | <<: *publish-refs 120 | variables: 121 | GIT_STRATEGY: none 122 | DOCKER_IMAGE: "${CI_REGISTRY}/${KUBE_NAMESPACE}" 123 | interruptible: true 124 | script: 125 | - cd ./artifacts 126 | - $BUILDAH_COMMAND build 127 | --format=docker 128 | --tag "$DOCKER_IMAGE:$CI_COMMIT_REF_NAME" . 
129 |     - echo "$Docker_Hub_Pass_Parity" |
130 |       buildah login --username "$Docker_Hub_User_Parity" --password-stdin docker.io
131 |     - $BUILDAH_COMMAND push --format=v2s2 "$DOCKER_IMAGE:$CI_COMMIT_REF_NAME"
132 |   needs:
133 |     - job: build
134 |       artifacts: true
135 |   tags:
136 |     - kubernetes-parity-build
137 |
138 | publish-docker-image-description:
139 |   stage: dockerize
140 |   variables:
141 |     CI_IMAGE: paritytech/dockerhub-description
142 |     DOCKERHUB_REPOSITORY: ${CI_REGISTRY}/${KUBE_NAMESPACE}
143 |     DOCKER_USERNAME: $Docker_Hub_User_Parity
144 |     DOCKER_PASSWORD: $Docker_Hub_Pass_Parity
145 |     README_FILEPATH: $CI_PROJECT_DIR/Dockerfile.README.md
146 |     SHORT_DESCRIPTION: "parity-processbot is a GitHub App which drives the Companion Build System's merge process"
147 |   rules:
148 |     - if: $CI_COMMIT_REF_NAME == "master"
149 |       changes:
150 |         - Dockerfile.README.md
151 |   script:
152 |     - cd / && sh entrypoint.sh
153 |   tags:
154 |     - kubernetes-parity-build
155 |
156 | #### stage: deploy
157 |
158 | .deploy-k8s: &deploy-k8s
159 |   image: paritytech/kubetools:3.5.3
160 |   interruptible: true
161 |   # PROCESSBOT_KEY should be base64 encoded
162 |   script:
163 |     - helm upgrade processbot ./helm
164 |       --install
165 |       --namespace "${KUBE_NAMESPACE}"
166 |       --values "helm/values-${CI_ENVIRONMENT_NAME}.yaml"
167 |       --set "app.INSTALLATION_LOGIN=${INSTALLATION_LOGIN}"
168 |       --set "app.KUBE_NAMESPACE=${KUBE_NAMESPACE}"
169 |       --set "app.DOCKER_TAG=${CI_COMMIT_REF_NAME}"
170 |       --set "app.PROCESSBOT_KEY=${PROCESSBOT_KEY}"
171 |       --set "app.GITHUB_APP_ID=${GITHUB_APP_ID}"
172 |       --set "app.WEBHOOK_SECRET=${WEBHOOK_SECRET}"
173 |       --set "app.GITLAB_URL=${GITLAB_URL}"
174 |       --set "app.GITLAB_ACCESS_TOKEN=${GITLAB_ACCESS_TOKEN}"
175 |       --set "app.DEPENDENCY_UPDATE_CONFIGURATION=${DEPENDENCY_UPDATE_CONFIGURATION}"
176 |
177 | deploy-staging:
178 |   stage: deploy
179 |   <<: *deploy-k8s
180 |   <<: *publish-refs
181 |   environment:
182 |     name: staging
183 |   tags:
184 |     - parity-processbot-stg
185 |
186 |
187 | deploy-production:
188 |   stage: deploy
189 |   <<: *deploy-k8s
190 |   <<: *production-refs
191 |   environment:
192 |     name: parity-prod
193 |   tags:
194 |     - parity-processbot-prod
195 |
196 |
-------------------------------------------------------------------------------- /.rustfmt.toml: --------------------------------------------------------------------------------
1 | hard_tabs = true
2 | max_width = 80
3 | edition = "2018"
4 | imports_granularity = "Crate"
5 | group_imports = "StdExternalCrate"
6 |
-------------------------------------------------------------------------------- /CODEOWNERS: --------------------------------------------------------------------------------
1 | # Lists some code owners.
2 | #
3 | # A codeowner just oversees some part of the codebase. If an owned file is changed then the
4 | # corresponding codeowner receives a review request. An approval of the codeowner might be
5 | # required for merging a PR (depends on repository settings).
6 | #
7 | # For details about syntax, see:
8 | # https://help.github.com/en/articles/about-code-owners
9 | # But here are some important notes:
10 | #
11 | # - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core`
12 | # which can be everywhere.
13 | # - Multiple owners are supported.
14 | # - Either a handle (e.g., @github_user or @github_org/team) or an email can be used. Keep in mind
15 | # that handles might work better because they are more recognizable on GitHub, and,
16 | # unlike an email, you can use them for mentioning.
17 | # - The latest matching rule, if multiple, takes precedence. 18 | 19 | # Global code owners 20 | * @paritytech/opstooling 21 | 22 | # CI 23 | /.gitlab-ci.yml @paritytech/ci @paritytech/opstooling 24 | /helm @paritytech/ci @paritytech/opstooling 25 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "parity-processbot" 3 | version = "0.1.0" 4 | authors = ["Parity Technologies "] 5 | edition = "2018" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | async-recursion = "0.3" 11 | byteorder = "1.3.2" 12 | dotenv = "^0.15" 13 | env_logger = "^0.7" 14 | futures = { version = "0.3", features = ["async-await"] } 15 | futures-util = { version = "0.3", features = ["async-await"] } 16 | hyperx = "1" 17 | log = "^0.4" 18 | reqwest = { version = "0.11.10", features = ["json"] } 19 | rocksdb = "0.14" 20 | serde = { version = "^1.0", features = ["derive"] } 21 | serde_json = "1.0.44" 22 | snafu = { version = "0.6.0" } 23 | tokio = { version = "1.20.4", features = ["full"] } 24 | curl = "0.4" 25 | curl-sys = "0.4" 26 | regex = "1.3" 27 | toml = "0.5" 28 | base64 = "0.11" 29 | chrono = { version = "0.4.19", features = ["serde"] } 30 | itertools = "0.8" 31 | parking_lot = "0.10" 32 | jsonwebtoken = "8.1.0" 33 | lazy_static = "1.4.0" 34 | bincode = "1.2" 35 | ring = "0.16" 36 | base16 = { version = "0.2", features = ["alloc"] } 37 | anyhow = "1.0" 38 | async-std = { version = "1.0.1", features = ["unstable"] } 39 | hyper = { version = "0.14.19", default-features = false, features = ["stream"] } 40 | serde_yaml = "0.8" 41 | thiserror = "1" 42 | url = "2.1.1" 43 | html-escape = "0.2.9" 44 | cargo-lock = "^7.0.1" 45 | eventsource = { git = "https://github.com/paritytech/eventsource", branch = "master" } 46 | urlencoding = "2.1.0" 47 | 48 | [dev-dependencies] 49 | httptest = "0.15.1" 50 | tempfile = "3" 51 | insta = "1.7.1" 52 | flexi_logger = "0.22.5" 53 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/paritytech/ci-linux:production 2 | 3 | COPY parity-processbot /usr/local/bin/parity-processbot 4 | 5 | RUN set -ev; \ 6 | apt-get update; \ 7 | apt-get upgrade -y; \ 8 | apt-get install -y --no-install-recommends \ 9 | pkg-config curl ca-certificates libssl-dev git; \ 10 | git config --global user.name "parity-processbot"; \ 11 | git config --global user.email "<>"; 12 | 13 | CMD ["parity-processbot"] 14 | -------------------------------------------------------------------------------- /Dockerfile.README.md: -------------------------------------------------------------------------------- 1 | # parity-processbot 2 | 3 | parity-processbot is a GitHub App which drives the Companion Build System's merge process 4 | 5 | ### [GitHub](https://github.com/paritytech/parity-processbot) 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 
7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 
80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 
150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | [![GitHub Issue Sync](https://github.com/paritytech/parity-processbot/actions/workflows/github-issue-sync.yml/badge.svg)](https://github.com/paritytech/parity-processbot/actions/workflows/github-issue-sync.yml) 4 | 5 | parity-processbot is a 6 | [GitHub App](https://docs.github.com/en/developers/apps/getting-started-with-apps/about-apps) 7 | which drives the 8 | [Companion Build System](https://github.com/paritytech/parity-processbot/issues/327)'s 9 | merge process. 10 | 11 | Note that parity-processbot works independently of the Companion Build System's 12 | cross-repository integration check, which is done on CI (see 13 | [check_dependent_project](https://github.com/paritytech/pipeline-scripts#check_dependent_project) 14 | for that). 15 | 16 | Before starting to work on this project we recommend reading the 17 | [Implementation section](#implementation). 18 | 19 | # TOC 20 | 21 | - [How it works](#how-it-works) 22 | - [Commands](#commands) 23 | - [Relation to CI](#commands-relation-to-ci) 24 | - [Criteria for merge](#criteria-for-merge) 25 | - [Checks and statuses](#criteria-for-merge-checks-and-statuses) 26 | - [GitHub App](#github-app) 27 | - [Configuration](#github-app-configuration) 28 | - [Installation](#github-app-installation) 29 | - [Setup](#setup) 30 | - [Requirements](#setup-requirements) 31 | - [Environment variables](#setup-environment-variables) 32 | - [Development](#development) 33 | - [Run the application](#development-run) 34 | - [Example workflows](#development-example-workflows) 35 | - [Test repositories](#development-test-repositories) 36 | - [Integration tests](#development-integration-tests) 37 | - [Deployment](#deployment) 38 | - [Logs](#deployment-logs) 39 | - [Environments](#deployment-environments) 40 | 41 | # How it works 42 | 43 | processbot receives [commands](#commands) from pull request comments. 44 | 45 | The merge commands will either merge the pull request right away, if possible, 46 | or merge it automatically once all of its requirements are passing; a guided 47 | description of how that works internally is provided in the 48 | [Implementation section](#implementation). 49 | 50 | # Commands 51 | 52 | The following commands should be posted as pull request comments. **Your whole 53 | comment should only have the command**. 
54 |
55 | - `bot merge`: merge once checks pass
56 | - `bot merge force`: merge immediately while disregarding checks
57 |   ([not all of them can be disregarded](#criteria-for-merge-checks-and-statuses))
58 | - `bot merge cancel`: cancel a pending `bot merge`; does not affect anything
59 |   outside of processbot, only stops the bot from following through with the
60 |   merge
61 | - `bot rebase`: create a merge commit from the target branch into the PR
62 |
63 | Note: The commands will only work if you are a member of the organization where
64 | the GitHub App is installed. Organization membership is fetched from the GitHub
65 | API at the time a comment arrives.
66 |
67 | ## Relation to CI
68 |
69 | processbot categorizes CI statuses as follows, ranked in descending order of
70 | importance:
71 |
72 | ### 1. Required
73 |
74 | Required through GitHub branch protection rules.
75 |
76 | They are meant to be blockers, so they cannot be skipped by any means.
77 |
78 | ### 2. Important
79 |
80 | Derived from GitLab jobs which **do not** have `allow_failure: true`.
81 |
82 | They are relevant but not blockers: they can be skipped with `bot merge force`,
83 | but they will block a plain `bot merge`. Note that the merge of companions follows the
84 | logic of `bot merge`, thus a brittle job in this category might get in the way
85 | of a companion merge.
86 |
87 | ### 3. Fallible
88 |
89 | Derived from GitLab jobs which have `allow_failure: true`.
90 |
91 | Fallible statuses have `allow_failure: true` encoded in their descriptions
92 | ([delivered from vanity-service](https://gitlab.parity.io/parity/websites/vanity-service/-/blob/ddc0af0ec8520a99a35b9e33de57d28d37678686/service.js#L77)),
93 | which allows processbot to detect and disregard them.
94 |
95 | # Criteria for merge
96 |
97 | ## Checks and statuses
98 |
99 | All [Important and above](#commands-relation-to-ci) checks should be green when
100 | using `bot merge`.
101 |
102 | Non-Required statuses can be bypassed by using `bot merge force`.
103 |
104 | # GitHub App
105 |
106 | The GitHub App is necessary for the application to receive
107 | [webhook events](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks)
108 | and access the GitHub API properly.
109 |
110 | Follow GitHub's instructions
111 | for creating a new GitHub App.
112 |
113 |
114 | After creating the app, you should [configure](#github-app-configuration) and
115 | [install it](#github-app-installation) (make sure the
116 | [environment](#setup-environment-variables) is properly set up before using it).
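Webhook deliveries to the app are authenticated: GitHub computes an HMAC-SHA1 tag over each payload using the webhook secret and sends it, hex-encoded and prefixed with `sha1=`, in the `x-hub-signature` header, and processbot rejects payloads whose signature does not verify. The first function below is copied from [src/bot.rs](./src/bot.rs); the wrapper around it is a simplified sketch of the header handling done in `process_webhook_request` (the name `is_authentic` is ours, not the crate's):

```rust
use ring::hmac;

// Verbatim from src/bot.rs: check the payload's HMAC-SHA1 tag against the
// one GitHub sent. `signature` is the raw (hex-decoded) tag.
fn verify_github_webhook_signature(
    secret: &[u8],
    msg: &[u8],
    signature: &[u8],
) -> Result<(), ring::error::Unspecified> {
    let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, secret);
    hmac::verify(&key, msg, signature)
}

// Simplified sketch of the header handling: strip the "sha1=" prefix,
// hex-decode the rest, then verify against the raw request body.
fn is_authentic(secret: &str, payload: &[u8], x_hub_signature: &str) -> bool {
    base16::decode(x_hub_signature.replace("sha1=", "").as_bytes())
        .map(|tag| {
            verify_github_webhook_signature(
                secret.trim().as_bytes(),
                payload,
                &tag,
            )
            .is_ok()
        })
        .unwrap_or(false)
}
```

HMAC-SHA1 is used because that is what GitHub provides in `x-hub-signature`; `ring` accordingly only exposes it as `HMAC_SHA1_FOR_LEGACY_USE_ONLY`.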
117 |
118 | ## Configuration
119 |
120 | ### Repository permissions
121 |
122 | - Contents: Read & write
123 |   - Enables pushing commits for updating companions after their dependencies
124 |     have been merged
125 | - Issues: Read & write
126 |   - Enables commenting on pull requests
127 | - Pull requests: Read & write
128 |   - Enables merging pull requests
129 | - Commit statuses: Read-only
130 |   - Enables fetching the CI statuses before merge
131 | - Checks: Read-only
132 |   - Enables fetching the checks' statuses before merge
133 | - Workflows: Read & write
134 |   - Allows the bot to push commits to workflow files (see https://github.com/paritytech/cumulus/pull/1436#issuecomment-1181637222)
135 |
136 | ### Organization permissions
137 |
138 | - Members: Read-only
139 |   - Enables fetching the command requester's organization membership even if
140 |     their membership is private
141 |
142 | ### Events
143 |
144 | - Issue comment
145 |   - Enables reacting to [commands](#commands) from GitHub comments
146 | - Check run, Status, Workflow job
147 |   - Used to trigger the processing of pending pull requests
148 |
149 | ## Installation
150 |
151 | Having [created](#github-app) and [configured](#github-app-configuration) the
152 | GitHub App, install it in a repository through
153 | `https://github.com/settings/apps/${APP}/installations`.
154 |
155 | If processbot has to merge PRs into protected branches which have the
156 | "Restrict who can push to matching branches" rule enabled, it must be
157 | added to the allowlist for that rule, otherwise merging will not work
158 | ([example](https://github.com/paritytech/polkadot/pull/4122#issuecomment-948680155)).
159 | Add the app to the rule's allowlist as
160 | demonstrated below:
161 |
162 | ![image](https://user-images.githubusercontent.com/77391175/138313741-b33b86a5-ee58-4031-a7da-12703ea9958e.png)
163 |
164 | # Setup
165 |
166 | ## Requirements
167 |
168 | - Rust for running the application
169 |   - [rustup](https://rustup.rs/) is the recommended way of setting up a Rust
170 |     toolchain
171 | - libssl for the HTTPS request library
172 | - libclang for building the database (RocksDB)
173 | - git for cloning companions and updating them
174 |
175 | ## Environment variables
176 |
177 | All relevant environment variables are documented in the
178 | [.env.example](./.env.example) file. For development you're welcome to copy that
179 | file to `.env` so that all values will be loaded automatically once the
180 | application starts.
181 |
182 | # Development
183 |
184 | ## Run the application
185 |
186 | 1. [Set up the GitHub App](#github-app)
187 | 2. [Set up the application](#setup)
188 |
189 |    During development it's handy to use a [smee.io](https://smee.io/) proxy,
190 |    through the `WEBHOOK_PROXY_URL` environment variable, for receiving GitHub
191 |    Webhook Events in your local server instance.
192 |
193 | 3. Run the project with `cargo run`
194 | 4. Optionally [try out the example workflows](#development-example-workflows) in
195 |    the repositories where you have installed the app or the
196 |    [test repositories](#development-test-repositories) after a deployment
197 |
198 | ## Example workflows
199 |
200 | ### Single merge use-case
201 |
202 | Example: https://github.com/paritytech/main-for-processbot-staging/pull/55
203 |
204 | Steps:
205 |
206 | 1. Create a pull request in the repositories where the app is installed
207 | 2. Comment `bot merge`
208 |
209 | ### Companion use-case
210 |
211 | Example:
212 | - Repository A: https://github.com/paritytech/main-for-processbot-staging/pull/53
213 | - Repository B: https://github.com/paritytech/companion-for-processbot-staging/pull/31
214 |
215 | Steps:
216 |
217 | 1. Install the app in Repository A
218 | 2. Install the app in Repository B
219 |    - Repository B needs to be a dependency of Repository A
220 |      ([example](https://github.com/paritytech/companion-for-processbot-staging/blob/8ff68ae8287342f2a4581b1950913b4e9e88a0e0/Cargo.toml#L8))
221 | 3. Create a pull request on Repository B and copy its link
222 | 4. Create a pull request on Repository A and put `companion: [link from step 3]`
223 |    in its description
224 | 5. Comment `bot merge` on the pull request in Repository A
225 | 6. Observe that the pull request in Repository A will be merged first and
226 |    the pull request on Repository B will be merged afterwards
227 |
228 | ## Test repositories
229 |
230 | The staging instance is installed in the following repositories:
231 |
232 | - https://github.com/paritytech/main-for-processbot-staging
233 | - https://github.com/paritytech/companion-for-processbot-staging
234 |
235 | The GitHub App for staging is managed by
236 | [paritytech](http://github.com/paritytech)'s Organizational GitHub Admins.
237 |
238 | ## Integration tests
239 |
240 | The integration tests are executed as follows:
241 |
242 | ```sh
243 | ./scripts/run_integration_tests.sh
244 | ```
245 |
246 | We use [insta](https://github.com/mitsuhiko/insta#introduction) for integration
247 | tests' snapshots. After creating or modifying a snapshot, use `cargo insta
248 | review` to manage the results.
249 |
250 | # Deployment
251 |
252 | All of the relevant configuration for deployment lives in the [./helm](./helm)
253 | folder. The values for each specific environment are in
254 | `helm/values-${ENVIRONMENT}.yaml`. If you add a value, it needs to be used in
255 | [helm/templates/processbot.yaml](helm/templates/processbot.yaml).
256 |
257 | ## Logs
258 |
259 | See
260 |
261 | ## Environments
262 |
263 | When you push a deployment tag to GitHub, it will be
264 | [mirrored to GitLab](https://gitlab.parity.io/parity/parity-processbot) and then
265 | its [CI pipeline](./.gitlab-ci.yml) will be run for deploying the app.
266 |
267 | The application can be deployed to the following environments:
268 |
269 | - Production: push a tag with the pattern `/^v[0-9]+\.[0-9]+.*$/`, e.g. `v1.1`
270 |
271 |   The production instance is installed in
272 |   [Substrate](https://github.com/paritytech/substrate),
273 |   [Polkadot](https://github.com/paritytech/polkadot) and
274 |   [Cumulus](https://github.com/paritytech/cumulus).
275 |
276 | - Staging: push a tag with the pattern `/^stg-v[0-9]+\.[0-9]+.*$/` (as matched
277 |   by [.gitlab-ci.yml](./.gitlab-ci.yml)), e.g. `stg-v0.6`
278 |
279 |   The staging instance is installed in the
280 |   [test repositories](#development-test-repositories).
281 |
282 | # Implementation
283 |
284 | Before reading any of this, we strongly recommend having a good understanding
285 | of the Companion Build System by
286 | [consulting its explanation](https://github.com/paritytech/parity-processbot/issues/327).
287 |
288 | A
289 | [web server](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/server.rs#L88)
290 | (set up from
291 | [main](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/main.rs#L107))
292 | receives
293 | [GitHub Webhook events](https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads)
294 | as HTTP `POST` requests.
295 |
296 | When someone comments in a pull request, the
297 | [issue comment event is parsed](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L220)
298 | and from it a
299 | [command is extracted](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L906)
300 | and
301 | [handled](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L752).
302 |
303 | The merge chain is
304 | [started](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L761)
305 | from a
306 | [merge command](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L785). If the pull request at the root of the chain is
307 | [ready to be merged, it will be merged immediately](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L787),
308 | otherwise it will
309 | [be saved to the database](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L813)
310 | and
311 | [merged later once its requirements are ready](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L580);
312 | by "requirements" we mean its statuses, checks and dependencies
313 | ([the root of the chain is started without dependencies](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L777-L778),
314 | hence it can be merged first).
315 |
316 | After a pull request is merged,
317 | [its dependents are checked](https://github.com/paritytech/parity-processbot/blob/4b36d6dcb8dd6d2ba9063c28c1c61bff503c364d/src/webhook.rs#L831)
318 | and possibly merged if all of their requirements are ready (note that a pull
319 | request may depend on more than one pull request, as
320 | [explained in the presentation at 4:48](https://drive.google.com/file/d/1E4Fd3aO2QRJuoUBI4j0Zp4027yGeHeer/view?t=4m48s)
321 | or
322 | [slide number 6](https://docs.google.com/presentation/d/12ksmejR_UXC1tIHD2f4pQQZ1uw5NK3n8enmwkTCPOpw/edit?usp=sharing)).
323 | This process is repeated for each item that is merged throughout the merge
324 | chain (referred to as "Phase 1 and Phase 2"
325 | [in the presentation at 25:48](https://drive.google.com/file/d/1E4Fd3aO2QRJuoUBI4j0Zp4027yGeHeer/view?t=25m48s)
326 | or
327 | [slide number 21](https://docs.google.com/presentation/d/12ksmejR_UXC1tIHD2f4pQQZ1uw5NK3n8enmwkTCPOpw/edit?usp=sharing)).
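As a concrete illustration of the command-extraction step described above, here is a condensed, self-contained sketch of the comment parsing (the real implementation is `parse_bot_comment_from_text` in [src/bot.rs](./src/bot.rs); the `Command` enum here is a simplified stand-in for the crate's `CommentCommand`):

```rust
#[derive(Debug, PartialEq)]
enum Command {
    Merge { force: bool },
    CancelMerge,
    Rebase,
}

// The whole comment, lowercased and trimmed, must be exactly one known
// command; anything else is ignored.
fn parse_command(comment_body: &str) -> Option<Command> {
    match comment_body.to_lowercase().trim() {
        "bot merge" => Some(Command::Merge { force: false }),
        "bot merge force" => Some(Command::Merge { force: true }),
        "bot merge cancel" => Some(Command::CancelMerge),
        "bot rebase" => Some(Command::Rebase),
        _ => None,
    }
}

fn main() {
    assert_eq!(
        parse_command(" Bot Merge \n"),
        Some(Command::Merge { force: false })
    );
    // Extra words disqualify the comment, hence "your whole comment should
    // only have the command".
    assert_eq!(parse_command("bot merge please"), None);
}
```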
328 |
-------------------------------------------------------------------------------- /deploy-stg.sh: --------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | timestamp=$(date +%s)
4 |
5 | # remember initial branch
6 | dev_branch=$(git rev-parse --abbrev-ref HEAD)
7 |
8 | # replace possible "/" with "-"
9 | # Docker uses the git tag for tagging the image, so if the tag contains a "/"
10 | # Docker treats it as part of a path rather than as part of the tag name.
11 | dev_branch_sanitized=${dev_branch/\//-}
12 |
13 | stg_branch="stg-v0.0.${timestamp}-${dev_branch_sanitized}"
14 |
15 | # create a branch on remote
16 | git push origin HEAD:"$stg_branch"
17 |
18 | # wait a bit before deleting the branch, so GitLab triggers the pipeline
19 | sleep 10
20 |
21 | git push origin --delete "$stg_branch"
22 |
-------------------------------------------------------------------------------- /helm/Chart.yaml: --------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: processbot
3 | description: Helm chart for parity-processbot
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | version: 0.1.0
18 |
19 | # This is the version number of the application being deployed. This version number should be
20 | # incremented each time you make changes to the application.
21 | appVersion: 0.1.16 22 | -------------------------------------------------------------------------------- /helm/templates/env-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: env-secrets 5 | namespace: processbot 6 | type: Opaque 7 | stringData: 8 | WEBHOOK_SECRET: {{ .Values.app.WEBHOOK_SECRET | quote }} 9 | -------------------------------------------------------------------------------- /helm/templates/processbot-ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Ingress for the webhook 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | name: processbot-ingress 7 | namespace: {{ .Values.app.KUBE_NAMESPACE }} 8 | labels: 9 | app: parity-processbot 10 | annotations: 11 | {{- toYaml .Values.ingress.annotations | nindent 4 }} 12 | spec: 13 | rules: 14 | - host: {{ .Values.ingress.domain }} 15 | http: 16 | paths: 17 | - backend: 18 | service: 19 | name: parity-processbot 20 | port: 21 | number: {{ .Values.app.WEBHOOK_PORT }} 22 | path: / 23 | pathType: ImplementationSpecific 24 | tls: 25 | - hosts: 26 | - {{ .Values.ingress.domain }} 27 | secretName: {{ .Values.ingress.domain }} 28 | -------------------------------------------------------------------------------- /helm/templates/processbot-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: processbot-key 5 | namespace: processbot 6 | type: Opaque 7 | data: 8 | PROCESSBOT_KEY: {{ .Values.app.PROCESSBOT_KEY | quote }} 9 | -------------------------------------------------------------------------------- /helm/templates/processbot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: parity-processbot 6 | namespace: {{ .Values.app.KUBE_NAMESPACE }} 7 | labels: 8 | app: parity-processbot 9 | spec: 10 | ports: 11 | - name: backend 12 | port: {{ .Values.app.WEBHOOK_PORT }} 13 | selector: 14 | app: parity-processbot 15 | --- 16 | apiVersion: apps/v1 17 | kind: StatefulSet 18 | metadata: 19 | name: parity-processbot 20 | namespace: {{ .Values.app.KUBE_NAMESPACE }} 21 | labels: 22 | app: parity-processbot 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: parity-processbot 27 | serviceName: parity-processbot 28 | updateStrategy: 29 | type: RollingUpdate 30 | replicas: 1 31 | template: 32 | metadata: 33 | labels: 34 | app: parity-processbot 35 | spec: 36 | volumes: 37 | - name: processbot-key-volume 38 | secret: 39 | secretName: processbot-key 40 | defaultMode: 256 41 | - name: storage 42 | persistentVolumeClaim: 43 | claimName: processbot-pv-claim 44 | containers: 45 | - name: parity-processbot 46 | imagePullPolicy: Always 47 | image: paritytech/processbot:{{ .Values.app.DOCKER_TAG }} 48 | volumeMounts: 49 | - name: processbot-key-volume 50 | mountPath: "/etc/processbot-key" 51 | readOnly: true 52 | - name: storage 53 | mountPath: {{ .Values.config.storagePath }} 54 | ports: 55 | - name: backend 56 | containerPort: {{ .Values.app.WEBHOOK_PORT }} 57 | readinessProbe: 58 | httpGet: 59 | path: /health 60 | port: {{ .Values.app.WEBHOOK_PORT }} 61 | initialDelaySeconds: 15 62 | periodSeconds: 5 63 | livenessProbe: 64 | httpGet: 65 | path: /health 66 | port: {{ .Values.app.WEBHOOK_PORT }} 67 | initialDelaySeconds: 15 68 | periodSeconds: 5 69 | env: 70 | - name: 
RUST_BACKTRACE 71 | value: full 72 | - name: RUST_LOG 73 | value: debug 74 | - name: GITLAB_URL 75 | value: {{ .Values.app.GITLAB_URL }} 76 | - name: GITLAB_ACCESS_TOKEN 77 | value: {{ .Values.app.GITLAB_ACCESS_TOKEN }} 78 | - name: INSTALLATION_LOGIN 79 | value: {{ .Values.app.INSTALLATION_LOGIN }} 80 | - name: PRIVATE_KEY_PATH 81 | value: "/etc/processbot-key/PROCESSBOT_KEY" 82 | - name: WEBHOOK_PORT 83 | value: {{ quote .Values.app.WEBHOOK_PORT }} 84 | - name: START_FROM_CWD 85 | value: {{ quote .Values.app.START_FROM_CWD }} 86 | - name: GITHUB_APP_ID 87 | value: {{ quote .Values.app.GITHUB_APP_ID }} 88 | - name: DEPENDENCY_UPDATE_CONFIGURATION 89 | value: {{ quote .Values.app.DEPENDENCY_UPDATE_CONFIGURATION }} 90 | - name: DB_PATH 91 | value: {{ .Values.config.storagePath }}/db 92 | - name: REPOSITORIES_PATH 93 | value: {{ .Values.config.storagePath }}/repositories 94 | - name: WEBHOOK_SECRET 95 | valueFrom: 96 | secretKeyRef: 97 | name: env-secrets 98 | key: WEBHOOK_SECRET 99 | 100 | --- 101 | apiVersion: v1 102 | kind: PersistentVolumeClaim 103 | metadata: 104 | name: processbot-pv-claim 105 | spec: 106 | accessModes: 107 | - ReadWriteOnce 108 | resources: 109 | requests: 110 | storage: 10Gi 111 | -------------------------------------------------------------------------------- /helm/values-parity-prod.yaml: -------------------------------------------------------------------------------- 1 | environment: production 2 | ingress: 3 | domain: processbot.parity.io 4 | annotations: 5 | cert-manager.io/cluster-issuer: letsencrypt-http01 6 | kubernetes.io/ingress.class: traefik-external 7 | traefik.ingress.kubernetes.io/router.entrypoints: web,websecure 8 | traefik.ingress.kubernetes.io/router.tls: "true" 9 | -------------------------------------------------------------------------------- /helm/values-staging.yaml: -------------------------------------------------------------------------------- 1 | environment: staging 2 | ingress: 3 | domain: processbot.parity-stg.parity.io 4 | annotations: 5 | cert-manager.io/cluster-issuer: letsencrypt-http01 6 | external-dns.alpha.kubernetes.io/target: traefik-external.parity-stg.parity.io. 
7 |     kubernetes.io/ingress.class: traefik-external
8 |     traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
9 |     traefik.ingress.kubernetes.io/router.tls: "true"
10 |
-------------------------------------------------------------------------------- /helm/values.yaml: --------------------------------------------------------------------------------
1 | app:
2 |   WEBHOOK_PORT: 8080
3 |   INSTALLATION_LOGIN: from-gitlab-vars
4 |   DOCKER_TAG: from-gitlab-vars
5 |   PROCESSBOT_KEY: from-gitlab-vars
6 |   WEBHOOK_SECRET: from-gitlab-vars
7 |   KUBE_NAMESPACE: from-gitlab-vars
8 |   START_FROM_CWD: true
9 |
10 | config:
11 |   storagePath: /storage
12 |
-------------------------------------------------------------------------------- /rust-toolchain: --------------------------------------------------------------------------------
1 | stable
2 |
-------------------------------------------------------------------------------- /scripts/run_integration_tests.sh: --------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | # GIT_DAEMON_BASE_PATH_TRACKER collects all the --base-path values used for the Git
4 | # daemon instances in the tests and cleans them up when the tests end
5 | git_daemon_base_path_tracker="$(mktemp)"
6 |
7 | on_exit() {
8 |   # kill lingering git daemon instances by their "base-path" argument because
9 |   # the whole process tree is not finished when the main process exits;
10 |   # targeting the process tree does not work either
11 |   while IFS= read -r base_path; do
12 |     >/dev/null pkill -f -- "--base-path=$base_path"
13 |   done < "$git_daemon_base_path_tracker"
14 |
15 |   rm "$git_daemon_base_path_tracker"
16 | }
17 | trap on_exit EXIT
18 |
19 | # --test '*' means only run the integration tests
20 | # https://github.com/rust-lang/cargo/issues/8396#issuecomment-713126649
21 | # --nocapture is used so that we see the commands being executed interleaved within the logged info
22 | GIT_DAEMON_BASE_PATH_TRACKER="$git_daemon_base_path_tracker" cargo test --test '*' -- --nocapture
23 |
-------------------------------------------------------------------------------- /src/bot.rs: --------------------------------------------------------------------------------
1 | use std::{sync::Arc, time::Duration};
2 |
3 | use futures::StreamExt;
4 | use hyper::{Body, Request, Response, StatusCode};
5 | use ring::hmac;
6 | use snafu::{OptionExt, ResultExt};
7 | use tokio::{sync::Mutex, time::sleep};
8 |
9 | use crate::{
10 |     core::{
11 |         handle_command, process_commit_checks_and_statuses, AppState,
12 |         CommentCommand, MergeCommentCommand, PullRequestMergeCancelOutcome,
13 |     },
14 |     error::{self, handle_error, Error, PullRequestDetails},
15 |     github::*,
16 |     merge_request::{
17 |         cleanup_merge_request, MergeRequest, MergeRequestCleanupReason,
18 |     },
19 |     types::Result,
20 |     WEBHOOK_PARSING_ERROR_TEMPLATE,
21 | };
22 |
23 | fn verify_github_webhook_signature(
24 |     secret: &[u8],
25 |     msg: &[u8],
26 |     signature: &[u8],
27 | ) -> Result<(), ring::error::Unspecified> {
28 |     let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, secret);
29 |     hmac::verify(&key, msg, signature)
30 | }
31 |
32 | pub async fn handle_http_request_for_bot(
33 |     req: Request<Body>,
34 |     state: Arc<Mutex<AppState>>,
35 | ) -> Result<Response<Body>> {
36 |     if req.uri().path() == "/webhook" {
37 |         let state = &*state.lock().await;
38 |
39 |         if let Some((merge_cancel_outcome, err)) =
40 |             match process_webhook_request(req, state).await {
41 |                 Ok((merge_cancel_outcome, result)) => match result {
42 |                     Ok(_) => None,
43 |                     Err(err) => Some((merge_cancel_outcome, err)),
Some((merge_cancel_outcome, err)), 44 | }, 45 | Err(err) => { 46 | Some((PullRequestMergeCancelOutcome::WasNotCancelled, err)) 47 | } 48 | } { 49 | handle_error(merge_cancel_outcome, err, state).await 50 | }; 51 | 52 | Response::builder() 53 | .status(StatusCode::OK) 54 | .body(Body::from("")) 55 | .ok() 56 | .context(error::Message { 57 | msg: "Error building response".to_owned(), 58 | }) 59 | } else if req.uri().path() == "/health" { 60 | Response::builder() 61 | .status(StatusCode::OK) 62 | .body(Body::from("OK")) 63 | .ok() 64 | .context(error::Message { 65 | msg: "Healthcheck".to_owned(), 66 | }) 67 | } else { 68 | Response::builder() 69 | .status(StatusCode::NOT_FOUND) 70 | .body(Body::from("Not found.")) 71 | .ok() 72 | .context(error::Message { 73 | msg: "Error building response".to_owned(), 74 | }) 75 | } 76 | } 77 | 78 | pub async fn process_webhook_request( 79 | mut req: Request, 80 | state: &AppState, 81 | ) -> Result<(PullRequestMergeCancelOutcome, Result<()>)> { 82 | let mut msg_bytes = vec![]; 83 | while let Some(item) = req.body_mut().next().await { 84 | msg_bytes.extend_from_slice(&item.ok().context(error::Message { 85 | msg: "Error getting bytes from request body".to_owned(), 86 | })?); 87 | } 88 | 89 | let webhook_signature = req 90 | .headers() 91 | .get("x-hub-signature") 92 | .context(error::Message { 93 | msg: "Missing x-hub-signature".to_string(), 94 | })? 95 | .to_str() 96 | .ok() 97 | .context(error::Message { 98 | msg: "Error parsing x-hub-signature".to_owned(), 99 | })? 100 | .replace("sha1=", ""); 101 | let sig_bytes = base16::decode(webhook_signature.as_bytes()).ok().context( 102 | error::Message { 103 | msg: "Error decoding x-hub-signature".to_owned(), 104 | }, 105 | )?; 106 | 107 | let AppState { config, .. } = state; 108 | 109 | verify_github_webhook_signature( 110 | config.webhook_secret.trim().as_bytes(), 111 | &msg_bytes, 112 | &sig_bytes, 113 | ) 114 | .ok() 115 | .context(error::Message { 116 | msg: "Validation signature does not match".to_owned(), 117 | })?; 118 | 119 | log::info!("Parsing payload {}", String::from_utf8_lossy(&msg_bytes)); 120 | match serde_json::from_slice::(&msg_bytes) { 121 | Ok(payload) => Ok(handle_github_payload(payload, state).await), 122 | Err(err) => { 123 | // If this comment was originated from a Bot, then acting on it might make the bot 124 | // to respond to itself recursively, as happened on 125 | // https://github.com/paritytech/substrate/pull/8409. Therefore we'll only act on 126 | // this error if it's known for sure it has been initiated only by a User comment. 127 | let pr_details = serde_json::from_slice::< 128 | DetectUserCommentPullRequest, 129 | >(&msg_bytes) 130 | .ok() 131 | .and_then(|detected| detected.get_pull_request_details()); 132 | 133 | if let Some(pr_details) = pr_details { 134 | Err(Error::Message { 135 | msg: format!( 136 | WEBHOOK_PARSING_ERROR_TEMPLATE!(), 137 | err, 138 | String::from_utf8_lossy(&msg_bytes) 139 | ), 140 | } 141 | .with_pull_request_details(pr_details)) 142 | } else { 143 | log::info!("Ignoring payload parsing error",); 144 | Ok((PullRequestMergeCancelOutcome::ShaNotFound, Ok(()))) 145 | } 146 | } 147 | } 148 | } 149 | 150 | pub async fn handle_github_payload( 151 | payload: GithubWebhookPayload, 152 | state: &AppState, 153 | ) -> (PullRequestMergeCancelOutcome, Result<()>) { 154 | let (result, sha) = match payload { 155 | GithubWebhookPayload::IssueComment { 156 | action: GithubIssueCommentAction::Unknown, 157 | .. 
158 | } => (Ok(()), None), 159 | GithubWebhookPayload::IssueComment { 160 | action: GithubIssueCommentAction::Created, 161 | comment, 162 | issue, 163 | repository, 164 | } => { 165 | if issue.pull_request.is_none() 166 | || comment.user.type_field == GithubUserType::Bot 167 | { 168 | (Ok(()), None) 169 | } else { 170 | let (sha, result) = handle_pull_request_comment( 171 | state, 172 | &comment, 173 | issue.number, 174 | &issue.html_url, 175 | repository, 176 | ) 177 | .await; 178 | 179 | ( 180 | result.map_err(|err| match err { 181 | Error::WithPullRequestDetails { .. } => err, 182 | err => { 183 | if let Some(details) = 184 | issue.get_pull_request_details() 185 | { 186 | err.with_pull_request_details(details) 187 | } else { 188 | err 189 | } 190 | } 191 | }), 192 | sha, 193 | ) 194 | } 195 | } 196 | GithubWebhookPayload::CommitStatus { sha, state: status } => ( 197 | match status { 198 | GithubCommitStatusState::Unknown => Ok(()), 199 | _ => process_commit_checks_and_statuses(state, &sha).await, 200 | }, 201 | Some(sha), 202 | ), 203 | GithubWebhookPayload::CheckRun { 204 | check_run: 205 | GithubCheckRun { 206 | status, 207 | head_sha: sha, 208 | .. 209 | }, 210 | .. 211 | } => ( 212 | match status { 213 | GithubCheckRunStatus::Completed => { 214 | process_commit_checks_and_statuses(state, &sha).await 215 | } 216 | _ => Ok(()), 217 | }, 218 | Some(sha), 219 | ), 220 | GithubWebhookPayload::WorkflowJob { 221 | workflow_job: 222 | GithubWorkflowJob { 223 | head_sha: sha, 224 | conclusion, 225 | }, 226 | .. 227 | } => ( 228 | if conclusion.is_some() { 229 | process_commit_checks_and_statuses(state, &sha).await 230 | } else { 231 | Ok(()) 232 | }, 233 | Some(sha), 234 | ), 235 | }; 236 | 237 | // From this point onwards we'll clean the SHA from the database if this is a error which stops 238 | // the merge process 239 | 240 | // Without the SHA we'll not be able to fetch the database for more context, so exit early 241 | let sha = match sha { 242 | Some(sha) => sha, 243 | None => return (PullRequestMergeCancelOutcome::ShaNotFound, result), 244 | }; 245 | 246 | // If it's not an error then don't bother with going further 247 | let err = match result { 248 | Ok(_) => { 249 | return (PullRequestMergeCancelOutcome::WasNotCancelled, Ok(())) 250 | } 251 | Err(err) => err, 252 | }; 253 | 254 | // If this error does not interrupt the merge process, then don't bother with going further 255 | if !err.stops_merge_attempt() { 256 | log::info!( 257 | "SHA {} did not have its merge attempt stopped because error does not stop the merge attempt {:?}", 258 | sha, 259 | err 260 | ); 261 | return (PullRequestMergeCancelOutcome::WasNotCancelled, Err(err)); 262 | }; 263 | 264 | log::info!( 265 | "SHA {} will have its merge attempt stopped due to {:?}", 266 | sha, 267 | err 268 | ); 269 | 270 | match state.db.get(sha.as_bytes()) { 271 | Ok(Some(bytes)) => { 272 | match bincode::deserialize::(&bytes) 273 | .context(error::Bincode) 274 | { 275 | Ok(mr) => { 276 | let merge_cancel_outcome = match cleanup_merge_request( 277 | state, 278 | &sha, 279 | &mr.owner, 280 | &mr.repo, 281 | mr.number, 282 | &MergeRequestCleanupReason::Cancelled, 283 | ) 284 | .await 285 | { 286 | Ok(_) => { 287 | log::info!( 288 | "Merge of {} (sha {}) was cancelled due to {:?}", 289 | &mr.html_url, 290 | sha, 291 | err 292 | ); 293 | PullRequestMergeCancelOutcome::WasCancelled 294 | } 295 | Err(err) => { 296 | log::error!( 297 | "Failed to cancel merge of {} (sha {}) in handle_payload due to {:?}", 298 | &mr.html_url, 299 | sha, 300 | 
err 301 | ); 302 | PullRequestMergeCancelOutcome::WasNotCancelled 303 | } 304 | }; 305 | 306 | ( 307 | merge_cancel_outcome, 308 | Err(err.with_pull_request_details( 309 | PullRequestDetails { 310 | owner: mr.owner, 311 | repo: mr.repo, 312 | number: mr.number, 313 | }, 314 | )), 315 | ) 316 | } 317 | Err(db_err) => { 318 | log::error!( 319 | "Failed to parse {} from the database due to {:?}", 320 | &sha, 321 | db_err 322 | ); 323 | (PullRequestMergeCancelOutcome::WasNotCancelled, Err(err)) 324 | } 325 | } 326 | } 327 | Ok(None) => (PullRequestMergeCancelOutcome::ShaNotFound, Err(err)), 328 | Err(db_err) => { 329 | log::info!( 330 | "Failed to fetch {} from the database due to {:?}", 331 | sha, 332 | db_err 333 | ); 334 | (PullRequestMergeCancelOutcome::WasNotCancelled, Err(err)) 335 | } 336 | } 337 | } 338 | 339 | /// Parse bot commands in pull request comments. 340 | /// The first member of the returned tuple is the relevant commit SHA to invalidate from the 341 | /// database in case of errors. 342 | /// The second member of the returned tuple is the result of handling the parsed command. 343 | async fn handle_pull_request_comment( 344 | state: &AppState, 345 | comment: &GithubIssueComment, 346 | number: i64, 347 | html_url: &str, 348 | repo: GithubIssueRepository, 349 | ) -> (Option<String>, Result<()>) { 350 | let body = &comment.body; 351 | let requested_by = &comment.user.login; 352 | 353 | let cmd = match parse_bot_comment_from_text(body) { 354 | Some(cmd) => cmd, 355 | None => return (None, Ok(())), 356 | }; 357 | 358 | log::info!("{:?} requested by {} in {}", cmd, requested_by, html_url); 359 | 360 | let AppState { 361 | gh_client, config, .. 362 | } = state; 363 | 364 | if !config.disable_org_checks { 365 | if let Err(err) = 366 | gh_client.org_member(&repo.owner.login, requested_by).await 367 | { 368 | return (None, Err(err)); 369 | } 370 | } 371 | 372 | if let CommentCommand::Merge(_) = cmd { 373 | // We've noticed the bot failing for no human-discernible reason when, for 374 | // instance, it complained that the pull request was not mergeable when, in 375 | // fact, it seemed to be, if one were to guess what the state of the Github 376 | // API was at the time the response was received with "second" precision. For 377 | // lack of insight into the Github servers, it's assumed that those 378 | // failures happened because the Github API did not update fast enough and 379 | // therefore the state was invalid when the request happened, but it got 380 | // cleared shortly after (possibly microseconds after, hence it is not 381 | // discernible at "second" resolution). As a workaround we'll wait long 382 | // enough so that Github hopefully has time to update the API and make our 383 | // merges succeed. A proper workaround would also entail retrying every X 384 | // seconds for recoverable errors such as "required statuses are missing or 385 | // pending".
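// As a hedged sketch of the retry-based workaround suggested above (not part of this
// codebase; the helper and its parameters are hypothetical):
//
//     async fn retry_with_delay<T, F, Fut>(mut attempt: F, retries: usize, delay: Duration) -> Result<T>
//     where
//         F: FnMut() -> Fut,
//         Fut: std::future::Future<Output = Result<T>>,
//     {
//         let mut last_err = None;
//         for _ in 0..retries {
//             match attempt().await {
//                 Ok(value) => return Ok(value),
//                 Err(err) => {
//                     // Assume the error is recoverable and give the GitHub API time to settle
//                     last_err = Some(err);
//                     sleep(delay).await;
//                 }
//             }
//         }
//         Err(last_err.expect("retries should be non-zero"))
//     }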
386 | sleep(Duration::from_millis(config.merge_command_delay)).await; 387 | }; 388 | 389 | let pr = match gh_client 390 | .pull_request(&repo.owner.login, &repo.name, number) 391 | .await 392 | { 393 | Ok(pr) => pr, 394 | Err(err) => return (None, Err(err)), 395 | }; 396 | 397 | if let Err(err) = gh_client 398 | .acknowledge_issue_comment( 399 | &pr.base.repo.owner.login, 400 | &pr.base.repo.name, 401 | comment.id, 402 | ) 403 | .await 404 | { 405 | log::error!( 406 | "Failed to acknowledge comment on {} due to {}", 407 | pr.html_url, 408 | err 409 | ); 410 | } 411 | 412 | let result = handle_command(state, &cmd, &pr, requested_by) 413 | .await 414 | .map_err(|err| { 415 | err.with_pull_request_details(PullRequestDetails { 416 | owner: (&pr.base.repo.owner.login).into(), 417 | repo: (&pr.base.repo.name).into(), 418 | number, 419 | }) 420 | }); 421 | 422 | let sha = match cmd { 423 | CommentCommand::Merge(_) => Some(pr.head.sha), 424 | _ => None, 425 | }; 426 | 427 | (sha, result) 428 | } 429 | 430 | pub fn parse_bot_comment_from_text(text: &str) -> Option<CommentCommand> { 431 | let text = text.to_lowercase(); 432 | let text = text.trim(); 433 | 434 | let cmd = match text { 435 | "bot merge" => CommentCommand::Merge(MergeCommentCommand::Normal), 436 | "bot merge force" => CommentCommand::Merge(MergeCommentCommand::Force), 437 | "bot merge cancel" => CommentCommand::CancelMerge, 438 | "bot rebase" => CommentCommand::Rebase, 439 | _ => return None, 440 | }; 441 | 442 | Some(cmd) 443 | } 444 | -------------------------------------------------------------------------------- /src/companion.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashSet, 3 | iter::{FromIterator, Iterator}, 4 | path::Path, 5 | time::Duration, 6 | }; 7 | 8 | use async_recursion::async_recursion; 9 | use regex::RegexBuilder; 10 | use snafu::ResultExt; 11 | use tokio::time::sleep; 12 | 13 | use crate::{ 14 | core::{get_commit_statuses, process_dependents_after_merge, AppState}, 15 | error::*, 16 | git_ops::{setup_contributor_branch, SetupContributorBranchData}, 17 | github::*, 18 | merge_request::{ 19 | check_merge_is_allowed, cleanup_merge_request, 20 | handle_merged_pull_request, is_ready_to_merge, merge_pull_request, 21 | queue_merge_request, MergeRequest, MergeRequestCleanupReason, 22 | MergeRequestQueuedMessage, 23 | }, 24 | shell::*, 25 | types::Result, 26 | COMPANION_LONG_REGEX, COMPANION_PREFIX_REGEX, COMPANION_SHORT_REGEX, 27 | OWNER_AND_REPO_SEQUENCE, PR_HTML_URL_REGEX, 28 | }; 29 | 30 | #[derive(Clone)] 31 | pub struct CompanionReferenceTrailItem { 32 | pub owner: String, 33 | pub repo: String, 34 | } 35 | 36 | async fn update_pr_branch( 37 | state: &AppState, 38 | owner: &str, 39 | owner_repo: &str, 40 | owner_branch: &str, 41 | contributor: &str, 42 | contributor_repo: &str, 43 | contributor_branch: &str, 44 | inferred_dependencies_to_update: &HashSet<&String>, 45 | number: i64, 46 | ) -> Result<String> { 47 | let AppState { config, .. } = state; 48 | 49 | let SetupContributorBranchData { 50 | repo_dir, 51 | secrets_to_hide, 52 | contributor_remote_branch, 53 | ..
54 | } = &setup_contributor_branch( 55 | state, 56 | owner, 57 | owner_repo, 58 | owner_branch, 59 | contributor, 60 | contributor_repo, 61 | contributor_branch, 62 | ) 63 | .await?; 64 | let secrets_to_hide = secrets_to_hide.as_ref().map(|vec| &vec[..]); 65 | 66 | let dependencies_to_update = { 67 | let mut dependencies_to_update = 68 | inferred_dependencies_to_update.clone(); 69 | if let Some(dependencies_to_update_from_config) = 70 | config.dependency_update_configuration.get(owner_repo) 71 | { 72 | for dep in dependencies_to_update_from_config.iter() { 73 | dependencies_to_update.insert(dep); 74 | } 75 | }; 76 | dependencies_to_update 77 | }; 78 | 79 | log::info!( 80 | "Dependencies to update for {}/{}/pull/{}: {:?}", 81 | owner, 82 | owner_repo, 83 | number, 84 | dependencies_to_update 85 | ); 86 | for dependency_to_update in dependencies_to_update.iter() { 87 | let source_to_update = format!( 88 | "{}/{}/{}{}", 89 | config.github_source_prefix, 90 | owner, 91 | dependency_to_update, 92 | config.github_source_suffix 93 | ); 94 | log::info!( 95 | "Updating references of {} in the Cargo.lock of {:?}", 96 | source_to_update, 97 | repo_dir 98 | ); 99 | let cargo_lock_path = Path::new(&repo_dir).join("Cargo.lock"); 100 | let lockfile = 101 | cargo_lock::Lockfile::load(cargo_lock_path).map_err(|err| { 102 | Error::Message { 103 | msg: format!( 104 | "Failed to parse lockfile of {}: {:?}", 105 | contributor_repo, err 106 | ), 107 | } 108 | })?; 109 | let pkgs_in_companion: HashSet<String> = { 110 | HashSet::from_iter(lockfile.packages.iter().filter_map(|pkg| { 111 | if let Some(src) = pkg.source.as_ref() { 112 | if src.url().as_str() == source_to_update { 113 | Some(format!("{}:{}", pkg.name.as_str(), pkg.version)) 114 | } else { 115 | None 116 | } 117 | } else { 118 | None 119 | } 120 | })) 121 | }; 122 | if !pkgs_in_companion.is_empty() { 123 | let args = { 124 | let mut args = vec!["update", "-v"]; 125 | args.extend( 126 | pkgs_in_companion.iter().flat_map(|pkg| ["-p", pkg]), 127 | ); 128 | args 129 | }; 130 | run_cmd( 131 | "cargo", 132 | &args, 133 | &repo_dir, 134 | CommandMessage::Configured(CommandMessageConfiguration { 135 | secrets_to_hide, 136 | are_errors_silenced: false, 137 | }), 138 | ) 139 | .await?; 140 | } 141 | } 142 | 143 | // Check if `cargo update` resulted in any changes. If the master merge commit already had an 144 | // up-to-date lockfile then no changes might have been made.
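// To illustrate the update loop above with invented package names: with the default
// source prefix, owner "paritytech" and dependency "substrate" yield the source
// "https://github.com/paritytech/substrate", and two matching lockfile packages would
// produce an invocation like `cargo update -v -p sp-core:4.0.0 -p sp-io:4.0.0`,
// where each "-p" spec is a "name:version" pair collected in pkgs_in_companion.
// When the lockfile was already current, the `git status --short` check below
// prints nothing and the commit step is skipped.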
145 | let output = run_cmd_with_output( 146 | "git", 147 | &["status", "--short"], 148 | &repo_dir, 149 | CommandMessage::Configured(CommandMessageConfiguration { 150 | secrets_to_hide, 151 | are_errors_silenced: false, 152 | }), 153 | ) 154 | .await?; 155 | if !String::from_utf8_lossy(&output.stdout[..]) 156 | .trim() 157 | .is_empty() 158 | { 159 | run_cmd( 160 | "git", 161 | &[ 162 | "commit", 163 | "-am", 164 | &format!("update lockfile for {:?}", dependencies_to_update), 165 | ], 166 | &repo_dir, 167 | CommandMessage::Configured(CommandMessageConfiguration { 168 | secrets_to_hide, 169 | are_errors_silenced: false, 170 | }), 171 | ) 172 | .await?; 173 | } 174 | 175 | run_cmd( 176 | "git", 177 | &["push", contributor, contributor_branch], 178 | &repo_dir, 179 | CommandMessage::Configured(CommandMessageConfiguration { 180 | secrets_to_hide, 181 | are_errors_silenced: false, 182 | }), 183 | ) 184 | .await?; 185 | 186 | log::info!( 187 | "Getting the head SHA after a PR branch update in {}", 188 | &contributor_remote_branch 189 | ); 190 | let updated_sha_output = run_cmd_with_output( 191 | "git", 192 | &["rev-parse", "HEAD"], 193 | &repo_dir, 194 | CommandMessage::Configured(CommandMessageConfiguration { 195 | secrets_to_hide, 196 | are_errors_silenced: false, 197 | }), 198 | ) 199 | .await?; 200 | let updated_sha = String::from_utf8(updated_sha_output.stdout) 201 | .context(Utf8)? 202 | .trim() 203 | .to_string(); 204 | 205 | Ok(updated_sha) 206 | } 207 | 208 | fn parse_companion_from_url( 209 | body: &str, 210 | ) -> Option<PullRequestDetailsWithHtmlUrl> { 211 | parse_companion_from_long_url(body) 212 | .or_else(|| parse_companion_from_short_url(body)) 213 | } 214 | 215 | fn parse_companion_from_long_url( 216 | body: &str, 217 | ) -> Option<PullRequestDetailsWithHtmlUrl> { 218 | let re = RegexBuilder::new(COMPANION_LONG_REGEX!()) 219 | .case_insensitive(true) 220 | .build() 221 | .unwrap(); 222 | let caps = re.captures(body)?; 223 | let html_url = caps.name("html_url")?.as_str().to_owned(); 224 | let owner = caps.name("owner")?.as_str().to_owned(); 225 | let repo = caps.name("repo")?.as_str().to_owned(); 226 | let number = caps 227 | .name("number")? 228 | .as_str() 229 | .to_owned() 230 | .parse::<i64>() 231 | .ok()?; 232 | Some(PullRequestDetailsWithHtmlUrl { 233 | html_url, 234 | owner, 235 | repo, 236 | number, 237 | }) 238 | } 239 | 240 | fn parse_companion_from_short_url( 241 | body: &str, 242 | ) -> Option<PullRequestDetailsWithHtmlUrl> { 243 | let re = RegexBuilder::new(COMPANION_SHORT_REGEX!()) 244 | .case_insensitive(true) 245 | .build() 246 | .unwrap(); 247 | let caps = re.captures(body)?; 248 | let owner = caps.name("owner")?.as_str().to_owned(); 249 | let repo = caps.name("repo")?.as_str().to_owned(); 250 | let number = caps 251 | .name("number")? 252 | .as_str() 253 | .to_owned() 254 | .parse::<i64>() 255 | .ok()?; 256 | let html_url = format!( 257 | "https://github.com/{owner}/{repo}/pull/{number}", 258 | owner = owner, 259 | repo = repo, 260 | number = number 261 | ); 262 | Some(PullRequestDetailsWithHtmlUrl { 263 | html_url, 264 | owner, 265 | repo, 266 | number, 267 | }) 268 | } 269 | 270 | pub fn parse_all_companions( 271 | companion_reference_trail: &[CompanionReferenceTrailItem], 272 | body: &str, 273 | ) -> Vec<PullRequestDetailsWithHtmlUrl> { 274 | body.lines() 275 | .filter_map(|line| { 276 | parse_companion_from_url(line).and_then(|comp| { 277 | // Break cyclical references between dependency and dependents because we're only 278 | // interested in the dependency -> dependent relationship, not the other way around.
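// For example, if a PR of owner/dependency references "companion: owner/dependent#1"
// and that companion's description in turn points back at owner/dependency, the trail
// item recorded for owner/dependency makes the loop below drop the back-reference.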
279 | for item in companion_reference_trail { 280 | if comp.owner == item.owner && comp.repo == item.repo { 281 | return None; 282 | } 283 | } 284 | Some(comp) 285 | }) 286 | }) 287 | .collect() 288 | } 289 | 290 | #[async_recursion] 291 | pub async fn check_all_companions_are_mergeable( 292 | state: &AppState, 293 | pr: &GithubPullRequest, 294 | requested_by: &str, 295 | companion_reference_trail: &[CompanionReferenceTrailItem], 296 | ) -> Result<()> { 297 | let companions = match pr.parse_all_companions(companion_reference_trail) { 298 | Some(companions) => { 299 | if companions.is_empty() { 300 | return Ok(()); 301 | } else { 302 | companions 303 | } 304 | } 305 | _ => return Ok(()), 306 | }; 307 | 308 | let AppState { 309 | gh_client, config, .. 310 | } = state; 311 | for PullRequestDetailsWithHtmlUrl { 312 | html_url, 313 | owner, 314 | repo, 315 | number, 316 | } in companions 317 | { 318 | let companion = gh_client.pull_request(&owner, &repo, number).await?; 319 | 320 | if companion.merged { 321 | continue; 322 | } 323 | 324 | let has_user_owner = companion 325 | .user 326 | .as_ref() 327 | .map(|user| user.type_field == GithubUserType::User) 328 | .unwrap_or(false); 329 | if !has_user_owner { 330 | return Err(Error::Message { 331 | msg: format!( 332 | "Companion {} is not owned by a user, therefore processbot would not be able to push the lockfile update to their branch due to a Github limitation (https://github.com/isaacs/github/issues/1681)", 333 | html_url 334 | ), 335 | }); 336 | } 337 | 338 | if !companion.maintainer_can_modify 339 | // Even if the "Allow edits from maintainers" setting is not enabled, as long as the 340 | // companion belongs to the same organization, the bot should still be able to push 341 | // commits. 342 | && companion 343 | .head 344 | .repo 345 | .owner.login != pr.base.repo.owner.login 346 | { 347 | return Err(Error::Message { 348 | msg: format!( 349 | "Github API says \"Allow edits from maintainers\" is not enabled for {}. The bot would use that permission to push the lockfile update after merging this PR. Please check https://docs.github.com/en/github/collaborating-with-pull-requests/working-with-forks/allowing-changes-to-a-pull-request-branch-created-from-a-fork.", 350 | html_url 351 | ), 352 | }); 353 | } 354 | 355 | if !config.disable_org_checks { 356 | /* 357 | FIXME: Get rid of this ugly hack once the Companion Build System doesn't 358 | ignore the companion's CI 359 | */ 360 | let latest_statuses = get_commit_statuses( 361 | state, 362 | &companion.base.repo.owner.login, 363 | &companion.base.repo.name, 364 | &companion.head.sha, 365 | &companion.html_url, 366 | false, 367 | ) 368 | .await? 
369 | .1; 370 | 371 | const CHECK_REVIEWS_STATUS: &str = "Check reviews"; 372 | let reviews_are_passing = latest_statuses 373 | .get(CHECK_REVIEWS_STATUS) 374 | .map(|(_, state, _)| state == &GithubCommitStatusState::Success) 375 | .unwrap_or(false); 376 | if !reviews_are_passing { 377 | return Err(Error::Message { 378 | msg: format!( 379 | "\"{}\" status is not passing for {}", 380 | CHECK_REVIEWS_STATUS, &companion.html_url 381 | ), 382 | }); 383 | } 384 | } 385 | 386 | // Keeping track of the trail of references is necessary to break chains like A -> B -> C -> A 387 | // TODO: of course this should be tested 388 | let next_companion_reference_trail = { 389 | let mut next_trail = 390 | Vec::with_capacity(companion_reference_trail.len() + 1); 391 | next_trail.extend_from_slice(companion_reference_trail); 392 | next_trail.push(CompanionReferenceTrailItem { 393 | owner: (&pr.base.repo.owner.login).into(), 394 | repo: (&pr.base.repo.name).into(), 395 | }); 396 | next_trail 397 | }; 398 | 399 | check_merge_is_allowed( 400 | state, 401 | &companion, 402 | requested_by, 403 | &next_companion_reference_trail, 404 | ) 405 | .await?; 406 | } 407 | 408 | Ok(()) 409 | } 410 | 411 | #[async_recursion] 412 | pub async fn update_companion_then_merge( 413 | state: &AppState, 414 | comp: &MergeRequest, 415 | msg: &MergeRequestQueuedMessage, 416 | should_register_comp: bool, 417 | all_dependencies_are_ready: bool, 418 | ) -> Result<Option<String>> { 419 | let AppState { 420 | gh_client, config, .. 421 | } = state; 422 | 423 | match async { 424 | let comp_pr = gh_client 425 | .pull_request(&comp.owner, &comp.repo, comp.number) 426 | .await?; 427 | if handle_merged_pull_request(state, &comp_pr, &comp.requested_by) 428 | .await? 429 | { 430 | return Ok(None); 431 | } 432 | 433 | let (updated_sha, comp_pr) = if comp.was_updated { 434 | if comp_pr.head.sha != comp.sha { 435 | return Err(Error::HeadChanged { 436 | expected: comp.sha.to_string(), 437 | actual: comp_pr.head.sha.to_string(), 438 | }); 439 | } 440 | (None, comp_pr) 441 | } else { 442 | check_merge_is_allowed(state, &comp_pr, &comp.requested_by, &[]) 443 | .await?; 444 | 445 | let dependencies_to_update = 446 | if let Some(ref dependencies) = comp.dependencies { 447 | HashSet::from_iter( 448 | dependencies.iter().map(|dependency| &dependency.repo), 449 | ) 450 | } else { 451 | HashSet::new() 452 | }; 453 | 454 | if !all_dependencies_are_ready && !dependencies_to_update.is_empty() 455 | { 456 | if should_register_comp { 457 | queue_merge_request( 458 | state, 459 | comp, 460 | &MergeRequestQueuedMessage::None, 461 | ) 462 | .await?; 463 | } 464 | return Ok(None); 465 | } 466 | 467 | log::info!( 468 | "Updating {} including the following dependencies: {:?}", 469 | comp_pr.html_url, 470 | dependencies_to_update 471 | ); 472 | 473 | let updated_sha = update_pr_branch( 474 | state, 475 | &comp_pr.base.repo.owner.login, 476 | &comp_pr.base.repo.name, 477 | &comp_pr.base.ref_field, 478 | &comp_pr.head.repo.owner.login, 479 | &comp_pr.head.repo.name, 480 | &comp_pr.head.ref_field, 481 | &dependencies_to_update, 482 | comp_pr.number, 483 | ) 484 | .await?; 485 | 486 | // Wait a bit for the statuses to settle after we've updated the companion 487 | sleep(Duration::from_millis(config.companion_status_settle_delay)) 488 | .await; 489 | 490 | // Fetch it again since we've pushed some commits and therefore some status or check might have 491 | // failed already 492 | let comp_pr = gh_client 493 | .pull_request( 494 | &comp_pr.base.repo.owner.login, 495 |
&comp_pr.base.repo.name, 496 | comp_pr.number, 497 | ) 498 | .await?; 499 | 500 | // Sanity-check: the PR's new HEAD sha should be the updated SHA we just 501 | // pushed 502 | if comp_pr.head.sha != updated_sha { 503 | return Err(Error::HeadChanged { 504 | expected: updated_sha.to_string(), 505 | actual: comp_pr.head.sha.to_string(), 506 | }); 507 | } 508 | 509 | // Cleanup the pre-update SHA in order to prevent late status deliveries from 510 | // removing the updated SHA from the database 511 | cleanup_merge_request( 512 | state, 513 | &comp.sha, 514 | &comp.owner, 515 | &comp.repo, 516 | comp.number, 517 | &MergeRequestCleanupReason::AfterSHAUpdate(&updated_sha), 518 | ) 519 | .await?; 520 | 521 | (Some(updated_sha), comp_pr) 522 | }; 523 | 524 | if is_ready_to_merge(state, &comp_pr).await? { 525 | log::info!( 526 | "Attempting to merge {} after companion update", 527 | comp_pr.html_url 528 | ); 529 | if let Err(err) = 530 | merge_pull_request(state, &comp_pr, &comp.requested_by).await? 531 | { 532 | match err { 533 | Error::MergeFailureWillBeSolvedLater { .. } => {} 534 | err => return Err(err), 535 | }; 536 | } else { 537 | process_dependents_after_merge( 538 | state, 539 | &comp_pr, 540 | &comp.requested_by, 541 | ) 542 | .await?; 543 | return Ok(updated_sha); 544 | } 545 | } 546 | 547 | log::info!( 548 | "Companion updated; waiting for checks on {}", 549 | comp_pr.html_url 550 | ); 551 | queue_merge_request( 552 | state, 553 | &MergeRequest { 554 | sha: comp_pr.head.sha, 555 | owner: comp_pr.base.repo.owner.login, 556 | repo: comp_pr.base.repo.name, 557 | number: comp_pr.number, 558 | html_url: comp_pr.html_url, 559 | requested_by: (&comp.requested_by).into(), 560 | // Set "was_updated: true" to avoid updating a branch more than once 561 | was_updated: true, 562 | // All dependencies should have been updated above, we won't update them 563 | // again 564 | dependencies: None, 565 | }, 566 | msg, 567 | ) 568 | .await?; 569 | 570 | Ok(updated_sha) 571 | } 572 | .await 573 | { 574 | Err(err) => Err(err.with_pull_request_details(PullRequestDetails { 575 | owner: comp.owner.to_owned(), 576 | repo: comp.repo.to_owned(), 577 | number: comp.number, 578 | })), 579 | other => other, 580 | } 581 | } 582 | 583 | #[cfg(test)] 584 | mod tests { 585 | use super::*; 586 | 587 | const COMPANION_MARKERS: &[&str; 2] = &["Companion", "companion"]; 588 | 589 | #[test] 590 | fn test_companion_parsing_url_params() { 591 | for companion_marker in COMPANION_MARKERS { 592 | // Extra params should not be included in the parsed URL 593 | assert_eq!( 594 | parse_companion_from_url(&format!( 595 | "{}: https://github.com/org/repo/pull/1234?extra_params=true", 596 | companion_marker 597 | )), 598 | Some(PullRequestDetailsWithHtmlUrl { 599 | html_url: "https://github.com/org/repo/pull/1234" 600 | .to_owned(), 601 | owner: "org".to_owned(), 602 | repo: "repo".to_owned(), 603 | number: 1234 604 | }) 605 | ); 606 | } 607 | } 608 | 609 | #[test] 610 | fn test_companion_parsing_all_markers() { 611 | for companion_marker in COMPANION_MARKERS { 612 | // Long version should work even if the body has some other content around 613 | // the companion text 614 | assert_eq!( 615 | parse_companion_from_url(&format!( 616 | " 617 | Companion line is in the middle 618 | {}: https://github.com/org/repo/pull/1234 619 | Final line 620 | ", 621 | companion_marker 622 | )), 623 | Some(PullRequestDetailsWithHtmlUrl { 624 | html_url: "https://github.com/org/repo/pull/1234" 625 | .to_owned(), 626 | owner: "org".to_owned(), 627 | repo: 
"repo".to_owned(), 628 | number: 1234 629 | }) 630 | ); 631 | } 632 | } 633 | 634 | #[test] 635 | fn test_companion_parsing_short_version_wrap() { 636 | for companion_marker in COMPANION_MARKERS { 637 | // Short version should work even if the body has some other content around 638 | // the companion text 639 | assert_eq!( 640 | parse_companion_from_url(&format!( 641 | " 642 | Companion line is in the middle 643 | {}: org/repo#1234 644 | Final line 645 | ", 646 | companion_marker 647 | )), 648 | Some(PullRequestDetailsWithHtmlUrl { 649 | html_url: "https://github.com/org/repo/pull/1234" 650 | .to_owned(), 651 | owner: "org".to_owned(), 652 | repo: "repo".to_owned(), 653 | number: 1234 654 | }) 655 | ); 656 | } 657 | } 658 | 659 | #[test] 660 | fn test_companion_parsing_long_version_same_line() { 661 | for companion_marker in COMPANION_MARKERS { 662 | // Long version should not be detected if "companion: " and the expression 663 | // are not both in the same line 664 | assert_eq!( 665 | parse_companion_from_url(&format!( 666 | " 667 | I want to talk about {}: but NOT reference it 668 | I submitted it in https://github.com/org/repo/pull/1234 669 | ", 670 | companion_marker 671 | )), 672 | None 673 | ); 674 | } 675 | } 676 | 677 | #[test] 678 | fn test_companion_parsing_short_version_same_line() { 679 | for companion_marker in COMPANION_MARKERS { 680 | // Short version should not be detected if "companion: " and the expression are not both in 681 | // the same line 682 | assert_eq!( 683 | parse_companion_from_url(&format!( 684 | " 685 | I want to talk about {}: but NOT reference it 686 | I submitted it in org/repo#1234 687 | ", 688 | companion_marker 689 | )), 690 | None 691 | ); 692 | } 693 | } 694 | 695 | #[test] 696 | fn test_companion_parsing_multiple_companions() { 697 | let owner = "org"; 698 | let repo = "repo"; 699 | let pr_number = 1234; 700 | let companion_url = 701 | format!("https://github.com/{}/{}/pull/{}", owner, repo, pr_number); 702 | let expected_companion = PullRequestDetailsWithHtmlUrl { 703 | html_url: companion_url.to_owned(), 704 | owner: owner.into(), 705 | repo: repo.into(), 706 | number: pr_number, 707 | }; 708 | for companion_marker in COMPANION_MARKERS { 709 | assert_eq!( 710 | parse_all_companions( 711 | &[], 712 | &format!( 713 | " 714 | first {}: {} 715 | second {}: {} 716 | ", 717 | companion_marker, 718 | &companion_url, 719 | companion_marker, 720 | &companion_url 721 | ) 722 | ), 723 | vec![expected_companion.clone(), expected_companion.clone()] 724 | ); 725 | } 726 | } 727 | 728 | #[test] 729 | fn test_cyclical_references() { 730 | let owner = "org"; 731 | let repo = "repo"; 732 | 733 | for companion_marker in COMPANION_MARKERS { 734 | let companion_description = format!( 735 | " 736 | {}: https://github.com/{}/{}/pull/123 737 | ", 738 | companion_marker, owner, repo, 739 | ); 740 | 741 | // If the source is not referenced in the description, something is parsed 742 | assert_ne!( 743 | parse_all_companions(&[], &companion_description), 744 | vec![] 745 | ); 746 | 747 | // If the source is referenced in the description, it is omitted 748 | assert_eq!( 749 | parse_all_companions( 750 | &[CompanionReferenceTrailItem { 751 | owner: owner.into(), 752 | repo: repo.into() 753 | }], 754 | &companion_description 755 | ), 756 | vec![] 757 | ); 758 | } 759 | } 760 | 761 | #[test] 762 | fn test_restricted_regex() { 763 | let owner = "org"; 764 | let repo = "repo"; 765 | let pr_number = 1234; 766 | let companion_url = format!("{}/{}#{}", owner, repo, pr_number); 767 | 
for companion_marker in COMPANION_MARKERS { 768 | assert_eq!( 769 | parse_all_companions( 770 | &[], 771 | // the companion expression should not be matched because of the " for" part 772 | &format!("{} for {}", companion_marker, &companion_url) 773 | ), 774 | vec![] 775 | ); 776 | } 777 | } 778 | } 779 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, path::PathBuf}; 2 | 3 | #[derive(Debug, Clone)] 4 | pub struct MainConfig { 5 | pub installation_login: String, 6 | pub webhook_secret: String, 7 | pub webhook_port: String, 8 | pub db_path: PathBuf, 9 | pub repos_path: PathBuf, 10 | pub private_key: Vec<u8>, 11 | pub webhook_proxy_url: Option<String>, 12 | pub github_app_id: usize, 13 | pub disable_org_checks: bool, 14 | pub github_api_url: String, 15 | pub companion_status_settle_delay: u64, 16 | pub merge_command_delay: u64, 17 | pub github_source_prefix: String, 18 | pub github_source_suffix: String, 19 | pub gitlab_url: String, 20 | pub gitlab_access_token: String, 21 | pub dependency_update_configuration: HashMap<String, Vec<String>>, 22 | } 23 | 24 | impl MainConfig { 25 | pub fn from_env() -> Self { 26 | dotenv::dotenv().ok(); 27 | 28 | let root_dir = if dotenv::var("START_FROM_CWD").is_ok() { 29 | std::env::current_dir().expect("START_FROM_CWD was set, but it was not possible to get the current directory") 30 | } else { 31 | PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is not set, please run the application through cargo")) 32 | }; 33 | 34 | let installation_login = 35 | dotenv::var("INSTALLATION_LOGIN").expect("INSTALLATION_LOGIN"); 36 | let webhook_secret = 37 | dotenv::var("WEBHOOK_SECRET").expect("WEBHOOK_SECRET"); 38 | let webhook_port = dotenv::var("WEBHOOK_PORT").expect("WEBHOOK_PORT"); 39 | 40 | let db_path = dotenv::var("DB_PATH").unwrap(); 41 | let db_path = if db_path.starts_with('/') { 42 | PathBuf::from(db_path) 43 | } else { 44 | root_dir.join(db_path) 45 | }; 46 | std::fs::create_dir_all(&db_path) 47 | .expect("Could not create database directory (DB_PATH)"); 48 | 49 | let repos_path = 50 | dotenv::var("REPOSITORIES_PATH").expect("REPOSITORIES_PATH"); 51 | let repos_path = if repos_path.starts_with('/') { 52 | PathBuf::from(repos_path) 53 | } else { 54 | root_dir.join(repos_path) 55 | }; 56 | std::fs::create_dir_all(&repos_path).expect( 57 | "Could not create repositories directory (REPOSITORIES_PATH)", 58 | ); 59 | 60 | let private_key_path = 61 | dotenv::var("PRIVATE_KEY_PATH").expect("PRIVATE_KEY_PATH"); 62 | let private_key = std::fs::read(&private_key_path) 63 | .expect("Couldn't find private key."); 64 | 65 | let webhook_proxy_url = dotenv::var("WEBHOOK_PROXY_URL").ok(); 66 | let github_app_id = dotenv::var("GITHUB_APP_ID") 67 | .expect("GITHUB_APP_ID") 68 | .parse::<usize>() 69 | .expect("GITHUB_APP_ID should be a number"); 70 | 71 | let disable_org_checks = dotenv::var("DISABLE_ORG_CHECKS") 72 | .ok() 73 | .map(|value| match value.as_str() { 74 | "true" => true, 75 | "false" => false, 76 | _ => { 77 | panic!("DISABLE_ORG_CHECKS should be \"true\" or \"false\"") 78 | } 79 | }) 80 | .unwrap_or(false); 81 | 82 | let github_api_url = "https://api.github.com".to_owned(); 83 | let github_source_prefix = dotenv::var("GITHUB_SOURCE_PREFIX") 84 | .unwrap_or_else(|_| "https://github.com".to_string()); 85 | let github_source_suffix = dotenv::var("GITHUB_SOURCE_SUFFIX") 86 | .unwrap_or_else(|_| "".to_string()); 87 |
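// As a hypothetical illustration of how these two settings combine:
// GITHUB_SOURCE_PREFIX="ssh://git@github.com" together with GITHUB_SOURCE_SUFFIX=".git"
// makes the bot match lockfile sources of the form ssh://git@github.com/<owner>/<repo>.git
// when updating dependencies before a merge.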
88 | let merge_command_delay = 4096; 89 | 90 | let companion_status_settle_delay = 4096; 91 | 92 | let gitlab_url = dotenv::var("GITLAB_URL").unwrap(); 93 | let gitlab_access_token = dotenv::var("GITLAB_ACCESS_TOKEN").unwrap(); 94 | 95 | let dependency_update_configuration = { 96 | let mut dependency_update_configuration = HashMap::new(); 97 | 98 | if let Ok(raw_configuration) = 99 | dotenv::var("DEPENDENCY_UPDATE_CONFIGURATION") 100 | { 101 | for token in raw_configuration.split(':') { 102 | let token_parsing_err_msg = format!( 103 | "$DEPENDENCY_UPDATE_CONFIGURATION segment \"{}\" should be of the form REPOSITORY=DEPENDENCY+DEPENDENCY+...", 104 | token 105 | ); 106 | 107 | let mut token_parts = token.split('='); 108 | let repository = 109 | token_parts.next().expect(&token_parsing_err_msg); 110 | let dependencies = 111 | token_parts.next().expect(&token_parsing_err_msg); 112 | if token_parts.next().is_some() { 113 | panic!("{}", token_parsing_err_msg) 114 | } 115 | 116 | dependency_update_configuration.insert( 117 | repository.into(), 118 | dependencies.split('+').map(|dep| dep.into()).collect(), 119 | ); 120 | } 121 | } 122 | 123 | dependency_update_configuration 124 | }; 125 | log::info!( 126 | "dependency_update_configuration: {:?}", 127 | dependency_update_configuration 128 | ); 129 | 130 | Self { 131 | installation_login, 132 | webhook_secret, 133 | webhook_port, 134 | db_path, 135 | private_key, 136 | webhook_proxy_url, 137 | github_app_id, 138 | disable_org_checks, 139 | github_api_url, 140 | merge_command_delay, 141 | companion_status_settle_delay, 142 | repos_path, 143 | github_source_prefix, 144 | github_source_suffix, 145 | gitlab_url, 146 | gitlab_access_token, 147 | dependency_update_configuration, 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/constants.rs: -------------------------------------------------------------------------------- 1 | // Note: the old database will be *DELETED* when changing this constant 2 | // Do not change this without checking the implementation first 3 | pub const DATABASE_VERSION: &str = "v3.0"; 4 | -------------------------------------------------------------------------------- /src/core.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{HashMap, HashSet}; 2 | 3 | use async_recursion::async_recursion; 4 | use regex::RegexBuilder; 5 | use reqwest::Client as HttpClient; 6 | use rocksdb::DB; 7 | use snafu::ResultExt; 8 | 9 | use crate::{ 10 | companion::update_companion_then_merge, 11 | config::MainConfig, 12 | error::{self, handle_error, Error, PullRequestDetails}, 13 | git_ops::{rebase, RebaseOutcome}, 14 | github::*, 15 | gitlab::*, 16 | merge_request::{ 17 | check_merge_is_allowed, cleanup_merge_request, 18 | handle_merged_pull_request, is_ready_to_merge, merge_pull_request, 19 | queue_merge_request, MergeRequest, MergeRequestCleanupReason, 20 | MergeRequestQueuedMessage, 21 | }, 22 | types::Result, 23 | vanity_service, 24 | }; 25 | 26 | #[derive(Debug)] 27 | pub enum Status { 28 | Success, 29 | Pending, 30 | Failure, 31 | } 32 | 33 | pub enum PullRequestMergeCancelOutcome { 34 | ShaNotFound, 35 | WasCancelled, 36 | WasNotCancelled, 37 | } 38 | 39 | pub struct AppState { 40 | pub db: DB, 41 | pub gh_client: GithubClient, 42 | pub config: MainConfig, 43 | } 44 | 45 | #[derive(Debug)] 46 | pub enum CommentCommand { 47 | Merge(MergeCommentCommand), 48 | CancelMerge, 49 | Rebase, 50 | } 51 | 52 | #[derive(Debug)] 53 |
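/// How a `bot merge` command behaves: `Normal` waits until the PR is ready and queues it
/// otherwise, while `Force` attempts the merge immediately and surfaces any failure.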
pub enum MergeCommentCommand { 54 | Normal, 55 | Force, 56 | } 57 | 58 | pub async fn get_commit_statuses( 59 | state: &AppState, 60 | owner: &str, 61 | repo: &str, 62 | commit_sha: &str, 63 | html_url: &str, 64 | should_handle_retried_jobs: bool, 65 | ) -> Result<( 66 | Status, 67 | HashMap<String, (i64, GithubCommitStatusState, Option<String>)>, 68 | )> { 69 | let AppState { 70 | gh_client, config, .. 71 | } = state; 72 | 73 | let statuses = gh_client.statuses(owner, repo, commit_sha).await?; 74 | log::info!("{} statuses: {:?}", html_url, statuses); 75 | 76 | // Since Github only considers the latest instance of each status, we should 77 | // abide by the same rule. Each instance is uniquely identified by "context". 78 | let mut latest_statuses: HashMap< 79 | String, 80 | (i64, GithubCommitStatusState, Option<String>), 81 | > = HashMap::new(); 82 | for s in statuses { 83 | if s.description 84 | .as_ref() 85 | .map(|description| { 86 | match serde_json::from_str::<vanity_service::JobInformation>( 87 | description, 88 | ) { 89 | Ok(info) => info.build_allow_failure.unwrap_or(false), 90 | _ => false, 91 | } 92 | }) 93 | .unwrap_or(false) 94 | { 95 | continue; 96 | } 97 | 98 | if latest_statuses 99 | .get(&s.context) 100 | .map(|(prev_id, _, _)| prev_id < &s.id) 101 | .unwrap_or(true) 102 | { 103 | latest_statuses.insert(s.context, (s.id, s.state, s.target_url)); 104 | } 105 | } 106 | log::info!("{} latest_statuses: {:?}", html_url, latest_statuses); 107 | 108 | if latest_statuses 109 | .values() 110 | .all(|(_, state, _)| *state == GithubCommitStatusState::Success) 111 | { 112 | log::info!("{} has success status", html_url); 113 | Ok((Status::Success, latest_statuses)) 114 | } else if latest_statuses.values().any(|(_, state, _)| { 115 | *state == GithubCommitStatusState::Error 116 | || *state == GithubCommitStatusState::Failure 117 | }) { 118 | if should_handle_retried_jobs { 119 | let mut has_failed_status_from_outside_gitlab = false; 120 | 121 | let gitlab_job_target_url_matcher = 122 | RegexBuilder::new(r"^(\w+://[^/]+)/(.*)/builds/([0-9]+)$") 123 | .case_insensitive(true) 124 | .build() 125 | .unwrap(); 126 | let failed_gitlab_jobs = latest_statuses 127 | .values() 128 | .filter_map(|(_, status, target_url)| match *status { 129 | GithubCommitStatusState::Failure 130 | | GithubCommitStatusState::Error => { 131 | let gitlab_job_data = 132 | target_url.as_ref().and_then(|target_url| { 133 | gitlab_job_target_url_matcher 134 | .captures(target_url) 135 | .and_then(|matches| { 136 | let gitlab_url = 137 | matches.get(1).unwrap().as_str(); 138 | if gitlab_url == config.gitlab_url { 139 | let gitlab_project = matches 140 | .get(2) 141 | .unwrap() 142 | .as_str(); 143 | let job_id = matches 144 | .get(3) 145 | .unwrap() 146 | .as_str() 147 | .parse::<i64>() 148 | .unwrap(); 149 | Some(( 150 | gitlab_url, 151 | gitlab_project, 152 | job_id, 153 | )) 154 | } else { 155 | None 156 | } 157 | }) 158 | }); 159 | if gitlab_job_data.is_none() { 160 | has_failed_status_from_outside_gitlab = true; 161 | } 162 | gitlab_job_data 163 | } 164 | _ => None, 165 | }) 166 | .collect::<Vec<_>>(); 167 | 168 | if has_failed_status_from_outside_gitlab { 169 | log::info!( 170 | "Non-GitLab statuses have failed, therefore we bail out of trying to check if following GitLab jobs have recovered: {:?}", 171 | failed_gitlab_jobs 172 | ); 173 | } else if !failed_gitlab_jobs.is_empty() { 174 | let mut recovered_jobs = vec![]; 175 | 176 | let http_client = HttpClient::new(); 177 | for (gitlab_url, gitlab_project, job_id) in failed_gitlab_jobs { 178 | // https://docs.gitlab.com/ee/api/jobs.html#get-a-single-job 179 | let
job_api_url = format!( 180 | "{}/api/v4/projects/{}/jobs/{}", 181 | gitlab_url, 182 | urlencoding::encode(gitlab_project), 183 | job_id 184 | ); 185 | 186 | let job = http_client 187 | .execute( 188 | http_client 189 | .get(&job_api_url) 190 | .headers( 191 | config.get_gitlab_api_request_headers()?, 192 | ) 193 | .build() 194 | .map_err(|err| Error::Message { 195 | msg: format!( 196 | "Failed to build request to fetch {} due to {:?}", 197 | job_api_url, 198 | err 199 | ), 200 | })?, 201 | ) 202 | .await 203 | .context(error::Http)? 204 | .json::<GitlabJob>() 205 | .await 206 | .context(error::Http)?; 207 | 208 | log::info!("Fetched job for {}: {:?}", job_api_url, job); 209 | 210 | match job.pipeline.status { 211 | GitlabPipelineStatus::Created 212 | | GitlabPipelineStatus::WaitingForResource 213 | | GitlabPipelineStatus::Preparing 214 | | GitlabPipelineStatus::Pending 215 | | GitlabPipelineStatus::Running 216 | | GitlabPipelineStatus::Scheduled => { 217 | log::info!("{} is failing on GitHub, but its pipeline is pending, therefore we'll check if it's running or pending (it might have been retried)", job_api_url); 218 | 219 | let pending_or_successful_jobs = { 220 | let mut pending_or_successful_jobs = vec![]; 221 | // https://docs.gitlab.com/ee/api/#offset-based-pagination 222 | let mut page = 1; 223 | loop { 224 | // https://docs.gitlab.com/ee/api/jobs.html#list-pipeline-jobs 225 | let pending_or_successful_jobs_api = format!( 226 | "{}/api/v4/projects/{}/pipelines/{}/jobs?scope[]=pending&scope[]=running&scope[]=success&scope[]=created&per_page=100&page={}", 227 | gitlab_url, 228 | job.pipeline.project_id, 229 | job.pipeline.id, 230 | page 231 | ); 232 | 233 | let page_pending_or_successful_jobs = http_client 234 | .execute( 235 | http_client 236 | .get(&pending_or_successful_jobs_api) 237 | .headers(config.get_gitlab_api_request_headers()?) 238 | .build() 239 | .map_err(|err| Error::Message { 240 | msg: format!( 241 | "Failed to build request to fetch {} due to {:?}", 242 | pending_or_successful_jobs_api, 243 | err 244 | ), 245 | })?, 246 | ) 247 | .await 248 | .context(error::Http)?
249 | .json::<Vec<GitlabJob>>() 250 | .await 251 | .context(error::Http)?; 252 | 253 | if page_pending_or_successful_jobs 254 | .is_empty() 255 | { 256 | break; 257 | } 258 | 259 | pending_or_successful_jobs.extend( 260 | page_pending_or_successful_jobs, 261 | ); 262 | 263 | page += 1; 264 | } 265 | pending_or_successful_jobs 266 | }; 267 | 268 | if pending_or_successful_jobs.iter().any( 269 | |pending_pipeline_job| { 270 | pending_pipeline_job.name == job.name 271 | }, 272 | ) { 273 | recovered_jobs.push(job_api_url); 274 | } else { 275 | log::info!( 276 | "{} 's GitLab pipeline (id: {}) for job {} (name: {}) did not list it as pending or successful, therefore the job is considered to be failing", 277 | html_url, 278 | job.pipeline.id, 279 | job_api_url, 280 | job.name 281 | ); 282 | recovered_jobs.clear(); 283 | break; 284 | } 285 | } 286 | _ => { 287 | log::info!( 288 | "{} 's GitLab pipeline (id: {}) for job {} (name: {}) is not pending, therefore the job itself can't be considered to be pending", 289 | html_url, 290 | job.pipeline.id, 291 | job_api_url, 292 | job.name, 293 | ); 294 | recovered_jobs.clear(); 295 | break; 296 | } 297 | } 298 | } 299 | 300 | if !recovered_jobs.is_empty() { 301 | log::info!( 302 | "{} was initially considered to be failing, but we consider it has recovered because the following jobs have recovered: {:?}", 303 | html_url, 304 | recovered_jobs 305 | ); 306 | return Ok((Status::Pending, latest_statuses)); 307 | } 308 | } 309 | } 310 | 311 | log::info!("{} has failed status", html_url); 312 | Ok((Status::Failure, latest_statuses)) 313 | } else { 314 | log::info!("{} has pending status", html_url); 315 | Ok((Status::Pending, latest_statuses)) 316 | } 317 | } 318 | 319 | pub async fn get_commit_checks( 320 | gh_client: &GithubClient, 321 | owner: &str, 322 | repo_name: &str, 323 | commit_sha: &str, 324 | html_url: &str, 325 | ) -> Result<Status> { 326 | let check_runs = gh_client.check_runs(owner, repo_name, commit_sha).await?; 327 | log::info!("{} check_runs: {:?}", html_url, check_runs); 328 | 329 | // Since Github only considers the latest instance of each check, we should abide by the same 330 | // rule. Each instance is uniquely identified by "name". 331 | let mut latest_checks = HashMap::new(); 332 | for c in check_runs { 333 | if latest_checks 334 | .get(&c.name) 335 | .map(|(prev_id, _, _)| prev_id < &c.id) 336 | .unwrap_or(true) 337 | { 338 | latest_checks.insert(c.name, (c.id, c.status, c.conclusion)); 339 | } 340 | } 341 | log::info!("{} latest_checks: {:?}", html_url, latest_checks); 342 | 343 | Ok( 344 | if latest_checks.values().all(|(_, _, conclusion)| { 345 | *conclusion == Some(GithubCheckRunConclusion::Success) 346 | }) { 347 | log::info!("{} has successful checks", html_url); 348 | Status::Success 349 | } else if latest_checks 350 | .values() 351 | .all(|(_, status, _)| *status == GithubCheckRunStatus::Completed) 352 | { 353 | log::info!("{} has unsuccessful checks", html_url); 354 | Status::Failure 355 | } else { 356 | log::info!("{} has pending checks", html_url); 357 | Status::Pending 358 | }, 359 | ) 360 | } 361 | 362 | #[async_recursion] 363 | pub async fn process_commit_checks_and_statuses( 364 | state: &AppState, 365 | sha: &str, 366 | ) -> Result<()> { 367 | let AppState { db, gh_client, .. } = state; 368 | 369 | log::info!("Checking for statuses of {}", sha); 370 | 371 | let mr: MergeRequest = match db.get(sha.as_bytes()).context(error::Db)?
{ 372 | Some(bytes) => bincode::deserialize(&bytes).context(error::Bincode)?, 373 | None => return Ok(()), 374 | }; 375 | let pr = gh_client 376 | .pull_request(&mr.owner, &mr.repo, mr.number) 377 | .await?; 378 | log::info!( 379 | "Deserialized merge request for {} (sha {}): {:?}", 380 | pr.html_url, 381 | sha, 382 | mr 383 | ); 384 | 385 | match async { 386 | if handle_merged_pull_request(state, &pr, &mr.requested_by).await? { 387 | return Ok(()); 388 | } 389 | 390 | if mr.sha != pr.head.sha { 391 | return Err(Error::HeadChanged { 392 | expected: sha.to_string(), 393 | actual: pr.head.sha.to_owned(), 394 | }); 395 | } 396 | 397 | if !is_ready_to_merge(state, &pr).await? { 398 | log::info!("{} is not ready", pr.html_url); 399 | return Ok(()); 400 | } 401 | 402 | check_merge_is_allowed(state, &pr, &mr.requested_by, &[]).await?; 403 | 404 | if let Some(dependencies) = &mr.dependencies { 405 | for dependency in dependencies { 406 | let dependency_pr = gh_client 407 | .pull_request( 408 | &dependency.owner, 409 | &dependency.repo, 410 | dependency.number, 411 | ) 412 | .await?; 413 | if dependency_pr.head.sha != dependency.sha { 414 | return Err(Error::Message { 415 | msg: format!( 416 | "Dependency {} 's HEAD SHA changed from {} to {}. Aborting.", 417 | dependency.html_url, 418 | dependency.sha, 419 | dependency_pr.head.sha 420 | ), 421 | }); 422 | } 423 | 424 | if dependency_pr.merged { 425 | log::info!( 426 | "Dependency {} of PR {} was merged, cleaning it", 427 | dependency_pr.html_url, 428 | pr.html_url 429 | ); 430 | cleanup_merge_request( 431 | state, 432 | &dependency_pr.head.sha, 433 | &dependency.owner, 434 | &dependency.repo, 435 | dependency.number, 436 | &MergeRequestCleanupReason::AfterMerge, 437 | ) 438 | .await?; 439 | } else { 440 | log::info!( 441 | "Giving up on merging {} because its dependency {} has not been merged yet", 442 | pr.html_url, 443 | dependency.html_url 444 | ); 445 | return Ok(()); 446 | }; 447 | } 448 | } 449 | 450 | log::info!("Updating companion {} before merge", pr.html_url); 451 | update_companion_then_merge( 452 | state, 453 | &mr, 454 | &MergeRequestQueuedMessage::None, 455 | // No need to register the MR again: we already know it is registered because 456 | // it was fetched from the database at the start 457 | false, 458 | // We have checked that all dependencies are ready by this point 459 | true, 460 | ) 461 | .await?; 462 | 463 | Ok(()) 464 | } 465 | .await 466 | { 467 | Ok(_) | Err(Error::MergeFailureWillBeSolvedLater { .. }) => Ok(()), 468 | Err(err) => Err(err.with_pull_request_details(PullRequestDetails { 469 | owner: pr.base.repo.owner.login, 470 | repo: pr.base.repo.name, 471 | number: pr.number, 472 | })), 473 | } 474 | } 475 | 476 | pub async fn process_dependents_after_merge( 477 | state: &AppState, 478 | pr: &GithubPullRequest, 479 | requested_by: &str, 480 | ) -> Result<()> { 481 | log::info!("Handling dependents of {}", pr.html_url); 482 | 483 | let AppState { 484 | gh_client, 485 | db, 486 | config, 487 | .. 
488 | } = state; 489 | 490 | let fetched_dependents = gh_client 491 | .resolve_pr_dependents(config, pr, requested_by, &[]) 492 | .await?; 493 | if let Some(dependents) = &fetched_dependents { 494 | log::info!( 495 | "Found current dependents of {}: {:?}", 496 | pr.html_url, 497 | dependents 498 | ); 499 | } 500 | 501 | /* 502 | The alive dependents are the ones which are still referenced in the PR 503 | description plus the ones from the database which were registered as indirect 504 | dependencies 505 | */ 506 | let mut alive_dependents = fetched_dependents.clone().unwrap_or_default(); 507 | 508 | // Helper function to avoid duplicate dependents from being registered 509 | let mut register_alive_dependent = |dep: MergeRequest| { 510 | if alive_dependents.iter().any(|alive_dep: &MergeRequest| { 511 | dep.owner == alive_dep.owner 512 | && dep.repo == alive_dep.repo 513 | && dep.number == alive_dep.number 514 | }) { 515 | return; 516 | }; 517 | alive_dependents.push(dep) 518 | }; 519 | 520 | /* 521 | Step 1: Update dangling references 522 | 523 | The dependents we have detected when the merge chain was first built might not 524 | be referenced in the PR description anymore (i.e. they have become dangling 525 | references); in that case try to invalidate them from the database 526 | 527 | --- 528 | 529 | Set up a loop for reinitializing the DB's iterator since the operations 530 | performed in this loop might modify or delete multiple items from the 531 | database, thus potentially making the iteration not work according to 532 | expectations. 533 | */ 534 | let mut processed_mrs = vec![]; 535 | 'db_iteration_loop: loop { 536 | let db_iter = db.iterator(rocksdb::IteratorMode::Start); 537 | 'to_next_item: for (key, value) in db_iter { 538 | match bincode::deserialize::<MergeRequest>(&value) 539 | .context(error::Bincode) 540 | { 541 | Ok(mut mr) => { 542 | if processed_mrs.iter().any(|prev_mr: &MergeRequest| { 543 | mr.owner == prev_mr.owner 544 | && mr.repo == prev_mr.repo && mr.number 545 | == prev_mr.number 546 | }) { 547 | continue; 548 | } 549 | 550 | if let Some(dependents) = &fetched_dependents { 551 | for dependent in dependents { 552 | if dependent.owner == mr.owner 553 | && dependent.repo == mr.repo && dependent.number 554 | == mr.number 555 | { 556 | // This item was detected as a dependent, therefore it is not potentially 557 | // dangling for this PR specifically 558 | register_alive_dependent(mr); 559 | continue 'to_next_item; 560 | } 561 | } 562 | } 563 | 564 | #[derive(PartialEq)] 565 | enum LivenessOutcome { 566 | Updated, 567 | Dangling, 568 | Alive, 569 | AliveNeedsUpdate, 570 | } 571 | let mut liveness_outcome: Option<LivenessOutcome> = None; 572 | 573 | mr.dependencies = mr.dependencies.map(|dependencies| { 574 | dependencies 575 | .into_iter() 576 | .filter(|dependency| { 577 | if dependency.owner == pr.base.repo.owner.login 578 | && dependency.repo == pr.base.repo.name 579 | && dependency.number == pr.number 580 | { 581 | if dependency.is_directly_referenced { 582 | if liveness_outcome.is_none() { 583 | liveness_outcome = 584 | Some(LivenessOutcome::Dangling); 585 | } 586 | false 587 | } else { 588 | if liveness_outcome != Some(LivenessOutcome::AliveNeedsUpdate) { 589 | liveness_outcome = match liveness_outcome { 590 | Some(LivenessOutcome::Updated) => Some(LivenessOutcome::AliveNeedsUpdate), 591 | _ => Some(LivenessOutcome::Alive) 592 | }; 593 | } 594 | true 595 | } 596 | } else { 597 | if let Some(LivenessOutcome::Dangling) = 598 | liveness_outcome 599 | { 600 | liveness_outcome = 601 |
Some(LivenessOutcome::Updated); 602 | } 603 | true 604 | } 605 | }) 606 | .collect() 607 | }); 608 | 609 | if let Some(liveness_outcome) = liveness_outcome { 610 | match liveness_outcome { 611 | LivenessOutcome::Alive => { 612 | register_alive_dependent(mr.clone()); 613 | } 614 | LivenessOutcome::Updated 615 | | LivenessOutcome::AliveNeedsUpdate => { 616 | if let Err(err) = db 617 | .put( 618 | &key, 619 | bincode::serialize(&mr) 620 | .context(error::Bincode)?, 621 | ) 622 | .context(error::Db) 623 | { 624 | log::error!( 625 | "Failed to update database references after merge of {} in dependent {} due to {:?}", 626 | pr.html_url, 627 | mr.html_url, 628 | err 629 | ); 630 | let _ = cleanup_merge_request( 631 | state, 632 | &mr.sha, 633 | &mr.owner, 634 | &mr.repo, 635 | mr.number, 636 | &MergeRequestCleanupReason::Error, 637 | ) 638 | .await; 639 | handle_error( 640 | PullRequestMergeCancelOutcome::WasCancelled, 641 | Error::Message { 642 | msg: format!( 643 | "Unable to update {} in the database (detected as a dependent of {})", 644 | &mr.html_url, 645 | pr.html_url 646 | ), 647 | } 648 | .with_pull_request_details(PullRequestDetails { 649 | owner: (&mr.owner).into(), 650 | repo: (&mr.repo).into(), 651 | number: mr.number, 652 | }), 653 | state, 654 | ) 655 | .await; 656 | } else if liveness_outcome 657 | == LivenessOutcome::AliveNeedsUpdate 658 | { 659 | register_alive_dependent(mr.clone()); 660 | } 661 | } 662 | LivenessOutcome::Dangling => { 663 | let _ = db.delete(&key); 664 | } 665 | }; 666 | 667 | processed_mrs.push(mr); 668 | continue 'db_iteration_loop; 669 | } 670 | } 671 | Err(err) => { 672 | log::error!( 673 | "Failed to deserialize key {} from the database due to {:?}", 674 | String::from_utf8_lossy(&key), 675 | err 676 | ); 677 | let _ = db.delete(&key); 678 | } 679 | }; 680 | } 681 | break; 682 | } 683 | 684 | let dependents = { 685 | if alive_dependents.is_empty() { 686 | return Ok(()); 687 | } 688 | alive_dependents 689 | }; 690 | 691 | /* 692 | Step 2: Update the dependents (and merge them right away if possible) 693 | 694 | Update dependents which can be updated (i.e. 
those that have the PR which was 695 | just merged as their *only* pending dependency) 696 | */ 697 | let mut updated_dependents: Vec<(String, &MergeRequest)> = vec![]; 698 | for dependent in &dependents { 699 | let depends_on_another_pr = dependent 700 | .dependencies 701 | .as_ref() 702 | .map(|dependencies| { 703 | dependencies 704 | .iter() 705 | .any(|dependency| dependency.repo != pr.base.repo.name) 706 | }) 707 | .unwrap_or(false); 708 | match update_companion_then_merge( 709 | state, 710 | dependent, 711 | &MergeRequestQueuedMessage::Default, 712 | // The dependent should always be registered to the database as a pending 713 | // item since one of its dependencies just got merged, therefore it becomes 714 | // eligible for merge in the future 715 | true, 716 | !depends_on_another_pr, 717 | ) 718 | .await 719 | { 720 | Ok(updated_sha) => { 721 | if let Some(updated_sha) = updated_sha { 722 | updated_dependents.push((updated_sha, dependent)) 723 | } 724 | } 725 | Err(err) => { 726 | let _ = cleanup_merge_request( 727 | state, 728 | &dependent.sha, 729 | &dependent.owner, 730 | &dependent.repo, 731 | dependent.number, 732 | &MergeRequestCleanupReason::Error, 733 | ) 734 | .await; 735 | handle_error( 736 | PullRequestMergeCancelOutcome::WasCancelled, 737 | err.with_pull_request_details(PullRequestDetails { 738 | owner: (&dependent.owner).into(), 739 | repo: (&dependent.repo).into(), 740 | number: dependent.number, 741 | }), 742 | state, 743 | ) 744 | .await; 745 | } 746 | } 747 | } 748 | 749 | /* 750 | Step 3: Collect the relevant dependents which should be checked 751 | 752 | If the dependent was merged in the previous step or someone merged it manually 753 | in-between this step and the previous one, the dependents of that dependent 754 | should be collected for the check because they might be mergeable now 755 | that one of their dependencies (the dependent) was merged. 756 | 757 | If the dependent was updated in the previous step, it might already be 758 | mergeable (i.e. its statuses might already be passing after the update), 759 | therefore it should be included in the dependents_to_check. Also, since it was 760 | updated, its dependencies should be updated as well to track the resulting SHA 761 | after the update, otherwise their processing would result in the HeadChanged 762 | error unintentionally (HeadChanged is a security measure to prevent malicious 763 | commits from sneaking in after the chain is built, but in this case we changed 764 | the HEAD of the PR ourselves through the update, which is safe).
765 | */ 766 | let mut dependents_to_check = HashMap::new(); 767 | let db_iter = db.iterator(rocksdb::IteratorMode::Start); 768 | for (key, value) in db_iter { 769 | match bincode::deserialize::<MergeRequest>(&value) 770 | .context(error::Bincode) 771 | { 772 | Ok(mut dependent_of_dependent) => { 773 | let mut should_be_included_in_check = false; 774 | let mut record_needs_update = false; 775 | 776 | let mut updated_dependencies = HashSet::new(); 777 | dependent_of_dependent.dependencies = 778 | if let Some(mut dependencies) = 779 | dependent_of_dependent.dependencies 780 | { 781 | for dependency in dependencies.iter_mut() { 782 | for (updated_sha, updated_dependent) in 783 | &updated_dependents 784 | { 785 | if dependency.owner == updated_dependent.owner 786 | && dependency.repo == updated_dependent.repo 787 | && dependency.number 788 | == updated_dependent.number 789 | { 790 | record_needs_update = true; 791 | log::info!( 792 | "Updating {} 's dependency on {} to SHA {}", 793 | dependency.html_url, 794 | dependent_of_dependent.html_url, 795 | updated_sha, 796 | ); 797 | dependency.sha = updated_sha.clone(); 798 | updated_dependencies 799 | .insert(&updated_dependent.html_url); 800 | } 801 | } 802 | if dependency.owner == pr.base.repo.owner.login 803 | && dependency.repo == pr.base.repo.name 804 | && dependency.number == pr.number 805 | { 806 | should_be_included_in_check = true; 807 | } 808 | } 809 | Some(dependencies) 810 | } else { 811 | None 812 | }; 813 | 814 | if record_needs_update { 815 | if let Err(err) = db 816 | .put( 817 | &key, 818 | bincode::serialize(&dependent_of_dependent) 819 | .context(error::Bincode)?, 820 | ) 821 | .context(error::Db) 822 | { 823 | log::error!( 824 | "Failed to update a dependent to {:?} due to {:?}", 825 | dependent_of_dependent, 826 | err 827 | ); 828 | let _ = cleanup_merge_request( 829 | state, 830 | &dependent_of_dependent.sha, 831 | &dependent_of_dependent.owner, 832 | &dependent_of_dependent.repo, 833 | dependent_of_dependent.number, 834 | &MergeRequestCleanupReason::Error, 835 | ) 836 | .await; 837 | handle_error( 838 | PullRequestMergeCancelOutcome::WasCancelled, 839 | Error::Message { 840 | msg: format!( 841 | "Failed to update database references of {:?} in dependent {} after the merge of {}", 842 | updated_dependencies, 843 | dependent_of_dependent.html_url, 844 | pr.html_url 845 | ), 846 | } 847 | .with_pull_request_details(PullRequestDetails { 848 | owner: (&dependent_of_dependent.owner).into(), 849 | repo: (&dependent_of_dependent.repo).into(), 850 | number: dependent_of_dependent.number, 851 | }), 852 | state, 853 | ) 854 | .await; 855 | } else { 856 | dependents_to_check.insert(key, dependent_of_dependent); 857 | } 858 | } else if should_be_included_in_check { 859 | dependents_to_check.insert(key, dependent_of_dependent); 860 | } 861 | } 862 | Err(err) => { 863 | log::error!( 864 | "Failed to deserialize key {} from the database due to {:?}", 865 | String::from_utf8_lossy(&key), 866 | err 867 | ); 868 | let _ = db.delete(&key); 869 | } 870 | }; 871 | } 872 | 873 | /* 874 | Step 4: Check the dependents collected in the previous step 875 | 876 | Because the PR passed as an argument to this function is merged and its 877 | dependents might have been merged in the previous steps, the dependents we 878 | collected (which might include dependents of the dependents which were just 879 | merged) might have become ready to be merged at this point.
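As a concrete example: if A depends on B and B depends on C, and C (the `pr` argument here) was just merged, then B may have been merged in Step 2, so both B's record and A (a dependent of a dependent) can end up in dependents_to_check and be re-evaluated below.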
880 | */ 881 | for dependent in dependents_to_check.into_values() { 882 | if let Err(err) = 883 | process_commit_checks_and_statuses(state, &dependent.sha).await 884 | { 885 | let _ = cleanup_merge_request( 886 | state, 887 | &dependent.sha, 888 | &dependent.owner, 889 | &dependent.repo, 890 | dependent.number, 891 | &MergeRequestCleanupReason::Error, 892 | ) 893 | .await; 894 | handle_error( 895 | PullRequestMergeCancelOutcome::WasCancelled, 896 | err, 897 | state, 898 | ) 899 | .await; 900 | } 901 | } 902 | 903 | Ok(()) 904 | } 905 | 906 | pub async fn handle_command( 907 | state: &AppState, 908 | cmd: &CommentCommand, 909 | pr: &GithubPullRequest, 910 | requested_by: &str, 911 | ) -> Result<()> { 912 | let AppState { gh_client, .. } = state; 913 | 914 | match cmd { 915 | // This command marks the start of the chain of merges. The PR where the 916 | // command was received will act as the starting point for resolving further 917 | // dependencies. 918 | CommentCommand::Merge(cmd) => { 919 | let mr = MergeRequest { 920 | sha: (&pr.head.sha).into(), 921 | owner: (&pr.base.repo.owner.login).into(), 922 | repo: (&pr.base.repo.name).into(), 923 | number: pr.number, 924 | html_url: (&pr.html_url).into(), 925 | requested_by: requested_by.into(), 926 | // Set "was_updated" from the start so that this branch will not be updated. 927 | // It's important for it not to be updated because the command issuer has 928 | // trusted the current commit, but not the ones coming after it (some 929 | // malicious actor might want to sneak in changes after the command starts). 930 | was_updated: true, 931 | // This is the starting point of the merge chain, hence no 932 | // dependencies are registered for it upfront 933 | dependencies: None, 934 | }; 935 | 936 | check_merge_is_allowed(state, pr, requested_by, &[]).await?; 937 | 938 | match cmd { 939 | MergeCommentCommand::Normal => { 940 | if is_ready_to_merge(state, pr).await? { 941 | match merge_pull_request(state, pr, requested_by) 942 | .await? 943 | { 944 | // If the merge failure will be solved later, then register the PR in the database so that 945 | // it'll eventually resume processing when later statuses arrive 946 | Err(Error::MergeFailureWillBeSolvedLater { 947 | msg, 948 | }) => { 949 | let msg = format!( 950 | "This PR cannot be merged **at the moment** due to: {}\n\nprocessbot expects that the problem will be solved automatically later and so the auto-merge process will be started. You can simply wait for now.\n\n", 951 | msg 952 | ); 953 | queue_merge_request( 954 | state, 955 | &mr, 956 | &MergeRequestQueuedMessage::Custom(&msg), 957 | ) 958 | .await?; 959 | return Err( 960 | Error::MergeFailureWillBeSolvedLater { 961 | msg, 962 | }, 963 | ); 964 | } 965 | Err(e) => return Err(e), 966 | _ => (), 967 | } 968 | } else { 969 | queue_merge_request( 970 | state, 971 | &mr, 972 | &MergeRequestQueuedMessage::Default, 973 | ) 974 | .await?; 975 | return Ok(()); 976 | } 977 | } 978 | MergeCommentCommand::Force => { 979 | match merge_pull_request(state, pr, requested_by).await? { 980 | // Even if the merge failure can be solved later, it does not matter because `merge force` is 981 | // supposed to be immediate. We should give up here and yield the error message.
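// For example, a "bot merge force" issued while statuses are still pending reaches
// this arm and reports the failure right away instead of queueing the merge request.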
982 | Err(Error::MergeFailureWillBeSolvedLater { msg }) => { 983 | return Err(Error::Message { msg }) 984 | } 985 | Err(e) => return Err(e), 986 | _ => (), 987 | } 988 | } 989 | } 990 | 991 | process_dependents_after_merge(state, pr, requested_by).await 992 | } 993 | CommentCommand::CancelMerge => { 994 | log::info!("Deleting merge request for {}", pr.html_url); 995 | 996 | cleanup_merge_request( 997 | state, 998 | &pr.head.sha, 999 | &pr.base.repo.owner.login, 1000 | &pr.base.repo.name, 1001 | pr.number, 1002 | &MergeRequestCleanupReason::Cancelled, 1003 | ) 1004 | .await?; 1005 | 1006 | if let Err(err) = gh_client 1007 | .create_issue_comment( 1008 | &pr.base.repo.owner.login, 1009 | &pr.base.repo.name, 1010 | pr.number, 1011 | "Merge cancelled.", 1012 | ) 1013 | .await 1014 | { 1015 | log::error!( 1016 | "Failed to post comment on {} due to {}", 1017 | pr.html_url, 1018 | err 1019 | ); 1020 | } 1021 | 1022 | Ok(()) 1023 | } 1024 | CommentCommand::Rebase => { 1025 | let outcome = rebase( 1026 | state, 1027 | &pr.base.repo.owner.login, 1028 | &pr.base.repo.name, 1029 | &pr.base.ref_field, 1030 | &pr.head.repo.owner.login, 1031 | &pr.head.repo.name, 1032 | &pr.head.ref_field, 1033 | ) 1034 | .await?; 1035 | 1036 | if let Err(err) = gh_client 1037 | .create_issue_comment( 1038 | &pr.base.repo.owner.login, 1039 | &pr.base.repo.name, 1040 | pr.number, 1041 | match outcome { 1042 | RebaseOutcome::UpToDate => { 1043 | "Branch is already up-to-date" 1044 | } 1045 | RebaseOutcome::Pushed => "Rebased", 1046 | }, 1047 | ) 1048 | .await 1049 | { 1050 | log::error!( 1051 | "Failed to post comment on {} due to {}", 1052 | pr.html_url, 1053 | err 1054 | ); 1055 | } 1056 | 1057 | Ok(()) 1058 | } 1059 | } 1060 | } 1061 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use snafu::Snafu; 2 | 3 | use crate::core::{AppState, PullRequestMergeCancelOutcome}; 4 | 5 | #[derive(Debug)] 6 | pub struct PullRequestDetails { 7 | pub owner: String, 8 | pub repo: String, 9 | pub number: i64, 10 | } 11 | 12 | #[derive(PartialEq, Eq, Debug, Clone)] 13 | pub struct PullRequestDetailsWithHtmlUrl { 14 | pub html_url: String, 15 | pub owner: String, 16 | pub repo: String, 17 | pub number: i64, 18 | } 19 | 20 | #[derive(Debug, Snafu)] 21 | #[snafu(visibility = "pub")] 22 | pub enum Error { 23 | #[snafu(display("WithIssue: {}", source))] 24 | WithPullRequestDetails { 25 | source: Box, 26 | details: PullRequestDetails, 27 | }, 28 | 29 | #[snafu(display("Checks failed for {}", commit_sha))] 30 | ChecksFailed { 31 | commit_sha: String, 32 | }, 33 | 34 | #[snafu(display("Statuses failed for {}", commit_sha))] 35 | StatusesFailed { 36 | commit_sha: String, 37 | }, 38 | 39 | #[snafu(display("Head SHA changed from {} to {}", expected, actual))] 40 | HeadChanged { 41 | expected: String, 42 | actual: String, 43 | }, 44 | 45 | #[snafu(display("{}", msg))] 46 | Message { 47 | msg: String, 48 | }, 49 | 50 | #[snafu(display("Status code: {}\nBody:\n{:#?}", status, body,))] 51 | Response { 52 | status: reqwest::StatusCode, 53 | body: serde_json::Value, 54 | }, 55 | 56 | #[snafu(display("Http: {}", source))] 57 | Http { 58 | source: reqwest::Error, 59 | }, 60 | 61 | #[snafu(display("Tokio: {}", source))] 62 | Tokio { 63 | source: tokio::io::Error, 64 | }, 65 | 66 | #[snafu(display("Db: {}", source))] 67 | Db { 68 | source: rocksdb::Error, 69 | }, 70 | 71 | #[snafu(display("Utf8: {}", source))] 72 | Utf8 { 73 | 
source: std::string::FromUtf8Error, 74 | }, 75 | 76 | #[snafu(display("Json: {}", source))] 77 | Json { 78 | source: serde_json::Error, 79 | }, 80 | 81 | Jwt { 82 | source: jsonwebtoken::errors::Error, 83 | }, 84 | 85 | #[snafu(display("Bincode: {}", source))] 86 | Bincode { 87 | source: bincode::Error, 88 | }, 89 | 90 | #[snafu(display( 91 | "Command '{}' failed with status {:?}; output: {}", 92 | cmd, 93 | status_code, 94 | err 95 | ))] 96 | CommandFailed { 97 | cmd: String, 98 | status_code: Option, 99 | err: String, 100 | }, 101 | 102 | #[snafu(display( 103 | "Encountered merge failure (would be solved later): {}", 104 | msg 105 | ))] 106 | MergeFailureWillBeSolvedLater { 107 | msg: String, 108 | }, 109 | } 110 | 111 | impl Error { 112 | pub fn with_pull_request_details( 113 | self, 114 | details: PullRequestDetails, 115 | ) -> Self { 116 | match self { 117 | Self::WithPullRequestDetails { .. } => self, 118 | _ => Self::WithPullRequestDetails { 119 | source: Box::new(self), 120 | details, 121 | }, 122 | } 123 | } 124 | pub fn stops_merge_attempt(&self) -> bool { 125 | match self { 126 | Self::WithPullRequestDetails { source, .. } => { 127 | source.stops_merge_attempt() 128 | } 129 | Self::MergeFailureWillBeSolvedLater { .. } => false, 130 | _ => true, 131 | } 132 | } 133 | } 134 | 135 | pub async fn handle_error( 136 | merge_cancel_outcome: PullRequestMergeCancelOutcome, 137 | err: Error, 138 | state: &AppState, 139 | ) { 140 | log::info!("handle_error: {}", err); 141 | match err { 142 | Error::MergeFailureWillBeSolvedLater { .. } => (), 143 | err => { 144 | if let Error::WithPullRequestDetails { 145 | source, 146 | details: 147 | PullRequestDetails { 148 | owner, 149 | repo, 150 | number, 151 | }, 152 | .. 153 | } = err 154 | { 155 | match *source { 156 | Error::MergeFailureWillBeSolvedLater { .. } => (), 157 | err => { 158 | let msg = { 159 | let description = format_error(state, err); 160 | let caption = match merge_cancel_outcome { 161 | PullRequestMergeCancelOutcome::ShaNotFound => "", 162 | PullRequestMergeCancelOutcome::WasCancelled => "Merge cancelled due to error.", 163 | PullRequestMergeCancelOutcome::WasNotCancelled => "Some error happened, but the merge was not cancelled (likely due to a bug).", 164 | }; 165 | format!("{} Error: {}", caption, description) 166 | }; 167 | if let Err(comment_post_err) = state 168 | .gh_client 169 | .create_issue_comment(&owner, &repo, number, &msg) 170 | .await 171 | { 172 | log::error!( 173 | "Error posting comment: {}", 174 | comment_post_err 175 | ); 176 | } 177 | } 178 | } 179 | } 180 | } 181 | } 182 | } 183 | 184 | fn format_error(_state: &AppState, err: Error) -> String { 185 | match err { 186 | Error::Response { 187 | ref body, 188 | ref status, 189 | } => format!( 190 | "Response error (status {}):
<pre><code>{}</code></pre>
", 191 | status, 192 | html_escape::encode_safe(&body.to_string()) 193 | ), 194 | _ => format!("{}", err), 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /src/git_ops.rs: -------------------------------------------------------------------------------- 1 | use snafu::ResultExt; 2 | 3 | use crate::{ 4 | core::AppState, 5 | error::*, 6 | shell::{ 7 | run_cmd, run_cmd_in_cwd, run_cmd_with_output, CommandMessage, 8 | CommandMessageConfiguration, 9 | }, 10 | types::Result, 11 | }; 12 | 13 | pub struct SetupContributorBranchData { 14 | pub contributor_remote: String, 15 | pub repo_dir: String, 16 | pub secrets_to_hide: Option>, 17 | pub contributor_remote_branch: String, 18 | } 19 | pub async fn setup_contributor_branch( 20 | state: &AppState, 21 | owner: &str, 22 | owner_repo: &str, 23 | owner_branch: &str, 24 | contributor: &str, 25 | contributor_repo: &str, 26 | contributor_branch: &str, 27 | ) -> Result { 28 | let AppState { 29 | gh_client, config, .. 30 | } = state; 31 | 32 | /* 33 | Constantly refresh the token in-between operations, preferably right before 34 | using it, for avoiding expiration issues. Some operations such as cloning 35 | repositories might take a long time, thus causing the token to be 36 | invalidated after it finishes. In any case, the token generation API should 37 | backed by a cache, thus there's no problem with spamming the refresh calls. 38 | */ 39 | 40 | let repo_dir = config.repos_path.join(owner_repo); 41 | let repo_dir_str = if let Some(repo_dir_str) = repo_dir.as_os_str().to_str() 42 | { 43 | repo_dir_str 44 | } else { 45 | return Err(Error::Message { 46 | msg: format!( 47 | "Path {:?} could not be converted to string", 48 | repo_dir 49 | ), 50 | }); 51 | }; 52 | 53 | if repo_dir.exists() { 54 | log::info!("{} is already cloned; skipping", owner_repo); 55 | } else { 56 | let token = gh_client.auth_token().await?; 57 | let secrets_to_hide = [token.as_str()]; 58 | let secrets_to_hide = Some(&secrets_to_hide[..]); 59 | let owner_repository_domain = 60 | format!("github.com/{}/{}.git", owner, owner_repo); 61 | let owner_remote_address = format!( 62 | "https://x-access-token:{}@{}", 63 | token, owner_repository_domain 64 | ); 65 | run_cmd_in_cwd( 66 | "git", 67 | &["clone", "-v", &owner_remote_address, repo_dir_str], 68 | CommandMessage::Configured(CommandMessageConfiguration { 69 | secrets_to_hide, 70 | are_errors_silenced: false, 71 | }), 72 | ) 73 | .await?; 74 | } 75 | 76 | run_cmd( 77 | "git", 78 | &["add", "."], 79 | &repo_dir, 80 | CommandMessage::Configured::<'_, &str>(CommandMessageConfiguration { 81 | secrets_to_hide: None, 82 | are_errors_silenced: false, 83 | }), 84 | ) 85 | .await?; 86 | run_cmd( 87 | "git", 88 | &["reset", "--hard"], 89 | &repo_dir, 90 | CommandMessage::Configured::<'_, &str>(CommandMessageConfiguration { 91 | secrets_to_hide: None, 92 | are_errors_silenced: false, 93 | }), 94 | ) 95 | .await?; 96 | 97 | // The contributor's remote entry might exist from a previous run (not expected for a fresh 98 | // clone). If that is the case, delete it so that it can be recreated. 
99 | if run_cmd( 100 | "git", 101 | &["remote", "get-url", contributor], 102 | &repo_dir, 103 | CommandMessage::Configured::<'_, &str>(CommandMessageConfiguration { 104 | secrets_to_hide: None, 105 | are_errors_silenced: true, 106 | }), 107 | ) 108 | .await 109 | .is_ok() 110 | { 111 | run_cmd( 112 | "git", 113 | &["remote", "remove", contributor], 114 | &repo_dir, 115 | CommandMessage::Configured::<'_, &str>( 116 | CommandMessageConfiguration { 117 | secrets_to_hide: None, 118 | are_errors_silenced: false, 119 | }, 120 | ), 121 | ) 122 | .await?; 123 | } 124 | 125 | let contributor_remote_branch = 126 | format!("{}/{}", contributor, contributor_branch); 127 | let token = gh_client.auth_token().await?; 128 | let secrets_to_hide = [token.as_str()]; 129 | let secrets_to_hide = Some(&secrets_to_hide[..]); 130 | let contributor_repository_domain = 131 | format!("github.com/{}/{}.git", contributor, contributor_repo); 132 | let contributor_remote_address = format!( 133 | "https://x-access-token:{}@{}", 134 | token, contributor_repository_domain 135 | ); 136 | 137 | run_cmd( 138 | "git", 139 | &["remote", "add", contributor, &contributor_remote_address], 140 | &repo_dir, 141 | CommandMessage::Configured(CommandMessageConfiguration { 142 | secrets_to_hide, 143 | are_errors_silenced: false, 144 | }), 145 | ) 146 | .await?; 147 | run_cmd( 148 | "git", 149 | &["fetch", contributor, contributor_branch], 150 | &repo_dir, 151 | CommandMessage::Configured(CommandMessageConfiguration { 152 | secrets_to_hide, 153 | are_errors_silenced: false, 154 | }), 155 | ) 156 | .await?; 157 | 158 | // The contributor's branch might exist from a previous run (not expected for a fresh clone). 159 | // If so, delete it so that it can be recreated. 160 | // Before deleting the branch, it's first required to checkout to a detached SHA so that any 161 | // branch can be deleted without problems (e.g. the branch we're trying to deleted might be the 162 | // one that is currently active, and so deleting it would fail). 163 | let head_sha_output = run_cmd_with_output( 164 | "git", 165 | &["rev-parse", "HEAD"], 166 | &repo_dir, 167 | CommandMessage::Configured(CommandMessageConfiguration { 168 | secrets_to_hide, 169 | are_errors_silenced: false, 170 | }), 171 | ) 172 | .await?; 173 | run_cmd( 174 | "git", 175 | &[ 176 | "checkout", 177 | String::from_utf8(head_sha_output.stdout) 178 | .context(Utf8)? 
179 | .trim(), 180 | ], 181 | &repo_dir, 182 | CommandMessage::Configured(CommandMessageConfiguration { 183 | secrets_to_hide, 184 | are_errors_silenced: true, 185 | }), 186 | ) 187 | .await?; 188 | let _ = run_cmd( 189 | "git", 190 | &["branch", "-D", contributor_branch], 191 | &repo_dir, 192 | CommandMessage::Configured(CommandMessageConfiguration { 193 | secrets_to_hide, 194 | are_errors_silenced: true, 195 | }), 196 | ) 197 | .await; 198 | run_cmd( 199 | "git", 200 | &["checkout", "--track", &contributor_remote_branch], 201 | &repo_dir, 202 | CommandMessage::Configured(CommandMessageConfiguration { 203 | secrets_to_hide, 204 | are_errors_silenced: false, 205 | }), 206 | ) 207 | .await?; 208 | 209 | let owner_remote = "origin"; 210 | let owner_remote_branch = format!("{}/{}", owner_remote, owner_branch); 211 | 212 | let token = gh_client.auth_token().await?; 213 | let secrets_to_hide = [token.as_str()]; 214 | let secrets_to_hide = Some(&secrets_to_hide[..]); 215 | let owner_repository_domain = 216 | format!("github.com/{}/{}.git", owner, owner_repo); 217 | let owner_remote_address = format!( 218 | "https://x-access-token:{}@{}", 219 | token, owner_repository_domain 220 | ); 221 | run_cmd( 222 | "git", 223 | &["remote", "set-url", owner_remote, &owner_remote_address], 224 | &repo_dir, 225 | CommandMessage::Configured(CommandMessageConfiguration { 226 | secrets_to_hide, 227 | are_errors_silenced: false, 228 | }), 229 | ) 230 | .await?; 231 | run_cmd( 232 | "git", 233 | &["fetch", owner_remote, owner_branch], 234 | &repo_dir, 235 | CommandMessage::Configured(CommandMessageConfiguration { 236 | secrets_to_hide, 237 | are_errors_silenced: false, 238 | }), 239 | ) 240 | .await?; 241 | 242 | // Create master merge commit before updating packages 243 | run_cmd( 244 | "git", 245 | &["merge", &owner_remote_branch, "--no-ff", "--no-edit"], 246 | &repo_dir, 247 | CommandMessage::Configured(CommandMessageConfiguration { 248 | secrets_to_hide, 249 | are_errors_silenced: false, 250 | }), 251 | ) 252 | .await?; 253 | 254 | Ok(SetupContributorBranchData { 255 | contributor_remote: contributor.into(), 256 | repo_dir: repo_dir_str.into(), 257 | contributor_remote_branch, 258 | secrets_to_hide: secrets_to_hide.map(|secrets_to_hide| { 259 | secrets_to_hide.iter().map(|str| str.to_string()).collect() 260 | }), 261 | }) 262 | } 263 | 264 | pub enum RebaseOutcome { 265 | UpToDate, 266 | Pushed, 267 | } 268 | pub async fn rebase( 269 | state: &AppState, 270 | owner: &str, 271 | owner_repo: &str, 272 | owner_branch: &str, 273 | contributor: &str, 274 | contributor_repo: &str, 275 | contributor_branch: &str, 276 | ) -> Result { 277 | let SetupContributorBranchData { 278 | contributor_remote, 279 | repo_dir, 280 | secrets_to_hide, 281 | .. 
282 | } = &setup_contributor_branch( 283 | state, 284 | owner, 285 | owner_repo, 286 | owner_branch, 287 | contributor, 288 | contributor_repo, 289 | contributor_branch, 290 | ) 291 | .await?; 292 | let secrets_to_hide = secrets_to_hide.as_ref().map(|vec| &vec[..]); 293 | 294 | let push_output = run_cmd_with_output( 295 | "git", 296 | &[ 297 | "push", 298 | "--porcelain", 299 | contributor_remote, 300 | contributor_branch, 301 | ], 302 | &repo_dir, 303 | CommandMessage::Configured(CommandMessageConfiguration { 304 | secrets_to_hide, 305 | are_errors_silenced: false, 306 | }), 307 | ) 308 | .await?; 309 | let push_output = String::from_utf8(push_output.stdout).context(Utf8)?; 310 | let push_output = push_output.trim(); 311 | log::info!("rebase push_output: {:?}", push_output); 312 | 313 | for line in push_output.lines() { 314 | if line.ends_with("[up to date]") { 315 | return Ok(RebaseOutcome::UpToDate); 316 | } 317 | } 318 | 319 | Ok(RebaseOutcome::Pushed) 320 | } 321 | -------------------------------------------------------------------------------- /src/github/client/commit.rs: -------------------------------------------------------------------------------- 1 | use super::GithubClient; 2 | use crate::{github::*, types::Result}; 3 | 4 | impl GithubClient { 5 | pub async fn statuses( 6 | &self, 7 | owner: &str, 8 | repo: &str, 9 | sha: &str, 10 | ) -> Result> { 11 | let mut page = 1; 12 | const PER_PAGE_MAX: usize = 100; 13 | 14 | let mut statuses = vec![]; 15 | loop { 16 | let url = format!( 17 | "{}/repos/{}/{}/statuses/{}?per_page={}&page={}", 18 | self.github_api_url, owner, repo, sha, PER_PAGE_MAX, page 19 | ); 20 | let page_statuses = 21 | self.get::>(url).await?; 22 | 23 | let should_break = page_statuses.len() < PER_PAGE_MAX; 24 | 25 | statuses.extend(page_statuses); 26 | 27 | if should_break { 28 | break; 29 | } 30 | 31 | page += 1; 32 | } 33 | 34 | Ok(statuses) 35 | } 36 | 37 | pub async fn check_runs( 38 | &self, 39 | owner: &str, 40 | repo: &str, 41 | sha: &str, 42 | ) -> Result> { 43 | let mut page = 1; 44 | const PER_PAGE_MAX: usize = 100; 45 | 46 | let mut check_runs = vec![]; 47 | loop { 48 | let url = format!( 49 | "{}/repos/{}/{}/commits/{}/check-runs?per_page={}&page={}", 50 | self.github_api_url, owner, repo, sha, PER_PAGE_MAX, page 51 | ); 52 | 53 | let page_check_runs = 54 | self.get::(url).await?; 55 | 56 | let should_break = page_check_runs.check_runs.len() < PER_PAGE_MAX; 57 | 58 | check_runs.extend(page_check_runs.check_runs); 59 | 60 | if should_break { 61 | break; 62 | } 63 | 64 | page += 1; 65 | } 66 | 67 | Ok(check_runs) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/github/client/file.rs: -------------------------------------------------------------------------------- 1 | use super::GithubClient; 2 | use crate::{github::*, types::Result}; 3 | 4 | impl GithubClient { 5 | pub async fn contents( 6 | &self, 7 | owner: &str, 8 | repo: &str, 9 | path: &str, 10 | ref_field: &str, 11 | ) -> Result { 12 | let url = &format!( 13 | "{}/repos/{}/{}/contents/{}?ref={}", 14 | self.github_api_url, owner, repo, path, ref_field 15 | ); 16 | self.get(url).await 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /src/github/client/issue.rs: -------------------------------------------------------------------------------- 1 | use super::GithubClient; 2 | use crate::types::Result; 3 | 4 | impl GithubClient { 5 | pub async fn create_issue_comment( 6 | &self, 7 | owner: &str, 8 
| repo: &str, 9 | number: i64, 10 | comment: &str, 11 | ) -> Result<()> { 12 | let url = format!( 13 | "{}/repos/{}/{}/issues/{}/comments", 14 | self.github_api_url, owner, repo, number 15 | ); 16 | self.post_response(&url, &serde_json::json!({ "body": comment })) 17 | .await 18 | .map(|_| ()) 19 | } 20 | 21 | pub async fn acknowledge_issue_comment( 22 | &self, 23 | owner: &str, 24 | repo: &str, 25 | comment_id: i64, 26 | ) -> Result<()> { 27 | let url = format!( 28 | "{}/repos/{}/{}/issues/comments/{}/reactions", 29 | self.github_api_url, owner, repo, comment_id 30 | ); 31 | self.post_response(&url, &serde_json::json!({ "content": "+1" })) 32 | .await 33 | .map(|_| ()) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/github/client/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{borrow::Cow, time::SystemTime}; 2 | 3 | use chrono::{DateTime, Duration, Utc}; 4 | use reqwest::{header, IntoUrl, RequestBuilder, Response}; 5 | use serde::Serialize; 6 | use snafu::ResultExt; 7 | 8 | mod commit; 9 | mod file; 10 | mod issue; 11 | mod org; 12 | mod pull_request; 13 | 14 | use crate::{ 15 | config::MainConfig, 16 | error::{self, Error}, 17 | github, 18 | types::Result, 19 | }; 20 | 21 | pub struct GithubClient { 22 | client: reqwest::Client, 23 | private_key: Vec, 24 | installation_login: String, 25 | github_app_id: usize, 26 | github_api_url: String, 27 | } 28 | 29 | macro_rules! impl_methods_with_body { 30 | ($($method:ident : $method_response_fn:ident),*) => { 31 | $( 32 | pub async fn $method<'b, I, B, T>(&self, url: I, body: &B) -> Result 33 | where 34 | I: Into> + Clone, 35 | B: Serialize + Clone, 36 | T: serde::de::DeserializeOwned, 37 | { 38 | self.$method_response_fn(url, body) 39 | .await? 40 | .json::() 41 | .await 42 | .context(error::Http) 43 | } 44 | 45 | async fn $method_response_fn<'b, I, B>( 46 | &self, 47 | url: I, 48 | body: &B, 49 | ) -> Result 50 | where 51 | I: Into> + Clone, 52 | B: Serialize + Clone, 53 | { 54 | // retry up to 5 times if request times out 55 | let mut retries = 0; 56 | 'retry: loop { 57 | let res = self.execute( 58 | self.client 59 | .$method(&*url.clone().into()) 60 | .json(&body.clone()), 61 | ) 62 | .await; 63 | // retry if timeout 64 | if let Err(error::Error::Http { source: e, .. }) = res.as_ref() { 65 | if e.is_timeout() && retries < 5 { 66 | log::debug!("Request timed out; retrying"); 67 | retries += 1; 68 | continue 'retry; 69 | } 70 | } 71 | return res; 72 | } 73 | } 74 | 75 | )* 76 | } 77 | } 78 | 79 | async fn handle_response(response: Response) -> Result { 80 | log::debug!("response: {:?}", &response); 81 | 82 | let status = response.status(); 83 | if status.is_success() { 84 | Ok(response) 85 | } else { 86 | let text = response.text().await.context(error::Http)?; 87 | 88 | // Try to decode the response error as JSON otherwise store 89 | // it as plain text in a JSON object. 
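// Editorial example of the normalization performed below: a structured
// GitHub error body such as
//     {"message": "Not Found", "documentation_url": "..."}
// is kept as parsed JSON, while a plain-text body such as
//     Bad Gateway
// is wrapped into
//     {"error_message": "Bad Gateway"}
// so Error::Response always carries a serde_json::Value either way.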
90 | let body = if let Ok(value) = 91 | serde_json::from_str(&text).context(error::Json) 92 | { 93 | value 94 | } else { 95 | serde_json::json!({ "error_message": text }) 96 | }; 97 | 98 | error::Response { status, body }.fail() 99 | } 100 | } 101 | 102 | impl GithubClient { 103 | pub fn new(config: &MainConfig) -> Self { 104 | Self { 105 | private_key: config.private_key.clone(), 106 | installation_login: config.installation_login.clone(), 107 | github_app_id: config.github_app_id, 108 | github_api_url: config.github_api_url.clone(), 109 | client: reqwest::Client::default(), 110 | } 111 | } 112 | 113 | impl_methods_with_body! { 114 | post: post_response, 115 | put: put_response, 116 | patch: patch_response, 117 | delete: delete_response 118 | } 119 | 120 | pub async fn auth_token(&self) -> Result { 121 | log::debug!("auth_token"); 122 | 123 | lazy_static::lazy_static! { 124 | static ref TOKEN_CACHE: parking_lot::Mutex, String)>> = { 125 | parking_lot::Mutex::new(None) 126 | }; 127 | } 128 | 129 | // Add some padding for avoiding token use just as it's about to expire 130 | let installation_lease_with_padding = 131 | Utc::now() + Duration::minutes(10); 132 | let token = { 133 | TOKEN_CACHE 134 | .lock() 135 | .as_ref() 136 | // Ensure token is not expired if set. 137 | .filter(|(time, _)| time > &installation_lease_with_padding) 138 | .map(|(_, token)| token.clone()) 139 | }; 140 | 141 | if let Some(token) = token { 142 | return Ok(token); 143 | } 144 | 145 | let installations: Vec = self 146 | .jwt_get(&format!("{}/app/installations", self.github_api_url,)) 147 | .await?; 148 | 149 | let installation = if let Some(installation) = installations 150 | .iter() 151 | .find(|inst| inst.account.login == self.installation_login) 152 | { 153 | installation 154 | } else { 155 | return Err(Error::Message { 156 | msg: format!( 157 | "Installation for login {} could not be found", 158 | self.installation_login 159 | ), 160 | }); 161 | }; 162 | 163 | let install_token: github::GithubInstallationToken = self 164 | .jwt_post( 165 | &format!( 166 | "{}/app/installations/{}/access_tokens", 167 | self.github_api_url, installation.id 168 | ), 169 | &serde_json::json!({}), 170 | ) 171 | .await?; 172 | 173 | let default_exp = Utc::now() + Duration::minutes(40); 174 | let expiry = install_token 175 | .expires_at 176 | .map_or(default_exp, |t| t.parse().unwrap_or(default_exp)); 177 | let token = install_token.token; 178 | 179 | *TOKEN_CACHE.lock() = Some((expiry, token.clone())); 180 | 181 | Ok(token) 182 | } 183 | 184 | async fn execute(&self, builder: RequestBuilder) -> Result { 185 | let request = builder 186 | .bearer_auth(self.auth_token().await?) 
187 | .header( 188 | header::ACCEPT, 189 | "application/vnd.github.starfox-preview+json", 190 | ) 191 | .header( 192 | header::ACCEPT, 193 | "application/vnd.github.inertia-preview+json", 194 | ) 195 | .header( 196 | header::ACCEPT, 197 | "application/vnd.github.antiope-preview+json", 198 | ) 199 | .header( 200 | header::ACCEPT, 201 | "application/vnd.github.machine-man-preview+json", 202 | ) 203 | .header(header::USER_AGENT, "parity-processbot/0.0.1") 204 | .timeout(std::time::Duration::from_secs(10)) 205 | .build() 206 | .context(error::Http)?; 207 | 208 | log::debug!("request: {:?}", &request); 209 | handle_response( 210 | self.client.execute(request).await.context(error::Http)?, 211 | ) 212 | .await 213 | } 214 | 215 | fn create_jwt(&self) -> Result { 216 | log::debug!("create_jwt"); 217 | const TEN_MINS_IN_SECONDS: u64 = 10 * 60; 218 | let iat = SystemTime::now() 219 | .duration_since(SystemTime::UNIX_EPOCH) 220 | .unwrap() 221 | .as_secs(); 222 | 223 | let body = serde_json::json!({ 224 | "iat": iat, 225 | "exp": iat + TEN_MINS_IN_SECONDS, 226 | "iss": self.github_app_id, 227 | }); 228 | 229 | jsonwebtoken::encode( 230 | &jsonwebtoken::Header::new(jsonwebtoken::Algorithm::RS256), 231 | &body, 232 | &jsonwebtoken::EncodingKey::from_rsa_pem(&self.private_key) 233 | .expect("private key should be RSA pem"), 234 | ) 235 | .context(error::Jwt) 236 | } 237 | 238 | async fn jwt_execute(&self, builder: RequestBuilder) -> Result { 239 | log::debug!("jwt_execute"); 240 | let response = builder 241 | .bearer_auth(&self.create_jwt()?) 242 | .header( 243 | header::ACCEPT, 244 | "application/vnd.github.machine-man-preview+json", 245 | ) 246 | .header(header::USER_AGENT, "parity-processbot/0.0.1") 247 | .timeout(std::time::Duration::from_secs(10)) 248 | .send() 249 | .await 250 | .context(error::Http)?; 251 | 252 | handle_response(response).await 253 | } 254 | 255 | async fn jwt_get(&self, url: impl IntoUrl) -> Result 256 | where 257 | T: serde::de::DeserializeOwned, 258 | { 259 | log::debug!("jwt_get"); 260 | self.jwt_execute(self.client.get(url)) 261 | .await? 262 | .json::() 263 | .await 264 | .context(error::Http) 265 | } 266 | 267 | async fn jwt_post( 268 | &self, 269 | url: impl IntoUrl, 270 | body: &impl serde::Serialize, 271 | ) -> Result 272 | where 273 | T: serde::de::DeserializeOwned, 274 | { 275 | log::debug!("jwt_post"); 276 | self.jwt_execute(self.client.post(url).json(body)) 277 | .await? 278 | .json::() 279 | .await 280 | .context(error::Http) 281 | } 282 | 283 | async fn get<'b, I, T>(&self, url: I) -> Result 284 | where 285 | I: Into> + Clone, 286 | T: serde::de::DeserializeOwned + core::fmt::Debug, 287 | { 288 | self.get_response(url, serde_json::json!({})) 289 | .await? 290 | .json::() 291 | .await 292 | .context(error::Http) 293 | } 294 | 295 | pub async fn get_status<'b, I>(&self, url: I) -> Result 296 | where 297 | I: Into> + Clone, 298 | { 299 | let res = self 300 | .get_response(url, serde_json::json!({})) 301 | .await? 
302 | .status() 303 | .as_u16(); 304 | Ok(res) 305 | } 306 | 307 | async fn get_response<'b, I, P>( 308 | &self, 309 | url: I, 310 | params: P, 311 | ) -> Result 312 | where 313 | I: Into> + Clone, 314 | P: Serialize + Clone, 315 | { 316 | log::debug!("get_response"); 317 | // retry up to 5 times if request times out 318 | let mut retries = 0; 319 | 'retry: loop { 320 | let res = self 321 | .execute( 322 | self.client.get(&*url.clone().into()).json(¶ms.clone()), 323 | ) 324 | .await; 325 | // retry if timeout 326 | if let Err(error::Error::Http { source: e, .. }) = res.as_ref() { 327 | if e.is_timeout() && retries < 5 { 328 | log::debug!("Request timed out; retrying"); 329 | retries += 1; 330 | continue 'retry; 331 | } 332 | } 333 | return res; 334 | } 335 | } 336 | } 337 | -------------------------------------------------------------------------------- /src/github/client/org.rs: -------------------------------------------------------------------------------- 1 | use super::GithubClient; 2 | use crate::types::Result; 3 | 4 | impl GithubClient { 5 | pub async fn org_member(&self, org: &str, username: &str) -> Result { 6 | let url = &format!( 7 | "{}/orgs/{}/members/{}", 8 | self.github_api_url, org, username 9 | ); 10 | let status = self.get_status(url).await?; 11 | // https://docs.github.com/en/rest/orgs/members#check-organization-membership-for-a-user--code-samples 12 | Ok(status == 204) 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/github/client/pull_request.rs: -------------------------------------------------------------------------------- 1 | use super::GithubClient; 2 | use crate::{ 3 | companion::CompanionReferenceTrailItem, 4 | config::MainConfig, 5 | error::Error, 6 | github::*, 7 | merge_request::{MergeRequest, MergeRequestDependency}, 8 | types::Result, 9 | }; 10 | 11 | impl GithubClient { 12 | pub async fn pull_request( 13 | &self, 14 | owner: &str, 15 | repo: &str, 16 | number: i64, 17 | ) -> Result { 18 | self.get(format!( 19 | "{}/repos/{}/{}/pulls/{}", 20 | self.github_api_url, owner, repo, number 21 | )) 22 | .await 23 | } 24 | 25 | pub async fn merge_pull_request( 26 | &self, 27 | owner: &str, 28 | repo: &str, 29 | number: i64, 30 | head_sha: &str, 31 | ) -> Result<()> { 32 | let url = format!( 33 | "{}/repos/{}/{}/pulls/{}/merge", 34 | self.github_api_url, owner, repo, number 35 | ); 36 | let params = serde_json::json!({ 37 | "sha": head_sha, 38 | "merge_method": "squash" 39 | }); 40 | self.put_response(&url, ¶ms).await.map(|_| ()) 41 | } 42 | 43 | pub async fn resolve_pr_dependents( 44 | &self, 45 | config: &MainConfig, 46 | pr: &GithubPullRequest, 47 | requested_by: &str, 48 | companion_reference_trail: &[CompanionReferenceTrailItem], 49 | ) -> Result>, Error> { 50 | let companions = 51 | match pr.parse_all_companions(companion_reference_trail) { 52 | Some(companions) => companions, 53 | None => return Ok(None), 54 | }; 55 | 56 | let parent_dependency = MergeRequestDependency { 57 | sha: (&pr.head.sha).into(), 58 | owner: (&pr.base.repo.owner.login).into(), 59 | repo: (&pr.base.repo.name).into(), 60 | number: pr.number, 61 | html_url: (&pr.html_url).into(), 62 | is_directly_referenced: true, 63 | }; 64 | let dependents = 65 | // If there's only one companion, then it can't possibly depend on another companion 66 | if let [comp] = &*companions { 67 | let comp_pr = self 68 | .pull_request(&comp.owner, &comp.repo, comp.number) 69 | .await?; 70 | vec![MergeRequest { 71 | was_updated: false, 72 | sha: 
comp_pr.head.sha, 73 | owner: comp_pr.base.repo.owner.login, 74 | repo: comp_pr.base.repo.name, 75 | number: comp_pr.number, 76 | html_url: comp_pr.html_url, 77 | requested_by: requested_by.into(), 78 | dependencies: Some(vec![parent_dependency]), 79 | }] 80 | } else { 81 | let base_dependencies = vec![parent_dependency]; 82 | 83 | let mut dependents = vec![]; 84 | for comp in &companions { 85 | // Prevent duplicate dependencies in case of error in user input 86 | if comp.repo == pr.base.repo.owner.login { 87 | continue; 88 | } 89 | 90 | // Fetch the companion's lockfile in order to detect its dependencies 91 | let comp_pr = self 92 | .pull_request(&comp.owner, &comp.repo, comp.number) 93 | .await?; 94 | let comp_owner = &comp_pr.base.repo.owner.login; 95 | let comp_repo = &comp_pr.base.repo.name; 96 | 97 | let comp_lockfile = { 98 | let lockfile_content = self 99 | .contents( 100 | comp_owner, 101 | comp_repo, 102 | "Cargo.lock", 103 | &comp_pr.head.sha, 104 | ) 105 | .await?; 106 | let txt_encoded = base64::decode( 107 | &lockfile_content.content.replace('\n', ""), 108 | ) 109 | .map_err(|err| Error::Message { 110 | msg: format!( 111 | "Failed to decode the API content for the lockfile of {}: {:?}", 112 | &comp_pr.html_url, err 113 | ), 114 | })?; 115 | let txt = String::from_utf8_lossy(&txt_encoded); 116 | txt.parse::().map_err(|err| { 117 | Error::Message { 118 | msg: format!( 119 | "Failed to parse lockfile of {}: {:?}", 120 | &comp_pr.html_url, err 121 | ), 122 | } 123 | })? 124 | }; 125 | 126 | let mut dependencies = base_dependencies.clone(); 127 | 128 | // Go through all the other companions to check if any of them is a dependency 129 | // of this companion 130 | 'to_next_other_companion: for other_comp in &companions 131 | { 132 | if &other_comp.repo == comp_repo || 133 | // Prevent duplicate dependencies in case of error in user input 134 | other_comp.repo == pr.base.repo.owner.login { 135 | continue; 136 | } 137 | let other_comp_github_url = format!( 138 | "{}/{}/{}{}", 139 | config.github_source_prefix, 140 | &other_comp.owner, &other_comp.repo, 141 | config.github_source_suffix 142 | ); 143 | for pkg in comp_lockfile.packages.iter() { 144 | if let Some(src) = pkg.source.as_ref() { 145 | if src.url().as_str() == other_comp_github_url { 146 | let other_comp_pr = self 147 | .pull_request( 148 | &other_comp.owner, 149 | &other_comp.repo, 150 | other_comp.number, 151 | ) 152 | .await?; 153 | dependencies.push(MergeRequestDependency { 154 | owner: other_comp_pr.base.repo.owner.login, 155 | repo: other_comp_pr.base.repo.name, 156 | sha: other_comp_pr.head.sha, 157 | number: other_comp_pr.number, 158 | html_url: other_comp_pr.html_url, 159 | is_directly_referenced: false 160 | }); 161 | continue 'to_next_other_companion; 162 | } 163 | } 164 | } 165 | } 166 | 167 | dependents.push(MergeRequest { 168 | was_updated: false, 169 | sha: comp_pr.head.sha, 170 | owner: comp_owner.into(), 171 | repo: comp_repo.into(), 172 | number: comp_pr.number, 173 | html_url: comp_pr.html_url, 174 | requested_by: requested_by.into(), 175 | dependencies: Some(dependencies), 176 | }) 177 | } 178 | 179 | dependents 180 | }; 181 | 182 | log::info!("Dependents of {}: {:?}", pr.html_url, dependents); 183 | Ok(Some(dependents)) 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /src/github/mod.rs: -------------------------------------------------------------------------------- 1 | use regex::Regex; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use crate::{ 5 
| bot::parse_bot_comment_from_text, 6 | companion::{parse_all_companions, CompanionReferenceTrailItem}, 7 | error::*, 8 | types::PlaceholderDeserializationItem, 9 | OWNER_AND_REPO_SEQUENCE, PR_HTML_URL_REGEX, 10 | }; 11 | 12 | mod client; 13 | 14 | pub trait HasPullRequestDetails { 15 | fn get_pull_request_details(&self) -> Option; 16 | } 17 | 18 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 19 | pub struct GithubPullRequest { 20 | pub url: String, 21 | pub html_url: String, 22 | pub number: i64, 23 | pub user: Option, 24 | pub body: Option, 25 | pub head: GithubPullRequestHead, 26 | pub base: GithubPullRequestBase, 27 | pub mergeable: Option, 28 | pub merged: bool, 29 | pub maintainer_can_modify: bool, 30 | } 31 | 32 | impl GithubPullRequest { 33 | pub fn parse_all_companions( 34 | &self, 35 | companion_reference_trail: &[CompanionReferenceTrailItem], 36 | ) -> Option> { 37 | let mut next_trail = 38 | Vec::with_capacity(companion_reference_trail.len() + 1); 39 | next_trail.extend_from_slice(companion_reference_trail); 40 | next_trail.push(CompanionReferenceTrailItem { 41 | owner: (&self.base.repo.owner.login).into(), 42 | repo: (&self.base.repo.name).into(), 43 | }); 44 | self.body 45 | .as_ref() 46 | .map(|body| parse_all_companions(&next_trail, body)) 47 | } 48 | } 49 | 50 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 51 | pub struct GithubFileContents { 52 | pub content: String, 53 | } 54 | 55 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 56 | pub struct GithubPullRequestBase { 57 | #[serde(rename = "ref")] 58 | pub ref_field: String, 59 | pub repo: GithubPullRequestBaseRepository, 60 | } 61 | 62 | #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] 63 | pub enum GithubUserType { 64 | User, 65 | Bot, 66 | #[serde(other)] 67 | Unknown, 68 | } 69 | 70 | #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] 71 | pub struct GithubUser { 72 | pub login: String, 73 | #[serde(rename = "type")] 74 | pub type_field: GithubUserType, 75 | } 76 | 77 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 78 | pub struct GithubRepository { 79 | pub name: String, 80 | pub full_name: String, 81 | pub owner: GithubUser, 82 | pub html_url: String, 83 | } 84 | 85 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 86 | pub struct GithubCommitStatus { 87 | pub id: i64, 88 | pub context: String, 89 | pub state: GithubCommitStatusState, 90 | pub description: Option, 91 | pub target_url: Option, 92 | } 93 | 94 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 95 | #[serde(rename_all = "snake_case")] 96 | pub enum GithubCommitStatusState { 97 | Success, 98 | Error, 99 | Failure, 100 | #[serde(other)] 101 | Unknown, 102 | } 103 | 104 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 105 | pub struct GithubInstallation { 106 | pub id: i64, 107 | pub account: GithubUser, 108 | } 109 | 110 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 111 | pub struct GithubInstallationToken { 112 | pub token: String, 113 | pub expires_at: Option, 114 | } 115 | 116 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 117 | #[serde(rename_all = "snake_case")] 118 | pub enum GithubIssueCommentAction { 119 | Created, 120 | #[serde(other)] 121 | Unknown, 122 | } 123 | 124 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 125 | pub struct GithubCheckRuns { 126 | pub check_runs: Vec, 127 | } 128 | 129 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 130 | pub struct GithubPullRequestHeadRepository { 131 | pub name: String, 
132 | pub owner: GithubUser, 133 | } 134 | 135 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 136 | pub struct GithubPullRequestHead { 137 | pub sha: String, 138 | pub repo: GithubPullRequestHeadRepository, 139 | #[serde(rename = "ref")] 140 | pub ref_field: String, 141 | } 142 | 143 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 144 | pub struct GithubPullRequestBaseRepository { 145 | pub name: String, 146 | pub owner: GithubUser, 147 | } 148 | 149 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 150 | #[serde(rename_all = "snake_case")] 151 | pub enum GithubCheckRunConclusion { 152 | Success, 153 | #[serde(other)] 154 | Unknown, 155 | } 156 | 157 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 158 | #[serde(rename_all = "snake_case")] 159 | pub enum GithubCheckRunStatus { 160 | Completed, 161 | #[serde(other)] 162 | Unknown, 163 | } 164 | 165 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 166 | pub struct GithubCheckRun { 167 | pub id: i64, 168 | pub name: String, 169 | pub status: GithubCheckRunStatus, 170 | pub conclusion: Option, 171 | pub head_sha: String, 172 | } 173 | 174 | #[derive(Debug, PartialEq, Eq, Deserialize)] 175 | pub struct GithubIssue { 176 | pub number: i64, 177 | pub html_url: String, 178 | pub pull_request: Option, 179 | } 180 | impl HasPullRequestDetails for GithubIssue { 181 | fn get_pull_request_details(&self) -> Option { 182 | parse_pull_request_details_from_url(&self.html_url) 183 | } 184 | } 185 | 186 | #[derive(PartialEq, Eq, Deserialize)] 187 | pub struct GithubIssueComment { 188 | pub id: i64, 189 | pub body: String, 190 | pub user: GithubUser, 191 | } 192 | 193 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 194 | #[serde(rename_all = "snake_case")] 195 | pub enum GithubWorkflowJobConclusion { 196 | #[serde(other)] 197 | Unknown, 198 | } 199 | 200 | #[derive(PartialEq, Eq, Deserialize)] 201 | pub struct GithubWorkflowJob { 202 | pub head_sha: String, 203 | pub conclusion: Option, 204 | } 205 | 206 | #[derive(Deserialize, PartialEq, Eq)] 207 | pub struct GithubIssueRepository { 208 | pub owner: GithubUser, 209 | pub name: String, 210 | } 211 | 212 | #[derive(PartialEq, Eq, Deserialize)] 213 | #[serde(untagged)] 214 | pub enum GithubWebhookPayload { 215 | IssueComment { 216 | action: GithubIssueCommentAction, 217 | issue: GithubIssue, 218 | comment: GithubIssueComment, 219 | repository: GithubIssueRepository, 220 | }, 221 | CommitStatus { 222 | // FIXME: This payload also has a field `repository` for the repository where the status 223 | // originated from which should be used *together* with commit SHA for indexing pull requests. 224 | // Currently, because merge requests are indexed purely by their head SHA into the database, 225 | // there's no way to disambiguate between two different PRs in two different repositories with 226 | // the same head SHA. 
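// Editorial sketch of the composite key the FIXME above is asking for
// (hypothetical helper, not present in this codebase; records are currently
// keyed by head SHA alone):
//
//     fn merge_request_db_key(owner: &str, repo: &str, sha: &str) -> Vec<u8> {
//         format!("{}/{}/{}", owner, repo, sha).into_bytes()
//     }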
227 | sha: String, 228 | state: GithubCommitStatusState, 229 | }, 230 | CheckRun { 231 | check_run: GithubCheckRun, 232 | }, 233 | WorkflowJob { 234 | workflow_job: GithubWorkflowJob, 235 | }, 236 | } 237 | 238 | #[derive(Deserialize)] 239 | struct DetectUserCommentPullRequestPullRequest { 240 | pub html_url: Option, 241 | } 242 | 243 | #[derive(Deserialize)] 244 | struct DetectUserCommentPullRequestRepository { 245 | pub name: Option, 246 | pub full_name: Option, 247 | pub owner: Option, 248 | } 249 | 250 | #[derive(Deserialize)] 251 | struct DetectUserCommentPullRequestIssue { 252 | pub pull_request: Option, 253 | pub number: i64, 254 | } 255 | 256 | #[derive(Deserialize)] 257 | struct DetectUserCommentPullRequestComment { 258 | pub body: Option, 259 | } 260 | 261 | #[derive(Deserialize)] 262 | pub struct DetectUserCommentPullRequest { 263 | action: GithubIssueCommentAction, 264 | issue: Option, 265 | repository: Option, 266 | sender: Option, 267 | comment: Option, 268 | } 269 | 270 | impl HasPullRequestDetails for DetectUserCommentPullRequest { 271 | fn get_pull_request_details(&self) -> Option { 272 | if let DetectUserCommentPullRequest { 273 | action: GithubIssueCommentAction::Created, 274 | issue: 275 | Some(DetectUserCommentPullRequestIssue { 276 | number, 277 | pull_request: Some(pr), 278 | }), 279 | comment: 280 | Some(DetectUserCommentPullRequestComment { body: Some(body) }), 281 | repository, 282 | .. 283 | } = self 284 | { 285 | match self.sender { 286 | Some(GithubUser { 287 | type_field: GithubUserType::Bot, 288 | .. 289 | }) => None, 290 | _ => { 291 | parse_bot_comment_from_text(body)?; 292 | 293 | if let Some(DetectUserCommentPullRequestRepository { 294 | name: Some(name), 295 | owner: Some(GithubUser { login, .. }), 296 | .. 297 | }) = repository 298 | { 299 | Some(PullRequestDetails { 300 | owner: login.into(), 301 | repo: name.into(), 302 | number: *number, 303 | }) 304 | } else { 305 | None 306 | } 307 | .or_else(|| { 308 | if let Some(DetectUserCommentPullRequestRepository { 309 | full_name: Some(full_name), 310 | .. 311 | }) = repository 312 | { 313 | parse_repository_full_name(full_name).map( 314 | |(owner, repo)| PullRequestDetails { 315 | owner, 316 | repo, 317 | number: *number, 318 | }, 319 | ) 320 | } else { 321 | None 322 | } 323 | }) 324 | .or_else(|| { 325 | if let DetectUserCommentPullRequestPullRequest { 326 | html_url: Some(html_url), 327 | } = pr 328 | { 329 | parse_pull_request_details_from_url(html_url) 330 | } else { 331 | None 332 | } 333 | }) 334 | } 335 | } 336 | } else { 337 | None 338 | } 339 | } 340 | } 341 | 342 | fn parse_pull_request_details_from_url( 343 | pr_html_url: &str, 344 | ) -> Option { 345 | let re = Regex::new(PR_HTML_URL_REGEX!()).unwrap(); 346 | let matches = re.captures(pr_html_url)?; 347 | let owner = matches.name("owner")?.as_str().to_owned(); 348 | let repo = matches.name("repo")?.as_str().to_owned(); 349 | let number = matches 350 | .name("number")? 
351 | .as_str() 352 | .to_owned() 353 | .parse::() 354 | .ok()?; 355 | Some(PullRequestDetails { 356 | owner, 357 | repo, 358 | number, 359 | }) 360 | } 361 | 362 | /// full_name is org/repo 363 | fn parse_repository_full_name(full_name: &str) -> Option<(String, String)> { 364 | let parts: Vec<&str> = full_name.split('/').collect(); 365 | parts 366 | .first() 367 | .and_then(|owner| { 368 | parts.get(1).map(|repo_name| { 369 | Some((owner.to_string(), repo_name.to_string())) 370 | }) 371 | }) 372 | .flatten() 373 | } 374 | 375 | pub use client::GithubClient; 376 | -------------------------------------------------------------------------------- /src/gitlab.rs: -------------------------------------------------------------------------------- 1 | use reqwest::header::HeaderMap; 2 | use serde::Deserialize; 3 | 4 | use crate::{config::MainConfig, error::Error, types::Result}; 5 | 6 | impl MainConfig { 7 | pub fn get_gitlab_api_request_headers(&self) -> Result { 8 | let mut headers = HeaderMap::new(); 9 | headers.insert( 10 | "PRIVATE-TOKEN", 11 | self.gitlab_access_token 12 | .parse() 13 | .map_err(|_| Error::Message { 14 | msg: "Couldn't parse Gitlab Access Token as request header" 15 | .into(), 16 | })?, 17 | ); 18 | Ok(headers) 19 | } 20 | } 21 | 22 | #[derive(Deserialize, PartialEq, Eq, Debug)] 23 | #[serde(rename_all = "snake_case")] 24 | pub enum GitlabPipelineStatus { 25 | Created, 26 | WaitingForResource, 27 | Preparing, 28 | Pending, 29 | Running, 30 | Scheduled, 31 | #[serde(other)] 32 | Unknown, 33 | } 34 | 35 | #[derive(Deserialize, Debug)] 36 | pub struct GitlabJobPipeline { 37 | pub status: GitlabPipelineStatus, 38 | pub id: i64, 39 | pub project_id: i64, 40 | } 41 | 42 | #[derive(Deserialize, Debug)] 43 | pub struct GitlabJob { 44 | pub pipeline: GitlabJobPipeline, 45 | pub name: String, 46 | } 47 | 48 | #[derive(Deserialize, Debug)] 49 | pub struct GitlabPipelineJob { 50 | pub name: String, 51 | } 52 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![allow(clippy::blocks_in_if_conditions)] 3 | #![allow(clippy::too_many_arguments)] 4 | 5 | pub mod macros; 6 | pub mod shell; 7 | #[macro_use] 8 | pub mod companion; 9 | pub mod config; 10 | pub mod constants; 11 | pub mod error; 12 | #[macro_use] 13 | pub mod github; 14 | pub mod bot; 15 | pub mod core; 16 | pub mod git_ops; 17 | pub mod gitlab; 18 | pub mod merge_request; 19 | pub mod server; 20 | pub mod types; 21 | pub mod vanity_service; 22 | -------------------------------------------------------------------------------- /src/logging/gke.rs: -------------------------------------------------------------------------------- 1 | // GKE stands for Google Kubernetes Engine 2 | 3 | use std::io::{self, Write}; 4 | 5 | use env_logger::fmt::Formatter; 6 | use log::Record; 7 | use serde::Serialize; 8 | 9 | #[derive(Serialize)] 10 | #[serde(rename_all = "UPPERCASE")] 11 | enum Severity { 12 | Error, 13 | Info, 14 | } 15 | 16 | #[derive(Serialize)] 17 | struct Log { 18 | pub severity: Severity, 19 | pub message: String, 20 | pub timestamp: chrono::DateTime, 21 | } 22 | 23 | pub fn format(fmt: &mut Formatter, record: &Record) -> io::Result<()> { 24 | writeln!( 25 | fmt, 26 | "{}", 27 | serde_json::to_string(&Log { 28 | severity: match record.level() { 29 | log::Level::Error => Severity::Error, 30 | _ => Severity::Info, 31 | }, 32 | message: format!("{}", record.args()), 33 | 
timestamp: chrono::Utc::now(), 34 | }) 35 | .unwrap_or_else(|_| format!( 36 | "ERROR: Unable to serialize {}", 37 | record.args() 38 | )) 39 | ) 40 | } 41 | -------------------------------------------------------------------------------- /src/logging/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod gke; 2 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! OWNER_AND_REPO_SEQUENCE { 3 | () => { 4 | r"(?P[^ \t\n]+)/(?P[^ \t\n]+)" 5 | }; 6 | } 7 | 8 | #[macro_export] 9 | macro_rules! PR_HTML_URL_REGEX { 10 | () => { 11 | concat!( 12 | r"(?Phttps://[^ \t\n]+/", 13 | OWNER_AND_REPO_SEQUENCE!(), 14 | r"/pull/(?P[[:digit:]]+))" 15 | ) 16 | }; 17 | } 18 | 19 | #[macro_export] 20 | macro_rules! COMPANION_PREFIX_REGEX { 21 | () => { 22 | r"companion[^[[:alpha:]]\n]*" 23 | }; 24 | } 25 | 26 | #[macro_export] 27 | macro_rules! COMPANION_LONG_REGEX { 28 | () => { 29 | concat!(COMPANION_PREFIX_REGEX!(), PR_HTML_URL_REGEX!()) 30 | }; 31 | } 32 | 33 | #[macro_export] 34 | macro_rules! COMPANION_SHORT_REGEX { 35 | () => { 36 | concat!( 37 | COMPANION_PREFIX_REGEX!(), 38 | OWNER_AND_REPO_SEQUENCE!(), 39 | r"#(?P[[:digit:]]+)" 40 | ) 41 | }; 42 | } 43 | 44 | #[macro_export] 45 | macro_rules! WEBHOOK_PARSING_ERROR_TEMPLATE { 46 | () => { 47 | "Webhook event parsing failed due to: 48 | 49 | ``` 50 | {} 51 | ``` 52 | 53 | Payload: 54 | 55 | ``` 56 | {} 57 | ``` 58 | " 59 | }; 60 | } 61 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs, 3 | net::{IpAddr, Ipv4Addr, SocketAddr}, 4 | path::Path, 5 | sync::Arc, 6 | }; 7 | 8 | use rocksdb::DB; 9 | use tokio::sync::Mutex; 10 | mod logging; 11 | use std::{thread, time::Duration}; 12 | 13 | use parity_processbot::{ 14 | bot::handle_github_payload, 15 | config::MainConfig, 16 | constants::*, 17 | core::{ 18 | process_commit_checks_and_statuses, AppState, 19 | PullRequestMergeCancelOutcome, 20 | }, 21 | error::{handle_error, Bincode}, 22 | github::*, 23 | merge_request::{ 24 | cleanup_merge_request, MergeRequest, MergeRequestCleanupReason, 25 | }, 26 | server, 27 | }; 28 | use snafu::ResultExt; 29 | 30 | fn main() -> anyhow::Result<()> { 31 | env_logger::from_env(env_logger::Env::default().default_filter_or("info")) 32 | .format(logging::gke::format) 33 | .init(); 34 | 35 | let config = MainConfig::from_env(); 36 | 37 | let socket = SocketAddr::new( 38 | IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 39 | config.webhook_port.parse::().expect("webhook port"), 40 | ); 41 | 42 | let db_version_path = 43 | Path::new(&config.db_path).join("__PROCESSBOT_VERSION__"); 44 | let is_at_current_db_version = match db_version_path.exists() { 45 | true => { 46 | let str = fs::read_to_string(&db_version_path)?; 47 | str == DATABASE_VERSION 48 | } 49 | false => false, 50 | }; 51 | if !is_at_current_db_version { 52 | log::info!( 53 | "Clearing database to start from version {}", 54 | DATABASE_VERSION 55 | ); 56 | for entry in fs::read_dir(&config.db_path)? 
{ 57 | let entry = entry?; 58 | if entry.path() == db_version_path { 59 | continue; 60 | } 61 | if entry.metadata()?.is_dir() { 62 | fs::remove_dir_all(entry.path())?; 63 | } else { 64 | fs::remove_file(entry.path())?; 65 | } 66 | } 67 | fs::write(db_version_path, DATABASE_VERSION)?; 68 | } 69 | 70 | let db = DB::open_default(&config.db_path)?; 71 | 72 | let gh_client = GithubClient::new(&config); 73 | 74 | let webhook_proxy_url = config.webhook_proxy_url.clone(); 75 | 76 | let app_state = Arc::new(Mutex::new(AppState { 77 | db, 78 | gh_client, 79 | config, 80 | })); 81 | 82 | // Poll for pending merge requests 83 | { 84 | let state = app_state.clone(); 85 | let rt = tokio::runtime::Builder::new_multi_thread() 86 | .enable_all() 87 | .build()?; 88 | thread::spawn(move || loop { 89 | log::info!("Acquiring poll lock"); 90 | 91 | rt.block_on(async { 92 | let state = &*state.lock().await; 93 | 94 | /* 95 | Set up a loop for reinitializing the DB's iterator since the operations 96 | performed in this loop might modify or delete multiple items from the 97 | database, thus potentially making the iteration not work according to 98 | expectations. 99 | */ 100 | let mut processed_mrs = vec![]; 101 | 'db_iteration_loop: loop { 102 | let db_iter = 103 | state.db.iterator(rocksdb::IteratorMode::Start); 104 | for (key, value) in db_iter { 105 | match bincode::deserialize::(&value) 106 | .context(Bincode) 107 | { 108 | Ok(mr) => { 109 | if processed_mrs.iter().any( 110 | |prev_mr: &MergeRequest| { 111 | mr.owner == prev_mr.owner 112 | && mr.repo == prev_mr.repo && mr.number 113 | == prev_mr.number 114 | }, 115 | ) { 116 | continue; 117 | } 118 | 119 | // It's only worthwhile to try merging this MR if it has no pending 120 | // dependencies 121 | if mr 122 | .dependencies 123 | .as_ref() 124 | .map(|vec| vec.is_empty()) 125 | .unwrap_or(true) 126 | { 127 | log::info!( 128 | "Attempting to resume merge request processing during poll: {:?}", 129 | mr 130 | ); 131 | 132 | if let Err(err) = 133 | process_commit_checks_and_statuses( 134 | state, &mr.sha, 135 | ) 136 | .await 137 | { 138 | let _ = cleanup_merge_request( 139 | state, 140 | &mr.sha, 141 | &mr.owner, 142 | &mr.repo, 143 | mr.number, 144 | &MergeRequestCleanupReason::Error, 145 | ) 146 | .await; 147 | handle_error( 148 | PullRequestMergeCancelOutcome::WasCancelled, 149 | err, 150 | state, 151 | ) 152 | .await; 153 | } 154 | 155 | processed_mrs.push(mr); 156 | continue 'db_iteration_loop; 157 | } 158 | } 159 | Err(err) => { 160 | log::error!( 161 | "Failed to deserialize key {} from the database due to {:?}", 162 | String::from_utf8_lossy(&key), 163 | err 164 | ); 165 | let _ = state.db.delete(&key); 166 | } 167 | } 168 | } 169 | break; 170 | } 171 | }); 172 | 173 | log::info!("Releasing poll lock"); 174 | thread::sleep(Duration::from_secs(10 * 60)); 175 | }); 176 | } 177 | 178 | let rt = tokio::runtime::Builder::new_multi_thread() 179 | .enable_all() 180 | .build()?; 181 | 182 | if let Some(webhook_proxy_url) = webhook_proxy_url { 183 | use eventsource::reqwest::Client; 184 | use reqwest::Url; 185 | 186 | log::info!("Connecting to webhook proxy at {}", webhook_proxy_url); 187 | let client = Client::new(Url::parse(&webhook_proxy_url).unwrap()); 188 | 189 | #[derive(serde::Deserialize)] 190 | struct SmeePayload { 191 | body: GithubWebhookPayload, 192 | } 193 | for event in client { 194 | let state = app_state.clone(); 195 | rt.block_on(async move { 196 | let event = event.unwrap(); 197 | 198 | if let Ok(payload) = 199 | 
serde_json::from_str::(event.data.as_str()) 200 | { 201 | log::info!("Acquiring lock"); 202 | let state = &*state.lock().await; 203 | let (merge_cancel_outcome, result) = 204 | handle_github_payload(payload.body, state).await; 205 | if let Err(err) = result { 206 | handle_error(merge_cancel_outcome, err, state).await; 207 | } 208 | log::info!("Releasing lock"); 209 | } else { 210 | match event.event_type.as_deref() { 211 | Some("ping") => (), 212 | Some("ready") => log::info!("Webhook proxy is ready!"), 213 | _ => log::info!("Not parsed: {:?}", event), 214 | } 215 | } 216 | }); 217 | } 218 | } else { 219 | rt.block_on(server::init(socket, app_state))?; 220 | } 221 | 222 | Ok(()) 223 | } 224 | -------------------------------------------------------------------------------- /src/merge_request.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use hyper::StatusCode as HttpStatusCode; 4 | use regex::RegexBuilder; 5 | use serde::{Deserialize, Serialize}; 6 | use snafu::ResultExt; 7 | 8 | use crate::{ 9 | companion::{ 10 | check_all_companions_are_mergeable, CompanionReferenceTrailItem, 11 | }, 12 | core::{ 13 | get_commit_checks, get_commit_statuses, process_dependents_after_merge, 14 | AppState, Status, 15 | }, 16 | error::{self, Error}, 17 | github::GithubPullRequest, 18 | types::Result, 19 | }; 20 | 21 | #[derive(Debug, Serialize, Deserialize, Clone)] 22 | #[repr(C)] 23 | pub struct MergeRequestDependency { 24 | pub sha: String, 25 | pub owner: String, 26 | pub repo: String, 27 | pub number: i64, 28 | pub html_url: String, 29 | pub is_directly_referenced: bool, 30 | } 31 | 32 | #[derive(Debug, Serialize, Deserialize, Clone)] 33 | #[repr(C)] 34 | pub struct MergeRequest { 35 | pub sha: String, 36 | pub was_updated: bool, 37 | pub owner: String, 38 | pub repo: String, 39 | pub number: i64, 40 | pub html_url: String, 41 | pub requested_by: String, 42 | pub dependencies: Option>, 43 | } 44 | 45 | pub enum MergeRequestCleanupReason<'a> { 46 | AfterMerge, 47 | AfterSHAUpdate(&'a String), 48 | Cancelled, 49 | Error, 50 | } 51 | // Removes a pull request from the database (e.g. when it has been merged) and 52 | // executes side-effects related to the kind of trigger for this function 53 | pub async fn cleanup_merge_request( 54 | state: &AppState, 55 | key_to_guarantee_deleted: &str, 56 | owner: &str, 57 | repo: &str, 58 | number: i64, 59 | reason: &MergeRequestCleanupReason<'_>, 60 | ) -> Result<()> { 61 | let AppState { db, .. 
} = state; 62 | 63 | let mut related_dependents = HashMap::new(); 64 | 65 | let db_iter = db.iterator(rocksdb::IteratorMode::Start); 66 | 'to_next_db_item: for (key, value) in db_iter { 67 | match bincode::deserialize::(&value) 68 | .context(error::Bincode) 69 | { 70 | Ok(mr) => { 71 | if mr.owner == owner && mr.repo == repo && mr.number == number { 72 | log::info!( 73 | "Cleaning up {:?} due to key {} of {}/{}/pull/{}", 74 | mr, 75 | key_to_guarantee_deleted, 76 | owner, 77 | repo, 78 | number 79 | ); 80 | 81 | if let Err(err) = db.delete(&key) { 82 | log::error!( 83 | "Failed to delete {} during cleanup_merge_request due to {:?}", 84 | String::from_utf8_lossy(&key), 85 | err 86 | ); 87 | } 88 | } 89 | 90 | if let Some(dependencies) = &mr.dependencies { 91 | for dependency in dependencies.iter() { 92 | if dependency.owner == owner 93 | && dependency.repo == repo && dependency.number 94 | == number 95 | { 96 | related_dependents.insert(mr.sha.clone(), mr); 97 | continue 'to_next_db_item; 98 | } 99 | } 100 | } 101 | } 102 | Err(err) => { 103 | log::error!( 104 | "Failed to deserialize key {} from the database due to {:?}", 105 | String::from_utf8_lossy(&key), 106 | err 107 | ); 108 | } 109 | } 110 | } 111 | 112 | // Sanity check: the key should have actually been deleted 113 | if db 114 | .get(key_to_guarantee_deleted) 115 | .context(error::Db)? 116 | .is_some() 117 | { 118 | return Err(Error::Message { 119 | msg: format!( 120 | "Key {} was not deleted from the database", 121 | key_to_guarantee_deleted 122 | ), 123 | }); 124 | } 125 | 126 | struct CleanedUpPullRequest { 127 | pub owner: String, 128 | pub repo: String, 129 | pub key_to_guarantee_deleted: String, 130 | pub number: i64, 131 | } 132 | lazy_static::lazy_static! { 133 | static ref CLEANUP_PR_RECURSION_PREVENTION: parking_lot::Mutex> = { 134 | parking_lot::Mutex::new(vec![]) 135 | }; 136 | } 137 | // Prevent mutual recursion since the side-effects might end up calling this 138 | // function again. We want to trigger the further side-effects at most once for 139 | // each pull request. 140 | { 141 | log::info!( 142 | "Acquiring cleanup_merge_request's recursion prevention lock" 143 | ); 144 | let mut cleaned_up_prs = CLEANUP_PR_RECURSION_PREVENTION.lock(); 145 | for pr in &*cleaned_up_prs { 146 | if pr.owner == owner 147 | && pr.repo == repo 148 | && pr.number == number 149 | && pr.key_to_guarantee_deleted == key_to_guarantee_deleted 150 | { 151 | log::info!( 152 | "Skipping side-effects of {}/{}/pull/{} (key {}) because they have already been processed", 153 | owner, 154 | repo, 155 | number, 156 | key_to_guarantee_deleted 157 | ); 158 | return Ok(()); 159 | } 160 | } 161 | cleaned_up_prs.push(CleanedUpPullRequest { 162 | owner: owner.into(), 163 | repo: repo.into(), 164 | key_to_guarantee_deleted: key_to_guarantee_deleted.into(), 165 | number, 166 | }); 167 | log::info!( 168 | "Releasing cleanup_merge_request's recursion prevention lock" 169 | ); 170 | } 171 | 172 | log::info!( 173 | "Related dependents of {}/{}/pull/{} (key {}): {:?}", 174 | owner, 175 | repo, 176 | number, 177 | key_to_guarantee_deleted, 178 | related_dependents 179 | ); 180 | 181 | match reason { 182 | MergeRequestCleanupReason::Error 183 | | MergeRequestCleanupReason::Cancelled => { 184 | for dependent in related_dependents.values() { 185 | // TODO: these cleanup_merge_request() might not be actually executed rn, poll them? 
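// Editorial note on the TODO above: cleanup_merge_request is an async fn, so
// the un-awaited call below only constructs a lazy future; binding it to
// `_result` never polls it, which is what the TODO is pointing at. A hedged
// sketch of one possible fix — box the recursive future (async recursion
// must be boxed, per rustc E0733) and await it:
//
//     let _ = Box::pin(cleanup_merge_request(
//         state,
//         &dependent.sha,
//         &dependent.owner,
//         &dependent.repo,
//         dependent.number,
//         reason,
//     ))
//     .await;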
186 | let _result = cleanup_merge_request( 187 | state, 188 | &dependent.sha, 189 | &dependent.owner, 190 | &dependent.repo, 191 | dependent.number, 192 | reason, 193 | ); 194 | } 195 | } 196 | MergeRequestCleanupReason::AfterSHAUpdate(updated_sha) => { 197 | for mut dependent in related_dependents.into_values() { 198 | let mut was_updated = false; 199 | dependent.dependencies = 200 | if let Some(mut dependencies) = dependent.dependencies { 201 | for dependency in dependencies.iter_mut() { 202 | if dependency.owner == owner 203 | && dependency.repo == repo && dependency.number 204 | == number 205 | { 206 | was_updated = true; 207 | log::info!( 208 | "Dependency of {} on {}/{}/pull/{} was updated to SHA {}", 209 | dependent.html_url, 210 | owner, 211 | repo, 212 | number, 213 | updated_sha 214 | ); 215 | dependency.sha = updated_sha.to_string(); 216 | } 217 | } 218 | Some(dependencies) 219 | } else { 220 | None 221 | }; 222 | 223 | if was_updated { 224 | db.put( 225 | dependent.sha.as_bytes(), 226 | bincode::serialize(&dependent) 227 | .context(error::Bincode)?, 228 | ) 229 | .context(error::Db)?; 230 | } 231 | } 232 | } 233 | MergeRequestCleanupReason::AfterMerge => {} 234 | } 235 | 236 | log::info!( 237 | "Cleaning up cleanup_merge_request recursion prevention lock's entries" 238 | ); 239 | CLEANUP_PR_RECURSION_PREVENTION.lock().clear(); 240 | 241 | Ok(()) 242 | } 243 | 244 | pub enum MergeRequestQueuedMessage<'a> { 245 | Custom(&'a str), 246 | Default, 247 | None, 248 | } 249 | pub async fn queue_merge_request( 250 | state: &AppState, 251 | mr: &MergeRequest, 252 | msg: &MergeRequestQueuedMessage<'_>, 253 | ) -> Result<()> { 254 | register_merge_request(state, mr).await?; 255 | 256 | let AppState { gh_client, .. } = state; 257 | 258 | let MergeRequest { 259 | owner, 260 | repo, 261 | number, 262 | .. 263 | } = mr; 264 | 265 | let msg = match msg { 266 | MergeRequestQueuedMessage::Custom(msg) => msg, 267 | MergeRequestQueuedMessage::Default => "Waiting for commit status.", 268 | MergeRequestQueuedMessage::None => return Ok(()), 269 | }; 270 | 271 | let post_comment_result = gh_client 272 | .create_issue_comment(owner, repo, *number, msg) 273 | .await; 274 | if let Err(err) = post_comment_result { 275 | log::error!("Error posting comment: {}", err); 276 | } 277 | 278 | Ok(()) 279 | } 280 | 281 | pub async fn handle_merged_pull_request( 282 | state: &AppState, 283 | pr: &GithubPullRequest, 284 | requested_by: &str, 285 | ) -> Result<bool> { 286 | if !pr.merged { 287 | return Ok(false); 288 | } 289 | 290 | let was_cleaned_up = cleanup_merge_request( 291 | state, 292 | &pr.head.sha, 293 | &pr.base.repo.owner.login, 294 | &pr.base.repo.name, 295 | pr.number, 296 | &MergeRequestCleanupReason::AfterMerge, 297 | ) 298 | .await 299 | .map(|_| true); 300 | 301 | /* 302 | It does not make sense to try to handle the dependents if the cleanup went 303 | wrong, since that hints at a bug in the application 304 | */ 305 | if was_cleaned_up.is_ok() { 306 | if let Err(err) = 307 | process_dependents_after_merge(state, pr, requested_by).await 308 | { 309 | log::error!( 310 | "Failed to run process_dependents_after_merge in handle_merged_pull_request due to {:?}", 311 | err 312 | ); 313 | } 314 | } 315 | 316 | was_cleaned_up 317 | } 318 | 319 | pub async fn is_ready_to_merge( 320 | state: &AppState, 321 | pr: &GithubPullRequest, 322 | ) -> Result<bool> { 323 | let AppState { gh_client, ..
} = state; 324 | 325 | match get_commit_checks( 326 | gh_client, 327 | &pr.base.repo.owner.login, 328 | &pr.base.repo.name, 329 | &pr.head.sha, 330 | &pr.html_url, 331 | ) 332 | .await? 333 | { 334 | Status::Success => { 335 | match get_commit_statuses( 336 | state, 337 | &pr.base.repo.owner.login, 338 | &pr.base.repo.name, 339 | &pr.head.sha, 340 | &pr.html_url, 341 | true, 342 | ) 343 | .await? 344 | .0 345 | { 346 | Status::Success => Ok(true), 347 | Status::Failure => Err(Error::StatusesFailed { 348 | commit_sha: pr.head.sha.to_owned(), 349 | }), 350 | _ => Ok(false), 351 | } 352 | } 353 | Status::Failure => Err(Error::ChecksFailed { 354 | commit_sha: pr.head.sha.to_owned(), 355 | }), 356 | _ => Ok(false), 357 | } 358 | } 359 | 360 | pub async fn merge_pull_request( 361 | state: &AppState, 362 | pr: &GithubPullRequest, 363 | requested_by: &str, 364 | ) -> Result<Result<()>> { 365 | if handle_merged_pull_request(state, pr, requested_by).await? { 366 | return Ok(Ok(())); 367 | } 368 | 369 | let AppState { gh_client, .. } = state; 370 | 371 | let err = match gh_client 372 | .merge_pull_request( 373 | &pr.base.repo.owner.login, 374 | &pr.base.repo.name, 375 | pr.number, 376 | &pr.head.sha, 377 | ) 378 | .await 379 | { 380 | Ok(_) => { 381 | log::info!("{} merged successfully.", pr.html_url); 382 | // Merge succeeded! Now clean it from the database 383 | if let Err(err) = cleanup_merge_request( 384 | state, 385 | &pr.head.sha, 386 | &pr.base.repo.owner.login, 387 | &pr.base.repo.name, 388 | pr.number, 389 | &MergeRequestCleanupReason::AfterMerge, 390 | ) 391 | .await 392 | { 393 | log::error!( 394 | "Failed to clean up PR in the database after merge: {}", 395 | err 396 | ); 397 | }; 398 | return Ok(Ok(())); 399 | } 400 | Err(err) => err, 401 | }; 402 | 403 | let msg = match err { 404 | Error::Response { 405 | ref status, 406 | ref body, 407 | } if *status == HttpStatusCode::METHOD_NOT_ALLOWED => { 408 | match body.get("message") { 409 | Some(msg) => match msg.as_str() { 410 | Some(msg) => msg, 411 | None => { 412 | log::error!("Expected \"message\" of Github API merge failure response to be a string"); 413 | return Err(err); 414 | } 415 | }, 416 | None => { 417 | log::error!("Expected \"message\" of Github API merge failure response to be available"); 418 | return Err(err); 419 | } 420 | } 421 | } 422 | _ => return Err(err), 423 | }; 424 | 425 | // Matches the following 426 | // - "Required status check ... is {pending,expected}." 427 | // - "... required status checks have not succeeded: ... {pending,expected}." 428 | let missing_status_matcher = 429 | RegexBuilder::new(r"required\s+status\s+.*(pending|expected)") 430 | .case_insensitive(true) 431 | .build() 432 | .unwrap(); 433 | 434 | if missing_status_matcher.find(msg).is_some() { 435 | // This problem will be solved automatically when all the required statuses are delivered, thus 436 | // it can be ignored here 437 | log::info!( 438 | "Ignoring merge failure due to pending required status; message: {}", 439 | msg 440 | ); 441 | return Ok(Err(Error::MergeFailureWillBeSolvedLater { 442 | msg: msg.to_string(), 443 | })); 444 | } 445 | 446 | Err(Error::Message { msg: msg.into() }) 447 | } 448 | 449 | async fn register_merge_request( 450 | state: &AppState, 451 | mr: &MergeRequest, 452 | ) -> Result<()> { 453 | let AppState { db, .. } = state; 454 | let MergeRequest { sha, ..
} = mr; 455 | log::info!("Registering merge request (sha: {}): {:?}", sha, mr); 456 | db.put( 457 | sha.as_bytes(), 458 | bincode::serialize(mr).context(error::Bincode)?, 459 | ) 460 | .context(error::Db) 461 | } 462 | 463 | pub async fn check_merge_is_allowed( 464 | state: &AppState, 465 | pr: &GithubPullRequest, 466 | requested_by: &str, 467 | companion_reference_trail: &[CompanionReferenceTrailItem], 468 | ) -> Result<()> { 469 | if !pr.mergeable.unwrap_or(false) { 470 | return Err(Error::Message { 471 | msg: format!("Github API says {} is not mergeable", pr.html_url), 472 | }); 473 | } else { 474 | log::info!("{} is mergeable", pr.html_url); 475 | } 476 | 477 | check_all_companions_are_mergeable( 478 | state, 479 | pr, 480 | requested_by, 481 | companion_reference_trail, 482 | ) 483 | .await 484 | } 485 | --------------------------------------------------------------------------------
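merge_pull_request above deliberately tolerates one class of merge failure: 405 responses whose message only says that required statuses are still pending or expected. The same regex can be exercised in isolation against the message shapes quoted in the comment (the sample messages below are illustrative):

    use regex::RegexBuilder;

    fn main() {
        let matcher =
            RegexBuilder::new(r"required\s+status\s+.*(pending|expected)")
                .case_insensitive(true)
                .build()
                .unwrap();
        // Pending/expected required statuses are ignored (resolved later)...
        assert!(matcher
            .find("Required status check \"ci\" is pending.")
            .is_some());
        assert!(matcher
            .find("2 of 4 required status checks have not succeeded: 1 expected.")
            .is_some());
        // ...while any other merge failure still surfaces as an error.
        assert!(matcher.find("Pull Request is not mergeable").is_none());
    }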
/src/server.rs: -------------------------------------------------------------------------------- 1 | use std::{net::SocketAddr, sync::Arc}; 2 | 3 | use hyper::{ 4 | service::{make_service_fn, service_fn}, 5 | Body, Request, Server, 6 | }; 7 | use tokio::sync::Mutex; 8 | 9 | use crate::{bot::*, core::AppState}; 10 | 11 | pub async fn init( 12 | addr: SocketAddr, 13 | state: Arc<Mutex<AppState>>, 14 | ) -> anyhow::Result<()> { 15 | let service = make_service_fn(move |_| { 16 | let state = Arc::clone(&state); 17 | async move { 18 | Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| { 19 | let state = Arc::clone(&state); 20 | handle_http_request_for_bot(req, state) 21 | })) 22 | } 23 | }); 24 | 25 | let server = Server::bind(&addr).http1_half_close(true).serve(service); 26 | 27 | log::info!("Listening on {}", addr); 28 | if let Err(e) = server.await { 29 | eprintln!("server error: {}", e); 30 | } 31 | 32 | Ok(()) 33 | } 34 | -------------------------------------------------------------------------------- /src/shell.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | ffi::OsStr, 3 | fmt::{Debug, Display}, 4 | path::Path, 5 | process::{Output, Stdio}, 6 | }; 7 | 8 | use snafu::ResultExt; 9 | use tokio::process::Command; 10 | 11 | use crate::{error::*, types::Result}; 12 | 13 | #[derive(PartialEq, Eq)] 14 | pub struct CommandMessageConfiguration<'a, Secret: AsRef<str>> { 15 | pub secrets_to_hide: Option<&'a [Secret]>, 16 | pub are_errors_silenced: bool, 17 | } 18 | 19 | #[derive(PartialEq, Eq)] 20 | pub enum CommandMessage<'a, Secret: AsRef<str>> { 21 | Configured(CommandMessageConfiguration<'a, Secret>), 22 | } 23 | 24 | pub async fn run_cmd<Cmd, Dir, Secret: AsRef<str>>( 25 | cmd: Cmd, 26 | args: &[&str], 27 | dir: Dir, 28 | logging: CommandMessage<'_, Secret>, 29 | ) -> Result<Output> 30 | where 31 | Cmd: AsRef<OsStr> + Display, 32 | Dir: AsRef<Path> + Debug, 33 | { 34 | before_cmd(&cmd, args, Some(&dir), &logging); 35 | 36 | #[allow(unused_mut)] 37 | let mut init_cmd = Command::new(cmd); 38 | let cmd = init_cmd.args(args).current_dir(dir).stderr(Stdio::piped()); 39 | let result = cmd.output().await.context(Tokio)?; 40 | 41 | handle_cmd_result(cmd, result, &logging) 42 | } 43 | 44 | pub async fn run_cmd_in_cwd<Cmd, Secret: AsRef<str>>( 45 | cmd: Cmd, 46 | args: &[&str], 47 | logging: CommandMessage<'_, Secret>, 48 | ) -> Result<Output> 49 | where 50 | Cmd: AsRef<OsStr> + Display, 51 | { 52 | before_cmd::<&Cmd, String, Secret>(&cmd, args, None, &logging); 53 | 54 | #[allow(unused_mut)] 55 | let mut init_cmd = Command::new(cmd); 56 | let cmd = init_cmd.args(args).stderr(Stdio::piped()); 57 | let result = cmd.output().await.context(Tokio)?; 58 | 59 | handle_cmd_result(cmd, result, &logging) 60 | } 61 | 62 | pub async fn run_cmd_with_output<Cmd, Dir, Secret: AsRef<str>>( 63 | cmd: Cmd, 64 | args: &[&str], 65 | dir: Dir, 66 | logging: CommandMessage<'_, Secret>, 67 | ) -> Result<Output> 68 | where 69 | Cmd: AsRef<OsStr> + Display, 70 | Dir: AsRef<Path> + Debug, 71 | { 72 | before_cmd(&cmd, args, Some(&dir), &logging); 73 | 74 | #[allow(unused_mut)] 75 | let mut init_cmd = Command::new(cmd); 76 | let cmd = init_cmd 77 | .args(args) 78 | .current_dir(dir) 79 | .stdin(Stdio::piped()) 80 | .stderr(Stdio::piped()); 81 | let result = cmd.output().await.context(Tokio)?; 82 | 83 | handle_cmd_result(cmd, result, &logging) 84 | } 85 | 86 | fn before_cmd<Cmd, Dir, Secret: AsRef<str>>( 87 | cmd: Cmd, 88 | args: &[&str], 89 | dir: Option<Dir>, 90 | logging: &CommandMessage<Secret>, 91 | ) where 92 | Cmd: AsRef<OsStr> + Display, 93 | Dir: AsRef<Path> + Debug, 94 | { 95 | match logging { 96 | CommandMessage::Configured(CommandMessageConfiguration { 97 | secrets_to_hide, 98 | .. 99 | }) => { 100 | let mut cmd_display = format!("{}", cmd); 101 | let mut args_display = format!("{:?}", args); 102 | if let Some(secrets) = secrets_to_hide.as_ref() { 103 | for secret in secrets.iter() { 104 | cmd_display = 105 | cmd_display.replace(secret.as_ref(), "${SECRET}"); 106 | args_display = 107 | args_display.replace(secret.as_ref(), "${SECRET}"); 108 | } 109 | } 110 | 111 | if let Some(dir) = dir { 112 | log::info!("Run {} {} in {:?}", cmd_display, args_display, dir); 113 | } else { 114 | log::info!( 115 | "Run {} {} in the current directory", 116 | cmd_display, 117 | args_display, 118 | ); 119 | } 120 | } 121 | }; 122 | } 123 | 124 | fn handle_cmd_result<Secret: AsRef<str>>( 125 | cmd: &mut Command, 126 | result: Output, 127 | logging: &CommandMessage<Secret>, 128 | ) -> Result<Output> { 129 | if result.status.success() { 130 | Ok(result) 131 | } else { 132 | let (cmd_display, err_msg) = match logging { 133 | CommandMessage::Configured(CommandMessageConfiguration { 134 | are_errors_silenced, 135 | secrets_to_hide, 136 | }) => { 137 | let mut cmd_display = format!("{:?}", cmd); 138 | if let Some(secrets) = secrets_to_hide.as_ref() { 139 | for secret in secrets.iter() { 140 | cmd_display = 141 | cmd_display.replace(secret.as_ref(), "${SECRET}"); 142 | } 143 | } 144 | let err_msg = if *are_errors_silenced { 145 | None 146 | } else { 147 | let err_output = String::from_utf8_lossy(&result.stderr); 148 | if err_output.is_empty() { 149 | None 150 | } else { 151 | let mut err_output = err_output.to_string(); 152 | if let Some(secrets) = secrets_to_hide.as_ref() { 153 | for secret in secrets.iter() { 154 | err_output = err_output 155 | .replace(secret.as_ref(), "${SECRET}"); 156 | } 157 | } 158 | log::error!( 159 | "handle_cmd_result: {} failed with error: {}", 160 | cmd_display, 161 | err_output 162 | ); 163 | Some(err_output) 164 | } 165 | }; 166 | 167 | (cmd_display, err_msg) 168 | } 169 | }; 170 | 171 | Err(Error::CommandFailed { 172 | cmd: cmd_display, 173 | status_code: result.status.code(), 174 | err: err_msg.unwrap_or_else(|| "no output".to_string()), 175 | }) 176 | } 177 | } 178 | --------------------------------------------------------------------------------
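A hypothetical call site for run_cmd, illustrating what secrets_to_hide buys: any occurrence of the token (a made-up value here, as are the URL and directory) is masked as ${SECRET} in both the logged command line and the logged stderr. The module paths are assumed from the file layout:

    use parity_processbot::{
        shell::{run_cmd, CommandMessage, CommandMessageConfiguration},
        types::Result,
    };

    async fn clone_private_repo(token: &str) -> Result<()> {
        let url = format!("https://x-access-token:{}@github.com/org/repo.git", token);
        // If this fails, the resulting Error::CommandFailed carries a command
        // line and stderr with `token` already replaced by ${SECRET}.
        run_cmd(
            "git",
            &["clone", &url],
            "/tmp/repositories",
            CommandMessage::Configured(CommandMessageConfiguration {
                secrets_to_hide: Some(&[token]),
                are_errors_silenced: false,
            }),
        )
        .await?;
        Ok(())
    }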
/src/types.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use crate::error::Error; 4 | 5 | pub type Result<T> = std::result::Result<T, Error>; 6 | 7 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 8 | pub struct PlaceholderDeserializationItem {} 9 | -------------------------------------------------------------------------------- /src/vanity_service.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | 3 | #[derive(Deserialize)] 4 | pub struct JobInformation { 5 | pub build_allow_failure: Option<bool>, 6 | } 7 | -------------------------------------------------------------------------------- /tests/helpers/cmd.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | ffi::OsStr, 3 | fmt::{Debug, Display}, 4 | path::Path, 5 | process::{Command, Stdio}, 6 | }; 7 | 8 | pub enum CmdConfiguration<'a> { 9 | IgnoreStderrStartingWith(&'a [&'a str]), 10 | } 11 | 12 | fn build_cmd<Cmd, Dir>(cmd: Cmd, args: &[&str], dir: &Option<Dir>) -> Command 13 | where 14 | Cmd: AsRef<OsStr> + Display, 15 | Dir: AsRef<Path> + Debug, 16 | { 17 | let mut cmd = Command::new(cmd); 18 | 19 | cmd.args(args); 20 | 21 | if let Some(dir) = dir { 22 | cmd.current_dir(dir); 23 | println!("Executing {:?} on {:?}", cmd, dir); 24 | } else { 25 | println!("Executing {:?}", cmd); 26 | } 27 | 28 | cmd 29 | } 30 | 31 | pub fn exec<Cmd, Dir>( 32 | cmd: Cmd, 33 | args: &[&str], 34 | dir: Option<Dir>, 35 | conf: Option<CmdConfiguration>, 36 | ) where 37 | Cmd: AsRef<OsStr> + Display, 38 | Dir: AsRef<Path> + Debug, 39 | { 40 | let mut cmd = build_cmd(cmd, args, &dir); 41 | 42 | let was_success = match conf { 43 | Some(CmdConfiguration::IgnoreStderrStartingWith( 44 | prefixes_to_ignore, 45 | )) => { 46 | let out = cmd 47 | .stderr(Stdio::piped()) 48 | .spawn() 49 | .unwrap() 50 | .wait_with_output() 51 | .unwrap(); 52 | 53 | let err = String::from_utf8_lossy(&out.stderr); 54 | let err = err.trim(); 55 | if err.is_empty() { 56 | return; 57 | } else { 58 | for prefix_to_ignore in prefixes_to_ignore { 59 | if err.starts_with(prefix_to_ignore) { 60 | return; 61 | } 62 | } 63 | }; 64 | 65 | eprintln!("{}", err); 66 | 67 | out.status.success() 68 | } 69 | _ => cmd.spawn().unwrap().wait().unwrap().success(), 70 | }; 71 | 72 | if !was_success { 73 | panic!("Command {:?} failed", cmd); 74 | } 75 | } 76 | 77 | pub fn get_cmd_success<Cmd, Dir>( 78 | cmd: Cmd, 79 | args: &[&str], 80 | dir: Option<Dir>, 81 | ) -> bool 82 | where 83 | Cmd: AsRef<OsStr> + Display, 84 | Dir: AsRef<Path> + Debug, 85 | { 86 | let mut cmd = build_cmd(cmd, args, &dir); 87 | cmd.spawn().unwrap().wait().unwrap().success() 88 | } 89 | 90 | pub fn get_cmd_output<Cmd, Dir>( 91 | cmd: Cmd, 92 | args: &[&str], 93 | dir: Option<Dir>, 94 | ) -> String 95 | where 96 | Cmd: AsRef<OsStr> + Display, 97 | Dir: AsRef<Path> + Debug, 98 | { 99 | let mut cmd = build_cmd(cmd, args, &dir); 100 | let output = cmd 101 | .stdout(Stdio::piped()) 102 | .spawn() 103 | .unwrap() 104 | .wait_with_output() 105 | .unwrap(); 106 | String::from_utf8_lossy(&output.stdout).trim().to_string() 107 | } 108 | --------------------------------------------------------------------------------
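For illustration, how these test helpers compose (the branch name and expected stderr prefix below are examples, and the import path is assumed from the test layout); exec panics on failure, which is exactly what a test wants:

    use std::path::Path;

    use crate::helpers::cmd::{exec, get_cmd_output, CmdConfiguration};

    fn create_branch_and_read_head(repo_dir: &Path) -> String {
        exec(
            "git",
            &["checkout", "-b", "feature"],
            Some(repo_dir),
            // Git reports the branch switch on stderr; that is expected noise.
            Some(CmdConfiguration::IgnoreStderrStartingWith(&[
                "Switched to a new branch",
            ])),
        );
        get_cmd_output("git", &["rev-parse", "HEAD"], Some(repo_dir))
    }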
/tests/helpers/constants.rs: -------------------------------------------------------------------------------- 1 | pub const I64_PLACEHOLDER_WHICH_DOES_NOT_MATTER: i64 = 0; 2 | pub const USIZE_PLACEHOLDER_WHICH_DOES_NOT_MATTER: usize = 0; 3 | pub const URL_PLACEHOLDER_WHICH_DOES_NOT_MATTER: &str = "https://localhost"; 4 | -------------------------------------------------------------------------------- /tests/helpers/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::{self, remove_dir_all, remove_file, File}, 3 | io::Read, 4 | net::TcpListener, 5 | path::{Path, PathBuf}, 6 | }; 7 | 8 | pub mod cmd; 9 | pub mod constants; 10 | pub mod setup; 11 | 12 | use cmd::exec; 13 | 14 | pub fn get_available_port() -> Option<u16> { 15 | (1025..65535).find(|&port| TcpListener::bind(("127.0.0.1", port)).is_ok()) 16 | } 17 | 18 | pub fn read_snapshot(log_dir: PathBuf, texts_to_hide: &[&str]) -> String { 19 | let entry = log_dir.read_dir().unwrap().next().unwrap().unwrap(); 20 | let mut file = File::open(entry.path()).unwrap(); 21 | let mut buf = String::new(); 22 | file.read_to_string(&mut buf).unwrap(); 23 | for text_to_hide in texts_to_hide.iter() { 24 | buf = buf.replace(text_to_hide, "{REDACTED}"); 25 | } 26 | buf 27 | } 28 | 29 | pub fn clean_directory(dir: PathBuf) { 30 | for f in dir.read_dir().unwrap() { 31 | let f = f.unwrap(); 32 | let _ = if f.metadata().unwrap().is_dir() { 33 | remove_dir_all(f.path()) 34 | } else { 35 | remove_file(f.path()) 36 | }; 37 | } 38 | } 39 | 40 | pub fn initialize_repository(repo_dir: &Path, initial_branch: &str) { 41 | exec::<&str, PathBuf>( 42 | "git", 43 | &["init", &repo_dir.display().to_string()], 44 | None, 45 | None, 46 | ); 47 | // --initial-branch from Git init can't be used because the Git on CI is too old 48 | exec( 49 | "git", 50 | &["checkout", "-b", initial_branch], 51 | Some(repo_dir), 52 | None, 53 | ); 54 | exec( 55 | "git", 56 | &["config", "--local", "user.name", "processbot"], 57 | Some(repo_dir), 58 | None, 59 | ); 60 | exec( 61 | "git", 62 | &["config", "--local", "user.email", "foo@bar.com"], 63 | Some(repo_dir), 64 | None, 65 | ); 66 | exec( 67 | "git", 68 | &["config", "--local", "advice.detachedHead", "false"], 69 | Some(repo_dir), 70 | None, 71 | ); 72 | fs::write(repo_dir.join("README"), "").unwrap(); 73 | exec("git", &["add", "."], Some(&repo_dir), None); 74 | exec( 75 | "git", 76 | &["commit", "-m", "initial commit"], 77 | Some(&repo_dir), 78 | None, 79 | ); 80 | } 81 | --------------------------------------------------------------------------------
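The {REDACTED} substitution in read_snapshot above is what keeps the committed snapshots stable across runs; the same transformation in isolation (the sample strings are invented):

    fn redact(mut buf: String, texts_to_hide: &[&str]) -> String {
        // Volatile values such as commit SHAs are replaced before the
        // snapshot comparison, exactly as read_snapshot does.
        for text_to_hide in texts_to_hide {
            buf = buf.replace(text_to_hide, "{REDACTED}");
        }
        buf
    }

    fn main() {
        let log = "pull/1 merged at 1a2b3c4".to_string();
        assert_eq!(redact(log, &["1a2b3c4"]), "pull/1 merged at {REDACTED}");
    }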
/tests/helpers/setup.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | env, fs, 3 | io::Write, 4 | path::PathBuf, 5 | process::{self, Command, Stdio}, 6 | }; 7 | 8 | use flexi_logger::FileSpec; 9 | use httptest::{matchers::*, responders::*, Expectation, Server}; 10 | use parity_processbot::{self, github::*}; 11 | use serde_json::json; 12 | use tempfile::TempDir; 13 | 14 | use super::{cmd::*, constants::*, *}; 15 | 16 | pub struct CommonSetupOutput { 17 | pub log_dir: TempDir, 18 | pub db_dir: TempDir, 19 | pub git_daemon_handle: process::Child, 20 | pub git_daemon_dir: TempDir, 21 | pub private_key: Vec<u8>, 22 | pub github_api: Server, 23 | pub github_api_url: String, 24 | pub owner: GithubUser, 25 | pub repo_name: &'static str, 26 | pub repo_dir: PathBuf, 27 | pub repo_full_name: String, 28 | pub github_app_id: usize, 29 | pub initial_branch: String, 30 | } 31 | pub fn common_setup() -> CommonSetupOutput { 32 | let git_daemon_base_path_tracker = 33 | env::var("GIT_DAEMON_BASE_PATH_TRACKER").unwrap(); 34 | 35 | let log_dir = tempfile::tempdir().unwrap(); 36 | flexi_logger::Logger::try_with_env_or_str("info") 37 | .unwrap() 38 | .log_to_file( 39 | FileSpec::default() 40 | .directory(log_dir.path().to_path_buf()) 41 | .basename("test") 42 | .suppress_timestamp() 43 | .suffix("log"), 44 | ) 45 | .duplicate_to_stdout(flexi_logger::Duplicate::All) 46 | .start() 47 | .unwrap(); 48 | 49 | // The git daemon will be used for fetching and pushing branches during tests 50 | let git_daemon_dir = tempfile::tempdir().unwrap(); 51 | let git_daemon_dir_path_str = git_daemon_dir.path().display().to_string(); 52 | { 53 | let mut file = std::fs::OpenOptions::new() 54 | .write(true) 55 | .append(true) 56 | .open(git_daemon_base_path_tracker) 57 | .unwrap(); 58 | writeln!(file, "{}", &git_daemon_dir_path_str).unwrap(); 59 | } 60 | clean_directory(git_daemon_dir.path().to_path_buf()); 61 | let git_daemon_port = get_available_port().unwrap(); 62 | let git_daemon_handle = Command::new("git") 63 | .arg("daemon") 64 | .arg(format!("--port={}", git_daemon_port)) 65 | .arg(format!("--base-path={}", git_daemon_dir_path_str)) 66 | .arg("--export-all") 67 | .arg("--enable=receive-pack") 68 | .stdout(Stdio::null()) 69 | .current_dir(git_daemon_dir.path()) 70 | .spawn() 71 | .unwrap(); 72 | 73 | // "owner" is the placeholder user which will act as the requester for the bot's commands 74 | let owner = GithubUser { 75 | login: "owner".to_string(), 76 | type_field: GithubUserType::User, 77 | }; 78 | let private_key = " 79 | -----BEGIN PRIVATE KEY----- 80 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJETqse41HRBsc 81 | 7cfcq3ak4oZWFCoZlcic525A3FfO4qW9BMtRO/iXiyCCHn8JhiL9y8j5JdVP2Q9Z 82 | IpfElcFd3/guS9w+5RqQGgCR+H56IVUyHZWtTJbKPcwWXQdNUX0rBFcsBzCRESJL 83 | eelOEdHIjG7LRkx5l/FUvlqsyHDVJEQsHwegZ8b8C0fz0EgT2MMEdn10t6Ur1rXz 84 | jMB/wvCg8vG8lvciXmedyo9xJ8oMOh0wUEgxziVDMMovmC+aJctcHUAYubwoGN8T 85 | yzcvnGqL7JSh36Pwy28iPzXZ2RLhAyJFU39vLaHdljwthUaupldlNyCfa6Ofy4qN 86 | ctlUPlN1AgMBAAECggEAdESTQjQ70O8QIp1ZSkCYXeZjuhj081CK7jhhp/4ChK7J 87 | GlFQZMwiBze7d6K84TwAtfQGZhQ7km25E1kOm+3hIDCoKdVSKch/oL54f/BK6sKl 88 | qlIzQEAenho4DuKCm3I4yAw9gEc0DV70DuMTR0LEpYyXcNJY3KNBOTjN5EYQAR9s 89 | 2MeurpgK2MdJlIuZaIbzSGd+diiz2E6vkmcufJLtmYUT/k/ddWvEtz+1DnO6bRHh 90 | xuuDMeJA/lGB/EYloSLtdyCF6sII6C6slJJtgfb0bPy7l8VtL5iDyz46IKyzdyzW 91 | tKAn394dm7MYR1RlUBEfqFUyNK7C+pVMVoTwCC2V4QKBgQD64syfiQ2oeUlLYDm4 92 | CcKSP3RnES02bcTyEDFSuGyyS1jldI4A8GXHJ/lG5EYgiYa1RUivge4lJrlNfjyf 93 | dV230xgKms7+JiXqag1FI+3mqjAgg4mYiNjaao8N8O3/PD59wMPeWYImsWXNyeHS 94 | 55rUKiHERtCcvdzKl4u35ZtTqQKBgQDNKnX2bVqOJ4WSqCgHRhOm386ugPHfy+8j 95 | m6cicmUR46ND6ggBB03bCnEG9OtGisxTo/TuYVRu3WP4KjoJs2LD5fwdwJqpgtHl 96 | yVsk45Y1Hfo+7M6lAuR8rzCi6kHHNb0HyBmZjysHWZsn79ZM+sQnLpgaYgQGRbKV 97 | DZWlbw7g7QKBgQCl1u+98UGXAP1jFutwbPsx40IVszP4y5ypCe0gqgon3UiY/G+1 98 | zTLp79GGe/SjI2VpQ7AlW7TI2A0bXXvDSDi3/5Dfya9ULnFXv9yfvH1QwWToySpW 99 | Kvd1gYSoiX84/WCtjZOr0e0HmLIb0vw0hqZA4szJSqoxQgvF22EfIWaIaQKBgQCf 100 | 34+OmMYw8fEvSCPxDxVvOwW2i7pvV14hFEDYIeZKW2W1HWBhVMzBfFB5SE8yaCQy 101 | pRfOzj9aKOCm2FjjiErVNpkQoi6jGtLvScnhZAt/lr2TXTrl8OwVkPrIaN0bG/AS 102 | aUYxmBPCpXu3UjhfQiWqFq/mFyzlqlgvuCc9g95HPQKBgAscKP8mLxdKwOgX8yFW 103 | GcZ0izY/30012ajdHY+/QK5lsMoxTnn0skdS+spLxaS5ZEO4qvPVb8RAoCkWMMal 104 | 2pOhmquJQVDPDLuZHdrIiKiDM20dy9sMfHygWcZjQ4WSxf/J7T9canLZIXFhHAZT 105 | 3wc9h4G8BBCtWN2TN/LsGZdB 106 | -----END PRIVATE KEY----- 107 | " 108 | .as_bytes() 109 | .to_vec(); 110 | 111 | // Set up the Git repository with an initial commit 112 | let repo = "repo"; 113 | let repo_full_name = format!("{}/{}", &owner.login, repo); 114 | let repo_dir = git_daemon_dir.path().join(&owner.login).join(repo); 115 | let initial_branch = "master"; 116 | fs::create_dir_all(&repo_dir).unwrap(); 117 | initialize_repository(&repo_dir, initial_branch); 118 | 119 | // Use a mock HTTP server as the Github API 120 | let github_api = Server::run(); 121 | let github_api_url = { 122 | let url = github_api.url("").to_string(); 123 | url[0..url.len() - 1].to_string() 124 | }; 125 | 126 | // The bot requires an installation access token for its Github API requests. 127 | // The token's value does not matter as we'll not be validating it on the mock HTTP server 128 | // anyway 129 | 
github_api.expect( 130 | Expectation::matching(request::method_path( 131 | "GET", 132 | "/app/installations", 133 | )) 134 | .times(0..) 135 | .respond_with(json_encoded(vec![GithubInstallation { 136 | id: I64_PLACEHOLDER_WHICH_DOES_NOT_MATTER, 137 | account: GithubUser { 138 | login: owner.login.clone(), 139 | type_field: GithubUserType::Bot, 140 | }, 141 | }])), 142 | ); 143 | github_api.expect( 144 | Expectation::matching(request::method_path( 145 | "POST", 146 | format!( 147 | "/app/installations/{}/access_tokens", 148 | I64_PLACEHOLDER_WHICH_DOES_NOT_MATTER 149 | ), 150 | )) 151 | .times(0..) 152 | .respond_with(json_encoded(GithubInstallationToken { 153 | token: "does not matter".to_string(), 154 | expires_at: None, 155 | })), 156 | ); 157 | 158 | // Set up the membership for the initial user so that the organization checks will pass 159 | github_api.expect( 160 | Expectation::matching(request::method_path( 161 | "GET", 162 | format!("/orgs/{}/members/{}", &owner.login, &owner.login), 163 | )) 164 | .times(0..) 165 | .respond_with( 166 | status_code(204) 167 | .append_header("Content-Type", "application/json") 168 | .body(serde_json::to_string(&json!({})).unwrap()), 169 | ), 170 | ); 171 | 172 | let db_dir = tempfile::tempdir().unwrap(); 173 | 174 | CommonSetupOutput { 175 | log_dir, 176 | git_daemon_handle, 177 | git_daemon_dir, 178 | github_api, 179 | github_api_url, 180 | db_dir, 181 | repo_dir, 182 | github_app_id: USIZE_PLACEHOLDER_WHICH_DOES_NOT_MATTER, 183 | owner, 184 | repo_name: repo, 185 | repo_full_name, 186 | private_key, 187 | initial_branch: initial_branch.to_string(), 188 | } 189 | } 190 | 191 | pub fn setup_commit(setup: &CommonSetupOutput, sha: &str) { 192 | let CommonSetupOutput { 193 | owner, 194 | repo_name, 195 | github_api, 196 | .. 197 | } = setup; 198 | 199 | github_api.expect( 200 | Expectation::matching(request::method_path( 201 | "GET", 202 | format!("/repos/{}/{}/statuses/{}", &owner.login, repo_name, sha), 203 | )) 204 | .times(0..) 205 | .respond_with(json_encoded(vec![GithubCommitStatus { 206 | id: 1, 207 | context: "does not matter".to_string(), 208 | description: Some("does not matter".to_string()), 209 | state: GithubCommitStatusState::Success, 210 | target_url: None, 211 | }])), 212 | ); 213 | 214 | github_api.expect( 215 | Expectation::matching(request::method_path( 216 | "GET", 217 | format!( 218 | "/repos/{}/{}/commits/{}/check-runs", 219 | &owner.login, repo_name, sha 220 | ), 221 | )) 222 | .times(0..) 223 | .respond_with(json_encoded(GithubCheckRuns { 224 | check_runs: vec![GithubCheckRun { 225 | id: 1, 226 | name: "does not matter".to_string(), 227 | status: GithubCheckRunStatus::Completed, 228 | conclusion: Some(GithubCheckRunConclusion::Success), 229 | head_sha: sha.to_string(), 230 | }], 231 | })), 232 | ); 233 | } 234 | 235 | pub struct SetupPullRequestOutput { 236 | pub url: String, 237 | pub html_url: String, 238 | pub number: i64, 239 | } 240 | pub fn setup_pull_request( 241 | setup: &CommonSetupOutput, 242 | repo: &GithubRepository, 243 | head_sha: &str, 244 | comment: &GithubIssueComment, 245 | pr_branch: &str, 246 | number: i64, 247 | ) -> SetupPullRequestOutput { 248 | let CommonSetupOutput { 249 | github_api, 250 | github_api_url, 251 | owner, 252 | repo_dir, 253 | initial_branch: base_branch, 254 | .. 
255 | } = setup; 256 | 257 | let repo_api_path = &format!("/repos/{}", &repo.full_name); 258 | let pr_api_path = &format!("{}/pulls/{}", repo_api_path, number); 259 | let issue_api_path = &format!("{}/issues/{}", repo_api_path, number); 260 | let url = format!("{}{}", github_api_url, pr_api_path); 261 | let html_url = format!("{}/pull/{}", &repo.html_url, number); 262 | 263 | { 264 | let repo_dir: &'static PathBuf = 265 | &*Box::leak(Box::new(repo_dir.clone())); 266 | let pr_branch: &'static String = 267 | &*Box::leak(Box::new(pr_branch.to_string())); 268 | let base_branch: &'static String = 269 | &*Box::leak(Box::new(base_branch.to_string())); 270 | github_api.expect( 271 | Expectation::matching(request::method_path( 272 | "PUT", 273 | format!("{}/merge", pr_api_path), 274 | )) 275 | .times(0..) 276 | .respond_with(move || { 277 | exec( 278 | "git", 279 | &["checkout", pr_branch], 280 | Some(repo_dir), 281 | Some(CmdConfiguration::IgnoreStderrStartingWith(&[ 282 | "Switched to branch", 283 | ])), 284 | ); 285 | let tmp_branch_name = "tmp"; 286 | exec( 287 | "git", 288 | &["checkout", "-b", tmp_branch_name], 289 | Some(repo_dir), 290 | Some(CmdConfiguration::IgnoreStderrStartingWith(&[ 291 | "Switched to a new branch", 292 | ])), 293 | ); 294 | let was_merge_success = get_cmd_success( 295 | "git", 296 | &["merge", "--no-ff", "--no-edit", base_branch], 297 | Some(repo_dir), 298 | ); 299 | // Merge is only successful if the PR branch has no conflict with the base branch; otherwise, 300 | // this code simulates the "Pull Request is not mergeable" response (code 405). 301 | // https://docs.github.com/en/rest/reference/pulls#merge-a-pull-request 302 | let result = if was_merge_success { 303 | status_code(200) 304 | .append_header("Content-Type", "application/json") 305 | .body(serde_json::to_string(&json!({})).unwrap()) 306 | } else { 307 | status_code(405) 308 | .append_header("Content-Type", "application/json") 309 | .body( 310 | serde_json::to_string( 311 | &json!({ "message": "Pull Request is not mergeable" }), 312 | ) 313 | .unwrap(), 314 | ) 315 | }; 316 | exec( 317 | "git", 318 | &["merge", "--abort"], 319 | Some(repo_dir), 320 | Some(CmdConfiguration::IgnoreStderrStartingWith(&[ 321 | "fatal: There is no merge to abort", 322 | ])), 323 | ); 324 | exec( 325 | "git", 326 | &["checkout", base_branch], 327 | Some(repo_dir), 328 | Some(CmdConfiguration::IgnoreStderrStartingWith(&[ 329 | "Switched to branch", 330 | ])), 331 | ); 332 | exec( 333 | "git", 334 | &["branch", "-D", tmp_branch_name], 335 | Some(repo_dir), 336 | None, 337 | ); 338 | result 339 | }), 340 | ); 341 | } 342 | 343 | github_api.expect( 344 | Expectation::matching(request::method_path( 345 | "GET", 346 | pr_api_path.to_string(), 347 | )) 348 | .times(0..) 
349 | .respond_with(json_encoded(GithubPullRequest { 350 | body: None, 351 | number, 352 | mergeable: Some(true), 353 | html_url: html_url.clone(), 354 | url: url.clone(), 355 | user: Some(owner.clone()), 356 | base: GithubPullRequestBase { 357 | ref_field: base_branch.to_string(), 358 | repo: GithubPullRequestBaseRepository { 359 | name: repo.name.clone(), 360 | owner: owner.clone(), 361 | }, 362 | }, 363 | head: GithubPullRequestHead { 364 | ref_field: pr_branch.to_string(), 365 | sha: head_sha.to_string(), 366 | repo: GithubPullRequestHeadRepository { 367 | name: repo.name.clone(), 368 | owner: owner.clone(), 369 | }, 370 | }, 371 | merged: false, 372 | maintainer_can_modify: true, 373 | })), 374 | ); 375 | 376 | github_api.expect( 377 | Expectation::matching(request::method_path( 378 | "POST", 379 | format!("{}/comments", issue_api_path,), 380 | )) 381 | .times(0..) 382 | .respond_with( 383 | status_code(201) 384 | .append_header("Content-Type", "application/json") 385 | .body(serde_json::to_string(&json!({})).unwrap()), 386 | ), 387 | ); 388 | 389 | github_api.expect( 390 | Expectation::matching(request::method_path( 391 | "POST", 392 | format!( 393 | "{}/issues/comments/{}/reactions", 394 | repo_api_path, comment.id, 395 | ), 396 | )) 397 | .times(0..) 398 | .respond_with( 399 | status_code(201) 400 | .append_header("Content-Type", "application/json") 401 | .body(serde_json::to_string(&json!({})).unwrap()), 402 | ), 403 | ); 404 | 405 | SetupPullRequestOutput { 406 | url, 407 | html_url, 408 | number, 409 | } 410 | } 411 | -------------------------------------------------------------------------------- /tests/merge.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, fs}; 2 | 3 | use insta::assert_snapshot; 4 | use parity_processbot::{ 5 | self, bot::handle_github_payload, config::MainConfig, core::AppState, 6 | github::*, types::PlaceholderDeserializationItem, 7 | }; 8 | use rocksdb::DB; 9 | 10 | mod helpers; 11 | 12 | use helpers::{cmd::*, constants::*, read_snapshot, setup::*}; 13 | 14 | #[tokio::test] 15 | async fn simple_merge_succeeds() { 16 | let common_setup = common_setup(); 17 | let CommonSetupOutput { 18 | log_dir, 19 | github_api_url, 20 | db_dir, 21 | owner, 22 | repo_dir, 23 | private_key, 24 | github_app_id, 25 | repo_name, 26 | repo_full_name, 27 | git_daemon_dir, 28 | .. 
29 | } = &common_setup; 30 | 31 | // Create PR branch 32 | let pr_branch = "contributor_patches"; 33 | exec( 34 | "git", 35 | &["checkout", "-b", pr_branch], 36 | Some(repo_dir), 37 | Some(CmdConfiguration::IgnoreStderrStartingWith(&[ 38 | "Switched to a new branch", 39 | ])), 40 | ); 41 | 42 | // Add a commit to the PR's branch 43 | fs::write(repo_dir.join("foo"), "this file has changed").unwrap(); 44 | exec("git", &["add", "."], Some(repo_dir), None); 45 | exec( 46 | "git", 47 | &["commit", "-m", "change file"], 48 | Some(repo_dir), 49 | None, 50 | ); 51 | let pr_head_sha = 52 | get_cmd_output("git", &["rev-parse", "HEAD"], Some(&repo_dir)); 53 | 54 | // Setup the commit in the API so that the status checks criterion will pass 55 | setup_commit(&common_setup, &pr_head_sha); 56 | 57 | let repo = GithubRepository { 58 | name: repo_name.to_string(), 59 | full_name: repo_full_name.clone(), 60 | owner: owner.clone(), 61 | html_url: format!( 62 | "{}/{}", 63 | URL_PLACEHOLDER_WHICH_DOES_NOT_MATTER, repo_full_name 64 | ), 65 | }; 66 | 67 | let comment = GithubIssueComment { 68 | id: I64_PLACEHOLDER_WHICH_DOES_NOT_MATTER, 69 | body: "bot merge".to_string(), 70 | user: owner.clone(), 71 | }; 72 | 73 | let mut next_pr_number: i64 = 0; 74 | next_pr_number += 1; 75 | let pr = &setup_pull_request( 76 | &common_setup, 77 | &repo, 78 | &pr_head_sha, 79 | &comment, 80 | pr_branch, 81 | next_pr_number, 82 | ); 83 | 84 | let config = MainConfig { 85 | installation_login: owner.login.clone(), 86 | webhook_secret: "does not matter".to_owned(), 87 | webhook_port: "does not matter".to_string(), 88 | db_path: db_dir.path().to_path_buf(), 89 | repos_path: git_daemon_dir.path().to_path_buf(), 90 | private_key: private_key.clone(), 91 | webhook_proxy_url: None, 92 | disable_org_checks: false, 93 | github_api_url: github_api_url.clone(), 94 | github_app_id: *github_app_id, 95 | merge_command_delay: 0, 96 | companion_status_settle_delay: 0, 97 | github_source_prefix: "https://github.com".into(), 98 | github_source_suffix: "".into(), 99 | gitlab_url: "".into(), 100 | gitlab_access_token: "".into(), 101 | dependency_update_configuration: HashMap::new(), 102 | }; 103 | let gh_client = GithubClient::new(&config); 104 | let db = DB::open_default(&config.db_path).unwrap(); 105 | let state = AppState { 106 | db, 107 | gh_client, 108 | config, 109 | }; 110 | 111 | let _ = handle_github_payload( 112 | GithubWebhookPayload::IssueComment { 113 | action: GithubIssueCommentAction::Created, 114 | comment, 115 | issue: GithubIssue { 116 | number: pr.number, 117 | html_url: pr.html_url.clone(), 118 | pull_request: Some(PlaceholderDeserializationItem {}), 119 | }, 120 | repository: GithubIssueRepository { 121 | name: repo.name, 122 | owner: owner.clone(), 123 | }, 124 | }, 125 | &state, 126 | ) 127 | .await; 128 | 129 | assert_snapshot!(read_snapshot( 130 | log_dir.path().to_path_buf(), 131 | &[&pr_head_sha] 132 | )); 133 | } 134 | -------------------------------------------------------------------------------- /tests/snapshots/merge__simple_merge_succeeds.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: tests/merge.rs 3 | expression: "read_snapshot(log_dir.path().to_path_buf(), &[&pr_head_sha])" 4 | --- 5 | INFO [parity_processbot::bot] Merge(Normal) requested by owner in https://localhost/owner/repo/pull/1 6 | INFO [parity_processbot::merge_request] https://localhost/owner/repo/pull/1 is mergeable 7 | INFO [parity_processbot::core] https://localhost/owner/repo/pull/1 
check_runs: [GithubCheckRun { id: 1, name: "does not matter", status: Completed, conclusion: Some(Success), head_sha: "{REDACTED}" }] 8 | INFO [parity_processbot::core] https://localhost/owner/repo/pull/1 latest_checks: {"does not matter": (1, Completed, Some(Success))} 9 | INFO [parity_processbot::core] https://localhost/owner/repo/pull/1 has successful checks 10 | INFO [parity_processbot::core] https://localhost/owner/repo/pull/1 statuses: [GithubCommitStatus { id: 1, context: "does not matter", state: Success, description: Some("does not matter"), target_url: None }] 11 | INFO [parity_processbot::core] https://localhost/owner/repo/pull/1 latest_statuses: {"does not matter": (1, Success, None)} 12 | INFO [parity_processbot::core] https://localhost/owner/repo/pull/1 has success status 13 | INFO [parity_processbot::merge_request] https://localhost/owner/repo/pull/1 merged successfully. 14 | INFO [parity_processbot::merge_request] Acquiring cleanup_merge_request's recursion prevention lock 15 | INFO [parity_processbot::merge_request] Releasing cleanup_merge_request's recursion prevention lock 16 | INFO [parity_processbot::merge_request] Related dependents of owner/repo/pull/1 (key {REDACTED}): {} 17 | INFO [parity_processbot::merge_request] Cleaning up cleanup_merge_request recursion prevention lock's entries 18 | INFO [parity_processbot::core] Handling dependents of https://localhost/owner/repo/pull/1 19 | 20 | --------------------------------------------------------------------------------
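The .snap file above is the stored expectation for the assert_snapshot! call in tests/merge.rs: the test passes only if the redacted log output matches it exactly. When the bot's logging changes intentionally, the snapshot is meant to be refreshed through insta's review workflow (for example with `cargo insta review`, assuming the cargo-insta tool is installed) rather than edited by hand.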