├── .dockerignore ├── .github ├── dependabot.yaml └── workflows │ ├── release.yaml │ └── test.yaml ├── .gitignore ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── Dockerfile.arm ├── LICENCE ├── README.md ├── docs ├── config.toml ├── content │ ├── _index.md │ ├── guides │ │ ├── _index.md │ │ ├── compiled.md │ │ ├── configuration.md │ │ ├── development.md │ │ ├── docker-compose.md │ │ ├── dynamic.md │ │ ├── netlify.md │ │ └── php.md │ ├── reference │ │ ├── _index.md │ │ ├── authentication.md │ │ ├── commandline.md │ │ ├── comparison.md │ │ └── environment-variables.md │ └── usage │ │ ├── _index.md │ │ ├── actions.md │ │ ├── crontab.md │ │ ├── docker.md │ │ ├── installation.md │ │ ├── start.md │ │ ├── systemd.md │ │ └── webhook.md ├── static │ ├── custom.css │ ├── install.sh │ ├── sakura-dark.css │ ├── sakura.css │ ├── webhook-github-deliveries.png │ ├── webhook-github.png │ └── webhook-gitlab.png └── templates │ ├── anchor-link.html │ ├── index.html │ ├── page.html │ ├── partials │ └── toc.html │ └── section.html ├── src ├── actions │ ├── mod.rs │ ├── process.rs │ ├── script.rs │ └── utils │ │ ├── command.rs │ │ └── mod.rs ├── args.rs ├── checks │ ├── git.rs │ ├── git │ │ ├── config.rs │ │ ├── credentials.rs │ │ ├── known_hosts.rs │ │ └── repository.rs │ ├── mod.rs │ └── watch.rs ├── context.rs ├── lib.rs ├── logger.rs ├── main.rs ├── start.rs └── triggers │ ├── http.rs │ ├── mod.rs │ ├── once.rs │ ├── schedule.rs │ └── signal.rs └── test_directories └── .keep /.dockerignore: -------------------------------------------------------------------------------- 1 | target/* 2 | !target/arm-unknown-linux-gnueabihf 3 | target/arm-unknown-linux-gnueabihf/* 4 | !target/arm-unknown-linux-gnueabihf/release 5 | target/arm-unknown-linux-gnueabihf/release/* 6 | !target/arm-unknown-linux-gnueabihf/release/gw 7 | docs 8 | test_directories -------------------------------------------------------------------------------- /.github/dependabot.yaml: 
-------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "cargo" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | permissions: 9 | contents: write 10 | 11 | env: 12 | isRcRelease: ${{ contains(github.ref, 'rc') }} 13 | isLiveRelease: ${{ ! contains(github.ref, 'rc') }} 14 | 15 | jobs: 16 | release: 17 | name: Create GitHub release 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout the tag 21 | uses: actions/checkout@v3 22 | - name: Add toolchain for Rust 23 | uses: actions-rs/toolchain@v1 24 | with: 25 | toolchain: stable 26 | - name: Restore cached dependencies 27 | uses: Swatinem/rust-cache@v2 28 | - name: Release for tags 29 | uses: taiki-e/create-gh-release-action@v1 30 | if: ${{ env.isLiveRelease == 'true' }} 31 | with: 32 | changelog: CHANGELOG.md 33 | token: ${{ secrets.GITHUB_TOKEN }} 34 | - name: Publish to Crates.io 35 | uses: katyo/publish-crates@v2 36 | if: ${{ env.isLiveRelease == 'true' }} 37 | with: 38 | registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }} 39 | ignore-unpublished-changes: true 40 | 41 | release_linux: 42 | name: Release for Linux 43 | runs-on: ubuntu-latest 44 | needs: release 45 | steps: 46 | - name: Checkout the tag 47 | uses: actions/checkout@v3 48 | - name: Add toolchain for Rust 49 | uses: actions-rs/toolchain@v1 50 | with: 51 | toolchain: stable 52 | - name: Restore cached dependencies 53 | uses: Swatinem/rust-cache@v2 54 | - name: Build for Linux 55 | uses: actions-rs/cargo@v1 56 | with: 57 | command: build 58 | args: --release --target x86_64-unknown-linux-gnu 59 | - name: Convert binaries into compressed files 60 | run: | 61 | cd target/x86_64-unknown-linux-gnu/release && 
tar czf gw-bin_x86_64-unknown-linux-gnu.tar.gz gw && cd - 62 | cd target/x86_64-unknown-linux-gnu/release && zip gw-bin_x86_64-unknown-linux-gnu.zip gw && cd - 63 | - name: Upload zip to release 64 | uses: svenstaro/upload-release-action@v2 65 | if: ${{ env.isLiveRelease == 'true' }} 66 | with: 67 | file: target/x86_64-unknown-linux-gnu/release/gw-bin_x86_64-unknown-linux-gnu.zip 68 | asset_name: gw-bin_x86_64-unknown-linux-gnu.zip 69 | - name: Upload tar.gz to release 70 | uses: svenstaro/upload-release-action@v2 71 | if: ${{ env.isLiveRelease == 'true' }} 72 | with: 73 | file: target/x86_64-unknown-linux-gnu/release/gw-bin_x86_64-unknown-linux-gnu.tar.gz 74 | asset_name: gw-bin_x86_64-unknown-linux-gnu.tar.gz 75 | - name: Archive production artifacts 76 | uses: actions/upload-artifact@v4 77 | if: ${{ env.isRcRelease == 'true' }} 78 | with: 79 | name: gw-bin_x86_64-unknown-linux-gnu 80 | path: | 81 | target/x86_64-unknown-linux-gnu/release/gw 82 | 83 | release_musl: 84 | name: Release for Musl 85 | runs-on: ubuntu-latest 86 | needs: release 87 | steps: 88 | - name: Checkout the tag 89 | uses: actions/checkout@v3 90 | - name: Add toolchain for Rust 91 | uses: actions-rs/toolchain@v1 92 | with: 93 | toolchain: stable 94 | target: x86_64-unknown-linux-musl 95 | - name: Restore cached dependencies 96 | uses: Swatinem/rust-cache@v2 97 | - name: Install MUSL dependencies 98 | run: sudo apt-get install musl-tools --no-install-recommends -y 99 | - name: Build for Musl 100 | uses: actions-rs/cargo@v1 101 | with: 102 | command: build 103 | args: --release --target x86_64-unknown-linux-musl 104 | - name: Convert binaries into compressed files 105 | run: | 106 | cd target/x86_64-unknown-linux-musl/release && tar czf gw-bin_x86_64-unknown-linux-musl.tar.gz gw && cd - 107 | cd target/x86_64-unknown-linux-musl/release && zip gw-bin_x86_64-unknown-linux-musl.zip gw && cd - 108 | - name: Upload zip to release 109 | uses: svenstaro/upload-release-action@v2 110 | if: ${{ 
env.isLiveRelease == 'true' }} 111 | with: 112 | file: target/x86_64-unknown-linux-musl/release/gw-bin_x86_64-unknown-linux-musl.zip 113 | asset_name: gw-bin_x86_64-unknown-linux-musl.zip 114 | - name: Upload tar.gz to release 115 | uses: svenstaro/upload-release-action@v2 116 | if: ${{ env.isLiveRelease == 'true' }} 117 | with: 118 | file: target/x86_64-unknown-linux-musl/release/gw-bin_x86_64-unknown-linux-musl.tar.gz 119 | asset_name: gw-bin_x86_64-unknown-linux-musl.tar.gz 120 | - name: Archive production artifacts 121 | uses: actions/upload-artifact@v4 122 | if: ${{ env.isRcRelease == 'true' }} 123 | with: 124 | name: gw-bin_x86_64-unknown-linux-musl 125 | path: | 126 | target/x86_64-unknown-linux-musl/release/gw 127 | 128 | release_windows: 129 | name: Release for Windows 130 | runs-on: ubuntu-latest 131 | needs: release 132 | steps: 133 | - name: Checkout the tag 134 | uses: actions/checkout@v3 135 | - name: Add toolchain for Rust 136 | uses: actions-rs/toolchain@v1 137 | with: 138 | toolchain: stable 139 | - name: Restore cached dependencies 140 | uses: Swatinem/rust-cache@v2 141 | - name: Build for Windows 142 | uses: actions-rs/cargo@v1 143 | with: 144 | command: build 145 | args: --release --target x86_64-pc-windows-gnu 146 | use-cross: true 147 | - name: Convert binaries into compressed files 148 | run: | 149 | cd target/x86_64-pc-windows-gnu/release && zip gw-bin_x86_64-pc-windows-gnu.zip gw.exe && cd - 150 | - name: Upload zip to release 151 | uses: svenstaro/upload-release-action@v2 152 | if: ${{ env.isLiveRelease == 'true' }} 153 | with: 154 | file: target/x86_64-pc-windows-gnu/release/gw-bin_x86_64-pc-windows-gnu.zip 155 | asset_name: gw-bin_x86_64-pc-windows-gnu.zip 156 | - name: Archive production artifacts 157 | uses: actions/upload-artifact@v4 158 | if: ${{ env.isRcRelease == 'true' }} 159 | with: 160 | name: gw-bin_x86_64-pc-windows-gnu 161 | path: | 162 | target/x86_64-pc-windows-gnu/release/gw.exe 163 | 164 | 165 | release_arm: 166 | name: 
Release for ARM 167 | runs-on: ubuntu-latest 168 | needs: release 169 | steps: 170 | - name: Checkout the tag 171 | uses: actions/checkout@v3 172 | - name: Add toolchain for Rust 173 | uses: actions-rs/toolchain@v1 174 | with: 175 | toolchain: stable 176 | target: arm-unknown-linux-gnueabihf 177 | - name: Restore cached dependencies 178 | uses: Swatinem/rust-cache@v2 179 | - name: Add new apt sources that support armhf 180 | run: | 181 | sudo tee /etc/apt/sources.list.d/ubuntu.sources << EOF 182 | Types: deb 183 | URIs: http://archive.ubuntu.com/ubuntu/ 184 | Suites: noble noble-updates noble-backports 185 | Components: main universe restricted multiverse 186 | Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg 187 | Architectures: amd64 188 | 189 | Types: deb 190 | URIs: http://security.ubuntu.com/ubuntu/ 191 | Suites: noble-security 192 | Components: main universe restricted multiverse 193 | Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg 194 | Architectures: amd64 195 | 196 | Types: deb 197 | URIs: http://ports.ubuntu.com/ 198 | Suites: noble noble-updates noble-backports 199 | Components: main universe restricted multiverse 200 | Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg 201 | Architectures: armhf 202 | EOF 203 | - name: Enable ARM cross-build capabilities 204 | run: sudo dpkg --add-architecture armhf 205 | - name: Update apt repositories 206 | run: sudo apt-get update 207 | - name: Install cross-platform installation dependencies 208 | run: sudo apt-get install libc6-dev gcc-arm-linux-gnueabihf libc6-dev:armhf libssl-dev:armhf --no-install-recommends -y 209 | - name: Add setup for ARM cross-building 210 | run: | 211 | mkdir -p ~/.cargo 212 | cat > ~/.cargo/config.toml << EOF 213 | [target.arm-unknown-linux-gnueabihf] 214 | linker = "arm-linux-gnueabihf-gcc" 215 | rustflags = ["-L/usr/lib/arm-linux-gnueabihf", "-C", "target-feature=+crt-static"] 216 | EOF 217 | - name: Build for ARM 218 | run: | 219 | export OPENSSL_STATIC=1 220 
| export OPENSSL_DIR=/usr/arm-linux-gnueabihf 221 | export OPENSSL_LIB_DIR=/usr/lib/arm-linux-gnueabihf 222 | export OPENSSL_INCLUDE_DIR=/usr/include/arm-linux-gnueabihf 223 | cargo build --release --target arm-unknown-linux-gnueabihf 224 | - name: Convert binaries into compressed files 225 | run: | 226 | cd target/arm-unknown-linux-gnueabihf/release && tar czf gw-bin_arm-unknown-linux-gnueabihf.tar.gz gw && cd - 227 | cd target/arm-unknown-linux-gnueabihf/release && zip gw-bin_arm-unknown-linux-gnueabihf.zip gw && cd - 228 | - name: Upload zip to release 229 | uses: svenstaro/upload-release-action@v2 230 | if: ${{ env.isLiveRelease == 'true' }} 231 | with: 232 | file: target/arm-unknown-linux-gnueabihf/release/gw-bin_arm-unknown-linux-gnueabihf.zip 233 | asset_name: gw-bin_arm-unknown-linux-gnueabihf.zip 234 | - name: Upload tar.gz to release 235 | uses: svenstaro/upload-release-action@v2 236 | if: ${{ env.isLiveRelease == 'true' }} 237 | with: 238 | file: target/arm-unknown-linux-gnueabihf/release/gw-bin_arm-unknown-linux-gnueabihf.tar.gz 239 | asset_name: gw-bin_arm-unknown-linux-gnueabihf.tar.gz 240 | - name: Archive production artifacts 241 | uses: actions/upload-artifact@v4 242 | with: 243 | name: gw-bin_arm-unknown-linux-gnueabihf 244 | path: | 245 | target/arm-unknown-linux-gnueabihf/release/gw 246 | 247 | release_mac: 248 | name: Release for MacOS 249 | needs: release 250 | runs-on: macos-latest 251 | steps: 252 | - name: Checkout the tag 253 | uses: actions/checkout@v3 254 | - name: Add toolchain for Rust 255 | uses: actions-rs/toolchain@v1 256 | with: 257 | toolchain: stable 258 | - name: Restore cached dependencies 259 | uses: Swatinem/rust-cache@v2 260 | - name: Build for MacOS 261 | uses: actions-rs/cargo@v1 262 | with: 263 | command: build 264 | args: --release --target aarch64-apple-darwin 265 | - name: Convert binaries into compressed files 266 | run: | 267 | cd target/aarch64-apple-darwin/release && zip gw-bin_aarch64-apple-darwin.zip gw && cd - 
268 | - name: Upload zip to live release 269 | uses: svenstaro/upload-release-action@v2 270 | if: ${{ env.isLiveRelease == 'true' }} 271 | with: 272 | file: target/aarch64-apple-darwin/release/gw-bin_aarch64-apple-darwin.zip 273 | asset_name: gw-bin_aarch64-apple-darwin.zip 274 | - name: Store artifacts for the release candidates 275 | uses: actions/upload-artifact@v4 276 | if: ${{ env.isRcRelease == 'true' }} 277 | with: 278 | name: gw-bin_aarch64-apple-darwin 279 | path: | 280 | target/aarch64-apple-darwin/release/gw 281 | 282 | docker: 283 | name: Docker build if we are on a tag 284 | needs: release_arm 285 | runs-on: ubuntu-latest 286 | steps: 287 | - name: Checkout the tag 288 | uses: actions/checkout@v3 289 | - name: Set up QEMU 290 | uses: docker/setup-qemu-action@v3 291 | - name: Set up Docker Buildx 292 | uses: docker/setup-buildx-action@v3 293 | - name: Log in to Docker Hub 294 | uses: docker/login-action@v3 295 | with: 296 | username: ${{ secrets.DOCKERHUB_USERNAME }} 297 | password: ${{ secrets.DOCKERHUB_PASSWORD }} 298 | - name: Download artifacts 299 | uses: actions/download-artifact@v4 300 | with: 301 | name: gw-bin_arm-unknown-linux-gnueabihf 302 | path: target/arm-unknown-linux-gnueabihf/release 303 | - name: Make artifact executable 304 | run: chmod +x target/arm-unknown-linux-gnueabihf/release/gw 305 | - name: Docker meta for Debian 306 | id: meta 307 | uses: docker/metadata-action@v5 308 | with: 309 | images: | 310 | danielgrant/gw 311 | tags: | 312 | type=raw,value=latest 313 | type=semver,pattern={{version}} 314 | type=semver,pattern={{major}}.{{minor}} 315 | - name: Build and push Docker image for x86_64 316 | id: build_amd64 317 | uses: docker/build-push-action@v5 318 | with: 319 | context: . 
320 | file: ./Dockerfile 321 | push: true 322 | tags: "danielgrant/gw:amd64" 323 | cache-from: type=registry,ref=danielgrant/gw:latest 324 | cache-to: type=inline 325 | - name: Build and push Docker image for ARMv7 326 | uses: docker/build-push-action@v5 327 | with: 328 | context: . 329 | file: ./Dockerfile.arm 330 | push: true 331 | tags: "danielgrant/gw:armv7" 332 | platforms: linux/arm/v7 333 | cache-from: type=registry,ref=danielgrant/gw:latest 334 | cache-to: type=inline 335 | - name: Build and push Docker image for ARM64 336 | uses: docker/build-push-action@v5 337 | with: 338 | context: . 339 | file: ./Dockerfile.arm 340 | push: true 341 | tags: "danielgrant/gw:arm64" 342 | platforms: linux/arm64 343 | cache-from: type=registry,ref=danielgrant/gw:latest 344 | cache-to: type=inline 345 | - name: Merge tags with docker manifest 346 | run: | 347 | TAGS=$(echo "${{ steps.meta.outputs.tags }}" | sed 's/^/--tag /' | xargs) 348 | docker buildx imagetools create $TAGS \ 349 | danielgrant/gw:amd64 \ 350 | danielgrant/gw:arm64 \ 351 | danielgrant/gw:armv7 352 | - name: Update repo description 353 | uses: peter-evans/dockerhub-description@v4 354 | if: ${{ env.isLiveRelease == 'true' }} 355 | with: 356 | username: ${{ secrets.DOCKERHUB_USERNAME }} 357 | password: ${{ secrets.DOCKERHUB_PASSWORD }} 358 | repository: danielgrant/gw 359 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | 6 | jobs: 7 | test: 8 | strategy: 9 | fail-fast: false 10 | matrix: 11 | os: [ubuntu-latest, macos-latest, windows-latest] 12 | name: Check and test on ${{ matrix.os }} 13 | runs-on: ${{ matrix.os }} 14 | steps: 15 | - name: Checkout the tag 16 | uses: actions/checkout@v3 17 | - name: Add toolchain for Rust 18 | uses: actions-rs/toolchain@v1 19 | with: 20 | toolchain: stable 21 | - name: Install nextest 22 | 
uses: taiki-e/install-action@nextest 23 | - name: Cache Rust dependencies 24 | uses: Swatinem/rust-cache@v2 25 | - name: Test for linting issues 26 | run: cargo clippy -- -D warnings 27 | - name: Setup git configuration 28 | run: | 29 | git config --global user.email "test@example.com" 30 | git config --global user.name "Test Thomas" 31 | git config --global init.defaultBranch master 32 | - name: Run tests 33 | run: cargo nextest run --no-fail-fast 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | docs/public 3 | test_directories/* 4 | !test_directories/.keep -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## [Unreleased] 4 | 5 | ### Changed 6 | - Updated test dependencies 7 | - Updated ureq to avoid ring vulnerability 8 | 9 | ## [0.4.1] - 2025-01-26 10 | 11 | ### Changed 12 | 13 | - **New feature**: Trigger on git tags 14 | - Use the `--on tag` to only pull to the latest tag on the branch 15 | - Use `--on tag:v*` to match the tags with the given glob 16 | - Git repository will stay on the branch, but will do a partial pull to the tag and run actions 17 | - Updated dependencies, with libgit2 updated to 1.9.0 18 | 19 | ## [0.4.0] - 2024-10-23 20 | 21 | ### Added 22 | 23 | - **New feature**: Subprocess handling 24 | - Use `-p` to start a process directly and restart on change 25 | - Configure the retries to restart the process in case of a failure 26 | - Set the stop signal and stop timeout args to configure graceful shutdown before restart 27 | - Add `-P` to start the process in the shell instead 28 | - The order of script and process flags now matter, scripts are run in order before and after the process 29 | - Add testing for Windows and MacOS machines 30 | 31 | ### 
Changed 32 | 33 | - **Breaking change**: Scripts are now running directly, you can run it in a shell using `-S` 34 | - Only change gitconfig (safe.directory) if there isn't one 35 | - Don't overwrite script environment, use already set variables 36 | - If the user presses Ctrl+C a second time, the program exits immediately 37 | 38 | ## [0.3.2] - 2024-08-26 39 | 40 | ### Added 41 | 42 | - Add Docker image support for arm/v7 43 | 44 | ### Changed 45 | 46 | - Make ARM binaries statically linked 32-bit to maintain compatibility with older devices 47 | 48 | ## [0.3.1] - 2024-08-21 49 | 50 | ### Added 51 | 52 | - Support cross-compilation for Linux ARM machines 53 | - Support compilation for MacOS ARM machines 54 | - Support multi-platform Docker images 55 | - Add Changelog to releases automatically 56 | 57 | ### Changed 58 | 59 | - Fix accidentally dropped Windows support 60 | 61 | ## [0.3.0] - 2024-08-19 62 | 63 | ### Added 64 | 65 | - Add context to share data between different steps 66 | - Expose the context through environmental variables for the scripts 67 | - Add documentation for the environmental variables 68 | - Add `--version` flag to print current version 69 | - Add `--quiet` flag to improve logging 70 | - Add signal handling to handle SIGINT and SIGTERM 71 | - Add `--ssh-key` flag to change the ssh-key path 72 | - Add `--git-username` and `--git-token` flags to change the https authentication 73 | - Generate `.ssh/known_hosts` file if there is none found on the system 74 | - Add `--git-known-host` to add an entry to the `.ssh/known_hosts` file 75 | - Add installation script 76 | 77 | ### Changed 78 | 79 | - Change musl release to build everything statically 80 | - Ignore different owner repository warnings 81 | - Improve error messages, print original error in fetch 82 | 83 | ### Removed 84 | 85 | - Remove `-o` short flag for `--once` 86 | - Remove debian-based image to streamline usage 87 | - Remove on tag argument for now 88 | 89 | ## [0.2.2] - 2024-02-17 
90 | 91 | ### Added 92 | 93 | - Add `-v` flag to increase verbosity, default log level changed to INFO 94 | - Add check to avoid pulling, when the repository is dirty 95 | - Add more tracing to git repository 96 | - Add safe directory inside Docker image 97 | 98 | ### Changed 99 | 100 | - Fix bug with tag fetching 101 | 102 | ## [0.2.1] - 2024-02-12 103 | 104 | ### Added 105 | 106 | - Add Docker image 107 | - Add image building to GitHub Actions 108 | 109 | ### Changed 110 | 111 | - Improve documentation 112 | 113 | ## [0.2.0] - 2024-02-09 114 | 115 | ### Changed 116 | 117 | - Rewrite code to be more modular 118 | - Introduce tests to every part of the codebase 119 | - Add documentation to every module 120 | - Refactor error handling to use thiserror 121 | - Add testing to GitHub Actions 122 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "gw-bin" 3 | description = "Watch git repositories, pull changes and run commands on the new files" 4 | version = "0.4.1" 5 | license = "MIT" 6 | edition = "2021" 7 | documentation = "https://github.com/daniel7grant/gw" 8 | repository = "https://github.com/daniel7grant/gw" 9 | 10 | [package.metadata.binstall] 11 | pkg-url = "{ repo }/releases/download/v{ version }/{ name }_{ target }{ archive-suffix }" 12 | bin-dir = "{ bin }{ binary-ext }" 13 | pkg-fmt = "zip" 14 | 15 | [dependencies] 16 | dirs = "6" 17 | duct = "0.13.7" 18 | duct_sh = "0.13.7" 19 | duration-string = "0.5.2" 20 | git2 = "0.20.0" 21 | gumdrop = "0.8.1" 22 | log = "0.4.20" 23 | mockall = "0.13.0" 24 | nix = { version = "0.29.0", features = ["signal"] } 25 | shlex = "1.3.0" 26 | signal-hook = "0.3.17" 27 | simplelog = "0.12.2" 28 | thiserror = "2.0.3" 29 | time = "0.3.36" 30 | tiny_http = "0.12.0" 31 | 32 | [target.'cfg(any(target_env = "musl", target_arch = "arm", target_arch = "aarch64"))'.dependencies] 33 | git2 
= { version = "0.20.0", features = ["vendored-libgit2", "vendored-openssl"] } 34 | 35 | [dev-dependencies] 36 | duct = "0.13.7" 37 | rand = "0.9.0" 38 | testing_logger = "0.1.1" 39 | ureq = { version = "3.0.5", default-features = false } 40 | 41 | [profile.release] 42 | strip = true 43 | 44 | [[bin]] 45 | name = "gw" 46 | path = "src/main.rs" 47 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.80-alpine AS builder 2 | 3 | WORKDIR /app 4 | 5 | ARG OPENSSL_STATIC=1 6 | 7 | RUN apk add --no-cache \ 8 | make \ 9 | musl-dev \ 10 | perl 11 | 12 | COPY ./Cargo.lock ./Cargo.toml /app 13 | COPY ./src /app/src 14 | 15 | RUN cargo build --release 16 | 17 | 18 | FROM alpine:3.20 19 | 20 | RUN apk add --no-cache \ 21 | ca-certificates 22 | 23 | COPY --from=builder /app/target/release/gw /usr/bin/gw 24 | 25 | ENTRYPOINT ["/usr/bin/gw"] 26 | -------------------------------------------------------------------------------- /Dockerfile.arm: -------------------------------------------------------------------------------- 1 | # This image is only for CI to avoid the very slow QEMU compilation 2 | FROM alpine:3.20 3 | 4 | RUN apk add --no-cache \ 5 | ca-certificates 6 | 7 | # Use the previously built binary artifact 8 | COPY target/arm-unknown-linux-gnueabihf/release/gw /usr/bin/gw 9 | 10 | ENTRYPOINT ["/usr/bin/gw"] 11 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Daniel Grant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gw 2 | 3 | Watch local git repositories, keep in sync with remote and run commands. 4 | 5 | ## Motivation 6 | 7 | `gw` is a lightweight binary that manages a simple pull-based continuous deployment for you. It watches a local git repository, fetches if the remote changes, and builds or deploys your code. Current CD solutions either lock you into proprietary software (e.g. Netlify or Vercel) or complicated to run and manage (e.g. ArgoCD). `gw` is a service that can run everywhere (even behind NAT or VPN), synchronizes code with your remote and deploys immediately, saving your developers time and energy. 
8 | 9 | Features of `gw`: 10 | - **lightweight**: it is only a 1.5MB binary (~7MB with git and ssh statically built-in) 11 | - **runs anywhere**: use it on baremetal or [systemd](https://gw.danielgrants.com/usage/systemd.md) on Linux (x86_64 and ARM) or in [Docker](https://gw.danielgrants.com/usage/docker.md) (Windows and MacOS is supported on a best-effort basis) 12 | - **open source**: written entirely in Rust, you can build it from source in a few minutes 13 | - **pull-based**: works on any network, even behind a NAT or VPN 14 | - **flexible**: build, deploy, restart or anything you can imagine 15 | 16 | If you want to see how `gw` compare to other products: look at the [comparisons](https://gw.danielgrants.com/reference/comparison). 17 | 18 | ## Installation 19 | 20 | To get started with `gw`, you can use the install script: 21 | 22 | ```sh 23 | curl https://gw.danielgrants.com/install.sh | sh 24 | ``` 25 | 26 | For more installation methods, see the [documentation](https://gw.danielgrants.com/usage/installation/). 27 | 28 | ## Get started 29 | 30 | `gw` is a simple program, that you can use to pull changes from a remote repository and run scripts on the change. 31 | 32 | ### Prerequisites 33 | 34 | First, make sure, that `gw` is installed successfully and is in your PATH: 35 | 36 | ```sh 37 | $ gw --version 38 | 0.4.1 39 | ``` 40 | 41 | The other necessary part is a git repository to which you have pull access. It is recommended to use a repository that you know, but if you don't have one at hand, you can use the [daniel7grant/time](https://github.com/daniel7grant/time) repository. This is an example repository that is updated in every minute, so it is useful to test the auto update of `gw`. 
First clone this repository (if you are using your own, clone again), and enter the cloned directory: 42 | 43 | ```sh 44 | git clone https://github.com/daniel7grant/time.git 45 | cd time 46 | ``` 47 | 48 | ### Pull files automatically 49 | 50 | To get started, point `gw` to this local repository. By default it pulls the changes every minute. We can add the `--verbose` or `-v` flag to see when the changes occur: 51 | 52 | ```sh 53 | gw /path/to/repo -v 54 | ``` 55 | 56 | If you are using your own repository, create a commit in a different place, and see how it gets automatically pulled (in the case of the `time` repo, there is a commit every minute). The verbose logs should print that a git pull happened: 57 | 58 | ```sh 59 | $ gw /path/to/repo -v 60 | # ... 61 | 2024-03-10T14:48:13.447Z [DEBUG] Checked out fc23d21 on branch main. 62 | 2024-03-10T14:48:13.447Z [INFO ] There are updates, pulling. 63 | ``` 64 | 65 | Also check the files or the `git log` to see that it the repository has been updated: 66 | 67 | ```sh 68 | cat DATETIME # it should contain the latest time 69 | git log -1 # it should be a commit in the last minute 70 | ``` 71 | 72 | ### Run scripts on pull 73 | 74 | Pulling files automatically is useful but the `--script` or `-s` flag unlocks `gw`'s potential: it can run any kind of custom script if there are any changes. For a simple example, we can print the content of a file to the log with `cat`: 75 | 76 | ```sh 77 | gw /path/to/repo -v --script 'cat DATETIME' 78 | ``` 79 | 80 | This will run every time there is a new commit, and after the pull it will print the file contents. You can see that the results are printed in the log: 81 | 82 | ```sh 83 | $ gw /path/to/repo -v --script 'cat DATETIME' 84 | # ... 85 | 2024-10-18T16:28:53.907Z [INFO ] There are updates, running actions. 86 | 2024-10-18T16:28:53.907Z [INFO ] Running script "cat" in /path/to/repo. 
87 | 2024-10-18T16:28:53.913Z [DEBUG] [cat] 2024-10-18T16:28:00+0000 88 | 2024-10-18T16:28:53.913Z [INFO ] Script "cat" finished successfully. 89 | ``` 90 | 91 | You can add multiple scripts, which will run one after another. Use these scripts to build source files, restarts deployments and anything else that you can imagine. 92 | 93 | ### Run subprocess, restart on pull 94 | 95 | It is often enough to run scripts, but many times you also want to maintain a long-running process e.g. for web services. `gw` can help you with this, using the `-p` flag. This will start a process in the background and restart it on pull. 96 | 97 | For example starting a python web server: 98 | 99 | ```sh 100 | $ gw /path/to/repo -v --process "python -m http.server" 101 | # ... 102 | 2024-10-06T21:58:21.306Z [DEBUG] Setting up ProcessAction "python -m http.server" on change. 103 | 2024-10-06T21:58:21.306Z [DEBUG] Starting process: "python" in directory /path/to/repo. 104 | 2024-10-06T21:58:56.211Z [DEBUG] [python] Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ... 105 | ``` 106 | 107 | This will run a python process in the background and stop and start it again if a git pull happened. Just wrap your deployment script with `gw` and see it gets updated every time you push to git. 108 | 109 | ## Run actions on tags 110 | 111 | Pulling on every commit might not be the fit for every product, especially ones that needs to maintains compatibility or strictly versioned. For these, you can instead trigger on tags. Use the `--on tag` flag to only pull changes if there is a tag on the current branch. 112 | 113 | ```sh 114 | $ gw /path/to/repo -v --on tag -S 'echo $GIT_TAG_NAME' 115 | # ... 116 | 2024-10-18T16:28:53.907Z [INFO ] There are updates, running actions. 117 | 2024-10-18T16:28:53.907Z [INFO ] Running script "echo" in /path/to/repo. 118 | 2024-10-18T16:28:53.913Z [DEBUG] [echo] v0.1.0 119 | 2024-10-18T16:28:53.913Z [INFO ] Script "echo" finished successfully. 
120 | ``` 121 | 122 | This will always fetch the current branch, check for the latest tag on it and pull only the commits up to that tag. To match some kind of commit, you can use the `--on tag:v*` which will only pull if the tag is matching the passed glob (in this case starting with `v`). 123 | 124 | ```sh 125 | gw /path/to/repo -v --on 'tag:v*' -S 'echo "new version: $GIT_TAG_NAME"' 126 | ``` 127 | 128 | ## Next steps 129 | 130 | If you like `gw`, there are multiple ways to use it for real-life use-cases. 131 | 132 | If you want to put the `gw` script in the background, you can: 133 | 134 | - wrap into a [systemd unit](https://gw.danielgrants.com/usage/systemd), if you want to manage it with a single file; 135 | - start in a [docker container](https://gw.danielgrants.com/usage/docker), if you already use Docker in your workflow; 136 | - or run periodically with [cron](https://gw.danielgrants.com/usage/crontab), if you don't have shell access to the server. 137 | 138 | If you are interested in some ideas on how to use `gw`: 139 | 140 | - if you only need to pull files, see [PHP guide](https://gw.danielgrants.com/guides/php); 141 | - if you are using a dynamic language (e.g. JavaScript, Python, Ruby), see [Guide for dynamic languages](https://gw.danielgrants.com/guides/dynamic) for example on running your process; 142 | - if you are using a compiled language (e.g. TypeScript, Go, Rust), see [Guide for compiled languages](https://gw.danielgrants.com/guides/compiled) for example on compiling your program; 143 | - if you use a `docker-compose.yaml`, see [Guide for docker-compose](guides/docker-compose); 144 | - if you want to easily manage configuration files as GitOps, see [Configuration guide](https://gw.danielgrants.com/guides/configuration); 145 | - for a full-blown example, check out [Netlify](https://gw.danielgrants.com/guides/netlify); 146 | - and many other things, for the incomplete list [guides page](https://gw.danielgrants.com/guides). 
147 | -------------------------------------------------------------------------------- /docs/config.toml: -------------------------------------------------------------------------------- 1 | # The URL the site will be built for 2 | base_url = "http://localhost" 3 | 4 | # Whether to automatically compile all Sass files in the sass directory 5 | compile_sass = false 6 | 7 | # Whether to build a search index to be used later on by a JavaScript library 8 | build_search_index = false 9 | 10 | [markdown] 11 | # Whether to do syntax highlighting 12 | # Theme can be customised by setting the `highlight_theme` variable to a theme supported by Zola 13 | highlight_code = true 14 | 15 | [extra] 16 | # Put all your custom variables here 17 | -------------------------------------------------------------------------------- /docs/content/_index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Get started" 3 | sort_by = "weight" 4 | 5 | insert_anchor_links = "right" 6 | +++ 7 | 8 | # gw 9 | 10 | Watch local git repositories, keep in sync with remote and run commands. 11 | 12 | ## Motivation 13 | 14 | `gw` is a lightweight binary that manages a simple pull-based continuous deployment for you. It watches a local git repository, fetches if the remote changes, and builds or deploys your code. Current CD solutions either lock you into proprietary software (e.g. Netlify or Vercel) or complicated to run and manage (e.g. ArgoCD). `gw` is a service that can run everywhere (even behind NAT or VPN), synchronizes code with your remote and deploys immediately, saving your developers time and energy. 
15 | 16 | Features of `gw`: 17 | - **lightweight**: it is only a 1.5MB binary (~7MB with git and ssh statically built-in) 18 | - **runs anywhere**: use it on baremetal or [systemd](https://gw.danielgrants.com/usage/systemd.md) on Linux (x86_64 and ARM) or in [Docker](https://gw.danielgrants.com/usage/docker.md) (Windows and MacOS is supported on a best-effort basis) 19 | - **open source**: written entirely in Rust, you can build it from source in a few minutes 20 | - **pull-based**: works on any network, even behind a NAT or VPN 21 | - **flexible**: build, deploy, restart or anything you can imagine 22 | 23 | If you want to see how `gw` compare to other products: look at the [comparisons](/reference/comparison). 24 | 25 | ## Installation 26 | 27 | To get started with `gw`, you can use the install script: 28 | 29 | ```sh 30 | curl https://gw.danielgrants.com/install.sh | sh 31 | ``` 32 | 33 | For more installation methods, see [Installation](/usage/installation). 34 | 35 | ## Get started 36 | 37 | To use `gw`, you have to point it to your local repository and it will pull changes automatically. You can run scripts on every pull to build with the `--script` (or `-s`) flag or run your deployments with the `--process` (or `-p`) flag. 38 | 39 | ```sh 40 | gw /path/to/repo --script 'run build process' --process 'run deployment' 41 | ``` 42 | 43 | For your first steps with `gw`, see [Get started](/usage/start). 44 | 45 | ## Next steps 46 | 47 | But this is not all `gw` can do. With a little creativity you can create a lot of things, for example: 48 | 49 | - pull changes for [development](/guides/development) and get a notification; 50 | - rollout a [docker-compose](/guides/docker-compose) deployment continously; 51 | - build on all commits for a minimal [Netlify](/guides/netlify) alternative, 52 | 53 | ...and many thing else. For a complete list, check out the [guides page](/guides). 
54 | -------------------------------------------------------------------------------- /docs/content/guides/_index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Guides" 3 | sort_by = "weight" 4 | 5 | weight = 100 6 | paginate_by = 1000 # display all posts 7 | insert_anchor_links = "right" 8 | +++ 9 | 10 | `gw` is useful for any setup that requires reacting to changes in a git repository. Here are a few ideas, but the limit is your creativity: -------------------------------------------------------------------------------- /docs/content/guides/compiled.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Compiled languages" 3 | weight = 3 4 | +++ 5 | 6 | # Compiled languages 7 | 8 | For compiled (Go, Rust) or transpiled (TypeScript) you can use `gw` to build new assets then restart the running binary to restart the server. 9 | 10 | ## Configuration 11 | 12 | Simply add the scripts to build the binary then another one to restart it and watch the repository. 
13 | 14 | ### TypeScript 15 | 16 | For example for TypeScript, transpile to JS and run with Node.js: 17 | 18 | ```sh 19 | gw /path/to/repo -s 'npx tsc -p tsconfig.json' -p 'node dist/index.js' 20 | ``` 21 | 22 | If you want to ensure that your code is correct, you can run the unit tests first: 23 | 24 | ```sh 25 | gw /path/to/repo -s 'npm run test' -s 'npx tsc -p tsconfig.json' -p 'node dist/index.js' 26 | ``` 27 | 28 | For Next.js and other frameworks that require a build step before starting, you can use: 29 | 30 | ```sh 31 | gw /path/to/repo -s 'npm run build' -p 'npm run start' 32 | ``` 33 | 34 | ### Go 35 | 36 | For Go, you can either run it directly or build it first and run it as a subprocess: 37 | 38 | ```sh 39 | gw /path/to/repo -p 'go run main.go' 40 | # or 41 | gw /path/to/repo -s 'go build main.go' -p './main' 42 | ``` 43 | 44 | You can add testing as a script, if you want to run the unit tests before the code is deployed: 45 | 46 | ```sh 47 | gw /path/to/repo -s 'go test' -s 'go build main.go' -p './main' 48 | ``` 49 | 50 | ### Rust 51 | 52 | For Rust, you can either run it directly or build it first and run it as a subprocess: 53 | 54 | ```sh 55 | gw /path/to/repo -p 'cargo run --release' 56 | # or 57 | gw /path/to/repo -s 'cargo build --release' -p './target/release/repo' 58 | ``` 59 | 60 | Add the tests here as well to make sure that the code is correct: 61 | 62 | ```sh 63 | gw /path/to/repo -s 'cargo test' -s 'cargo build --release' -p './target/release/repo' 64 | ``` 65 | 66 | Also checkout [Docker configuration](/guides/docker-compose). 
67 | -------------------------------------------------------------------------------- /docs/content/guides/configuration.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Configuration files" 3 | weight = 5 4 | +++ 5 | 6 | # Configuration files 7 | 8 | Configuration files for services are rarely commited, which means that in case of a fatal issue, you can't restore files, audit changes or rollback. With `gw` you can commit your configurations, and restart the service on change, without ever having to use a VPN or SSH. 9 | 10 | ## Project configuration 11 | 12 | Simply create a new git repository with your specific config files, and push it to a remote server. 13 | 14 | ## gw configuration 15 | 16 | All you have to do is point `gw` to the config directory and reload or restart the service if there are any changes. 17 | 18 | ```sh 19 | gw /etc/repo -s 'systemctl restart service' 20 | ``` 21 | 22 | ## Examples 23 | 24 | ### nginx configuration 25 | 26 | An example configuration could be codifying the reverse proxy. This could be used as a backup for worst-case scenario, but also would help to be able to modify files on your local computer and have those reflected on your production environment. 27 | 28 | To start off, create a git repository in your `/etc/nginx` directory, and push it to a remote: 29 | 30 | ```sh 31 | cd /etc/nginx 32 | git init 33 | git add -A 34 | git commit -m 'Initial commit' 35 | # set a remote and push to it 36 | ``` 37 | 38 | Then you can setup `gw` to reload `nginx` on config change. 
You can either use the `nginx` command or reload using `systemctl`: 39 | 40 | ```sh 41 | gw /etc/nginx -s 'nginx -s reload' 42 | # or 43 | gw /etc/nginx -s 'systemctl reload nginx' 44 | ``` 45 | 46 | You can also run `nginx` as a subprocess if you don't want to manage it separately, but this will stop and restart it every time a pull occurs: 47 | 48 | ```sh 49 | gw /etc/nginx -p "nginx -g 'daemon off;'" 50 | ``` 51 | 52 | If you want to avoid getting your system into a bad state by mistake, you can test the config files first with `nginx -t`: 53 | 54 | ```sh 55 | gw /etc/nginx -s 'nginx -t' -s 'systemctl reload nginx' 56 | ``` 57 | 58 | ### DNS configuration with bind 59 | 60 | Another great experiment is codifying the most popular DNS service: bind (Berkeley Internet Name Domain). A nice feature of bind is that it can be configured entirely from plaintext files. This means that we can commit our DNS records and modify it with a simple text editor locally. It can also be rolled out to multiple hosts at the same time, thus avoiding any kind of zone transfer. We are actually using a setup like this in production! 61 | 62 | To start off, just initialize a git repository in the `/etc/bind` directory and push it to a remote: 63 | 64 | ```sh 65 | cd /etc/bind 66 | git init 67 | git add -A 68 | git commit -m 'Initial commit' 69 | # set a remote and push to it 70 | ``` 71 | 72 | Then if there are any changes, restart bind with `rndc` or `systemctl`: 73 | 74 | ```sh 75 | gw /etc/bind -s 'rndc reload' 76 | # or 77 | gw /etc/bind -s 'systemctl restart named' 78 | ``` 79 | -------------------------------------------------------------------------------- /docs/content/guides/development.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Local development" 3 | weight = 6 4 | +++ 5 | 6 | # Local development 7 | 8 | You can use `gw` to help in local development. 
You can pull your repository continuously, so in case somebody commits (and there are no conflicts) you can get the newest version. You can also set a notification to see immediately if somebody modified anything. 9 | 10 | > **Note**: I don't recommend working on the same branch with multiple people, but sometimes it happens. 11 | 12 | ## Configuration 13 | 14 | Simply set the path to your repository: 15 | 16 | ```sh 17 | gw /path/to/repo 18 | ``` 19 | 20 | You can use the `notify-send` command to pop up notifications on Linux, let's use it to show if somebody committed to our branch. 21 | 22 | ```sh 23 | gw /path/to/repo -s 'notify-send "There are new commits on your branch!"' 24 | ``` -------------------------------------------------------------------------------- /docs/content/guides/docker-compose.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Docker Compose" 3 | weight = 4 4 | +++ 5 | 6 | # Docker Compose 7 | 8 | `gw` plays very well with `docker`, especially `docker compose`. You can use `gw` to build the Docker image and then stop and recreate the container. It can be done with `docker`, but it is recommended to do it with `docker compose` as it will handle the whole lifecycle of build, stop and restart. 9 | 10 | ## Project configuration 11 | 12 | Make sure to have a `docker-compose.yaml` file in the root directory. It can start existing containers or build images from the local files. 13 | 14 | > **Note**: if you build docker images locally, you save the storage and transfer costs of the docker image repository. 15 | 16 | Since it is also a file in your git repository, it basically doubles as an infrastructure-as-a-code. You can modify the `docker compose` setup (e.g. add another dependency, for example cache), commit and have the changes reflected in your infrastructure immediately. 17 | 18 | ## gw configuration 19 | 20 | Just simply point to your repository and run `docker compose up`. 
It will restart your containers and apply any new changes. 21 | 22 | ```sh 23 | gw /path/to/repo -s 'docker compose up -d' 24 | ``` 25 | 26 | If you are building your containers, add `--build`: 27 | 28 | ```sh 29 | gw /path/to/repo -s 'docker compose up -d --build' 30 | ``` 31 | 32 | ## Systemd unit 33 | 34 | One neat way to use `docker-compose` is to use it together with [systemd units](/usage/systemd). They play very nicely together 35 | because `docker-compose` is hard to be containerized, but this way it can run in the background on the host and update automatically. 36 | 37 | To create a systemd unit, you can use the [systemd usage guide](/usage/systemd#usage), but add this to the unit file (e.g. `/etc/systemd/system/gw.service`): 38 | 39 | ```ini 40 | # /etc/systemd/system/gw.service 41 | [Unit] 42 | Description = Autorestartable docker-compose for application 43 | After = NetworkManager-wait-online.service network.target docker.service 44 | PartOf = docker.service 45 | 46 | [Service] 47 | Type = simple 48 | ExecStart = /usr/local/bin/gw /path/to/repo -v -p '/usr/bin/docker compose -f /path/to/repo/docker-compose.yml up --build' 49 | 50 | [Install] 51 | WantedBy = default.target 52 | ``` 53 | 54 | This will rebuild your containers in the directory, every time there is a change. 55 | 56 | ### Template systemd unit 57 | 58 | If you have many applications that you want to autorestart with `docker-compose`, it might make sense to use a [template systemd unit](https://fedoramagazine.org/systemd-template-unit-files/). 59 | These are units that have a pattern (`%I`) in their unit files, which you can call multiple times with multiple configurations. 
60 | 61 | For example if you have a `/path/to/repos/app1` and `/path/to/repos/app2`, you can create a generic systemd unit such as this 62 | (make sure that the filename ends with `@`): 63 | 64 | ```ini 65 | # /etc/systemd/system/gw@.service 66 | [Unit] 67 | Description = Autorestartable docker-compose for %I 68 | After = NetworkManager-wait-online.service network.target docker.service 69 | PartOf = docker.service 70 | 71 | [Service] 72 | Type = simple 73 | WorkingDirectory = /path/to/repos/%I 74 | ExecStart = /usr/local/bin/gw /path/to/repos/%I -vv -p '/usr/bin/docker compose -f /path/to/repos/%I/docker-compose.yml -p %I up --build' 75 | 76 | [Install] 77 | WantedBy = default.target 78 | ``` 79 | 80 | You can call it using the app name after a `@`, like `gw@app1` and `gw@app2`, and the `%I` will be automatically replaced with `app1` and `app2`. 81 | So in this case `systemctl start gw@app1` will start a `docker-compose` in `/path/to/repos/app1` using the `/path/to/repos/app1/docker-compose.yml` file 82 | with the project name `app1`. 83 | 84 | You can extend these further by simply adding new directories and starting an automaticly deploying process with one line of code. 85 | -------------------------------------------------------------------------------- /docs/content/guides/dynamic.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Dynamic languages" 3 | weight = 2 4 | +++ 5 | 6 | # Dynamic languages 7 | 8 | For dynamic languages you can use `gw` very simply with [processes](/usage/actions#processes). 9 | 10 | ## Configuration 11 | 12 | Wrap the way you normally start your program with the `--process` flag. 
13 | 14 | ### Node.js 15 | 16 | For example for Node.js programs, use the usual `npm run start` script with a process: 17 | 18 | ```sh 19 | gw /path/to/repo -p 'npm run start' 20 | ``` 21 | 22 | You can also run the unit tests first, if you want to make sure to restart if the code is in a correct state: 23 | 24 | ```sh 25 | gw /path/to/repo -s 'npm run test' -p 'npm run start' 26 | ``` 27 | 28 | If you want to use a build step, for example for TypeScript or Next.js, look at the [TypeScript guide](/guides/compiled#typescript). 29 | 30 | ### Python 31 | 32 | Use the same idea with Python, wrap your program's entrypoint in a process: 33 | 34 | ```sh 35 | gw /path/to/repo -p 'python manage.py runserver' 36 | ``` 37 | 38 | ### Ruby 39 | 40 | Same thing with Ruby, add process to your program's entrypoint: 41 | 42 | ```sh 43 | gw /path/to/repo -p 'bin/rails server' 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/content/guides/netlify.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Netlify alternative" 3 | weight = 7 4 | +++ 5 | 6 | # Netlify alternative 7 | 8 | If you want to generate a version for every commit of your code, and expose it as hashes, you can `gw` and environment variables. 9 | 10 | > **Note**: This solution **won't** be exactly like Netlify, it won't have the web UI, on-click rollbacks, etc. As this already requires a generous amount of ducktape, please be vary of using this on production. If you need all the features of Netlify, just use Netlify. 11 | 12 | ## Project configuration 13 | 14 | The main idea behind this solution is that most static site generators allow changing output directories. We can wire this together with the git short hash set in the environment by `gw`, and build the different versions side-by-side. 
15 | 16 | For this you have to find the output directory flag in your static site generator and set it to the `GW_GIT_COMMIT_SHORT_SHA` variable. For example for Jekyll and Hugo this is the `--destination` flag, in 11ty this is the `--output` flag. 17 | 18 | ```sh 19 | jekyll build --destination=output/$GW_GIT_COMMIT_SHORT_SHA 20 | hugo --destination=output/$GW_GIT_COMMIT_SHORT_SHA 21 | npx @11ty/eleventy --input=. --output=output/$GW_GIT_COMMIT_SHORT_SHA 22 | ``` 23 | 24 | ## gw configuration 25 | 26 | You can use this command to configure your `gw`: 27 | 28 | ```sh 29 | gw /path/to/repo -S 'jekyll build --destination=output/$GW_GIT_COMMIT_SHORT_SHA' 30 | ``` 31 | 32 | To build another version for the latest you can copy the files to another folder: 33 | 34 | ```sh 35 | gw /path/to/repo -S 'jekyll build --destination=output/$GW_GIT_COMMIT_SHORT_SHA' -S 'cp -r output/$GW_GIT_COMMIT_SHORT_SHA output/latest' 36 | ``` 37 | 38 | ## Web server configuration 39 | 40 | One extra setup that you have to do is point your web server to this directory. By default you can use it as path prefixes, but it can be configured to sub domains to these directories. That way you could reach the commit `0c431ff1` on the url `0c431ff1.example.net`. 41 | 42 | > **Note**: Make sure to setup wildcard domains in your DNS server so it redirects all domains to your server! 43 | 44 | ## Nginx 45 | 46 | You can use regexes in [server_name](https://nginx.org/en/docs/http/server_names.html) to rewrite subdomains into different folders. For example this configuration will resolve `0c431ff1.example.net` to `/path/to/repo/0c431ff1`: 47 | 48 | ```sh 49 | http { 50 | server { 51 | # It will capture the subdomain (e.g. 
0c431ff1.example.net) 52 | server_name ~^([0-9a-f]+)\.example\.net$; 53 | 54 | location / { 55 | # And resolve to /path/to/repo/0c431ff1 56 | root /path/to/repo/$1; 57 | } 58 | } 59 | 60 | # You can add another to reach the latest 61 | server { 62 | server_name example.net; 63 | 64 | location / { 65 | root /path/to/repo/latest; 66 | } 67 | } 68 | } 69 | ``` -------------------------------------------------------------------------------- /docs/content/guides/php.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "PHP" 3 | weight = 1 4 | +++ 5 | 6 | # PHP 7 | 8 | The simplest configuration is for PHP, because you don't have to build or restart anything. 9 | 10 | ## Configuration 11 | 12 | Just simply set `gw` to watch the directory and it will pull the changes: 13 | 14 | ```sh 15 | gw /path/to/directory 16 | ``` 17 | 18 | In case you don't have access in your shared hosting to start long-running tasks, but you can run cronjobs (e.g. CPanel), you can still use `gw`. Just download the binary and add to the crontab with the `--once` flag: 19 | 20 | ```sh 21 | * * * * * gw /path/to/directory --once 22 | ``` 23 | 24 | This will pull changes every minute. -------------------------------------------------------------------------------- /docs/content/reference/_index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Reference" 3 | sort_by = "weight" 4 | 5 | weight = 200 6 | paginate_by = 1000 7 | insert_anchor_links = "right" 8 | +++ 9 | 10 | The reference contains detailed information about the inner workings of `gw`. 
11 | -------------------------------------------------------------------------------- /docs/content/reference/authentication.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Authentication" 3 | weight = 2 4 | +++ 5 | 6 | # Authentication 7 | 8 | By default `gw` supports the same authentication methods that `git` does: you can use username-password authentication for HTTPS and ssh-keys with SSH. It uses your platform's credential helpers to find saved authentications. If you want to change these settings, you can use command-line flags. 9 | 10 | To debug authentication issues, it is recommended to run `gw` with `-vvv` (tracing mode) to log every credential attempt. 11 | 12 | ## SSH 13 | 14 | For SSH it is recommended to use the ssh-keys that you are already using on your system. If you cloned the repository with a user, you can use the same user to run `gw`. If you are running `gw` in a container, you can mount the whole `.ssh` folder into `/root/.ssh` (`.ssh/known_hosts` is usually needed as well). 15 | 16 | > **Note**: If you only running `gw` for a single repository, for improved security use read-only [Deploy keys](#deploy-keys). 17 | 18 | By default SSH authentication checks these files for credentials: 19 | 20 | - `.ssh/id_dsa` 21 | - `.ssh/id_ecdsa` 22 | - `.ssh/id_ecdsa_sk` 23 | - `.ssh/id_ed25519` 24 | - `.ssh/id_ed25519_sk` 25 | - `.ssh/id_rsa` 26 | 27 | If you want to use another file, you can use the `--ssh-key` (or `-i`) option: 28 | 29 | ```sh 30 | gw /path/to/repo --ssh-key ~/.ssh/id_deploy 31 | ``` 32 | 33 | ### SSH known hosts 34 | 35 | It is recommended to use the same `.ssh` directory that you used for cloning the repository, because git also requires that the remote's host key appears in `known_hosts`. However, if `gw` doesn't find a `.ssh/known_hosts` file (e.g. in a container), it will create a new one using some common host keys for GitHub, GitLab and Bitbucket. 
If you are using any of these services, `gw` should work out of the box. 36 | 37 | In case you are using another service or self-host your git server, host key checking might fail. Use `--git-known-host` to add a custom host key instead of the default contents to the `.ssh/known_hosts`. 38 | 39 | ```sh 40 | gw /path/to/repo --git-known-host "codeberg.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIVIC02vnjFyL+I4RHfvIGNtOgJMe769VTF1VR4EB3ZB" 41 | ``` 42 | 43 | This is designed for simple operations, if you want to add multiple entries you are better off creating your own `.ssh/known_hosts` file. 44 | 45 | ### Deploy keys 46 | 47 | If you want to use ssh keys with only one repository it is usually better to create a Deploy Key. These are the same as regular ssh keys, but only have pull access to one repository, reducing the attack surface. To get started generate an ssh key: 48 | 49 | ``` 50 | ssh-keygen 51 | ``` 52 | 53 | Both [GitHub](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/managing-deploy-keys) and [GitLab](https://docs.gitlab.com/ee/user/project/deploy_keys) supports Deploy keys by uploading it in the repository settings. Go to **Settings** (or **Settings** > **Repository** on GitLab) and enter **Deploy keys** and press **Add new deploy key**. Copy the content of the public key into the **Key** textarea and save it. `gw` never writes, so pull access is enough. 54 | 55 | If you used a non-default path, set it with `--ssh-key`: 56 | 57 | ```sh 58 | gw /path/to/repo --ssh-key ~/.ssh/id_deploy 59 | ``` 60 | 61 | ## Https 62 | 63 | Even though it is less common on servers, you can also use HTTPS for pulling repositories. By default `gw` will check credential helpers to extract username and passwords. If you want to set a username and password manually you can use the `--git-username` and `--git-token` fields. 
64 | 65 | > **Note**: **Never** use your password as the `--git-token`, always use read-only [repository-level tokens](#repository-level-tokens) instead. 66 | 67 | ```sh 68 | gw /path/to/repo --git-username username --git-token f7818t23fb1amsc 69 | ``` 70 | 71 | If you are going this route, be careful to never leak your credentials and only use tokens with minimal privileges! 72 | 73 | ### Repository-level tokens 74 | 75 | Similarly to Deploy keys, it is recommended to only allow tokens access to a single repository. For this you can use [GitHub's Fine-grained personal access tokens](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-fine-grained-personal-access-token) or [GitLab's Deploy tokens](https://docs.gitlab.com/ee/user/project/deploy_tokens/). 76 | 77 | #### Set up fine-grained access tokens in GitHub 78 | 79 | In GitHub, there are no repository scoped tokens, but you can emulate one with [Fine-grained personal access tokens](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-fine-grained-personal-access-token). 80 | 81 | Click on your avatar and go to **Settings** > **Developer Settings** > **Personal access tokens** > **Fine-grained tokens**. Click on **Generate new token**, enter a name and set an expiration longer into the future. Select the repositories that this token should have access under **Only select repositories**. Under **Repository Permissions** select **Access: read-only** for **Contents**. Copy this token and use it with your GitHub username: 82 | 83 | ```sh 84 | gw /path/to/repo --git-username octocat --git-token github_pat_11AD... 85 | ``` 86 | 87 | #### Set up deploy tokens in GitLab 88 | 89 | In GitLab there are [Deploy tokens](https://docs.gitlab.com/ee/user/project/deploy_tokens/), that are access tokens scoped to a repository. 
90 | 91 | Go to the project's **Settings** > **Repository** > **Deploy tokens** and click on **Add token**. Fill out the username and check `read_repository` to be able to pull commits. Copy the username and the token to use it: 92 | 93 | ```sh 94 | gw /path/to/repo --git-username git_token --git-token gldt-... 95 | ``` 96 | -------------------------------------------------------------------------------- /docs/content/reference/commandline.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "CLI arguments" 3 | weight = 3 4 | +++ 5 | 6 | # Command-line arguments 7 | 8 | This details the arguments that the `gw` binary takes. 9 | 10 | ## Positional arguments 11 | 12 | Every `gw` execution should specify a directory to a git repository. This will be the repository which the `gw` checks to see if there are any changes and run actions. 13 | 14 | ## Flag arguments 15 | 16 | `gw` follows GNU argument conventions, so every short arguments start with `-` and long arguments start with `--`. 17 | 18 | ### Basic flags 19 | 20 | To get information about `gw` or change the output settings (more verbose with `-v` or quieter with `-q`), you can use these flags: 21 | 22 | | Argument name | Example | Notes | 23 | | ----------------- | ----------- | ---------------------------------------------------------------------- | 24 | | `-v`, `--verbose` | `-v`, `-vv` | Increase verbosity, can be set multiple times (-v debug, -vv tracing). | 25 | | `-q`, `--quiet` | `-q` | Only print error messages. | 26 | | `-V`, `--version` | `--version` | Print the current version. | 27 | | `-h`, `--help` | `--help` | Print this help. | 28 | 29 | ### Trigger flags 30 | 31 | Use these flags, to set the different modes to check for changes: 32 | 33 | - Scheduled triggers (`-d`, default every 1 minute): check with a specified interval using [duration-string](https://github.com/Ronniskansing/duration-string) settings. 
Pass `0s` for disabling scheduled triggers. 34 | - Trigger once (`--once`): check if there are changes and then exit immediately. 35 | - Http trigger (`--http`): run an HTTP server on an interface and port (e.g. `0.0.0.0:8000`), which triggers on any incoming request. For more information, see [Webhook](/usage/webhook). 36 | 37 | | Argument name | Example | Notes | 38 | | --------------- | ------------------------------------------------ | ---------------------------------------------------------------------- | 39 | | `-d`, `--every` | `-d 5m`, `-d 1h`, `-d 0s` | Refreshes the repo with this interval. (default: 1m) | 40 | | `--once` | `--once` | Try to pull only once. Useful for cronjobs. | 41 | | `--http` | `--http localhost:1234`, `--http 127.0.0.1:4321` | Runs an HTTP server on the URL, which allows to trigger by calling it. | 42 | 43 | ### Check flags 44 | 45 | These flags change the way `gw` checks the git repository for changes: 46 | 47 | - On every push (`--on push`, default): pull the commits on the current branch and run actions if there are any new commits. 48 | - On every tag (`--on tag` or `--on tag:v*`): fetch the commits on the current branch and only pull to the first tag. You can pass a glob, in which case the first tag matching the glob is used. If there are no matching tags, no pull happens. 49 | 50 | You can also configure the authentication for the git repository: 51 | 52 | - SSH authentication (`-i`, `--ssh-key`): specify the path to the SSH key that will be used. 53 | - HTTP authentication (`--git-username`, `--git-token`): specify a username-token pair. 54 | 55 | For more information see [Authentication](/reference/authentication). 
56 | 57 | | Argument name | Example | Notes | 58 | | ------------------ | ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | 59 | | `--on` | `--on push`, `--on tag`, `--on tag:v*` | The trigger on which to run (can be `push`, `tag` or `tag:pattern`). (default: push) | 60 | | `-i`, `--ssh-key` | `-i ~/.ssh/test.id_rsa` | Set the path for an ssh-key to be used when pulling. | 61 | | `--git-username` | `--git-username daniel7grant` | Set the username for git to be used when pulling with HTTPS. | 62 | | `--git-token` | `--git-token 'ghp_jB3c5...'` | Set the token for git to be used when pulling with HTTPS. | 63 | | `--git-known-host` | `--git-known-host 'example.com ssh-rsa AAAAB3NzaC...'` | Add this line to the known_hosts file to be created (e.g. "example.com ssh-ed25519 AAAAC3..."). | 64 | 65 | ### Action flags 66 | 67 | These flags configure the actions that should be run when the changes occur. These come in two flavours lowercase letters indicate that it is run directly, while uppercase letters will run in a subshell (e.g. `/bin/sh` on Linux). This is useful if you want to expand variables, pipe commands etc. It is recommended to always use single-quotes for the argument values to avoid accidental shell issues (e.g. expanding variables at start time). 68 | 69 | - Run scripts (`-s`, `-S`): execute a script on every change, that will be waited until it ends. 70 | - Start process (`-p`, `-P`): start a process, when starting `gw`, that will be restarted on every change. 71 | 72 | You can also configure the process running: 73 | 74 | - Retries (`--process-retries`): in case of a failed process, how many time should it be restarted, before marking it failed. 75 | - Stop settings (`--stop-signal`, `--stop-timeout`): how to stop the process in case of a restart, by default sending `SIGINT` and after 10s a `SIGKILL` (supported only on `*NIX`). 
76 | 77 | For more information see [Actions on pull](/usage/actions). 78 | 79 | | Argument name | Example | Notes | 80 | | ------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- | 81 | | `-s`, `--script` | `-s 'cat FILENAME'` | A script to run on changes, you can define multiple times. | 82 | | `-S` | | Run a script in a shell. | 83 | | `-p`, `--process` | | A background process that will be restarted on change. | 84 | | `-P` | | Run a background process in a shell. | 85 | | `--process-retries` | | The number of times to retry the background process in case it fails. By default 0 for no retries. | 86 | | `--stop-signal` | | The stop signal to give the background process. Useful for graceful shutdowns. By default SIGINT. (Only supported on \*NIX) | 87 | | `--stop-timeout` | | The timeout to wait before killing for the background process to shutdown gracefully. By default 10s. | 88 | -------------------------------------------------------------------------------- /docs/content/reference/comparison.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Comparison" 3 | weight = 1 4 | +++ 5 | 6 | # Comparison 7 | 8 | There are a lot of tools that can help you deploy code only by pushing to a git repository. `gw` offers a completely unique proposition without locking yourself into expensive cloud services or complicated setups like Kubernetes. 9 | 10 | ## ArgoCD 11 | 12 | **Similar products**: [FluxCD](https://fluxcd.io/) 13 | 14 | The main inspiration for `gw` was undeniably [ArgoCD](https://argo-cd.readthedocs.io/en/stable/). It is a great tool that can access git repositories and sync your application to Kubernetes with the latest changes. 
Leveraging the Kubernetes platform it can reconcile changes (you don't need to figure out imperatively how to update) and provides autohealing, no-downtime rollouts and simple rollbacks. There is a web interface which allows to visualise, redeploy or rollback the applications with one click. 15 | 16 | The main disadvantage of ArgoCD (and similar tools) is the tight integration with Kubernetes. For smaller applications it's not worth to maintain a Kubernetes cluster, but you might want to still use GitOps. If you don't need scalability, it is less complex to setup a `gw` script on a cheap VPS. 17 | 18 | ## Netlify 19 | 20 | **Similar products**: [Vercel](https://vercel.com/), [Cloudflare Workers/Pages](https://workers.cloudflare.com/) 21 | 22 | Cloud tools like [Netlify](https://www.netlify.com/) were the first ones that really moved automatic deployments to the mainstream. You can connect a git repository to Netlify, which then builds and deploys a separate version of your application on every commit. You can preview these and promote them to production or rollback if an issue arises. Netlify also takes care of DNS and certificate management. 23 | 24 | However with Netlify you can only deploy some compatible stacks (static application with serverless functions). If you want to deploy full-stack applications or need advanced features (e.g. task management or notifications), you might need to pay for separate services... if you can do it at all. These cloud-based vendors also lock you into their services, which makes it harder to move between the platforms. `gw` is entirely platform-independent and can build and deploy your application even if it is completely full-stack. By deploying to a single VPS you can avoid suprising bills that you can't get out of. 
25 | 26 | ## GitHub Actions 27 | 28 | **Similar products**: [GitLab CI](https://docs.gitlab.com/ee/ci/), [Jenkins](https://www.jenkins.io/) 29 | 30 | A common way to deploy applications automatically is push-based CD. It means using CI (for example [GitHub Actions](https://docs.github.com/en/actions)) to build and push the code to the server. It can be useful because it can integrate with your already existing solutions. You check the code by unit and integration testing before pulling the trigger on a deployment. It also provides pre-built actions which can handle complex use-cases (Docker building, deploying IaaC). 31 | 32 | The biggest drawback of push-based deployments is that it needs access to your server. If you just want to copy some code to a server it might be a security risk to allow SSH access from an untrusted CI worker. It can get even more complicated when your servers are in a secure network (behind NAT or VPN). On the other hand, `gw` can run on your server and can pull the code, avoiding the security problems altogether. 33 | 34 | ## Watchtower 35 | 36 | [Watchtower](https://containrrr.dev/watchtower/) provides a half-push, half-pull approach using the common element between the CI and the server: the image registry. You can use your existing CI infrastructure to build a Docker image and push it to an image registry, while the server can listen to the registry and update the running containers on demand. This is a very good solution that can use existing CI code, while also providing instant feedback from the server. 37 | 38 | The main issue with this solution is that it puts the image registry under a lot of pressure. If you are deploying often, the storage and network costs might climb very quickly. If you are building large Docker images, it can also get considerable slow: CI-s rarely cache effectively and pushing and pulling from a registry can also be a slow operation. 
With `gw` you can save this roundtrip, building the Docker image right on the server. It also improves caching because the previous version of the image is right there. You have to be careful not to overload your server but it might be worth to avoid slow and expensive registries. 39 | 40 | ## Coolify 41 | 42 | **Similar products**: [Dokploy](https://dokploy.com/) 43 | 44 | [Coolify](https://coolify.io/) is very definitely the closest product to `gw`: an open-source, self-hostable, pull-based deployment platform that runs everywhere. You can install Coolify and all of their dependencies with a single command. It has a neat web interface, can manage databases with external backups and provides a reverse proxy with automatically renewing certificates. It is a great solution if you want to have everything set up automatically! 45 | 46 | The main problem with Coolify is that it takes over your server entirely. Instead of working with your existing deployments, it handles Docker, reverse proxying and certificates. It might be an excellent way of running things, but if you have some specific feature or you already have a running server, it might be a serious investment to transfer. Compared to this, `gw` draws from the UNIX philosophy: it is a modular piece of software that you can integrate with other parts to achieve what you want. You can slot in `gw` to almost any existing deployment, but expect to handle databases, certificates and other problems with different tools. 47 | 48 | ## while true; do git pull; done 49 | 50 | > I can do all of this with shell scripts... 51 | 52 | While it is true that in the core, `gw` is just a loop running git pull and actions, but it is also much more. With shell scripts you have to handle git polling, process management and logging, while also handling errors without crashing the script. Not to mention more advanced features like graceful shutdowns, multiple scripts and processes, git authentication or webhooks. 
`gw` is a very lightweight binary that provides all of these, while being configurable, portable and reliable. If you prefer to write the shell script you can do it, but if you don't, just drop `gw` in and let it handle the boring parts for you! 53 | -------------------------------------------------------------------------------- /docs/content/reference/environment-variables.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Env variables" 3 | weight = 4 4 | +++ 5 | 6 | # Environment variables 7 | 8 | The different steps can add variables to the context, which are exposed to the scripts as environment variables. 9 | All of these are prefixed with `GW_` to avoid collisions. The second part usually identifies the specific trigger, 10 | check or action. 11 | 12 | If you want to use the environment variables in [command-line arguments](/reference/commandline), make sure to use the subshell variants (`-S`, `-P`), 13 | because only these can expand variables. It is recommended to use single-quotes to avoid expanding at start time. A good way 14 | to debug environment variables is to print them with `-S 'printenv'`. 15 | 16 | ## Trigger variables 17 | 18 | These are the variables that are exposed from the trigger, which can be scheduled trigger or an HTTP endpoint. 19 | 20 | | Variable name | Example | Notes | 21 | | ------------------- | ------------------ | --------------------------------------- | 22 | | `GW_TRIGGER_NAME` | `SCHEDULE`, `HTTP` | The identifier of the trigger. | 23 | | `GW_HTTP_METHOD` | `GET`, `POST` | The HTTP method that was called. | 24 | | `GW_HTTP_URL` | `/`, `/trigger` | The HTTP URL that was called. | 25 | | `GW_SCHEDULE_DELAY` | `1m`, `1d`, `1w` | The delay between two scheduled checks. | 26 | 27 | ## Check variables 28 | 29 | These are the variables that are exposed from the check, which currently is always git. 
30 | 31 | | Variable name | Example | Notes | 32 | | -------------------------------- | ------------------------------------ | --------------------------------------------- | 33 | | `GW_CHECK_NAME` | `GIT` | The identifier of the check. | 34 | | `GW_GIT_BEFORE_COMMIT_SHA` | `acfd4f88da199...` | The SHA of the commit before the pull. | 35 | | `GW_GIT_BEFORE_COMMIT_SHORT_SHA` | `acfd4f8` | The 7-character short hash of the commit. | 36 | | `GW_GIT_BRANCH_NAME` | `main` | The name of the branch, that the repo is on. | 37 | | `GW_GIT_COMMIT_SHA` | `acfd4f88da199...` | The SHA of the commit after the pull. | 38 | | `GW_GIT_COMMIT_SHORT_SHA` | `acfd4f8` | The 7-character short hash of the commit. | 39 | | `GW_GIT_REF_NAME` | `refs/heads/main`, `refs/tags/v1.0` | The full name of the current git ref. | 40 | | `GW_GIT_REF_TYPE` | `branch`, `tag` | The type of the ref we are currently on. | 41 | | `GW_GIT_REMOTE_NAME` | `origin` | The name of the remote used. | 42 | | `GW_GIT_REMOTE_URL` | `git@github.com:daniel7grant/gw.git` | The URL to the git remote. | 43 | | `GW_GIT_TAG_NAME` | `v1.0` | The tag of the pulled commit if there is one. | 44 | 45 | ## Action variables 46 | 47 | These are the variables added by the action, which is script or process. 48 | 49 | | Variable name | Example | Notes | 50 | | ---------------- | ------------------- | ------------------------------------------- | 51 | | `GW_ACTION_NAME` | `SCRIPT`, `PROCESS` | The identifier of the action. | 52 | | `GW_DIRECTORY` | `/src/http/gw` | The absolute path to the current directory. 
| 53 | -------------------------------------------------------------------------------- /docs/content/usage/_index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Usage" 3 | sort_by = "weight" 4 | 5 | weight = 0 6 | paginate_by = 1000 7 | 8 | insert_anchor_links = "right" 9 | +++ 10 | 11 | Here are your first steps with `gw`: -------------------------------------------------------------------------------- /docs/content/usage/actions.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Actions on pull" 3 | weight = 3 4 | +++ 5 | 6 | # Actions on pull 7 | 8 | The main point of `gw` is to do actions every time there code is pulled. There are multiple actions: running a script or restarting a background process. The order of the actions matter, and they will be executed sequentially based on the order of the command-line arguments. 9 | 10 | ## Scripts 11 | 12 | The simplest action is to run a command on pull with `--script` or `-s`: 13 | 14 | ```sh 15 | gw /path/to/repo -s 'echo "updated"' 16 | ``` 17 | 18 | You can define multiple scripts, these will run one after another (there is currently no way to parallelise these). If one of the scripts fail, the other scripts won't run at all. You can use scripts to run tests before updating your code. 19 | 20 | ```sh 21 | gw /path/to/repo -s 'echo "testing"' -s 'echo "updating"' 22 | ``` 23 | 24 | > **Note**: If you have more than 2-3 scripts on every pull it might be worth it to refactor it into an `update.sh` shell script. It can contain logic and be commited which helps if you want to change it without updating the gw process. 25 | 26 | The output of the script is not printed by default, you can increase verbosity (`-v`) to get output from the script: 27 | 28 | ```sh 29 | $ gw /path/to/repo -v -s 'echo "updated"' 30 | 2024-10-18T16:28:53.907Z [INFO ] There are updates, running actions. 
31 | 2024-10-18T16:28:53.907Z [INFO ] Running script "echo" in /path/to/repo. 32 | 2024-10-18T16:28:53.913Z [DEBUG] [echo] updated 33 | 2024-10-18T16:28:53.913Z [INFO ] Script "echo" finished successfully. 34 | ``` 35 | 36 | By default, scripts are executed directly to avoid common issues with shells (e.g. shell injection and unexpected globbing). If you instead want to run in a shell to expand variables or use shell-specific functionality (e.g. pipes or multiple commands), use the `-S` flag. These scripts will run in a shell: `/bin/sh` on Linux and `cmd.exe` on Windows. 37 | 38 | ```sh 39 | gw /path/to/repo -S 'ls -l . | wc -l' 40 | ``` 41 | 42 | The full environment is passed to scripts with a number of [gw-specific environment variables](/reference/environment-variables). If you want to use variables make sure to use single-quotes so they aren't expanded beforehand. 43 | 44 | ```sh 45 | gw /path/to/repo -S 'ls -l $BUILD_DIRECTORY | wc -l' 46 | ``` 47 | 48 | Best use-cases for scripts: 49 | 50 | - [compile](/guides/compiled) or transpile your code, 51 | - rebuild some assets, 52 | - restart or reload a separately running program. 53 | 54 | ## Processes 55 | 56 | If you have some long-running program, you can use `--process` or `-p` to start it as a background process and `gw` will restart it on every pull: 57 | 58 | ```sh 59 | gw /path/to/repo -p 'ping 1.1.1.1' 60 | ``` 61 | 62 | Processes are started when `gw` is started and they are kept in the background. If there is a change the process is stopped and a new process is started. If you want to look at the output of the process, you have to increase verbosity (`-v`): 63 | 64 | ```sh 65 | $ gw /path/to/repo -v -p 'ping 1.1.1.1' 66 | 2024-03-10T15:04:37.740Z [INFO ] There are updates, running actions. 67 | 2024-10-16T18:04:25.888Z [INFO ] Starting process "ping" in /path/to/repo. 68 | 2024-10-16T18:04:25.906Z [DEBUG] [ping] PING 1.1.1.1 (1.1.1.1) 56(84) bytes of data.
69 | 2024-10-16T18:04:25.906Z [DEBUG] [ping] 64 bytes from 1.1.1.1: icmp_seq=1 ttl=57 time=16.8 ms 70 | ``` 71 | 72 | Similarly to scripts, processes are executed directly. If you want to use the native shell for variable expansion or shell-specific functionality, you can use `-P`. 73 | 74 | ```sh 75 | gw /path/to/repo -P 'ping $TARGET_IP' 76 | ``` 77 | 78 | Unlike scripts, you can only define one process. Processes also can't access gw-specific environment variables. Scripts defined before the process will be run before the restart and if defined after, they will run after. If any of the scripts before the process fails, the process will not be restarted. You can add tests and other checks to only restart the process if the code is 100% correct. 79 | 80 | ```sh 81 | gw /path/to/repo -s 'echo this runs before' -p 'ping 1.1.1.1' -s 'echo this runs after' 82 | ``` 83 | 84 | If a process fails, by default it is marked as failed and an error is printed. If you want to retry the process you can set the `--process-retries` flag: 85 | 86 | ```sh 87 | gw /path/to/repo -v -p 'ping 1.1.1.1' --process-retries 5 88 | ``` 89 | 90 | You can also change the stopping behaviour. By default processes are first tried to be gracefully stopped with SIGINT and after some timeout (default: 10s) they are killed. If you want to influence these values you can set `--stop-signal` and `--stop-timeout` respectively. On non-Unix systems these options do nothing and the process is always killed. 91 | 92 | ```sh 93 | gw /path/to/repo -v -p 'ping 1.1.1.1' --stop-signal SIGTERM --stop-timeout 10s 94 | ``` 95 | 96 | Best use-cases for processes: 97 | 98 | - run [interpreted programs](/guides/interpreted) e.g. web frameworks, 99 | - run binaries after [compiling](/guides/compiled), 100 | - run external programs to restart [on config change](/guides/configuration).
101 | -------------------------------------------------------------------------------- /docs/content/usage/crontab.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Crontab" 3 | weight = 6 4 | +++ 5 | 6 | # Crontab 7 | 8 | If you don't have shell access to the server, you can still run `gw` with the crontab. 9 | 10 | > **Note:** this will disable some advanced functions like [webhooks](/usage/webhook). Only use this if you cannot use any other solution. 11 | 12 | ## Usage 13 | 14 | There is a `--once` flag in `gw`, that checks the repository for updates and then exits. You can use this to pair with your own scheduled runner to pull for changes manually. Simply open your crontab: 15 | 16 | ```sh 17 | crontab -e 18 | ``` 19 | 20 | ...and add a new line with your `gw` script. You can use `* * * * *` to run it every minute, but you can use more advanced patterns as well (see [crontab.guru](https://crontab.guru/)). For the command, make sure to specify `--once` to avoid running continuously and add `--quiet` so it will only print on a failure: 21 | 22 | ```sh 23 | * * * * * gw /path/to/repo --once --quiet 24 | ``` 25 | 26 | > **Warning**: Cronjobs are known to be error-prone and hard to debug, so make sure to test this solution extensively before relying on this in the real world. 27 | -------------------------------------------------------------------------------- /docs/content/usage/docker.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Docker container" 3 | weight = 5 4 | +++ 5 | 6 | # Docker container 7 | 8 | If you don't want to install or run `gw` on your server, you can also use the prebuilt Docker images at [danielgrant/gw](https://hub.docker.com/r/danielgrant/gw). 9 | 10 | ## Usage 11 | 12 | If you just want to pull a repository or run simple scripts, you can run the container with [docker](https://docs.docker.com/engine/install/). 
You can mount a repository to a directory and watch it. For example: 13 | 14 | ```sh 15 | docker run -d --name gw -v /path/to/repo:/app danielgrant/gw /app 16 | ``` 17 | 18 | You can also run scripts, but these images are very small and only have a few programs set up: 19 | 20 | ```sh 21 | docker run -d --name gw -v /path/to/repo:/app danielgrant/gw /app -s "cp -r build/ html/" 22 | ``` 23 | 24 | If you prefer to use `docker-compose`, you can copy this file to a `docker-compose.yaml` and run `docker compose up -d`: 25 | 26 | ```yaml 27 | # docker-compose.yaml 28 | version: "3" 29 | 30 | services: 31 | gw: 32 | container_name: gw 33 | image: danielgrant/gw 34 | command: /app 35 | volumes: 36 | - type: volume 37 | source: /path/to/repo 38 | target: /app 39 | - type: volume 40 | source: ~/.ssh 41 | target: /root/.ssh 42 | read_only: true 43 | ``` 44 | 45 | If you are using ssh-keys, mount the `.ssh` directory as well, so it can pull. For more information, see [Authentication](/reference/authentication). 46 | 47 | ## Customization 48 | 49 | ### Copy binary from gw 50 | 51 | Most applications have many dependencies and complicated setups, and are already running on Docker. In these cases it is often preferable to build the `gw` image on top of the already existing application image. 52 | 53 | > **Note**: This doesn't mean that these should be running in the same container, but they can use the same base image in two separate containers. It is a common wisdom that one container should run one thing. 54 | 55 | For this we can start off of our application image as a base layer and add the `gw` binary in a `COPY` layer. You can simply wrap your existing command using subprocess mode (`-p`) and it will restart the script every time a pull happened. 
56 | 57 | ```dockerfile 58 | FROM example.org/registry/node-image:ubuntu 59 | 60 | # Copy from the `gw` image 61 | COPY --from=danielgrant/gw:0.4.1 /usr/bin/gw /usr/bin/gw 62 | 63 | ENTRYPOINT ["/usr/bin/gw"] 64 | CMD ["/app", "-p", "npm start"] 65 | ``` 66 | -------------------------------------------------------------------------------- /docs/content/usage/installation.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Installation" 3 | weight = 1 4 | +++ 5 | 6 | # Installation 7 | 8 | `gw` is a simple few MB binary, which you can install multiple ways. 9 | 10 | ## Install from script 11 | 12 | The simplest way is to run the installation script: 13 | 14 | ```sh 15 | curl https://gw.danielgrants.com/install.sh | sh 16 | ``` 17 | 18 | This will download the script to `~/.local/bin` or if run by root to `/usr/local/bin`. 19 | 20 | ## Download from GitHub releases 21 | 22 | Another way is to download the zipped binary from [Github Releases](https://github.com/daniel7grant/gw/releases) and install it to your path: 23 | 24 | ```sh 25 | curl -LO https://github.com/daniel7grant/gw/releases/download/v0.4.1/gw-bin_x86_64-unknown-linux-gnu.zip 26 | unzip gw-bin_x86_64-unknown-linux-gnu.zip 27 | mv gw ~/.local/bin/gw 28 | rm gw-bin_x86_64-unknown-linux-gnu.zip 29 | ``` 30 | 31 | ## Install with Cargo 32 | 33 | If you have Rust on your machine, you can also install the `gw` binary with Cargo. Use [cargo-binstall](https://github.com/cargo-bins/cargo-binstall) for a faster install or `cargo install` will build it from source. 
34 | 35 | ```sh 36 | cargo binstall gw-bin 37 | # or 38 | cargo install gw-bin 39 | ``` 40 | -------------------------------------------------------------------------------- /docs/content/usage/start.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Get started" 3 | weight = 2 4 | +++ 5 | 6 | # Get started 7 | 8 | `gw` is a simple program, that you can use to pull changes from a remote repository and run scripts on the change. 9 | 10 | ### Prerequisites 11 | 12 | First, make sure, that `gw` is installed successfully and is in your PATH. If you don't have it, start with [Installation](/usage/installation): 13 | 14 | ```sh 15 | $ gw --version 16 | 0.4.1 17 | ``` 18 | 19 | The other necessary part is a git repository to which you have pull access. It is recommended to use a repository that you know, but if you don't have one at hand, you can use the [daniel7grant/time](https://github.com/daniel7grant/time) repository. This is an example repository that is updated in every minute, so it is useful to test the auto update of `gw`. First clone this repository (if you are using your own, clone again), and enter the cloned directory: 20 | 21 | ```sh 22 | git clone https://github.com/daniel7grant/time.git 23 | cd time 24 | ``` 25 | 26 | ## Pull files automatically 27 | 28 | To get started, point `gw` to this local repository. By default it pulls the changes every minute. We can add the `--verbose` or `-v` flag to see when the changes occur: 29 | 30 | ```sh 31 | gw /path/to/repo -v 32 | ``` 33 | 34 | If you are using your own repository, create a commit in a different place, and see how it gets automatically pulled (in the case of the `time` repo, there is a commit every minute). The verbose logs should print that a git pull happened: 35 | 36 | ```sh 37 | $ gw /path/to/repo -v 38 | # ... 39 | 2024-03-10T14:48:13.447Z [DEBUG] Checked out fc23d21 on branch main. 
40 | 2024-03-10T14:48:13.447Z [INFO ] There are updates, pulling. 41 | ``` 42 | 43 | Also check the files or the `git log` to see that it the repository has been updated: 44 | 45 | ```sh 46 | cat DATETIME # it should contain the latest time 47 | git log -1 # it should be a commit in the last minute 48 | ``` 49 | 50 | ## Run scripts on pull 51 | 52 | Pulling files automatically is useful but the `--script` or `-s` flag unlocks `gw`'s potential: it can run any kind of custom script if there are any changes. For a simple example, we can print the content of a file to the log with `cat`: 53 | 54 | ```sh 55 | gw /path/to/repo -v --script 'cat DATETIME' 56 | ``` 57 | 58 | This will run every time there is a new commit, and after the pull it will print the file contents. You can see that the results are printed in the log: 59 | 60 | ```sh 61 | $ gw /path/to/repo -v --script 'cat DATETIME' 62 | # ... 63 | 2024-10-18T16:28:53.907Z [INFO ] There are updates, running actions. 64 | 2024-10-18T16:28:53.907Z [INFO ] Running script "cat" in /path/to/repo. 65 | 2024-10-18T16:28:53.913Z [DEBUG] [cat] 2024-10-18T16:28:00+0000 66 | 2024-10-18T16:28:53.913Z [INFO ] Script "cat" finished successfully. 67 | ``` 68 | 69 | You can add multiple scripts, which will run one after another. Use these scripts to build source files, restarts deployments and anything else that you can imagine. 70 | 71 | For more information, see [Scripts](/usage/actions#scripts). 72 | 73 | ### Run subprocess, restart on pull 74 | 75 | It is often enough to run scripts, but many times you also want to maintain a long-running process e.g. for web services. `gw` can help you with this, using the `--process` or `-p` flag. This will start a process in the background and restart it on pull. 76 | 77 | For example starting a python web server: 78 | 79 | ```sh 80 | $ gw /path/to/repo -v --process "python -m http.server" 81 | # ... 
82 | 2024-10-06T21:58:21.306Z [DEBUG] Setting up ProcessAction "python -m http.server" on change. 83 | 2024-10-06T21:58:21.306Z [DEBUG] Starting process: "python" in directory /path/to/repo. 84 | 2024-10-06T21:58:56.211Z [DEBUG] [python] Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ... 85 | ``` 86 | 87 | This will run a python process in the background and stop and start it again if a git pull happened. Just wrap your deployment script with `gw` and see how it gets updated every time you push to git. 88 | 89 | For more information, see [Processes](/usage/actions#processes). 90 | 91 | ## Run actions on tags 92 | 93 | Pulling on every commit might not be the right fit for every product, especially ones that need to maintain compatibility or are strictly versioned. For these, you can instead trigger on tags. Use the `--on tag` flag to only pull changes if there is a tag on the current branch. 94 | 95 | ```sh 96 | $ gw /path/to/repo -v --on tag -S 'echo $GW_GIT_TAG_NAME' 97 | # ... 98 | 2024-10-18T16:28:53.907Z [INFO ] There are updates, running actions. 99 | 2024-10-18T16:28:53.907Z [INFO ] Running script "echo" in /path/to/repo. 100 | 2024-10-18T16:28:53.913Z [DEBUG] [echo] v0.1.0 101 | 2024-10-18T16:28:53.913Z [INFO ] Script "echo" finished successfully. 102 | ``` 103 | 104 | This will always fetch the current branch, check for the latest tag on it and pull only the commits up to that tag. To only match certain tags, you can use `--on tag:v*`, which will only pull if the tag is matching the passed glob (in this case starting with `v`). 105 | 106 | ```sh 107 | gw /path/to/repo -v --on 'tag:v*' -S 'echo "new version: $GW_GIT_TAG_NAME"' 108 | ``` 109 | 110 | ## Next steps 111 | 112 | If you like `gw`, there are multiple ways to use it for real-life use-cases.
113 | 114 | If you want to put the `gw` script in the background, you can: 115 | 116 | - wrap it into a [systemd unit](/usage/systemd), if you want to manage it with a single file; 117 | - start it in a [docker container](/usage/docker), if you already use Docker in your workflow; 118 | - or run it periodically with [cron](/usage/crontab), if you don't have shell access to the server. 119 | 120 | If you are interested in some ideas on how to use `gw`: 121 | 122 | - if you only need to pull files, see the [PHP guide](/guides/php); 123 | - if you are using a dynamic language (e.g. JavaScript, Python, Ruby), see the [Guide for dynamic languages](/guides/dynamic) for an example of running a process; 124 | - if you are using a compiled language (e.g. TypeScript, Go, Rust), see the [Guide for compiled languages](/guides/compiled) for an example of compiling a program; 125 | - if you use a `docker-compose.yaml`, see the [Guide for docker-compose](/guides/docker-compose); 126 | - if you want to easily manage configuration files as GitOps, see the [Configuration guide](/guides/configuration); 127 | - for a full-blown example, check out [Netlify](/guides/netlify); 128 | - and many other things; for an incomplete list, see the [guides page](/guides). 129 | -------------------------------------------------------------------------------- /docs/content/usage/systemd.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Systemd unit" 3 | weight = 4 4 | +++ 5 | 6 | # Systemd unit 7 | 8 | If you just want to run the `gw` process in the background without installing anything, you can use `systemctl`. You only have to create a systemd unit and `start` and `enable` it. 9 | 10 | > **Note**: by default systemd units run as root, so make sure to set up the necessary authentication (e.g. SSH keys) with the root user as well 11 | > (or use a `User` directive or [user systemd unit](#user-systemd-unit)).
12 | > You can test this by entering the directory with the root user and running `git pull` or `gw -vv .`. 13 | 14 | ## Usage 15 | 16 | To create a new unit you have to create a new unit file at the default systemd unit location, usually `/etc/systemd/system`. 17 | 18 | You can change this example systemd unit to your use-case and copy it under `/etc/systemd/system/gw.service`: 19 | 20 | ```ini 21 | # /etc/systemd/system/gw.service 22 | [Unit] 23 | Description=Watch git repository at /path/to/repo 24 | After=multi-user.target 25 | 26 | [Service] 27 | Type=simple 28 | ExecStart=/usr/bin/gw -v /path/to/repo -s 'echo ran from systemctl unit' 29 | Restart=always 30 | # run as a non-root user (recommended) 31 | User=myuser 32 | 33 | [Install] 34 | WantedBy=default.target 35 | ``` 36 | 37 | To reload the systemd unit database, you have to run `daemon-reload`: 38 | 39 | ```sh 40 | systemctl daemon-reload 41 | ``` 42 | 43 | With this you should see a new `gw.service` unit. You can start this with `systemctl start`: 44 | 45 | ```sh 46 | systemctl start gw 47 | ``` 48 | 49 | If you want to start this every time your server boots up, you can run `systemctl enable`: 50 | 51 | ```sh 52 | systemctl enable gw 53 | ``` 54 | 55 | To see if your unit is running you check the status, or read the logs with `journalctl`: 56 | 57 | ```sh 58 | systemctl status gw 59 | journalctl -fu gw 60 | ``` 61 | 62 | For a more complicated example, check out the [docker-compose systemd unit](/guides/docker-compose#systemd-unit). 63 | 64 | ### User systemd unit 65 | 66 | Most of the time the git configuration is only set up for some users, so it might make sense to run `gw` as a user systemd unit. You can do it, but you have to be careful with some things. 67 | 68 | The main issue with user services is that they are bound to the user session, so if you log out from the SSH, all started units will end. 
You can enable [lingering](https://wiki.archlinux.org/title/Systemd/User#Automatic_start-up_of_systemd_user_instances) with `loginctl` to keep the systemd units running after logout: 69 | 70 | ``` 71 | loginctl enable-linger 72 | ``` 73 | 74 | After you set up lingering, you can create a similar systemd unit except under `~/.config/systemd/user/`: 75 | 76 | ```ini 77 | # /home/myuser/.config/systemd/user/gw.service 78 | [Unit] 79 | Description=Watch git repository at /path/to/repo 80 | After=multi-user.target 81 | 82 | [Service] 83 | Type=simple 84 | ExecStart=/usr/bin/gw /path/to/repo -s 'echo ran from systemctl unit' 85 | Restart=always 86 | 87 | [Install] 88 | WantedBy=default.target 89 | ``` 90 | 91 | The same commands should work as above, but with `--user` added after `systemctl`. So to enable the unit above, you can run: 92 | 93 | ```sh 94 | systemctl --user daemon-reload 95 | systemctl --user start gw 96 | systemctl --user enable gw 97 | systemctl --user status gw 98 | ``` 99 | 100 | If you want to check the logs with `journalctl`, make sure to add your user to the `systemd-journal` group (requires root privileges): 101 | 102 | ```sh 103 | sudo usermod -aG systemd-journal $USER 104 | ``` 105 | 106 | After this, you can read the logs of your user services: 107 | 108 | ```sh 109 | journalctl -f --user-unit gw 110 | ``` 111 | 112 | User services can be a good way to use `gw` if you don't have or don't want to use root privileges, while still being able to use an automatic deployment workflow. 113 | -------------------------------------------------------------------------------- /docs/content/usage/webhook.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Webhook server" 3 | weight = 7 4 | +++ 5 | 6 | # Webhook server 7 | 8 | By default `gw` checks for updates every minute. Depending on your use case it can be too slow or too often.
If you only want to pull updates when a push happens, git servers (GitHub, GitLab or any other) usually have options to send a HTTP request to your `gw` service (webhook). `gw` can handle webhooks with a built-in web server. 9 | 10 | ## Usage 11 | 12 | To enable the webhook server, you can use the `--http` option. Most of the time you want to allow external connections, so to set to a high port (for example `10101`), you can use: 13 | 14 | ```sh 15 | gw /path/to/repo -v --http 0.0.0.0:10101 16 | ``` 17 | 18 | If you call this endpoint with any method on any URL, it will trigger a check for updates. To test this, you can use `curl`: 19 | 20 | ```sh 21 | curl http://localhost:10101 22 | ``` 23 | 24 | The `curl` output should print `OK` and the `gw` logs should include lines that show that it was updated: 25 | 26 | ```sh 27 | $ gw /path/to/repo -v --http 0.0.0.0:10101 28 | # ... 29 | 2024-03-10T16:52:51.531Z [DEBUG] Received request on GET / 30 | 2024-03-10T16:52:52.055Z [DEBUG] Checked out 5e25714 on branch main. 31 | 2024-03-10T16:52:52.055Z [INFO ] There are updates, pulling. 32 | ``` 33 | 34 | ### Using only webhooks 35 | 36 | If you want to disable the scheduled checks altogether and rely on the webhooks, you can set the schedule duration (`-d` flag) to zero seconds: 37 | 38 | ```sh 39 | gw /path/to/repo -v --http 0.0.0.0:10101 -d 0s 40 | ``` 41 | 42 | You can use this to create a push-based deployment, for example calling the update from your CI process after your testing has run. 43 | 44 | ## Setup webhooks 45 | 46 | Exposing a port is only one half of the problem, you also have to set the webhooks up with your git server. For this you will need a public IP or a domain name, which will be in the `$DOMAIN` variable in these examples. 47 | 48 | > **Warning:** if you can configure, you should setup your reverse proxy in front of the port to avoid exposing externally. 
49 | 50 | ### GitHub 51 | 52 | For GitHub, you have to have administrator access to the repository. Navigate to **Settings > Webhooks**, and click to **Add webhook**. Fill the **Payload URL** with your `$DOMAIN` (make sure to add the `http://` protocol and the port) and select **application/json** for **Content Type**. Save this webhook to activate. 53 | 54 | > **Note**: Secrets are currently not supported. 55 | 56 | ![You have to setup the payload URL to be http://$DOMAIN:10101 on GitHub.](/webhook-github.png) 57 | 58 | On save, the webhook should send a `ping` event to `gw`. If you click into new webhook, to **Recent deliveries** you can see this event. 59 | 60 | ![A ping event has been delivered to the server.](/webhook-github-deliveries.png) 61 | 62 | A `POST /` request will also appear in the `gw` logs, assuming debug logging was enabled: 63 | 64 | ```sh 65 | $ gw /path/to/repo -v --http 0.0.0.0:10101 66 | # ... 67 | 2024-03-10T17:18:24.424Z [DEBUG] Received request on POST / 68 | 2024-03-10T17:18:24.567Z [DEBUG] There are no updates. 69 | ``` 70 | 71 | ### GitLab 72 | 73 | For GitLab, you have to have Maintainer access to the repository. Navigate to **Settings > Webhooks**, and click to **Add new webhook**. Fill the **URL** with your `$DOMAIN` (make sure to add the `http://` protocol and the port) and check the **Trigger** to **Push events** , you can filter it for example to only trigger on the `main` branch. If you are using `http`, you should disable SSL verification. Save this webhook to activate. 74 | 75 | ![You have to setup the URL to be http://$DOMAIN:10101 on GitLab and check Push events.](/webhook-gitlab.png) 76 | 77 | To test this webhook, you can click **Test > Push events** next to the name. GitLab should show a message that **Hook executed successfully: HTTP 200**, and you can find a `POST /` request in the `gw` logs, assuming debug logging was enabled: 78 | 79 | ```sh 80 | $ gw /path/to/repo -v --http 0.0.0.0:10101 81 | # ... 
82 | 2024-03-10T17:58:28.919Z [DEBUG] Received request on POST / 83 | 2024-03-10T17:58:29.052Z [DEBUG] There are no updates. 84 | ``` 85 | -------------------------------------------------------------------------------- /docs/static/custom.css: -------------------------------------------------------------------------------- 1 | body { 2 | position: relative; 3 | } 4 | 5 | main a[href^="#"] { 6 | opacity: 30%; 7 | } 8 | 9 | a:not([href^='#']) { 10 | border-bottom: 1px solid #c9c9c9; 11 | } 12 | 13 | aside a:not([href^='#']) { 14 | border-bottom: none; 15 | } 16 | 17 | aside a:not([href^='#']).active { 18 | border-bottom: 2px solid #c9c9c9; 19 | } 20 | 21 | a:not([href^='#']):hover { 22 | border-bottom: 2px solid #c9c9c9; 23 | } 24 | 25 | @media screen and (min-width: 1200px) { 26 | aside { 27 | position: fixed; 28 | top: calc(3rem + 13px); 29 | bottom: 13px; 30 | left: 50%; 31 | transform: translate(calc(-100% - 20em)); 32 | padding: 0 20px; 33 | max-width: 200px; 34 | overflow-y: auto; 35 | } 36 | 37 | aside h1 { 38 | margin-top: 0; 39 | } 40 | 41 | .tagline { 42 | line-height: 1.33; 43 | margin-bottom: 1rem; 44 | } 45 | 46 | .tagline a:hover { 47 | border-bottom: none; 48 | } 49 | 50 | .tagline img { 51 | margin-top: 1rem; 52 | } 53 | 54 | .mobile-only { 55 | display: none; 56 | } 57 | } 58 | 59 | table { 60 | text-align: left; 61 | } 62 | 63 | table td:nth-child(1), 64 | table td:nth-child(2) { 65 | max-width: 120px; 66 | } 67 | 68 | 69 | @media screen and (min-width: 800px) { 70 | table td:nth-child(1), 71 | table td:nth-child(2) { 72 | max-width: 200px; 73 | } 74 | } 75 | 76 | table code { 77 | word-wrap: break-word; 78 | } -------------------------------------------------------------------------------- /docs/static/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -o pipefail 2>/dev/null || true 3 | set -eu 4 | 5 | fail() { 6 | echo $1 7 | exit 1 8 | } 9 | 10 | # CONFIGURE VARIABLES 11 | 
REPO="${REPO:-https://github.com/daniel7grant/gw}" 12 | VERSION="${VERSION:-v0.4.1}" 13 | if [ "$(id -u)" -ne "0" ]; then 14 | BIN_DIR="$HOME/.local/bin" 15 | else 16 | BIN_DIR="/usr/local/bin" 17 | fi 18 | if ldd /bin/ls | grep -q "musl"; then 19 | LIBC="musl" 20 | else 21 | LIBC="gnu" 22 | fi 23 | 24 | # DETERMINE THE CORRECT FILENAME 25 | PLATFORM=$(uname -sm) 26 | case "$PLATFORM" in 27 | "Linux x86_64") 28 | FILE="gw-bin_x86_64-unknown-linux-$LIBC.zip" 29 | ;; 30 | "Linux aarch"* | "Linux arm"*) 31 | FILE="gw-bin_arm-unknown-linux-gnueabihf.zip" 32 | ;; 33 | "Darwin arm64") 34 | FILE="gw-bin_aarch64-apple-darwin.zip" 35 | ;; 36 | *) 37 | fail "Platform $PLATFORM is currently not supported." 38 | ;; 39 | esac 40 | 41 | # DOWNLOAD AND MOVE IT TO BIN_DIR 42 | echo "Downloading version $VERSION to $PLATFORM..." 43 | DOWNLOAD_URL="$REPO/releases/download/$VERSION/$FILE" 44 | curl -Lfq --progress-bar $DOWNLOAD_URL -o $FILE || fail "Failed to download $DOWNLOAD_URL." 45 | unzip -qo $FILE || fail "Failed to unzip $FILE." 46 | mkdir -p $BIN_DIR 47 | mv gw "$BIN_DIR/gw" 48 | rm $FILE 49 | 50 | echo "Successfully installed gw binary to $BIN_DIR/gw!" 51 | -------------------------------------------------------------------------------- /docs/static/sakura-dark.css: -------------------------------------------------------------------------------- 1 | /* $color-text: #dedce5; */ 2 | /* Sakura.css v1.3.1 3 | * ================ 4 | * Minimal css theme. 
5 | * Project: https://github.com/oxalorg/sakura/ 6 | */ 7 | /* Body */ 8 | html { 9 | font-size: 62.5%; 10 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif; } 11 | 12 | body { 13 | font-size: 1.8rem; 14 | line-height: 1.618; 15 | max-width: 38em; 16 | margin: auto; 17 | color: #c9c9c9; 18 | background-color: #222222; 19 | padding: 13px; } 20 | 21 | h1, h2, h3, h4, h5, h6 { 22 | line-height: 1.1; 23 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif; 24 | font-weight: 700; 25 | margin-top: 3rem; 26 | margin-bottom: 1.5rem; 27 | overflow-wrap: break-word; 28 | word-wrap: break-word; 29 | -ms-word-break: break-all; 30 | word-break: break-word; } 31 | 32 | h1 { 33 | font-size: 2.35em; } 34 | 35 | h2 { 36 | font-size: 2.00em; } 37 | 38 | h3 { 39 | font-size: 1.75em; } 40 | 41 | h4 { 42 | font-size: 1.5em; } 43 | 44 | h5 { 45 | font-size: 1.25em; } 46 | 47 | h6 { 48 | font-size: 1em; } 49 | 50 | p { 51 | margin-top: 0px; 52 | margin-bottom: 2.5rem; } 53 | 54 | small, sub, sup { 55 | font-size: 75%; } 56 | 57 | hr { 58 | border-color: #ffffff; } 59 | 60 | a { 61 | text-decoration: none; 62 | color: #ffffff; } 63 | a:hover { 64 | color: #c9c9c9; 65 | border-bottom: 2px solid #c9c9c9; } 66 | a:visited { 67 | color: #e6e6e6; } 68 | 69 | ul { 70 | padding-left: 1.4em; 71 | margin-top: 0px; 72 | margin-bottom: 2.5rem; } 73 | 74 | li { 75 | margin-bottom: 0; } 76 | 77 | blockquote { 78 | margin-left: 0px; 79 | margin-right: 0px; 80 | padding-left: 1em; 81 | padding-top: 0.8em; 82 | padding-bottom: 0.8em; 83 | padding-right: 0.8em; 84 | border-left: 5px solid #ffffff; 85 | margin-bottom: 2.5rem; 86 | background-color: #4a4a4a; } 87 | 88 | blockquote p { 89 | margin-bottom: 0; } 90 | 91 | img { 92 | height: auto; 93 | max-width: 100%; 94 | margin-top: 0px; 95 | margin-bottom: 2.5rem; } 96 | 97 | /* Pre and Code */ 98 | pre { 99 | 
background-color: #4a4a4a; 100 | display: block; 101 | padding: 1em; 102 | overflow-x: auto; 103 | margin-top: 0px; 104 | margin-bottom: 2.5rem; } 105 | 106 | code { 107 | font-size: 0.9em; 108 | padding: 0 0.5em; 109 | background-color: #4a4a4a; 110 | white-space: pre-wrap; } 111 | 112 | pre > code { 113 | padding: 0; 114 | background-color: transparent; 115 | white-space: pre; } 116 | 117 | /* Tables */ 118 | table { 119 | text-align: justify; 120 | width: 100%; 121 | border-collapse: collapse; } 122 | 123 | td, th { 124 | padding: 0.5em; 125 | border-bottom: 1px solid #4a4a4a; } 126 | 127 | /* Buttons, forms and input */ 128 | input, textarea { 129 | border: 1px solid #c9c9c9; } 130 | input:focus, textarea:focus { 131 | border: 1px solid #ffffff; } 132 | 133 | textarea { 134 | width: 100%; } 135 | 136 | .button, button, input[type="submit"], input[type="reset"], input[type="button"] { 137 | display: inline-block; 138 | padding: 5px 10px; 139 | text-align: center; 140 | text-decoration: none; 141 | white-space: nowrap; 142 | background-color: #ffffff; 143 | color: #222222; 144 | border-radius: 1px; 145 | border: 1px solid #ffffff; 146 | cursor: pointer; 147 | box-sizing: border-box; } 148 | .button[disabled], button[disabled], input[type="submit"][disabled], input[type="reset"][disabled], input[type="button"][disabled] { 149 | cursor: default; 150 | opacity: .5; } 151 | .button:focus:enabled, .button:hover:enabled, button:focus:enabled, button:hover:enabled, input[type="submit"]:focus:enabled, input[type="submit"]:hover:enabled, input[type="reset"]:focus:enabled, input[type="reset"]:hover:enabled, input[type="button"]:focus:enabled, input[type="button"]:hover:enabled { 152 | background-color: #c9c9c9; 153 | border-color: #c9c9c9; 154 | color: #222222; 155 | outline: 0; } 156 | 157 | textarea, select, input { 158 | color: #c9c9c9; 159 | padding: 6px 10px; 160 | /* The 6px vertically centers text on FF, ignored by Webkit */ 161 | margin-bottom: 10px; 162 | 
background-color: #4a4a4a; 163 | border: 1px solid #4a4a4a; 164 | border-radius: 4px; 165 | box-shadow: none; 166 | box-sizing: border-box; } 167 | textarea:focus, select:focus, input:focus { 168 | border: 1px solid #ffffff; 169 | outline: 0; } 170 | 171 | input[type="checkbox"]:focus { 172 | outline: 1px dotted #ffffff; } 173 | 174 | label, legend, fieldset { 175 | display: block; 176 | margin-bottom: .5rem; 177 | font-weight: 600; } 178 | -------------------------------------------------------------------------------- /docs/static/sakura.css: -------------------------------------------------------------------------------- 1 | /* Sakura.css v1.3.1 2 | * ================ 3 | * Minimal css theme. 4 | * Project: https://github.com/oxalorg/sakura/ 5 | */ 6 | /* Body */ 7 | html { 8 | font-size: 62.5%; 9 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif; } 10 | 11 | body { 12 | font-size: 1.8rem; 13 | line-height: 1.618; 14 | max-width: 38em; 15 | margin: auto; 16 | color: #4a4a4a; 17 | background-color: #f9f9f9; 18 | padding: 13px; } 19 | 20 | @media (max-width: 684px) { 21 | body { 22 | font-size: 1.53rem; } } 23 | 24 | @media (max-width: 382px) { 25 | body { 26 | font-size: 1.35rem; } } 27 | 28 | h1, h2, h3, h4, h5, h6 { 29 | line-height: 1.1; 30 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif; 31 | font-weight: 700; 32 | margin-top: 3rem; 33 | margin-bottom: 1.5rem; 34 | overflow-wrap: break-word; 35 | word-wrap: break-word; 36 | -ms-word-break: break-all; 37 | word-break: break-word; } 38 | 39 | h1 { 40 | font-size: 2.35em; } 41 | 42 | h2 { 43 | font-size: 2.00em; } 44 | 45 | h3 { 46 | font-size: 1.75em; } 47 | 48 | h4 { 49 | font-size: 1.5em; } 50 | 51 | h5 { 52 | font-size: 1.25em; } 53 | 54 | h6 { 55 | font-size: 1em; } 56 | 57 | p { 58 | margin-top: 0px; 59 | margin-bottom: 2.5rem; } 60 | 61 | small, sub, sup { 62 | 
font-size: 75%; } 63 | 64 | hr { 65 | border-color: #1d7484; } 66 | 67 | a { 68 | text-decoration: none; 69 | color: #1d7484; } 70 | a:hover { 71 | color: #982c61; 72 | border-bottom: 2px solid #4a4a4a; } 73 | a:visited { 74 | color: #144f5a; } 75 | 76 | ul { 77 | padding-left: 1.4em; 78 | margin-top: 0px; 79 | margin-bottom: 2.5rem; } 80 | 81 | li { 82 | margin-bottom: 0.4em; } 83 | 84 | blockquote { 85 | margin-left: 0px; 86 | margin-right: 0px; 87 | padding-left: 1em; 88 | padding-top: 0.8em; 89 | padding-bottom: 0.8em; 90 | padding-right: 0.8em; 91 | border-left: 5px solid #1d7484; 92 | margin-bottom: 2.5rem; 93 | background-color: #f1f1f1; } 94 | 95 | blockquote p { 96 | margin-bottom: 0; } 97 | 98 | img { 99 | height: auto; 100 | max-width: 100%; 101 | margin-top: 0px; 102 | margin-bottom: 2.5rem; } 103 | 104 | /* Pre and Code */ 105 | pre { 106 | background-color: #f1f1f1; 107 | display: block; 108 | padding: 1em; 109 | overflow-x: auto; 110 | margin-top: 0px; 111 | margin-bottom: 2.5rem; } 112 | 113 | code { 114 | font-size: 0.9em; 115 | padding: 0 0.5em; 116 | background-color: #f1f1f1; 117 | white-space: pre-wrap; } 118 | 119 | pre > code { 120 | padding: 0; 121 | background-color: transparent; 122 | white-space: pre; } 123 | 124 | /* Tables */ 125 | table { 126 | text-align: justify; 127 | width: 100%; 128 | border-collapse: collapse; } 129 | 130 | td, th { 131 | padding: 0.5em; 132 | border-bottom: 1px solid #f1f1f1; } 133 | 134 | /* Buttons, forms and input */ 135 | input, textarea { 136 | border: 1px solid #4a4a4a; } 137 | input:focus, textarea:focus { 138 | border: 1px solid #1d7484; } 139 | 140 | textarea { 141 | width: 100%; } 142 | 143 | .button, button, input[type="submit"], input[type="reset"], input[type="button"] { 144 | display: inline-block; 145 | padding: 5px 10px; 146 | text-align: center; 147 | text-decoration: none; 148 | white-space: nowrap; 149 | background-color: #1d7484; 150 | color: #f9f9f9; 151 | border-radius: 1px; 152 | border: 
1px solid #1d7484; 153 | cursor: pointer; 154 | box-sizing: border-box; } 155 | .button[disabled], button[disabled], input[type="submit"][disabled], input[type="reset"][disabled], input[type="button"][disabled] { 156 | cursor: default; 157 | opacity: .5; } 158 | .button:focus:enabled, .button:hover:enabled, button:focus:enabled, button:hover:enabled, input[type="submit"]:focus:enabled, input[type="submit"]:hover:enabled, input[type="reset"]:focus:enabled, input[type="reset"]:hover:enabled, input[type="button"]:focus:enabled, input[type="button"]:hover:enabled { 159 | background-color: #982c61; 160 | border-color: #982c61; 161 | color: #f9f9f9; 162 | outline: 0; } 163 | 164 | textarea, select, input { 165 | color: #4a4a4a; 166 | padding: 6px 10px; 167 | /* The 6px vertically centers text on FF, ignored by Webkit */ 168 | margin-bottom: 10px; 169 | background-color: #f1f1f1; 170 | border: 1px solid #f1f1f1; 171 | border-radius: 4px; 172 | box-shadow: none; 173 | box-sizing: border-box; } 174 | textarea:focus, select:focus, input:focus { 175 | border: 1px solid #1d7484; 176 | outline: 0; } 177 | 178 | input[type="checkbox"]:focus { 179 | outline: 1px dotted #1d7484; } 180 | 181 | label, legend, fieldset { 182 | display: block; 183 | margin-bottom: .5rem; 184 | font-weight: 600; } 185 | -------------------------------------------------------------------------------- /docs/static/webhook-github-deliveries.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daniel7grant/gw/5546789d02a626cc273767f5c27fc40e7570d79d/docs/static/webhook-github-deliveries.png -------------------------------------------------------------------------------- /docs/static/webhook-github.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daniel7grant/gw/5546789d02a626cc273767f5c27fc40e7570d79d/docs/static/webhook-github.png 
-------------------------------------------------------------------------------- /docs/static/webhook-gitlab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daniel7grant/gw/5546789d02a626cc273767f5c27fc40e7570d79d/docs/static/webhook-gitlab.png -------------------------------------------------------------------------------- /docs/templates/anchor-link.html: -------------------------------------------------------------------------------- 1 | {% if level > 1 %} 2 | # 3 | {% endif %} -------------------------------------------------------------------------------- /docs/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | gw - {{ section.title }} 7 | 8 | 9 | {% if config.base_url == "https://gw.danielgrants.com" %} 10 | 11 | {% endif %} 12 | 13 | 14 | {% include "partials/toc.html" %} 15 |
{{ section.content | safe }}
16 | 17 | 18 | -------------------------------------------------------------------------------- /docs/templates/page.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | gw - {{ page.title }} 7 | 8 | 9 | {% if config.base_url == "https://gw.danielgrants.com" %} 10 | 11 | {% endif %} 12 | 13 | 14 | {% include "partials/toc.html" %} 15 |
{{ page.content | safe }}
16 | 17 | 18 | -------------------------------------------------------------------------------- /docs/templates/partials/toc.html: -------------------------------------------------------------------------------- 1 | 39 | -------------------------------------------------------------------------------- /docs/templates/section.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | gw - {{ section.title }} 7 | 8 | 9 | {% if config.base_url == "https://gw.danielgrants.com" %} 10 | 11 | {% endif %} 12 | 13 | 14 | {% include "partials/toc.html" %} 15 |
16 |

{{ section.title }}

17 | {{ section.content | safe }} 18 | 25 |
26 | 27 | 28 | -------------------------------------------------------------------------------- /src/actions/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::context::Context; 2 | use mockall::automock; 3 | use thiserror::Error; 4 | 5 | /// An action to run in the background and restart a subprocess. 6 | pub mod process; 7 | /// An action to run a custom shell script. 8 | pub mod script; 9 | /// Utilities for shared code 10 | pub mod utils; 11 | 12 | /// A custom error for describing the error cases for actions 13 | #[derive(Debug, Error)] 14 | pub enum ActionError { 15 | /// Cannot initialize action, because it has a misconfiguration. 16 | #[error("not configured correctly: {0}")] 17 | Misconfigured(String), 18 | /// Cannot run action, because there isn't enough permission. 19 | #[error("permission denied: {0}")] 20 | PermissionDenied(String), 21 | /// Running action failed. It is usually a runtime issue. 22 | #[error("{0}")] 23 | FailedAction(String), 24 | } 25 | 26 | /// An action is a process that runs if any changes occurred. 27 | /// 28 | /// Actions may include: 29 | /// - running scripts ([script::ScriptAction]) 30 | /// - etc. 
31 | #[automock] 32 | pub trait Action { 33 | /// Initiate the action 34 | fn run(&mut self, context: &Context) -> Result<(), ActionError>; 35 | } 36 | -------------------------------------------------------------------------------- /src/actions/process.rs: -------------------------------------------------------------------------------- 1 | use super::{utils::command::create_command, Action, ActionError}; 2 | use crate::context::Context; 3 | use duct::{Expression, ReaderHandle}; 4 | use log::{debug, error, info, trace, warn}; 5 | use std::{ 6 | io::{BufRead, BufReader}, 7 | sync::{Arc, RwLock}, 8 | thread::{self, sleep}, 9 | time::Duration, 10 | }; 11 | use thiserror::Error; 12 | 13 | #[cfg(unix)] 14 | use nix::{errno::Errno, sys::signal::Signal}; 15 | #[cfg(unix)] 16 | use std::{os::unix::process::ExitStatusExt, str::FromStr}; 17 | 18 | const ACTION_NAME: &str = "PROCESS"; 19 | 20 | /// Custom error describing the error cases for the ProcessAction. 21 | #[derive(Debug, Error, PartialEq, Eq)] 22 | pub enum ProcessError { 23 | /// The command is invalid (usually mismatched quotations etc.). 24 | #[error("the command {0:?} cannot be parsed")] 25 | CommandParseFailure(String), 26 | /// Signal is not a valid UNIX signal. 27 | #[error("the signal {0} is not valid")] 28 | SignalParseFailure(String), 29 | /// The underlying Rust command creation failed. The parameter contains the error. 30 | #[error("the script cannot start: {0}")] 31 | StartFailure(String), 32 | /// Stopping the command failed. 33 | #[error("the script cannot be stopped: {0}")] 34 | StopFailure(String), 35 | /// Killing the command failed. 36 | #[cfg(unix)] 37 | #[error("killing the process failed with error: {0}")] 38 | KillFailed(#[from] Errno), 39 | /// The lock on the child is poisoned: this means the thread failed while holding the lock. 
40 | #[error("the mutex is poisoned")] 41 | MutexPoisoned, 42 | } 43 | 44 | impl From for ActionError { 45 | fn from(value: ProcessError) -> Self { 46 | match value { 47 | ProcessError::CommandParseFailure(_) | ProcessError::SignalParseFailure(_) => { 48 | ActionError::Misconfigured(value.to_string()) 49 | } 50 | _ => ActionError::FailedAction(value.to_string()), 51 | } 52 | } 53 | } 54 | 55 | /// Parameters for the process. 56 | #[derive(Debug, Clone)] 57 | pub struct ProcessParams { 58 | directory: String, 59 | command: String, 60 | process: Expression, 61 | retries: u32, 62 | #[cfg(unix)] 63 | stop_signal: Signal, 64 | #[cfg(unix)] 65 | stop_timeout: Duration, 66 | runs_in_shell: bool, 67 | } 68 | 69 | impl ProcessParams { 70 | pub fn new( 71 | original_command: String, 72 | directory: String, 73 | runs_in_shell: bool, 74 | ) -> Result { 75 | let (command, process) = create_command(&original_command, runs_in_shell) 76 | .ok_or(ProcessError::CommandParseFailure(original_command.clone()))?; 77 | 78 | Ok(ProcessParams { 79 | directory, 80 | command, 81 | process, 82 | retries: 0, 83 | #[cfg(unix)] 84 | stop_signal: Signal::SIGTERM, 85 | #[cfg(unix)] 86 | stop_timeout: Duration::from_secs(10), 87 | runs_in_shell, 88 | }) 89 | } 90 | 91 | pub fn set_retries(&mut self, retries: u32) { 92 | self.retries = retries; 93 | } 94 | 95 | #[cfg_attr(not(unix), allow(unused_variables))] 96 | pub fn set_stop_signal(&mut self, stop_signal: String) -> Result<(), ProcessError> { 97 | #[cfg(unix)] 98 | { 99 | self.stop_signal = Signal::from_str(&stop_signal) 100 | .map_err(|_| ProcessError::SignalParseFailure(stop_signal))?; 101 | } 102 | 103 | Ok(()) 104 | } 105 | 106 | #[cfg_attr(not(unix), allow(unused_variables))] 107 | pub fn set_stop_timeout(&mut self, stop_timeout: Duration) { 108 | #[cfg(unix)] 109 | { 110 | self.stop_timeout = stop_timeout; 111 | } 112 | } 113 | } 114 | 115 | /// Struct that can handle the lifecycle of the process with restarting etc. 
116 | #[derive(Debug)] 117 | #[cfg_attr(unix, allow(dead_code))] 118 | pub struct Process { 119 | child: Arc>>, 120 | #[cfg(unix)] 121 | stop_signal: Signal, 122 | #[cfg(unix)] 123 | stop_timeout: Duration, 124 | } 125 | 126 | impl Process { 127 | fn start_child(params: &ProcessParams) -> Result { 128 | info!( 129 | "Starting process {:?} {}in {}.", 130 | params.command, 131 | if params.runs_in_shell { 132 | "in a shell " 133 | } else { 134 | "" 135 | }, 136 | params.directory, 137 | ); 138 | 139 | // Create child 140 | let child = params 141 | .process 142 | .dir(¶ms.directory) 143 | .stderr_to_stdout() 144 | .env("CI", "true") 145 | .env("GW_ACTION_NAME", ACTION_NAME) 146 | .env("GW_DIRECTORY", ¶ms.directory) 147 | .unchecked() 148 | .reader() 149 | .map_err(|err| ProcessError::StartFailure(err.to_string()))?; 150 | 151 | if let Some(pid) = child.pids().first() { 152 | trace!("Started process with pid {pid}.",); 153 | } 154 | 155 | Ok(child) 156 | } 157 | 158 | fn start(params: &ProcessParams) -> Result { 159 | let child = Arc::new(RwLock::new(Some(Process::start_child(params)?))); 160 | 161 | let command_id = params.command.clone(); 162 | let max_retries = params.retries; 163 | let thread_params = params.clone(); 164 | let thread_child = child.clone(); 165 | thread::spawn(move || { 166 | let mut tries = max_retries + 1; 167 | 168 | loop { 169 | trace!("Locking the subprocess to get the stdout."); 170 | if let Some(stdout) = thread_child.read().unwrap().as_ref() { 171 | let mut reader = BufReader::new(stdout).lines(); 172 | trace!("Reading lines from the stdout."); 173 | while let Some(Ok(line)) = reader.next() { 174 | debug!("[{command_id}] {line}"); 175 | } 176 | 177 | #[cfg_attr(not(unix), allow(unused_variables))] 178 | if let Ok(Some(output)) = stdout.try_wait() { 179 | #[cfg(unix)] 180 | if output.status.signal().is_some() { 181 | trace!("Process is signalled, no retries necessary."); 182 | return; 183 | } 184 | } 185 | } else { 186 | error!("Failed taking 
the stdout of process."); 187 | break; 188 | } 189 | 190 | tries -= 1; 191 | if tries == 0 { 192 | break; 193 | } 194 | 195 | warn!( 196 | "Process {:?} failed, retrying ({} retries left).", 197 | thread_params.command, tries 198 | ); 199 | 200 | sleep(Duration::from_millis(100)); 201 | match Process::start_child(&thread_params) { 202 | Ok(new_child) => { 203 | trace!("Locking the subprocess to replace the child with the new process."); 204 | if let Ok(mut unlocked_child) = thread_child.write() { 205 | unlocked_child.replace(new_child); 206 | } else { 207 | error!("Failed locking the child, the mutex might be poisoned."); 208 | } 209 | } 210 | Err(err) => { 211 | error!("Failed retrying the process: {err}."); 212 | break; 213 | } 214 | } 215 | } 216 | 217 | trace!("Locking the subprocess to remove the child."); 218 | if let Ok(mut unlocked_child) = thread_child.write() { 219 | unlocked_child.take(); 220 | trace!("The failed process is removed."); 221 | } else { 222 | error!("Failed locking the child, the mutex might be poisoned."); 223 | } 224 | 225 | error!( 226 | "Process {:?} {}, we are not retrying anymore.", 227 | thread_params.command, 228 | if max_retries > 0 { 229 | format!("failed more than {max_retries} times") 230 | } else { 231 | "failed with 0 retries".to_string() 232 | }, 233 | ); 234 | }); 235 | 236 | Ok(Process { 237 | child, 238 | #[cfg(unix)] 239 | stop_signal: params.stop_signal, 240 | #[cfg(unix)] 241 | stop_timeout: params.stop_timeout, 242 | }) 243 | } 244 | 245 | #[cfg(unix)] 246 | fn stop(&mut self) -> Result<(), ProcessError> { 247 | use duration_string::DurationString; 248 | use log::trace; 249 | use nix::sys::signal::kill; 250 | use nix::unistd::Pid; 251 | use std::thread::sleep; 252 | use std::time::Instant; 253 | 254 | trace!("Locking the subprocess to stop it."); 255 | if let Some(child) = self 256 | .child 257 | .read() 258 | .map_err(|_| ProcessError::MutexPoisoned)? 
259 | .as_ref() 260 | { 261 | let pid = Pid::from_raw( 262 | *child 263 | .pids() 264 | .first() 265 | .ok_or(ProcessError::StopFailure("pid not found".to_string()))? 266 | as i32, 267 | ); 268 | 269 | trace!( 270 | "Trying to stop process: sending {} to {}.", 271 | self.stop_signal, 272 | pid 273 | ); 274 | kill(pid, self.stop_signal)?; 275 | 276 | let start_time = Instant::now(); 277 | while start_time.elapsed() < self.stop_timeout { 278 | if let Ok(Some(output)) = child.try_wait() { 279 | info!("Process stopped gracefully with status {}.", output.status); 280 | return Ok(()); 281 | } 282 | sleep(Duration::from_secs(1)); 283 | } 284 | 285 | debug!( 286 | "Process didn't stop gracefully after {}. Killing process.", 287 | DurationString::from(self.stop_timeout).to_string() 288 | ); 289 | 290 | child 291 | .kill() 292 | .map_err(|err| ProcessError::StopFailure(err.to_string()))?; 293 | 294 | info!("Process killed successfully."); 295 | } else { 296 | debug!("Cannot restart process, because it has already failed."); 297 | } 298 | 299 | Ok(()) 300 | } 301 | 302 | #[cfg(not(unix))] 303 | fn stop(&mut self) -> Result<(), ProcessError> { 304 | trace!("Locking the subprocess to stop it."); 305 | if let Some(child) = self 306 | .child 307 | .read() 308 | .map_err(|_| ProcessError::MutexPoisoned)? 309 | .as_ref() 310 | { 311 | child 312 | .kill() 313 | .map_err(|err| ProcessError::StopFailure(err.to_string()))?; 314 | 315 | info!("Process stopped successfully."); 316 | } else { 317 | debug!("Cannot restart process, because it has already failed."); 318 | } 319 | 320 | Ok(()) 321 | } 322 | } 323 | 324 | /// An action to run in the background and restart a subprocess. 325 | #[derive(Debug)] 326 | pub struct ProcessAction { 327 | params: ProcessParams, 328 | process: Process, 329 | } 330 | 331 | impl ProcessAction { 332 | /// Creates a new process in the background. 
333 | pub fn new(params: ProcessParams) -> Result { 334 | let process = Process::start(¶ms)?; 335 | 336 | Ok(ProcessAction { params, process }) 337 | } 338 | 339 | fn run_inner(&mut self) -> Result<(), ProcessError> { 340 | self.process 341 | .stop() 342 | .map_err(|err| ProcessError::StopFailure(err.to_string()))?; 343 | self.process = Process::start(&self.params)?; 344 | 345 | Ok(()) 346 | } 347 | } 348 | 349 | impl Action for ProcessAction { 350 | /// Kills and restarts the subprocess. 351 | fn run(&mut self, _context: &Context) -> Result<(), ActionError> { 352 | Ok(self.run_inner()?) 353 | } 354 | } 355 | 356 | #[cfg(test)] 357 | #[cfg_attr(not(unix), allow(unused_imports))] 358 | mod tests { 359 | use super::*; 360 | use std::{fs, time::Instant}; 361 | use thread::sleep; 362 | 363 | const SLEEP_PARSING: &str = "sleep 100"; 364 | const SLEEP_INVALID: &str = "sleep '100"; 365 | const EXIT_NONZERO: &str = "exit 1"; 366 | 367 | #[cfg(unix)] 368 | const SLEEP: &str = "sleep 100"; 369 | 370 | #[cfg(not(unix))] 371 | const SLEEP: &str = "timeout /t 100"; 372 | 373 | #[test] 374 | fn it_should_start_a_new_process() -> Result<(), ProcessError> { 375 | let params = ProcessParams::new(String::from(SLEEP_PARSING), String::from("."), false)?; 376 | let mut action = ProcessAction::new(params)?; 377 | action.process.stop()?; 378 | 379 | assert_eq!("sleep", action.params.command); 380 | assert_eq!(".", action.params.directory); 381 | 382 | Ok(()) 383 | } 384 | 385 | #[test] 386 | fn it_should_fail_if_command_is_invalid() -> Result<(), ProcessError> { 387 | let failing_command = String::from(SLEEP_INVALID); 388 | let failing_params = ProcessParams::new(failing_command.clone(), String::from("."), false); 389 | 390 | assert_eq!( 391 | ProcessError::CommandParseFailure(failing_command), 392 | failing_params.unwrap_err(), 393 | ); 394 | 395 | Ok(()) 396 | } 397 | 398 | #[test] 399 | #[cfg(unix)] 400 | fn it_should_fail_if_signal_is_invalid() -> Result<(), ProcessError> { 401 | let 
failing_signal = String::from("SIGWTF"); 402 | let failing_params = ProcessParams::new(String::from(SLEEP), String::from("."), false)? 403 | .set_stop_signal(failing_signal.clone()); 404 | 405 | assert_eq!( 406 | ProcessError::SignalParseFailure(failing_signal), 407 | failing_params.unwrap_err(), 408 | ); 409 | 410 | Ok(()) 411 | } 412 | 413 | #[test] 414 | fn it_should_restart_the_process_gracefully() -> Result<(), ProcessError> { 415 | let stop_timeout = Duration::from_secs(5); 416 | let params = ProcessParams::new(String::from(SLEEP), String::from("."), false)?; 417 | let mut action = ProcessAction::new(params)?; 418 | 419 | let initial_time = Instant::now(); 420 | let first_pid = action 421 | .process 422 | .child 423 | .read() 424 | .unwrap() 425 | .as_ref() 426 | .unwrap() 427 | .pids(); 428 | action.run_inner()?; 429 | let second_pid = action 430 | .process 431 | .child 432 | .read() 433 | .unwrap() 434 | .as_ref() 435 | .unwrap() 436 | .pids(); 437 | action.process.stop()?; 438 | 439 | assert_ne!( 440 | first_pid, second_pid, 441 | "First and second run should have different pids." 442 | ); 443 | assert!( 444 | initial_time.elapsed() <= stop_timeout, 445 | "The stop timeout should not be elapsed." 
446 | ); 447 | 448 | Ok(()) 449 | } 450 | 451 | #[test] 452 | fn it_should_retry_the_process_if_it_exits_until_the_retry_count() -> Result<(), ProcessError> { 453 | let params = ProcessParams::new(String::from(EXIT_NONZERO), String::from("."), true)?; 454 | let action = ProcessAction::new(params)?; 455 | 456 | sleep(Duration::from_secs(1)); 457 | 458 | let is_child_exited = action.process.child.read().unwrap().as_ref().is_none(); 459 | 460 | assert!(is_child_exited, "The child should exit."); 461 | 462 | Ok(()) 463 | } 464 | 465 | #[test] 466 | #[cfg(unix)] 467 | fn it_should_reset_the_retries() -> Result<(), ProcessError> { 468 | let tailed_file = "./test_directories/tailed_file"; 469 | let params = 470 | ProcessParams::new(format!("tail -f {tailed_file}"), String::from("."), false)?; 471 | 472 | // First time it should fail, because the file doesn't exist yet 473 | let mut action = ProcessAction::new(params)?; 474 | 475 | // Create the file and restart it quickly to see the retries reset 476 | fs::write(tailed_file, "").unwrap(); 477 | action.run_inner()?; 478 | 479 | let is_child_running = action.process.child.read().unwrap().as_ref().is_some(); 480 | assert!(is_child_running, "The child should be running."); 481 | 482 | action.process.stop()?; 483 | fs::remove_file(tailed_file).unwrap(); 484 | 485 | Ok(()) 486 | } 487 | } 488 | -------------------------------------------------------------------------------- /src/actions/script.rs: -------------------------------------------------------------------------------- 1 | use super::{utils::command::create_command, Action, ActionError}; 2 | use crate::context::Context; 3 | use duct::Expression; 4 | use log::{debug, error, info}; 5 | use std::io::{BufRead, BufReader}; 6 | use thiserror::Error; 7 | 8 | const ACTION_NAME: &str = "SCRIPT"; 9 | 10 | /// An action to run a custom shell script. 11 | /// 12 | /// The passed script is running in a subshell (`/bin/sh` on *nix, `cmd.exe` on Windows). 
/// so it can use any feature in these shells: variable expansion, pipes, redirection.
/// Both the stdout and stderr will be captured and logged. If the script fails,
/// the failure will also be logged.
#[derive(Debug)]
pub struct ScriptAction {
    /// The working directory the script is started in.
    directory: String,
    /// The first word of the parsed command, used as a prefix in the logs.
    command: String,
    /// The prepared (but not yet started) duct expression.
    script: Expression,
    /// Whether the command runs through a shell (`/bin/sh -c` / `cmd.exe /C`).
    runs_in_shell: bool,
}

/// Custom error describing the error cases for the ScriptAction.
#[derive(Debug, Error)]
pub enum ScriptError {
    /// The command is invalid (usually mismatched quotations etc.).
    #[error("the command {0:?} cannot be parsed")]
    CommandParseFailure(String),
    /// The underlying Rust command creation failed. The parameter contains the error.
    #[error("the script cannot run: {0}")]
    ScriptFailure(#[from] std::io::Error),
    /// The script returned a non-zero exit code, usually meaning it failed to start
    /// or encountered an error. The parameter is the exit code.
    #[error("the script returned non-zero exit code {0}")]
    NonZeroExitcode(i32),
    /// This means that an error occurred when trying to read from the output of the script.
    #[error("the script returned invalid output")]
    OutputFailure,
}

impl From<ScriptError> for ActionError {
    fn from(value: ScriptError) -> Self {
        // Every script error is surfaced to the caller as a failed action
        // carrying the error's display message.
        match value {
            ScriptError::CommandParseFailure(_)
            | ScriptError::ScriptFailure(_)
            | ScriptError::NonZeroExitcode(_)
            | ScriptError::OutputFailure => ActionError::FailedAction(value.to_string()),
        }
    }
}

impl ScriptAction {
    /// Creates a new script to be started in the given directory.
    ///
    /// Fails with [`ScriptError::CommandParseFailure`] if the command cannot be
    /// tokenized (e.g. unbalanced quotes).
    pub fn new(
        directory: String,
        original_command: String,
        runs_in_shell: bool,
    ) -> Result<Self, ScriptError> {
        let (command, script) = create_command(&original_command, runs_in_shell)
            .ok_or(ScriptError::CommandParseFailure(original_command))?;

        // Pre-configure the expression: CI-like environment variables, stderr
        // merged into stdout so both can be captured, the working directory,
        // and `unchecked` so a non-zero exit status is reported through the
        // captured output instead of an `Err` from duct itself.
        let script = script
            .env("CI", "true")
            .env("GW_ACTION_NAME", ACTION_NAME)
            .env("GW_DIRECTORY", &directory)
            .stderr_to_stdout()
            .stdout_capture()
            .dir(&directory)
            .unchecked();

        Ok(ScriptAction {
            directory,
            command,
            script,
            runs_in_shell,
        })
    }

    /// Runs the prepared script, streaming its combined output to the debug log
    /// and mapping the exit status to a `ScriptError`.
    fn run_inner(&self, context: &Context) -> Result<(), ScriptError> {
        // We can run `sh_dangerous`, because it is on the user's computer.
        let mut script = self.script.clone();

        // Set the environment variables from the trigger/check context,
        // namespaced with a GW_ prefix.
        for (key, value) in context {
            script = script.env(format!("GW_{key}"), value);
        }

        // Start the shell script
        info!(
            "Running script {:?} {}in {}.",
            self.command,
            if self.runs_in_shell {
                "in a shell "
            } else {
                ""
            },
            self.directory,
        );
        let child = script.reader()?;

        // Log every output line prefixed with the command id; a line that
        // cannot be read (e.g. invalid utf-8) is logged as an empty line
        // instead of aborting the run.
        let reader = BufReader::new(&child).lines();
        let command_id = self.command.as_str();
        for line in reader {
            match line {
                Ok(line) => debug!("[{command_id}] {line}"),
                Err(_) => debug!("[{command_id}] "),
            }
        }

        // Once the reader is exhausted the process should have exited:
        // inspect the status to decide between success and failure.
        if let Ok(Some(output)) = child.try_wait() {
            if output.status.success() {
                info!("Script {:?} finished successfully.", self.command);
                Ok(())
            } else {
                Err(ScriptError::NonZeroExitcode(
                    output.status.code().unwrap_or(-1),
                ))
            }
        } else {
            Err(ScriptError::OutputFailure)
        }
    }
}

impl Action for ScriptAction {
    /// Run the script in a subshell (`/bin/sh` on *nix, `cmd.exe` on Windows).
128 | /// If the script fails to start, return a non-zero error code or prints non-utf8 129 | /// characters, this function will result in an error. 130 | fn run(&mut self, context: &Context) -> Result<(), ActionError> { 131 | Ok(self.run_inner(context)?) 132 | } 133 | } 134 | 135 | #[cfg(test)] 136 | mod tests { 137 | use super::*; 138 | use std::collections::HashMap; 139 | 140 | fn validate_output(command: &str, asserter: F) 141 | where 142 | F: Fn(Vec<&str>), 143 | { 144 | let command = format!("[{command}] "); 145 | testing_logger::validate(|captured_logs| { 146 | let output: Vec<&str> = captured_logs 147 | .iter() 148 | .filter_map(|line| { 149 | if line.body.starts_with(&command) { 150 | Some(line.body.as_str().trim_start_matches(&command)) 151 | } else { 152 | None 153 | } 154 | }) 155 | .collect(); 156 | 157 | asserter(output); 158 | }); 159 | } 160 | 161 | const ECHO_TEST: &str = "echo test"; 162 | const EXIT_NONZERO: &str = "exit 1"; 163 | 164 | #[cfg(unix)] 165 | const ECHO_INVALID_UNICODE: &str = 166 | "python -c \"import sys; sys.stdout.buffer.write(b'\\xc3\\x28')\"; sys.stdout.flush()"; 167 | #[cfg(unix)] 168 | const ECHO_STDERR: &str = "echo err >&2"; 169 | #[cfg(unix)] 170 | const PRINTENV: &str = "printenv"; 171 | 172 | #[cfg(not(unix))] 173 | const PRINTENV: &str = "set"; 174 | 175 | #[test] 176 | fn it_should_create_new_script() { 177 | let command = String::from(ECHO_TEST); 178 | let action = ScriptAction::new(String::from("."), command, true).unwrap(); 179 | 180 | assert_eq!("echo", action.command); 181 | assert_eq!(".", action.directory); 182 | } 183 | 184 | #[test] 185 | fn it_should_fail_if_command_is_invalid() { 186 | let result = ScriptAction::new(String::from("."), String::from("echo 'test"), false); 187 | 188 | assert!( 189 | matches!(result, Err(ScriptError::CommandParseFailure(_))), 190 | "{result:?} should match CommandParseFailure" 191 | ); 192 | } 193 | 194 | #[test] 195 | fn it_should_run_the_script() -> Result<(), ScriptError> { 
196 | testing_logger::setup(); 197 | 198 | let command = String::from(ECHO_TEST); 199 | let action = ScriptAction::new(String::from("."), command, true)?; 200 | 201 | let context: Context = HashMap::new(); 202 | action.run_inner(&context)?; 203 | 204 | validate_output("echo", |lines| { 205 | assert_eq!(vec!["test"], lines); 206 | }); 207 | 208 | Ok(()) 209 | } 210 | 211 | #[test] 212 | fn it_should_set_the_env_vars() -> Result<(), ScriptError> { 213 | testing_logger::setup(); 214 | 215 | let command = String::from(PRINTENV); 216 | let action = ScriptAction::new(String::from("."), command, true)?; 217 | 218 | let context: Context = HashMap::from([ 219 | ("TRIGGER_NAME", "TEST-TRIGGER".to_string()), 220 | ("CHECK_NAME", "TEST-CHECK".to_string()), 221 | ]); 222 | action.run_inner(&context)?; 223 | 224 | validate_output(PRINTENV, |lines| { 225 | assert!(lines.contains(&"CI=true")); 226 | assert!(lines.contains(&"GW_TRIGGER_NAME=TEST-TRIGGER")); 227 | assert!(lines.contains(&"GW_CHECK_NAME=TEST-CHECK")); 228 | assert!(lines.contains(&"GW_ACTION_NAME=SCRIPT")); 229 | assert!(lines.contains(&"GW_DIRECTORY=.")); 230 | }); 231 | 232 | Ok(()) 233 | } 234 | 235 | #[test] 236 | fn it_should_keep_the_already_set_env_vars() -> Result<(), ScriptError> { 237 | testing_logger::setup(); 238 | 239 | std::env::set_var("GW_TEST", "GW_TEST"); 240 | 241 | let command = String::from(PRINTENV); 242 | let action = ScriptAction::new(String::from("."), command, true)?; 243 | 244 | let context: Context = HashMap::new(); 245 | action.run_inner(&context)?; 246 | 247 | validate_output(PRINTENV, |lines| { 248 | assert!(lines.contains(&"GW_TEST=GW_TEST")); 249 | }); 250 | 251 | Ok(()) 252 | } 253 | 254 | #[test] 255 | #[cfg(unix)] 256 | fn it_should_catch_error_output() -> Result<(), ScriptError> { 257 | testing_logger::setup(); 258 | 259 | let command = String::from(ECHO_STDERR); 260 | let action = ScriptAction::new(String::from("."), command, true)?; 261 | 262 | let context: Context = 
HashMap::new(); 263 | action.run_inner(&context)?; 264 | 265 | validate_output("echo", |lines| { 266 | assert_eq!(vec!["err"], lines); 267 | }); 268 | 269 | Ok(()) 270 | } 271 | 272 | #[test] 273 | #[cfg(unix)] 274 | fn it_should_record_if_the_script_returns_non_utf8() -> Result<(), ScriptError> { 275 | testing_logger::setup(); 276 | 277 | let command = String::from(ECHO_INVALID_UNICODE); 278 | let action = ScriptAction::new(String::from("."), command, false)?; 279 | 280 | let context: Context = HashMap::new(); 281 | action.run_inner(&context)?; 282 | 283 | validate_output("python", |lines| { 284 | assert_eq!(vec![""], lines); 285 | }); 286 | 287 | Ok(()) 288 | } 289 | 290 | #[test] 291 | fn it_should_fail_if_the_script_fails() -> Result<(), ScriptError> { 292 | let command = String::from(EXIT_NONZERO); 293 | let action = ScriptAction::new(String::from("."), command, true)?; 294 | 295 | let context: Context = HashMap::new(); 296 | let result = action.run_inner(&context); 297 | assert!( 298 | matches!(result, Err(ScriptError::NonZeroExitcode(1))), 299 | "{result:?} should match non zero exit code" 300 | ); 301 | 302 | Ok(()) 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /src/actions/utils/command.rs: -------------------------------------------------------------------------------- 1 | use duct::{cmd, Expression}; 2 | use duct_sh::sh_dangerous; 3 | use log::{trace, warn}; 4 | 5 | pub fn create_command(original_command: &str, runs_in_shell: bool) -> Option<(String, Expression)> { 6 | // If we are not in a shell, test if the user might want to be in one (uses variables or pipes) 7 | if !runs_in_shell { 8 | let contains_variables = original_command 9 | .find('$') 10 | .and_then(|pos| original_command.chars().nth(pos + 1)) 11 | .map(|ch| ch.is_ascii_alphabetic() || ch == '{') 12 | == Some(true); 13 | 14 | let contains_suspicious = original_command.contains(" | ") 15 | || original_command.contains(" && ") 16 | || 
original_command.contains(" || "); 17 | 18 | if contains_variables || contains_suspicious { 19 | warn!("The command {original_command:?} contains a variable or other shell-specific character: you might want to run it in a shell (-S or -P).") 20 | } 21 | } 22 | 23 | // We have to split the command into parts to get the command id 24 | let split_args = shlex::split(original_command)?; 25 | let (command, args) = split_args.split_first()?; 26 | 27 | // If we are in a shell we can `sh_dangerous`, otherwise avoid bugs around shells 28 | let script = if runs_in_shell { 29 | sh_dangerous(original_command) 30 | } else { 31 | cmd(command, args) 32 | }; 33 | 34 | trace!("Parsed {original_command:?} to {script:?}."); 35 | 36 | Some((command.clone(), script)) 37 | } 38 | -------------------------------------------------------------------------------- /src/actions/utils/mod.rs: -------------------------------------------------------------------------------- 1 | /// Utilities for handling commands 2 | pub mod command; 3 | -------------------------------------------------------------------------------- /src/args.rs: -------------------------------------------------------------------------------- 1 | use duration_string::DurationString; 2 | use gumdrop::Options; 3 | use gw_bin::checks::git::GitTriggerArgument; 4 | use std::{env, str::FromStr}; 5 | 6 | #[derive(Clone, Debug)] 7 | pub enum TriggerArgument { 8 | Push, 9 | Tag(String), 10 | } 11 | 12 | impl FromStr for TriggerArgument { 13 | type Err = String; 14 | 15 | fn from_str(s: &str) -> Result { 16 | match s { 17 | "push" => Ok(TriggerArgument::Push), 18 | "tag" => Ok(TriggerArgument::Tag("*".to_string())), 19 | s if s.starts_with("tag:") => Ok(TriggerArgument::Tag( 20 | s.trim_start_matches("tag:").to_string(), 21 | )), 22 | s => Err(format!("cannot parse {s}, valid values: push, tag, tag:prefix")), 23 | } 24 | } 25 | } 26 | 27 | impl From for GitTriggerArgument { 28 | fn from(value: TriggerArgument) -> Self { 29 | match value 
{ 30 | TriggerArgument::Push => GitTriggerArgument::Push, 31 | TriggerArgument::Tag(t) => GitTriggerArgument::Tag(t), 32 | } 33 | } 34 | } 35 | 36 | /// Watch a repository for changes and run scripts when it happens. 37 | #[derive(Debug, Options)] 38 | pub struct Args { 39 | /// The git repository to watch. 40 | #[options(free)] 41 | pub directory: Option, 42 | 43 | /// A script to run on changes, you can define multiple times. 44 | /// 45 | /// If there are no scripts given, it will only pull. 46 | #[options(long = "script", meta = "SCRIPT")] 47 | pub scripts: Vec, 48 | 49 | /// Run a script in a shell. 50 | #[options(short = "S", no_long, meta = "SCRIPT")] 51 | pub scripts_with_shell: Vec, 52 | 53 | /// A background process that will be restarted on change. 54 | #[options(meta = "PROCESS")] 55 | pub process: Option, 56 | 57 | /// Run a background process in a shell. 58 | #[options(short = "P", no_long, meta = "PROCESS")] 59 | pub process_with_shell: Option, 60 | 61 | /// Try to pull only once. Useful for cronjobs. 62 | #[options(long = "once", no_short)] 63 | pub once: bool, 64 | 65 | /// The trigger on which to run (can be `push`, `tag` or `tag:pattern`). 66 | /// 67 | /// The options are: 68 | /// - `push`: update on every commit, 69 | /// - `tag`: update on every tag on this branch, 70 | /// - `tag:pattern`: update on tags matching the glob. 71 | #[options(no_short, long = "on", default = "push")] 72 | pub trigger: TriggerArgument, 73 | 74 | /// Refreshes the repo with this interval. 75 | /// 76 | /// Can be a number postfixed with s(econd), m(inutes), h(ours), d(ays) 77 | #[options(long = "every", default = "1m")] 78 | pub delay: DurationString, 79 | 80 | /// Set the path for an ssh-key to be used when pulling. 81 | #[options(short = 'i', long = "ssh-key")] 82 | pub ssh_key: Option, 83 | 84 | /// Set the username for git to be used when pulling with HTTPS. 
85 | #[options(no_short, meta = "USER")] 86 | pub git_username: Option, 87 | 88 | /// Set the token for git to be used when pulling with HTTPS. 89 | #[options(no_short, meta = "TOKEN")] 90 | pub git_token: Option, 91 | 92 | /// Add this line to the known_hosts file to be created (e.g. "example.com ssh-ed25519 AAAAC3..."). 93 | #[options(no_short, meta = "HOST")] 94 | pub git_known_host: Option, 95 | 96 | /// Runs an HTTP server on the URL, which allows to trigger by calling it. 97 | #[options(no_short)] 98 | pub http: Option, 99 | 100 | /// The number of times to retry the background process in case it fails. By default 0 for no retries. 101 | #[options(no_short, meta = "N")] 102 | pub process_retries: Option, 103 | 104 | /// The stop signal to give the background process. Useful for graceful shutdowns. By default SIGINT. (Only supported on *NIX) 105 | #[options(no_short, meta = "SIGNAL")] 106 | pub stop_signal: Option, 107 | 108 | /// The timeout to wait before killing for the background process to shutdown gracefully. By default 10s. 109 | #[options(no_short, meta = "TIMEOUT")] 110 | pub stop_timeout: Option, 111 | 112 | /// Increase verbosity, can be set multiple times (-v debug, -vv tracing). 113 | #[options(count)] 114 | pub verbose: u8, 115 | 116 | /// Only print error messages. 117 | #[options()] 118 | pub quiet: bool, 119 | 120 | /// Print the current version. 121 | #[options(short = "V")] 122 | pub version: bool, 123 | 124 | /// Print this help. 
125 | #[options()] 126 | pub help: bool, 127 | } 128 | 129 | #[derive(Debug)] 130 | pub enum ArgAction { 131 | Process(String, bool), 132 | Script(String, bool), 133 | } 134 | 135 | pub fn parse_args() -> (Args, Vec) { 136 | let args = Args::parse_args_default_or_exit(); 137 | 138 | // We have to maintain positionality between different flags 139 | let arg_actions = env::args() 140 | .skip(2) 141 | .filter_map(|arg| { 142 | if args.process.as_ref() == Some(&arg) { 143 | Some(ArgAction::Process(arg, false)) 144 | } else if args.process_with_shell.as_ref() == Some(&arg) { 145 | Some(ArgAction::Process(arg, true)) 146 | } else if args.scripts.contains(&arg) { 147 | Some(ArgAction::Script(arg, false)) 148 | } else if args.scripts_with_shell.contains(&arg) { 149 | Some(ArgAction::Script(arg, true)) 150 | } else { 151 | None 152 | } 153 | }) 154 | .collect(); 155 | 156 | (args, arg_actions) 157 | } 158 | -------------------------------------------------------------------------------- /src/checks/git/config.rs: -------------------------------------------------------------------------------- 1 | use super::GitError; 2 | use dirs::home_dir; 3 | use std::{fs::File, io::Write, path::PathBuf}; 4 | use log::warn; 5 | 6 | /// Setup the gitconfig file. 7 | /// 8 | /// Git will fail if we are trying to access a directory with a different user than ours. 9 | /// To avoid this (mainly to make it work in containers), we are once again choosing usability. 10 | /// In case there is no gitconfig file (usually in containers), we are creating it and 11 | /// setting the current directory as safe directory inside. 
12 | pub fn setup_gitconfig(directory: &str) -> Result<(), GitError> { 13 | let home = home_dir().unwrap_or(PathBuf::from("~")); 14 | let config = home.join(".gitconfig"); 15 | 16 | if !config.exists() { 17 | warn!( 18 | "There is no {}, creating with safe.directory = {}.", 19 | config.to_string_lossy(), 20 | directory 21 | ); 22 | 23 | let mut config_file = File::create(config).map_err(|_| GitError::ConfigLoadingFailed)?; 24 | writeln!(config_file, "[safe]\n directory = {directory}") 25 | .map_err(|_| GitError::ConfigLoadingFailed)?; 26 | } 27 | 28 | Ok(()) 29 | } 30 | -------------------------------------------------------------------------------- /src/checks/git/credentials.rs: -------------------------------------------------------------------------------- 1 | // Credential funtion graciously lifted from https://github.com/davidB/git2_credentials 2 | // The goal is to remove every extra feature (e.g. interactive usage, config parsing with pest) 3 | 4 | use dirs::home_dir; 5 | use log::{trace, warn}; 6 | use std::path::PathBuf; 7 | 8 | pub use git2; 9 | 10 | #[derive(Debug, Clone)] 11 | pub enum CredentialAuth { 12 | Ssh(String), 13 | Https(String, String), 14 | } 15 | 16 | pub struct CredentialHandler { 17 | username_attempts_count: usize, 18 | username_candidates: Vec, 19 | ssh_attempts_count: usize, 20 | ssh_key_candidates: Vec, 21 | https_credentials: Option<(String, String)>, 22 | cred_helper_bad: Option, 23 | cfg: git2::Config, 24 | } 25 | 26 | // implemention based on code & comment from cargo 27 | // https://github.com/rust-lang/cargo/blob/master/src/cargo/sources/git/utils.rs#L415-L628 28 | // License APACHE 29 | // but adapted to not use wrapper over function like withXxx(FnMut), a more OO approach 30 | impl CredentialHandler { 31 | pub fn new(cfg: git2::Config, auth: Option) -> Self { 32 | // Force using https credentials if given 33 | if let Some(CredentialAuth::Https(username, password)) = auth { 34 | return CredentialHandler { 35 | 
username_attempts_count: 0, 36 | username_candidates: vec!["git".to_string()], 37 | ssh_attempts_count: 0, 38 | ssh_key_candidates: vec![], 39 | cred_helper_bad: None, 40 | https_credentials: Some((username, password)), 41 | cfg, 42 | }; 43 | } 44 | 45 | // Generate a list of available keys 46 | let ssh_keys = if let Some(CredentialAuth::Ssh(path)) = auth { 47 | vec![PathBuf::from(path)] 48 | } else { 49 | let home = home_dir().unwrap_or(PathBuf::from("~")); 50 | vec![ 51 | home.join(".ssh/id_dsa"), 52 | home.join(".ssh/id_ecdsa"), 53 | home.join(".ssh/id_ecdsa_sk"), 54 | home.join(".ssh/id_ed25519"), 55 | home.join(".ssh/id_ed25519_sk"), 56 | home.join(".ssh/id_rsa"), 57 | ] 58 | }; 59 | 60 | let ssh_key_candidates: Vec = ssh_keys 61 | .into_iter() 62 | .filter(|key_path| key_path.exists()) 63 | .collect(); 64 | 65 | CredentialHandler { 66 | username_attempts_count: 0, 67 | username_candidates: vec!["git".to_string()], 68 | ssh_attempts_count: 0, 69 | ssh_key_candidates, 70 | cred_helper_bad: None, 71 | https_credentials: None, 72 | cfg, 73 | } 74 | } 75 | 76 | /// Prepare the authentication callbacks for cloning a git repository. 77 | /// 78 | /// The main purpose of this function is to construct the "authentication 79 | /// callback" which is used to clone a repository. This callback will attempt to 80 | /// find the right authentication on the system (maybe with user input) and will 81 | /// guide libgit2 in doing so. 82 | /// 83 | /// The callback is provided `allowed` types of credentials, and we try to do as 84 | /// much as possible based on that: 85 | /// 86 | /// - Prioritize SSH keys from the local ssh agent as they're likely the most 87 | /// reliable. The username here is prioritized from the credential 88 | /// callback, then from whatever is configured in git itself, and finally 89 | /// we fall back to the generic user of `git`. 
If no ssh agent try to use 90 | /// the default key ($HOME/.ssh/id_rsa, $HOME/.ssh/id_ed25519) 91 | /// 92 | /// - If a username/password is allowed, then we fallback to git2-rs's 93 | /// implementation of the credential helper. This is what is configured 94 | /// with `credential.helper` in git, and is the interface for the macOS 95 | /// keychain, for example. Else ask (on ui) the for username and password. 96 | /// 97 | /// - After the above two have failed, we just kinda grapple attempting to 98 | /// return *something*. 99 | /// 100 | /// If any form of authentication fails, libgit2 will repeatedly ask us for 101 | /// credentials until we give it a reason to not do so. To ensure we don't 102 | /// just sit here looping forever we keep track of authentications we've 103 | /// attempted and we don't try the same ones again. 104 | pub fn try_next_credential( 105 | &mut self, 106 | url: &str, 107 | username: Option<&str>, 108 | allowed: git2::CredentialType, 109 | ) -> Result { 110 | // libgit2's "USERNAME" authentication actually means that it's just 111 | // asking us for a username to keep going. This is currently only really 112 | // used for SSH authentication and isn't really an authentication type. 113 | // The logic currently looks like: 114 | // 115 | // let user = ...; 116 | // if (user.is_null()) 117 | // user = callback(USERNAME, null, ...); 118 | // 119 | // callback(SSH_KEY, user, ...) 120 | // 121 | // So if we're being called here then we know that (a) we're using ssh 122 | // authentication and (b) no username was specified in the URL that 123 | // we're trying to clone. We need to guess an appropriate username here, 124 | // but that may involve a few attempts. 125 | // (FIXME) Unfortunately we can't switch 126 | // usernames during one authentication session with libgit2, so to 127 | // handle this we bail out of this authentication session after setting 128 | // the flag `ssh_username_requested`, and then we handle this below. 
129 | if allowed.contains(git2::CredentialType::USERNAME) { 130 | // debug_assert!(username.is_none()); 131 | let idx = self.username_attempts_count; 132 | self.username_attempts_count += 1; 133 | return match self.username_candidates.get(idx).map(|s| &s[..]) { 134 | Some(s) => git2::Cred::username(s), 135 | _ => Err(git2::Error::from_str("no more username to try")), 136 | }; 137 | } 138 | 139 | // An "SSH_KEY" authentication indicates that we need some sort of SSH 140 | // authentication. This can currently either come from the ssh-agent 141 | // process or from a raw in-memory SSH key. 142 | // 143 | // If we get called with this then the only way that should be possible 144 | // is if a username is specified in the URL itself (e.g., `username` is 145 | // Some), hence the unwrap() here. We try custom usernames down below. 146 | if allowed.contains(git2::CredentialType::SSH_KEY) { 147 | // If ssh-agent authentication fails, libgit2 will keep 148 | // calling this callback asking for other authentication 149 | // methods to try. Make sure we only try ssh-agent once. 
150 | self.ssh_attempts_count += 1; 151 | let u = username.unwrap_or("git"); 152 | return if self.ssh_attempts_count == 1 { 153 | trace!("Trying ssh-key from agent with username {u}."); 154 | git2::Cred::ssh_key_from_agent(u) 155 | } else { 156 | let candidate_idx = self.ssh_attempts_count - 2; 157 | if candidate_idx < self.ssh_key_candidates.len() { 158 | let key = self.ssh_key_candidates.get(candidate_idx); 159 | match key { 160 | // try without passphrase 161 | Some(k) => { 162 | trace!("Trying ssh-key {} without passphrase.", k.to_string_lossy()); 163 | git2::Cred::ssh_key(u, None, k, None) 164 | } 165 | None => Err(git2::Error::from_str( 166 | "failed ssh authentication for repository", 167 | )), 168 | } 169 | } else { 170 | if self.ssh_key_candidates.is_empty() { 171 | warn!("There are no ssh-keys in ~/.ssh, run ssh-keygen or mount your .ssh directory."); 172 | } 173 | Err(git2::Error::from_str( 174 | "no ssh-key found that can authenticate to your repository", 175 | )) 176 | } 177 | }; 178 | } 179 | 180 | // Sometimes libgit2 will ask for a username/password in plaintext. 181 | // 182 | // If ssh-agent authentication fails, libgit2 will keep calling this 183 | // callback asking for other authentication methods to try. Check 184 | // cred_helper_bad to make sure we only try the git credentail helper 185 | // once, to avoid looping forever. 186 | if allowed.contains(git2::CredentialType::USER_PASS_PLAINTEXT) 187 | && self.cred_helper_bad.is_none() 188 | { 189 | if let Some((username, password)) = &self.https_credentials { 190 | trace!("Trying username-password from command line argument {username}."); 191 | return git2::Cred::userpass_plaintext(username, password); 192 | } 193 | 194 | trace!("Trying username-password authentication from credential helper."); 195 | let r = git2::Cred::credential_helper(&self.cfg, url, username); 196 | self.cred_helper_bad = Some(r.is_err()); 197 | return r; 198 | } 199 | 200 | // I'm... 
not sure what the DEFAULT kind of authentication is, but seems 201 | // easy to support? 202 | if allowed.contains(git2::CredentialType::DEFAULT) { 203 | return git2::Cred::default(); 204 | } 205 | 206 | // Stop trying 207 | trace!("There are not authentication available."); 208 | Err(git2::Error::from_str("no valid authentication available")) 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /src/checks/git/known_hosts.rs: -------------------------------------------------------------------------------- 1 | use super::GitError; 2 | use dirs::home_dir; 3 | use log::{debug, warn}; 4 | use std::{ 5 | fs::{create_dir, read_to_string, File}, 6 | io::Write, 7 | path::PathBuf, 8 | }; 9 | 10 | // https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/githubs-ssh-key-fingerprints 11 | const GITHUB_FINGERPRINTS: &str = "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= 12 | github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl 13 | github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk"; 14 | 15 | // https://docs.gitlab.com/ee/user/gitlab_com/index.html#ssh-host-keys-fingerprints 16 | const GITLAB_FINGERPRINTS: &str = "gitlab.com ecdsa-sha2-nistp256 
AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY== 17 | gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf 18 | gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9"; 19 | 20 | // https://bitbucket.org/site/ssh 21 | const BITBUCKET_FINGERPRINTS: &str = "bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE= 22 | bitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO 23 | bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M="; 24 | 25 | /// Setup the known host fingerprints. 26 | /// 27 | /// There is no simple way to configure ssh from within libgit2. To make it easier, 28 | /// we err on the side of usability and if there is no `known_hosts` file, we assume 29 | /// that we are in a container and create it with some default fingerprints. 
30 | /// 31 | /// There is also a flag to add custom host key, in this case we only set that one 32 | /// in the `known_hosts`. 33 | pub fn setup_known_hosts(additional_host: Option) -> Result<(), GitError> { 34 | let ssh_dir = home_dir().unwrap_or(PathBuf::from("~")).join(".ssh"); 35 | if !ssh_dir.exists() { 36 | create_dir(&ssh_dir).map_err(|_| GitError::SshConfigFailed)?; 37 | } 38 | 39 | let known_hosts = ssh_dir.join("known_hosts"); 40 | 41 | if let Some(host) = additional_host { 42 | let mut is_additional_host_found = false; 43 | if known_hosts.exists() { 44 | let known_hosts_contents = 45 | read_to_string(&known_hosts).map_err(|_| GitError::SshConfigFailed)?; 46 | is_additional_host_found = known_hosts_contents.contains(&host); 47 | } 48 | 49 | if !is_additional_host_found { 50 | let mut known_hosts_file = File::options() 51 | .append(true) 52 | .create(true) 53 | .open(&known_hosts) 54 | .map_err(|_| GitError::SshConfigFailed)?; 55 | 56 | debug!( 57 | "Host key not found in {}, adding from arguments.", 58 | known_hosts.to_string_lossy() 59 | ); 60 | writeln!(known_hosts_file, "{host}").map_err(|_| GitError::SshConfigFailed)?; 61 | } 62 | } else if !known_hosts.exists() { 63 | let mut known_hosts_file = 64 | File::create(&known_hosts).map_err(|_| GitError::SshConfigFailed)?; 65 | 66 | warn!( 67 | "There is no {}, creating with default fingerprints.", 68 | known_hosts.to_string_lossy() 69 | ); 70 | writeln!(known_hosts_file, "{GITHUB_FINGERPRINTS}") 71 | .map_err(|_| GitError::SshConfigFailed)?; 72 | writeln!(known_hosts_file, "{GITLAB_FINGERPRINTS}") 73 | .map_err(|_| GitError::SshConfigFailed)?; 74 | writeln!(known_hosts_file, "{BITBUCKET_FINGERPRINTS}") 75 | .map_err(|_| GitError::SshConfigFailed)?; 76 | } 77 | 78 | Ok(()) 79 | } 80 | -------------------------------------------------------------------------------- /src/checks/git/repository.rs: -------------------------------------------------------------------------------- 1 | use super::{ 2 | 
credentials::{CredentialAuth, CredentialHandler},
    GitError,
};
use git2::{
    AnnotatedCommit, AutotagOption, Config, FetchOptions, Oid, RemoteCallbacks, Repository,
    StatusOptions,
};
use log::{debug, trace};
use std::collections::HashMap;

/// A snapshot of the repository HEAD and its upstream remote.
pub struct GitRepositoryInformation {
    pub ref_name: String,
    pub branch_name: String,
    pub commit_sha: Oid,
    pub commit_short_sha: String,
    pub remote_name: String,
    pub remote_url: String,
}

/// A directory that is opened as a git repository.
///
/// It is a wrapper around the underlying `git2` [Repository](git2::Repository).
pub struct GitRepository {
    repo: Repository,
    // Optional credentials (SSH key or HTTPS user/token) used when fetching.
    auth: Option<CredentialAuth>,
}

/// Return the 7 characters short hash version for a commit SHA
pub fn shorthash(sha: &Oid) -> String {
    // An Oid always renders as 40 hex characters, so the slice cannot panic.
    sha.to_string()[0..7].to_string()
}

impl GitRepository {
    /// Open a directory as a GitRepository. Fails if the directory is not a valid git repo.
    pub fn open(directory: &str) -> Result<Self, GitError> {
        let repo = Repository::open(directory).map_err(|err| {
            GitError::NotAGitRepository(String::from(directory), err.message().trim().to_string())
        })?;

        // Do a sanity check to fail instantly if there are any issues
        let git_repo = GitRepository { repo, auth: None };
        git_repo.get_repository_information()?;

        Ok(git_repo)
    }

    /// Set the credentials to be used for subsequent fetches.
    pub fn set_auth(&mut self, auth: CredentialAuth) {
        self.auth = Some(auth);
    }

    /// Get information about the current repository, for context and usage in GitRepository
    pub fn get_repository_information(&self) -> Result<GitRepositoryInformation, GitError> {
        let Self { repo, .. } = self;
        let head = repo.head().map_err(|_| GitError::NotOnABranch)?;
        let ref_name = head.name().ok_or(GitError::NotOnABranch)?;
        let commit_sha = head
            .peel_to_commit()
            .map_err(|_| GitError::NotOnABranch)?
            .id();

        let branch_name = head.shorthand().ok_or(GitError::NotOnABranch)?;
        let remote_buf = repo
            .branch_upstream_remote(ref_name)
            .map_err(|_| GitError::NoRemoteForBranch(String::from(branch_name)))?;
        let remote_name = remote_buf
            .as_str()
            .ok_or_else(|| GitError::NoRemoteForBranch(String::from(branch_name)))?;

        let remote = repo
            .find_remote(remote_name)
            .map_err(|_| GitError::NoRemoteForBranch(String::from(branch_name)))?;

        let remote_url = remote
            .url()
            .ok_or(GitError::NoRemoteForBranch(String::from(branch_name)))?;

        Ok(GitRepositoryInformation {
            ref_name: ref_name.to_string(),
            branch_name: branch_name.to_string(),
            commit_short_sha: shorthash(&commit_sha),
            commit_sha,
            remote_url: remote_url.to_string(),
            remote_name: remote_name.to_string(),
        })
    }

    // Inspired from: https://github.com/rust-lang/git2-rs/blob/master/examples/pull.rs
    /// Fetch the current branch from its upstream remote and return the
    /// annotated commit that `FETCH_HEAD` points to.
    pub fn fetch(&self) -> Result<AnnotatedCommit<'_>, GitError> {
        let Self { repo, .. } = self;
        let GitRepositoryInformation {
            branch_name,
            remote_name,
            ..
        } = self.get_repository_information()?;

        trace!("Trying to fetch {branch_name} from {remote_name}.");

        let mut remote = repo
            .find_remote(&remote_name)
            .map_err(|_| GitError::NoRemoteForBranch(branch_name.clone()))?;

        // Setup authentication callbacks to fetch the repository
        let mut cb = RemoteCallbacks::new();
        let git_config = Config::open_default().map_err(|_| GitError::ConfigLoadingFailed)?;
        let mut ch = CredentialHandler::new(git_config, self.auth.clone());
        cb.credentials(move |url, username, allowed| {
            ch.try_next_credential(url, username, allowed)
        });

        // Set option to download tags automatically
        let mut opts = FetchOptions::new();
        opts.remote_callbacks(cb);
        opts.download_tags(AutotagOption::Auto);

        // Fetch the remote state
        remote
            .fetch(&[branch_name.clone()], Some(&mut opts), None)
            .map_err(|err| GitError::FetchFailed(err.message().trim().to_string()))?;

        let fetch_head = repo
            .find_reference("FETCH_HEAD")
            .map_err(|err| GitError::FetchFailed(err.message().trim().to_string()))?;
        let fetch_commit = repo
            .reference_to_annotated_commit(&fetch_head)
            .map_err(|err| GitError::FetchFailed(err.message().trim().to_string()))?;

        trace!(
            "Fetched {remote_name}/{branch_name} successfully to {}.",
            fetch_head
                .peel_to_commit()
                .map(|c| shorthash(&c.id()))
                .unwrap_or_else(|_| "unknown reference".to_string())
        );

        Ok(fetch_commit)
    }

    /// Analyse whether the fetched commit can be applied with a fast-forward.
    /// Returns `Ok(true)` for fast-forward, `Ok(false)` when already up to date,
    /// and an error for anything that would require a real merge.
    pub fn check_if_updatable(&self, fetch_commit: &AnnotatedCommit) -> Result<bool, GitError> {
        let Self { repo, .. } = self;
        let (analysis, _) = repo
            .merge_analysis(&[fetch_commit])
            .map_err(|_| GitError::MergeConflict)?;

        if analysis.is_fast_forward() {
            trace!("Fetched commit can be fast forwarded.");
            Ok(true)
        } else if analysis.is_up_to_date() {
            trace!("Fetched commit is up to date.");
            Ok(false)
        } else {
            if analysis.is_unborn() {
                debug!("Fetched commit is not pointing to a valid branch (unborn), failing.");
            } else if analysis.is_normal() {
                debug!("Fetched commit is a merge conflict, failing.");
            }
            Err(GitError::MergeConflict)
        }
    }

    /// Find tags matching `pattern` on commits between the current HEAD and
    /// `last_commit_id`, returned in chronological order.
    pub fn find_tags(
        &self,
        last_commit_id: Oid,
        pattern: &str,
    ) -> Result<Vec<(String, Oid)>, GitError> {
        let Self { repo, .. } = self;
        let GitRepositoryInformation {
            commit_sha: first_commit_id,
            commit_short_sha: first_commit_short_sha,
            ..
        } = self.get_repository_information()?;

        // Walk from the fetched commit
        let mut revwalk = repo.revwalk().map_err(|_| GitError::TagMatchingFailed)?;
        revwalk.push(last_commit_id).map_err(|_| {
            GitError::FetchFailed("fetched commit is not on this branch".to_string())
        })?;
        trace!(
            "Walking through fetched commits between {}..{}.",
            shorthash(&last_commit_id),
            first_commit_short_sha
        );

        // Collect all tag references beforehand to improve performance
        // If a tag does not point to a valid commit, ignore it
        let tag_names = repo
            .tag_names(Some(pattern))
            .map_err(|_| GitError::TagMatchingFailed)?;
        let tag_commits: HashMap<Oid, String> = tag_names
            .iter()
            .flatten()
            .flat_map(|tag_name| {
                repo.find_reference(&format!("refs/tags/{tag_name}"))
                    .and_then(|tag| tag.peel_to_commit())
                    .map(|tag| (tag.id(), tag_name.to_string()))
            })
            .collect();

        // Go through the list of commits, and register if a commit has a tag pointing to it
        let mut tags = vec![];
        for oid in revwalk {
            let oid = oid.map_err(|_| GitError::TagMatchingFailed)?;

            if oid == first_commit_id {
                break;
            }

            if let Some(tag_name) = tag_commits.get(&oid) {
                debug!("Commit {} has a matching tag: {tag_name}.", shorthash(&oid));
                tags.push((tag_name.clone(), oid));
            }
        }

        if tags.is_empty() {
            debug!("There is no new commit with tag matching \"{pattern}\".");
        }

        // Put it into chronological order
        tags.reverse();

        Ok(tags)
    }

    /// Fast-forward the current branch reference to `commit_id` and check it out.
    /// Refuses to touch a dirty working tree.
    pub fn pull(&self, commit_id: Oid) -> Result<(), GitError> {
        let Self { repo, .. } = self;
        let GitRepositoryInformation {
            branch_name,
            ref_name,
            ..
        } = self.get_repository_information()?;

        trace!("Pulling {branch_name}.");

        if !repo
            .statuses(Some(StatusOptions::new().include_ignored(false)))
            .map_err(|_| GitError::DirtyWorkingTree)?
            .is_empty()
        {
            return Err(GitError::DirtyWorkingTree);
        }

        let msg = format!("Fast-Forward: Setting {} to id: {}.", ref_name, commit_id);

        let fetch_short = shorthash(&commit_id);
        trace!("Setting {} to id: {}.", ref_name, fetch_short);

        let mut branch_ref = repo
            .find_reference(&ref_name)
            .map_err(|_| GitError::NotOnABranch)?;
        branch_ref
            .set_target(commit_id, &msg)
            .map_err(|_| GitError::FailedSettingHead(fetch_short.to_string()))?;
        repo.set_head(&ref_name)
            .map_err(|_| GitError::FailedSettingHead(fetch_short.to_string()))?;
        repo.checkout_head(Some(git2::build::CheckoutBuilder::default().force()))
            .map_err(|_| GitError::FailedSettingHead(fetch_short.to_string()))?;

        debug!("Checked out {} on branch {}.", fetch_short, branch_name);

        Ok(())
    }
}

// -------- /src/checks/mod.rs --------
use crate::context::Context;
use mockall::automock;
use thiserror::Error;

/// A check to fetch and pull a local git repository.
pub mod git;
/// A check to watch a directory for changes.
pub mod watch;

/// A custom error for describing the error cases for checks
#[derive(Debug, Error)]
pub enum CheckError {
    /// Cannot initialize check, because it has a misconfiguration.
    #[error("not configured correctly: {0}")]
    Misconfigured(String),
    /// Cannot run check, because there isn't enough permission.
    #[error("permission denied: {0}")]
    PermissionDenied(String),
    /// Cannot update the check, because there is a conflict.
    /// This can be a merge conflict, a filesystem issue
    #[error("there is a conflict: {0}")]
    Conflict(String),
    /// Running the trigger failed.
#[error("failed while running: {0}")]
    FailedUpdate(String),
}

/// A check is a process that tests if there are any changes and updates it.
///
/// Checks may include:
/// - git fetch and git pull ([git::GitCheck])
/// - watch a directory for updates ([watch::WatchCheck])
/// - etc.
#[automock]
pub trait Check {
    /// Check if there are changes and update if necessary.
    /// Returns `Ok(true)` when there were updates that actions should run for.
    fn check(&mut self, context: &mut Context) -> Result<bool, CheckError>;
}

// -------- /src/checks/watch.rs --------
use super::{Check, CheckError};
use crate::context::Context;
use std::result::Result;

/// A check to watch a directory for changes.
pub struct WatchCheck;

impl Check for WatchCheck {
    // Not implemented yet: panics via todo!() when called.
    fn check(&mut self, _context: &mut Context) -> Result<bool, CheckError> {
        todo!()
    }
}

#[cfg(test)]
mod tests {}

// -------- /src/context.rs --------
use std::collections::HashMap;

/// The context which can share data between triggers, checks and actions.
pub type Context = HashMap<&'static str, String>;

// -------- /src/lib.rs --------
//! Watch local git repositories, keep in sync with remote and run commands.
//!
//! ## How it works
//!
//! `gw` is built up from **triggers**, **checks** and **actions**.
//! Triggers are long running background processes that initiates checks
//! (for example periodic triggers, or HTTP triggers). Checks tests
//! if there are any changes in the directory (be it git or filesystem changes)
//! and runs the actions if there was. Actions are arbitrary code that runs
//! (e.g. user-defined shell scripts).
//!
//! ```ignore
//!   +---------+       +--------+       +--------+
//!   | trigger | ----> | checks | ----> | action |
//!   +---------+       +--------+       +--------+
//! ```
//!

/// An action is a process that runs if any changes occured (e.g. [running actions](actions::script::ScriptAction)).
pub mod actions;
/// A check is a process that tests if there are any changes and updates it.
pub mod checks;
/// A trigger is a long running background process, which initiates the checks
/// (e.g. [on a schedule](triggers::schedule::ScheduleTrigger), [on HTTP request](triggers::http::HttpTrigger)
/// or [once](triggers::once::OnceTrigger)).
pub mod triggers;

/// The main program loop, that runs the triggers, checks and actions infinitely.
pub mod start;

/// The context which can share data between the different steps.
pub mod context;

// -------- /src/logger.rs --------
use crate::{args::Args, MainError};
use log::{warn, Level, LevelFilter};
use simplelog::{
    format_description, Color, ColorChoice, ConfigBuilder, LevelPadding, TermLogger, TerminalMode,
};

// Use the same format as simple_logger
const TIMESTAMP_FORMAT_OFFSET: &[simplelog::FormatItem<'_>] = format_description!(
    "[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:3][offset_hour sign:mandatory]:[offset_minute]"
);

/// Initialize the terminal logger; verbosity is mapped from the -q/-v flags.
pub fn init_logger(args: &Args) -> Result<(), MainError> {
    TermLogger::init(
        // -q wins over any -v; otherwise each -v raises the level.
        match (args.quiet, args.verbose) {
            (true, _) => LevelFilter::Error,
            (false, 0) => LevelFilter::Info,
            (false, 1) => LevelFilter::Debug,
            (false, _) => LevelFilter::Trace,
        },
        ConfigBuilder::new()
.set_level_color(Level::Debug, Some(Color::Magenta))
            .set_level_color(Level::Trace, None)
            .set_level_padding(LevelPadding::Right)
            .set_target_level(LevelFilter::Off)
            .set_thread_level(LevelFilter::Off)
            .set_time_format_custom(TIMESTAMP_FORMAT_OFFSET)
            .set_time_offset_to_local()
            .map_err(|_| MainError::FailedLoggerTimezones)?
            .build(),
        TerminalMode::Mixed,
        ColorChoice::Auto,
    )?;

    if args.verbose > 3 {
        warn!("Okay, it's time to stop. It won't get more verbose than this.")
    }

    Ok(())
}

// -------- /src/main.rs --------
use args::{parse_args, ArgAction};
use gw_bin::{
    actions::{
        process::{ProcessAction, ProcessParams},
        script::ScriptAction,
        Action, ActionError,
    },
    checks::{
        git::{CredentialAuth, GitCheck},
        Check, CheckError,
    },
    start::{start, StartError},
    triggers::{
        http::HttpTrigger, once::OnceTrigger, schedule::ScheduleTrigger, signal::SignalTrigger,
        Trigger,
    },
};
use log::{debug, error, warn, SetLoggerError};
use logger::init_logger;
use std::{fs, process, time::Duration};
use thiserror::Error;

mod args;
mod logger;

/// Top-level error type aggregating every failure mode of the binary.
#[derive(Debug, Error)]
pub enum MainError {
    #[error("You have to pass a directory to watch.")]
    MissingDirectoryArg,
    #[error("Directory {0} not found.")]
    NonExistentDirectory(String),
    #[error("You cannot start multiple processes, only add -p or -P once.")]
    MultipleProcessArgs,
    #[error("Check failed: {0}.")]
    FailedCheck(#[from] CheckError),
    #[error("Failed setting up logger with timezones.")]
    FailedLoggerTimezones,
    #[error("Failed setting up logger.")]
    FailedLogger(#[from] SetLoggerError),
    #[error(transparent)]
    FailedStart(#[from] StartError),
    #[error("Action failed: {0}.")]
    FailedAction(#[from] ActionError),
}

/// Parse arguments, wire up triggers, check and actions, then run the main loop.
fn main_inner() -> Result<(), MainError> {
    let (args, arg_actions) = parse_args();

    if args.version {
        println!("{}", env!("CARGO_PKG_VERSION"));
        process::exit(0);
    }

    init_logger(&args)?;

    // Check if directory exists and convert to full path
    let directory_relative = args.directory.ok_or(MainError::MissingDirectoryArg)?;
    let directory_path = fs::canonicalize(directory_relative.clone())
        .map_err(|_| MainError::NonExistentDirectory(directory_relative.clone()))?;
    let directory = directory_path
        .to_str()
        .ok_or(MainError::NonExistentDirectory(directory_relative))?
        .to_string();

    // Setup triggers.
    let mut triggers: Vec<Box<dyn Trigger>> = vec![Box::new(SignalTrigger::new())];
    if args.once {
        debug!("Setting up OnceTrigger (this will disable all other triggers).");
        triggers.push(Box::new(OnceTrigger));
    } else {
        let duration: Duration = args.delay.into();
        if !duration.is_zero() {
            debug!("Setting up ScheduleTrigger on every {}.", args.delay);
            triggers.push(Box::new(ScheduleTrigger::new(duration)));
        }
        if let Some(http) = args.http {
            debug!("Setting up HttpTrigger on {http}.");
            triggers.push(Box::new(HttpTrigger::new(http)));
        }
    }

    // Setup check.
    let git_trigger = args.trigger.into();
    debug!("Setting up GitCheck for \"{directory}\" on every {git_trigger}.");
    let mut git_check = GitCheck::open(&directory, args.git_known_host, git_trigger)?;
    if let Some(ssh_key) = args.ssh_key {
        git_check.set_auth(CredentialAuth::Ssh(ssh_key));
    }
    // NOTE: HTTPS credentials override a previously set SSH key.
    if let (Some(username), Some(password)) = (args.git_username, args.git_token) {
        git_check.set_auth(CredentialAuth::Https(username, password));
    }
    let mut check: Box<dyn Check> = Box::new(git_check);

    // Setup actions. Only a single long-running process action is allowed.
    if arg_actions
        .iter()
        .filter(|a| matches!(a, ArgAction::Process(_, _)))
        .count()
        > 1
    {
        return Err(MainError::MultipleProcessArgs);
    }
    let mut actions: Vec<Box<dyn Action>> = vec![];
    for arg_action in arg_actions {
        match arg_action {
            ArgAction::Script(script, runs_in_shell) => {
                debug!("Setting up ScriptAction {script:?} on change.");
                actions.push(Box::new(
                    ScriptAction::new(directory.clone(), script, runs_in_shell)
                        .map_err(ActionError::from)?,
                ));
            }
            ArgAction::Process(process, runs_in_shell) => {
                debug!("Setting up ProcessAction {process:?} on change.");
                let mut process_params =
                    ProcessParams::new(process, directory.clone(), runs_in_shell)
                        .map_err(ActionError::from)?;

                if let Some(retries) = args.process_retries {
                    process_params.set_retries(retries);
                }
                if let Some(ref stop_signal) = args.stop_signal {
                    process_params
                        .set_stop_signal(stop_signal.clone())
                        .map_err(ActionError::from)?;
                }
                if let Some(stop_timeout) = args.stop_timeout {
                    process_params.set_stop_timeout(stop_timeout.into());
                }

                actions.push(Box::new(
                    ProcessAction::new(process_params).map_err(ActionError::from)?,
                ));
            }
        }
    }

    if actions.is_empty() {
        warn!("There are no actions defined: we will only pull!");
    }

    // Start the main script.
    start(triggers, &mut check, &mut actions)?;
    Ok(())
}

fn main() {
    if let Err(err) = main_inner() {
        error!("{err}");
        process::exit(1);
    }
}

// -------- /src/start.rs --------
use crate::{
    actions::Action,
    checks::{Check, CheckError},
    context::Context,
    triggers::{Trigger, TriggerError},
};
use log::{debug, error, info};
use std::{sync::mpsc, thread};
use thiserror::Error;

/// A custom error implementation for the start function
#[derive(Debug, Error)]
pub enum StartError {
    #[error("You have to define at least one trigger.")]
    NoTriggers,
    #[error("Trigger failed: {0}.")]
    MisconfiguredTrigger(#[from] TriggerError),
    #[error("Check failed: {0}.")]
    FailedCheck(#[from] CheckError),
}

/// The main program loop, that runs the triggers, checks and actions infinitely.
pub fn start(
    triggers: Vec<Box<dyn Trigger>>,
    check: &mut Box<dyn Check>,
    actions: &mut [Box<dyn Action>],
) -> Result<(), StartError> {
    // Triggers send Some(context) to run a check and None to shut the loop down.
    let (tx, rx) = mpsc::channel::<Option<Context>>();

    if triggers.is_empty() {
        return Err(StartError::NoTriggers);
    }

    // Each trigger listens on its own thread and reports into the shared channel.
    for trigger in triggers {
        let tx = tx.clone();
        thread::spawn(move || {
            let result = trigger.listen(tx);
            if let Err(err) = result {
                error!("Trigger failed: {err}.");
            }
        });
    }

    debug!("Waiting on triggers.");
    while let Ok(Some(mut context)) = rx.recv() {
        match check.check(&mut context) {
            Ok(true) => {
                info!(
                    "There are updates, {}.",
                    if actions.is_empty() {
                        "pulling"
                    } else {
                        "running actions"
                    }
                );
                for action in actions.iter_mut() {
                    let result = action.run(&context);
                    if let Err(err) = result {
                        error!("Action failed, we will not continue: {err}.");
                        break;
                    }
                }
            }
            Ok(false) => {
                debug!("There are no updates.");
            }
            Err(err) => {
                error!("Check failed: {err}.");
            }
        }
    }

    debug!("Finished running.");

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        actions::{Action, MockAction},
        checks::{Check, MockCheck},
        triggers::{MockTrigger, Trigger},
    };
    use std::collections::HashMap;

    #[test]
    fn it_should_call_once() {
        // Setup mock triggers.
        let mut mock_trigger = MockTrigger::new();
        mock_trigger.expect_listen().returning(|tx| {
            tx.send(Some(HashMap::new()))?;
            tx.send(None)?;
            Ok(())
        });
        let triggers: Vec<Box<dyn Trigger>> = vec![Box::new(mock_trigger)];

        // Setup mock check.
        let mut mock_check = MockCheck::new();
        mock_check.expect_check().times(1).returning(|_| Ok(true));
        let mut check: Box<dyn Check> = Box::new(mock_check);

        // Setup mock action.
        let mut mock_action = MockAction::new();
        mock_action.expect_run().times(1).returning(|_| Ok(()));
        let actions: &mut [Box<dyn Action>] = &mut [Box::new(mock_action)];

        let result = start(triggers, &mut check, actions);
        assert!(result.is_ok());
    }

    #[test]
    fn it_should_not_run_on_a_false_check() {
        // Setup mock triggers.
        let mut mock_trigger = MockTrigger::new();
        mock_trigger.expect_listen().returning(|tx| {
            tx.send(Some(HashMap::new()))?;
            tx.send(None)?;
            Ok(())
        });
        let triggers: Vec<Box<dyn Trigger>> = vec![Box::new(mock_trigger)];

        // Setup mock check.
        let mut mock_check = MockCheck::new();
        mock_check.expect_check().times(1).returning(|_| Ok(false));
        let mut check: Box<dyn Check> = Box::new(mock_check);

        // Setup mock action.
        let mut mock_action = MockAction::new();
        mock_action.expect_run().times(0);
        let actions: &mut [Box<dyn Action>] = &mut [Box::new(mock_action)];

        let result = start(triggers, &mut check, actions);
        assert!(result.is_ok());
    }

    #[test]
    fn it_should_not_run_on_a_failed_check() {
        // Setup mock triggers.
        let mut mock_trigger = MockTrigger::new();
        mock_trigger.expect_listen().returning(|tx| {
            tx.send(Some(HashMap::new()))?;
            tx.send(None)?;
            Ok(())
        });
        let triggers: Vec<Box<dyn Trigger>> = vec![Box::new(mock_trigger)];

        // Setup mock check.
        let mut mock_check = MockCheck::new();
        mock_check
            .expect_check()
            .times(1)
            .returning(|_| Err(CheckError::Misconfigured(String::from("Testing purposes."))));
        let mut check: Box<dyn Check> = Box::new(mock_check);

        // Setup mock action.
        let mut mock_action = MockAction::new();
        mock_action.expect_run().times(0);
        let actions: &mut [Box<dyn Action>] = &mut [Box::new(mock_action)];

        let result = start(triggers, &mut check, actions);
        assert!(result.is_ok());
    }

    #[test]
    fn it_should_fail_without_triggers() {
        // Setup empty triggers.
        let triggers: Vec<Box<dyn Trigger>> = vec![];

        // Setup mock check.
        let mut mock_check = MockCheck::new();
        mock_check.expect_check().times(0);
        let mut check: Box<dyn Check> = Box::new(mock_check);

        // Setup mock action.
        let mut mock_action = MockAction::new();
        mock_action.expect_run().times(0);
        let actions: &mut [Box<dyn Action>] = &mut [Box::new(mock_action)];

        let result = start(triggers, &mut check, actions);
        assert!(result.is_err());
    }
}

// -------- /src/triggers/http.rs --------
use super::{Trigger, TriggerError};
use crate::context::Context;
use log::{debug, info};
use std::{collections::HashMap, sync::mpsc::Sender};
use thiserror::Error;
use tiny_http::{Response, Server};

const TRIGGER_NAME: &str = "HTTP";

/// A trigger that runs on an HTTP request.
///
/// This could be used to trigger checks from git remotes (e.g. GitHub, GitLab) with webhooks.
/// Given that your server can be reached from the outside, you can pass your server's hostname
/// or IP address and have actions running on git changes immediately.
pub struct HttpTrigger {
    http: String,
}

/// Custom error describing the error cases for the HttpTrigger.
#[derive(Debug, Error)]
pub enum HttpError {
    /// Initializing the HTTP server failed. It usually means the configuration was incorrect
    /// or the port was already allocated.
#[error("cannot start server on {0}")]
    CantStartServer(String),
    /// Cannot send trigger with Sender. This usually because the receiver is dropped.
    #[error("cannot trigger changes, receiver hang up")]
    ReceiverHangup(#[from] std::sync::mpsc::SendError<Option<Context>>),
    /// Failed to send response.
    #[error("failed to send response")]
    FailedResponse(#[from] std::io::Error),
}

impl From<HttpError> for TriggerError {
    fn from(val: HttpError) -> Self {
        match val {
            HttpError::CantStartServer(s) => TriggerError::Misconfigured(s),
            HttpError::ReceiverHangup(s) => TriggerError::ReceiverHangup(s),
            HttpError::FailedResponse(s) => TriggerError::FailedTrigger(s.to_string()),
        }
    }
}

impl HttpTrigger {
    /// Create an new HTTP trigger with a HTTP url. It accepts an address as a string,
    /// for example "0.0.0.0:1234".
    pub fn new(http: String) -> Self {
        Self { http }
    }

    /// Serve requests forever, emitting a context for every incoming request.
    fn listen_inner(&self, tx: Sender<Option<Context>>) -> Result<(), HttpError> {
        let listener =
            Server::http(&self.http).map_err(|_| HttpError::CantStartServer(self.http.clone()))?;
        info!("Listening on {}...", self.http);
        for request in listener.incoming_requests() {
            debug!("Received request on {} {}", request.method(), request.url());

            let context: Context = HashMap::from([
                ("TRIGGER_NAME", TRIGGER_NAME.to_string()),
                ("HTTP_METHOD", request.method().to_string()),
                ("HTTP_URL", request.url().to_string()),
            ]);
            tx.send(Some(context)).map_err(HttpError::from)?;

            request.respond(Response::from_string("OK"))?;
        }
        Ok(())
    }
}

impl Trigger for HttpTrigger {
    /// Starts a minimal HTTP 1.1 server, that triggers on every request.
    ///
    /// Every method and every URL triggers and returns 200 status code with plaintext "OK".
    fn listen(&self, tx: Sender<Option<Context>>) -> Result<(), TriggerError> {
        self.listen_inner(tx)?;

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::{
        error::Error,
        sync::mpsc,
        thread::{self, sleep},
        time::Duration,
    };

    #[test]
    fn it_should_be_created_from_http_url() {
        let trigger = HttpTrigger::new(String::from("0.0.0.0:1234"));
        assert_eq!("0.0.0.0:1234", &trigger.http);
    }

    #[test]
    fn it_should_return_ok_on_every_request() -> Result<(), Box<dyn Error>> {
        let trigger = HttpTrigger::new(String::from("0.0.0.0:10101"));
        let (tx, rx) = mpsc::channel::<Option<Context>>();

        thread::spawn(move || {
            let _ = trigger.listen_inner(tx);
        });

        // Sleep for the HTTP server to start up.
        sleep(Duration::from_millis(100));

        let result = ureq::get("http://localhost:10101").call()?;
        assert_eq!(200, result.status());
        assert_eq!("OK", result.into_body().read_to_string()?);

        let result = ureq::post("http://localhost:10101/trigger").send_empty()?;
        assert_eq!(200, result.status());
        assert_eq!("OK", result.into_body().read_to_string()?);

        let msg = rx.recv()?;
        let context = msg.unwrap();
        assert_eq!(TRIGGER_NAME, context.get("TRIGGER_NAME").unwrap());
        assert_eq!("GET", context.get("HTTP_METHOD").unwrap());
        assert_eq!("/", context.get("HTTP_URL").unwrap());

        let msg = rx.recv()?;
        let context = msg.unwrap();
        assert_eq!(TRIGGER_NAME, context.get("TRIGGER_NAME").unwrap());
        assert_eq!("POST", context.get("HTTP_METHOD").unwrap());
        assert_eq!("/trigger", context.get("HTTP_URL").unwrap());

        Ok(())
    }

    #[test]
    fn it_should_fail_if_http_url_invalid() {
        let trigger = HttpTrigger::new(String::from("aaaaa"));

        let (tx, _rx) = mpsc::channel::<Option<Context>>();

        let result = trigger.listen_inner(tx);
        assert!(
            matches!(result, Err(HttpError::CantStartServer(_))),
            "{result:?} should be CantStartServer"
        )
    }

    #[test]
    fn it_should_fail_if_sending_fails() -> Result<(), Box<dyn Error>> {
        let trigger = HttpTrigger::new(String::from("0.0.0.0:10102"));
        let (tx, rx) = mpsc::channel::<Option<Context>>();

        thread::spawn(move || {
            // Sleep for the HTTP server to start up.
            sleep(Duration::from_millis(200));

            let _ = ureq::get("http://localhost:10102").call();
        });

        // Drop receiver to create a hangup error
        drop(rx);

        let result = trigger.listen_inner(tx);
        assert!(
            matches!(result, Err(HttpError::ReceiverHangup(_))),
            "{result:?} should be ReceiverHangup"
        );

        Ok(())
    }
}

// -------- /src/triggers/mod.rs --------
use crate::context::Context;
use mockall::automock;
use std::sync::mpsc::Sender;
use thiserror::Error;

/// A trigger that runs on an HTTP request.
pub mod http;
/// A trigger that runs the checks once and then exits.
pub mod once;
/// A trigger that runs the checks periodically.
pub mod schedule;
/// A trigger that runs the checks when a signal is received.
pub mod signal;

/// A custom error for describing the error cases for triggers
#[derive(Debug, Error)]
pub enum TriggerError {
    /// Cannot initialize trigger, because it has a misconfiguration.
    #[error("not configured correctly: {0}")]
    Misconfigured(String),
    /// Cannot send trigger with Sender. This usually because the receiver is dropped.
    #[error("cannot trigger changes, receiver hang up")]
    ReceiverHangup(#[from] std::sync::mpsc::SendError<Option<Context>>),
    /// Running the trigger failed.
#[error("{0}")]
    FailedTrigger(String),
}

/// A trigger is a long running background process, which initiates the checks.
///
/// Triggers may include:
/// - schedules ([schedule::ScheduleTrigger])
/// - HTTP servers ([http::HttpTrigger])
/// - etc.
#[automock]
pub trait Trigger: Sync + Send {
    /// Start the trigger process.
    /// Sends `Some(context)` to request a check and `None` to signal shutdown.
    fn listen(&self, tx: Sender<Option<Context>>) -> Result<(), TriggerError>;
}

// -------- /src/triggers/once.rs --------
use super::{Trigger, TriggerError};
use crate::context::Context;
use log::debug;
use std::{collections::HashMap, sync::mpsc::Sender};

const TRIGGER_NAME: &str = "ONCE";

/// A trigger that runs the checks once and then exits.
pub struct OnceTrigger;

impl Trigger for OnceTrigger {
    /// Starts a trigger that runs once and terminates after.
    fn listen(&self, tx: Sender<Option<Context>>) -> Result<(), TriggerError> {
        debug!("Triggering only once.");
        let context: Context = HashMap::from([("TRIGGER_NAME", TRIGGER_NAME.to_string())]);
        tx.send(Some(context))?;
        // A trailing `None` tells the receiving loop to shut down.
        tx.send(None)?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::mpsc;

    #[test]
    fn it_should_trigger_once_and_stop() {
        let trigger = OnceTrigger;
        let (tx, rx) = mpsc::channel::<Option<Context>>();

        trigger.listen(tx).unwrap();

        let msgs: Vec<_> = rx.iter().collect();
        assert_eq!(
            vec![
                Some(HashMap::from([("TRIGGER_NAME", TRIGGER_NAME.to_string())])),
                None
            ],
            msgs
        );
    }
}

// -------- /src/triggers/schedule.rs --------
use super::{Trigger, TriggerError};
use crate::context::Context;
use duration_string::DurationString;
use log::info;
use std::{
    collections::HashMap,
    sync::mpsc::Sender,
    thread::sleep,
    time::{Duration, Instant},
};
use thiserror::Error;

const TRIGGER_NAME: &str = "SCHEDULE";

/// A trigger that runs the checks periodically.
///
/// This is running in an infinite loop, triggering every time.
pub struct ScheduleTrigger {
    duration: Duration,
    // Optional total runtime after which the trigger stops.
    timeout: Option<Duration>,
}

/// Custom error describing the error cases for the ScheduleTrigger.
#[derive(Debug, Error)]
pub enum ScheduleError {
    /// Cannot send trigger with Sender. This usually because the receiver is dropped.
27 | #[error("cannot trigger changes, receiver hang up")] 28 | ReceiverHangup(#[from] std::sync::mpsc::SendError>), 29 | } 30 | 31 | impl From for TriggerError { 32 | fn from(val: ScheduleError) -> Self { 33 | match val { 34 | ScheduleError::ReceiverHangup(s) => TriggerError::ReceiverHangup(s), 35 | } 36 | } 37 | } 38 | 39 | impl ScheduleTrigger { 40 | /// Creates a new ScheduleTrigger with duration. 41 | pub fn new(duration: Duration) -> Self { 42 | Self { 43 | duration, 44 | timeout: None, 45 | } 46 | } 47 | 48 | /// Creates a new ScheduleTrigger with duration and timeout. 49 | pub fn new_with_timeout(duration: Duration, timeout: Duration) -> Self { 50 | Self { 51 | duration, 52 | timeout: Some(timeout), 53 | } 54 | } 55 | 56 | /// Runs one step in the scheduled time process. Returns true, if it should continue, 57 | /// returns false in case of an error or a timeout. One step should take exactly the duration. 58 | /// In case of an error it terminates or if it will reach the final timeout it will 59 | /// wait until the end of the timeout and returns with false. 
60 | pub fn step( 61 | &self, 62 | tx: Sender>, 63 | final_timeout: Option, 64 | ) -> Result { 65 | let next_check = Instant::now() + self.duration; 66 | 67 | let context: Context = HashMap::from([ 68 | ("TRIGGER_NAME", TRIGGER_NAME.to_string()), 69 | ( 70 | "SCHEDULE_DELAY", 71 | DurationString::from(self.duration).to_string(), 72 | ), 73 | ]); 74 | tx.send(Some(context))?; 75 | 76 | if let Some(final_timeout) = final_timeout { 77 | if next_check > final_timeout { 78 | let until_final_timeout = final_timeout - Instant::now(); 79 | sleep(until_final_timeout); 80 | return Ok(false); 81 | } 82 | } 83 | // TODO: handle overlaps 84 | let until_next_check = next_check - Instant::now(); 85 | sleep(until_next_check); 86 | 87 | // We should handle if the sleep was too long and it went over the timeout 88 | if let Some(final_timeout) = final_timeout { 89 | Ok(Instant::now() < final_timeout) 90 | } else { 91 | Ok(true) 92 | } 93 | } 94 | } 95 | 96 | impl Trigger for ScheduleTrigger { 97 | /// Starts a scheduled trigger on a new thread, starting the steps in a loop. 98 | /// Every step triggers and then waits the given duration. In case of an error, 99 | /// it terminates or if it will reach the final timeout it will wait until 100 | /// the end of the timeout and return. 
101 | fn listen(&self, tx: Sender>) -> Result<(), TriggerError> { 102 | let final_timeout = self.timeout.map(|t| Instant::now() + t); 103 | info!( 104 | "Starting schedule in every {}.", 105 | DurationString::new(self.duration) 106 | ); 107 | 108 | loop { 109 | let should_continue = self.step(tx.clone(), final_timeout)?; 110 | if !should_continue { 111 | break; 112 | } 113 | } 114 | 115 | Ok(()) 116 | } 117 | } 118 | 119 | #[cfg(test)] 120 | mod tests { 121 | use super::*; 122 | use crate::triggers::TriggerError; 123 | use std::{sync::mpsc, time::Instant}; 124 | 125 | #[test] 126 | fn it_should_be_created_from_duration() { 127 | let trigger = ScheduleTrigger::new(Duration::from_millis(100)); 128 | assert_eq!(Duration::from_millis(100), trigger.duration); 129 | assert_eq!(None, trigger.timeout); 130 | } 131 | 132 | #[test] 133 | fn it_should_be_created_from_duration_and_timeout() { 134 | let trigger = ScheduleTrigger::new_with_timeout( 135 | Duration::from_millis(100), 136 | Duration::from_millis(200), 137 | ); 138 | assert_eq!(Duration::from_millis(100), trigger.duration); 139 | assert_eq!(Some(Duration::from_millis(200)), trigger.timeout); 140 | } 141 | 142 | #[test] 143 | fn it_should_trigger_every_100_ms() -> Result<(), TriggerError> { 144 | let trigger = ScheduleTrigger::new(Duration::from_millis(100)); 145 | let (tx, rx) = mpsc::channel::>(); 146 | 147 | for _ in 0..5 { 148 | let start = Instant::now(); 149 | 150 | let should_continue = trigger.step(tx.clone(), None)?; 151 | assert!(should_continue); 152 | 153 | // It should be close to the timings 154 | let msg = rx.recv().unwrap(); 155 | let diff = start.elapsed(); 156 | assert!( 157 | diff >= Duration::from_millis(95), 158 | "Diff {} should be later than 95ms.", 159 | DurationString::from(diff) 160 | ); 161 | 162 | // It should contain the hashmap 163 | let context = msg.unwrap(); 164 | assert_eq!(TRIGGER_NAME, context.get("TRIGGER_NAME").unwrap()); 165 | assert_eq!("100ms", 
context.get("SCHEDULE_DELAY").unwrap()); 166 | } 167 | 168 | Ok(()) 169 | } 170 | 171 | #[test] 172 | fn it_should_not_continue_after_the_timeout() -> Result<(), TriggerError> { 173 | let trigger = ScheduleTrigger::new(Duration::from_millis(100)); 174 | let (tx, _rx) = mpsc::channel::>(); 175 | 176 | let start = Instant::now(); 177 | let final_timeout = start + Duration::from_millis(350); 178 | for _ in 0..5 { 179 | let should_continue = trigger.step(tx.clone(), Some(final_timeout))?; 180 | 181 | // First three should pass, last two fail 182 | if Instant::now() < final_timeout { 183 | assert!( 184 | should_continue, 185 | "Should continue after {} passed, before 300ms.", 186 | DurationString::from(start.elapsed()) 187 | ); 188 | } else { 189 | assert!( 190 | !should_continue, 191 | "Should continue after {} passed, after 300ms.", 192 | DurationString::from(start.elapsed()) 193 | ); 194 | }; 195 | } 196 | 197 | Ok(()) 198 | } 199 | 200 | #[test] 201 | fn it_should_not_trigger_on_a_send_error() { 202 | let trigger = ScheduleTrigger::new(Duration::from_millis(100)); 203 | let (tx, rx) = mpsc::channel::>(); 204 | 205 | // Close receiving end, to create a send error 206 | drop(rx); 207 | 208 | let final_timeout = Instant::now() + Duration::from_millis(350); 209 | let result = trigger.step(tx.clone(), Some(final_timeout)); 210 | 211 | // It should fail, because of ReceiverHangup 212 | assert!( 213 | matches!(result, Err(ScheduleError::ReceiverHangup(_)),), 214 | "{result:?} should be ReceiverHangup" 215 | ); 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /src/triggers/signal.rs: -------------------------------------------------------------------------------- 1 | use super::{Trigger, TriggerError}; 2 | use crate::context::Context; 3 | use std::sync::mpsc::Sender; 4 | use log::debug; 5 | 6 | #[cfg(unix)] 7 | use std::sync::atomic::AtomicU8; 8 | 9 | const _TRIGGER_NAME: &str = "SIGNAL"; 10 | 11 | /// A trigger that 
terminates the program on a signal. 12 | pub struct SignalTrigger { 13 | #[cfg(unix)] 14 | trigger_count: AtomicU8, 15 | } 16 | 17 | impl SignalTrigger { 18 | pub fn new() -> SignalTrigger { 19 | SignalTrigger { 20 | #[cfg(unix)] 21 | trigger_count: AtomicU8::new(0), 22 | } 23 | } 24 | 25 | #[cfg(unix)] 26 | fn listen_inner(&self, tx: Sender>, signals: I) -> Result<(), TriggerError> 27 | where 28 | I: IntoIterator, 29 | { 30 | use log::error; 31 | use std::{process, sync::atomic::Ordering, thread::sleep, time::Duration}; 32 | for signal in signals.into_iter() { 33 | let previous = self.trigger_count.fetch_add(1, Ordering::Acquire); 34 | if previous == 0 { 35 | debug!("Got signal {signal}, terminating after all actions finished.",); 36 | if tx.send(None).is_err() { 37 | error!("Failed terminating the application with signal {signal}."); 38 | } 39 | } else { 40 | // Allow a little time for the clean shutdown to still happen. 41 | sleep(Duration::from_millis(100)); 42 | debug!("Got signal {signal}, terminating right now.",); 43 | process::exit(signal); 44 | } 45 | } 46 | 47 | Ok(()) 48 | } 49 | } 50 | 51 | impl Default for SignalTrigger { 52 | fn default() -> Self { 53 | Self::new() 54 | } 55 | } 56 | 57 | impl Trigger for SignalTrigger { 58 | /// Starts a trigger that iterates over signals and terminates the program. 
59 | #[cfg(unix)] 60 | fn listen(&self, tx: Sender>) -> Result<(), TriggerError> { 61 | use log::warn; 62 | use signal_hook::{ 63 | consts::TERM_SIGNALS, 64 | iterator::{exfiltrator::SignalOnly, SignalsInfo}, 65 | }; 66 | let signals = SignalsInfo::::new(TERM_SIGNALS); 67 | if let Ok(mut signals) = signals { 68 | self.listen_inner(tx, &mut signals)?; 69 | } else { 70 | warn!("Failed setting up signal handler."); 71 | } 72 | 73 | Ok(()) 74 | } 75 | 76 | #[cfg(not(unix))] 77 | fn listen(&self, _tx: Sender>) -> Result<(), TriggerError> { 78 | debug!("Signal handlers are not supported on non-unix systems."); 79 | 80 | Ok(()) 81 | } 82 | } 83 | 84 | #[cfg(test)] 85 | #[cfg(unix)] 86 | mod tests { 87 | use super::*; 88 | use std::sync::mpsc; 89 | 90 | #[test] 91 | fn it_should_trigger_on_the_first_signal() { 92 | let trigger = SignalTrigger::new(); 93 | let (tx, rx) = mpsc::channel::>(); 94 | 95 | let signals = vec![9]; 96 | 97 | trigger.listen_inner(tx, signals).unwrap(); 98 | 99 | let msgs: Vec<_> = rx.iter().collect(); 100 | assert_eq!(vec![None], msgs); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /test_directories/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daniel7grant/gw/5546789d02a626cc273767f5c27fc40e7570d79d/test_directories/.keep --------------------------------------------------------------------------------