├── .github └── workflows │ └── continuous-integration.yml ├── .gitignore ├── .rpm ├── .emptydaemonaddr ├── ebbflow.spec ├── ebbflowd.service ├── empty.config.yaml └── empty.key ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── assets ├── debian │ ├── postinst │ ├── postrm │ └── prerm ├── e.ico ├── ebbbg.bmp ├── ebbbn.bmp ├── ebbflowd.service ├── empty.config.yaml ├── empty.key ├── justeblack.png └── windowswelcome.rtf ├── docker ├── Dockerfile └── entrypoint.sh ├── examples └── server.rs ├── src ├── certs.rs ├── config.rs ├── daemon │ ├── connection.rs │ ├── health.rs │ └── mod.rs ├── dns.rs ├── ebbflow.rs ├── ebbflowd.rs ├── infoserver.rs ├── lib.rs ├── messagequeue.rs ├── messaging.rs └── signal.rs ├── tests ├── basic.rs ├── certs │ ├── myCA.key │ ├── myCA.pem │ ├── test.crt │ └── test.key └── mockebb.rs └── wix └── main.wxs /.github/workflows/continuous-integration.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - master 5 | 6 | name: Continuous Integration 7 | 8 | jobs: 9 | quickcheck: 10 | runs-on: ubuntu-latest 11 | outputs: 12 | version: ${{ steps.rustversion.outputs.rustversion }} 13 | steps: 14 | - uses: actions/checkout@v2 15 | - run: cargo check 16 | - run: cargo pkgid 17 | - run: 'echo "$(cargo pkgid | cut -d# -f2)"' 18 | - id: rustversion 19 | run: 'echo "::set-output name=rustversion::$(cargo pkgid | cut -d# -f2)"' 20 | build: 21 | needs: quickcheck 22 | name: 'build-${{matrix.os}}' 23 | runs-on: ${{ matrix.os }} 24 | strategy: 25 | matrix: 26 | os: [ubuntu-latest, windows-latest, macos-latest] 27 | steps: 28 | - uses: actions/checkout@v2 29 | - name: Build 30 | run: cargo build 31 | - name: Run tests 32 | run: cargo test 33 | fedorarpmbuild: 34 | needs: [build] 35 | runs-on: ubuntu-latest 36 | name: FedoraRpm 37 | steps: 38 | - uses: actions/checkout@v2 39 | - name: BuildRpm 40 | id: buildrpm 41 | uses: ebbflow-io/cargo-rpm-amd64-fedora@1.1 42 | - name: Upload RPM 
Artifact 43 | uses: actions/upload-artifact@v2 44 | with: 45 | name: fedorarpm 46 | path: ./target/x86_64-unknown-linux-musl/release/rpmbuild/RPMS/x86_64/* 47 | opensuseleaprpmbuild: 48 | needs: [build] 49 | runs-on: ubuntu-latest 50 | name: OpensuseLeapRpm 51 | steps: 52 | - uses: actions/checkout@v2 53 | - name: BuildRpm 54 | id: buildrpm 55 | uses: ebbflow-io/cargo-rpm-amd64-opensuseleap@1.0 56 | - name: Upload RPM Artifact 57 | uses: actions/upload-artifact@v2 58 | with: 59 | name: opensuserpm 60 | path: ./target/x86_64-unknown-linux-musl/release/rpmbuild/RPMS/x86_64/* 61 | debbuild: 62 | needs: [build] 63 | runs-on: ubuntu-latest 64 | name: Amd64Deb 65 | steps: 66 | - uses: actions/checkout@v2 67 | - name: BuildDeb 68 | id: debbuild 69 | uses: ebbflow-io/cargo-deb-amd64-ubuntu@1.0 70 | - name: Upload Deb Artifact 71 | uses: actions/upload-artifact@v2 72 | with: 73 | name: amd64deb 74 | path: ./target/x86_64-unknown-linux-musl/debian/* 75 | raspbianbuild: 76 | needs: [build] 77 | runs-on: ubuntu-latest 78 | name: Armv7Deb 79 | steps: 80 | - uses: actions/checkout@v2 81 | - name: BuildDeb 82 | id: debbuild 83 | uses: ebbflow-io/cargo-deb-armv7-debian@1.0 84 | - name: Upload Deb Artifact 85 | uses: actions/upload-artifact@v2 86 | with: 87 | name: armv7deb 88 | path: ./target/armv7-unknown-linux-musleabihf/debian/* 89 | windowsbuild: 90 | needs: [build] 91 | runs-on: windows-latest 92 | env: 93 | RUSTFLAGS: '-C target-feature=+crt-static' 94 | name: Windows 95 | steps: 96 | - uses: actions/checkout@v2 97 | - uses: actions-rs/toolchain@v1 98 | with: 99 | toolchain: stable 100 | - uses: actions-rs/install@v0.1 101 | with: 102 | crate: cargo-wix 103 | version: latest 104 | - run: cargo wix -v --nocapture -o . 
105 | - name: Upload MSI Artifact 106 | uses: actions/upload-artifact@v2 107 | with: 108 | name: windows 109 | path: ./*.msi 110 | macosbuild: 111 | needs: [build, quickcheck] 112 | runs-on: macos-latest 113 | name: MacOS 114 | steps: 115 | - uses: actions/checkout@v2 116 | - name: Build 117 | run: cargo build --release 118 | - name: Zip up macos binaries 119 | run: 'zip -j ebbflow_${{needs.quickcheck.outputs.version}}_macos.zip ./target/release/ebbflowd ./target/release/ebbflow' 120 | - name: Upload Zipped Artifact 121 | uses: actions/upload-artifact@v2 122 | with: 123 | name: macos 124 | path: 'ebbflow_${{needs.quickcheck.outputs.version}}_macos.zip' 125 | - name: Upload Bin Artifact 126 | uses: actions/upload-artifact@v2 127 | with: 128 | name: macos 129 | path: ./target/release/ebbflow 130 | - name: Upload Daemon Artifact 131 | uses: actions/upload-artifact@v2 132 | with: 133 | name: macos 134 | path: ./target/release/ebbflowd 135 | amd64binaries: 136 | needs: [build, quickcheck] 137 | runs-on: ubuntu-latest 138 | name: Amd64StaticBinaries 139 | steps: 140 | - uses: actions/checkout@v2 141 | - name: StaticBinaryBuild 142 | id: amd64staticbuild 143 | uses: ebbflow-io/cargo-deb-amd64-ubuntu@1.0 144 | with: 145 | cmd: cargo build --release --target=x86_64-unknown-linux-musl 146 | - name: Upload Daemon Artifact 147 | uses: actions/upload-artifact@v2 148 | with: 149 | name: amd64binaries 150 | path: ./target/x86_64-unknown-linux-musl/release/ebbflowd 151 | - name: Upload Ebbflow Artifact 152 | uses: actions/upload-artifact@v2 153 | with: 154 | name: amd64binaries 155 | path: ./target/x86_64-unknown-linux-musl/release/ebbflow 156 | - run: 'zip -j amd64binaries.zip ./target/x86_64-unknown-linux-musl/release/ebbflow ./target/x86_64-unknown-linux-musl/release/ebbflowd' 157 | - name: Upload Zip Artifact 158 | uses: actions/upload-artifact@v2 159 | with: 160 | name: amd64binaries 161 | path: amd64binaries.zip 162 | arm7binaries: 163 | needs: [build, quickcheck] 164 | 
runs-on: ubuntu-latest 165 | name: Armv7StaticBinaries 166 | steps: 167 | - uses: actions/checkout@v2 168 | - name: StaticBinaryBuild 169 | id: armv7statibuild 170 | uses: ebbflow-io/cargo-deb-armv7-debian@1.0 171 | with: 172 | cmd: cargo build --release --target=armv7-unknown-linux-musleabihf 173 | - name: Upload Daemon Artifact 174 | uses: actions/upload-artifact@v2 175 | with: 176 | name: armv7binaries 177 | path: ./target/armv7-unknown-linux-musleabihf/release/ebbflowd 178 | - name: Upload Ebbflow Artifact 179 | uses: actions/upload-artifact@v2 180 | with: 181 | name: armv7binaries 182 | path: ./target/armv7-unknown-linux-musleabihf/release/ebbflow 183 | - run: 'zip -j armv7binaries.zip ./target/armv7-unknown-linux-musleabihf/release/ebbflow ./target/armv7-unknown-linux-musleabihf/release/ebbflowd' 184 | - name: Upload Zip Artifact 185 | uses: actions/upload-artifact@v2 186 | with: 187 | name: armv7binaries 188 | path: armv7binaries.zip 189 | linuxamd64dockerbuild: 190 | needs: [amd64binaries] 191 | runs-on: ubuntu-latest 192 | steps: 193 | - uses: actions/checkout@v2 194 | - uses: actions/download-artifact@v2 195 | - run: cp ./amd64binaries/ebbflow ./docker/ 196 | - run: chmod +x ./docker/ebbflow 197 | - name: Build and push Docker images 198 | uses: docker/build-push-action@v1 199 | with: 200 | username: ${{ secrets.DOCKER_USERNAME }} 201 | password: ${{ secrets.DOCKER_PASSWORD }} 202 | repository: ebbflow/ebbflow-client-linux-amd64 203 | tags: latest 204 | path: ./docker/ 205 | update_release_draft: 206 | needs: [quickcheck, arm7binaries, linuxamd64dockerbuild, amd64binaries, macosbuild, windowsbuild, raspbianbuild, debbuild, opensuseleaprpmbuild, fedorarpmbuild] 207 | runs-on: ubuntu-latest 208 | steps: 209 | - uses: actions/checkout@v2 210 | - name: Create Release 211 | id: create_release 212 | uses: actions/create-release@v1 213 | env: 214 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own 
token 215 | with: 216 | tag_name: '${{ needs.quickcheck.outputs.version }}' 217 | release_name: Release ${{ needs.quickcheck.outputs.version }} 218 | body: 'Change Me' 219 | draft: true 220 | prerelease: false 221 | - uses: actions/download-artifact@v2 222 | - run: ls -lha 223 | - name: Upload amd64 deb Release Asset 224 | uses: actions/upload-release-asset@v1 225 | env: 226 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 227 | with: 228 | upload_url: ${{ steps.create_release.outputs.upload_url }} 229 | asset_path: ./amd64deb/ebbflow_${{needs.quickcheck.outputs.version}}_amd64.deb 230 | asset_name: 'ebbflow_${{needs.quickcheck.outputs.version}}_amd64.deb' 231 | asset_content_type: application/vnd.debian.binary-package 232 | - name: Upload armv7 deb Release Asset 233 | uses: actions/upload-release-asset@v1 234 | env: 235 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 236 | with: 237 | upload_url: ${{ steps.create_release.outputs.upload_url }} 238 | asset_path: ./armv7deb/ebbflow_${{needs.quickcheck.outputs.version}}_armhf.deb 239 | asset_name: 'ebbflow_${{needs.quickcheck.outputs.version}}_armhf.deb' 240 | asset_content_type: application/vnd.debian.binary-package 241 | - name: Upload amd64 rpm fedora Release Asset 242 | uses: actions/upload-release-asset@v1 243 | env: 244 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 245 | with: 246 | upload_url: ${{ steps.create_release.outputs.upload_url }} 247 | asset_path: ./fedorarpm/ebbflow-${{needs.quickcheck.outputs.version}}-1.x86_64.rpm 248 | asset_name: 'ebbflow_${{needs.quickcheck.outputs.version}}-1_amd64_fedora.rpm' 249 | asset_content_type: application/octet-stream 250 | - name: Upload amd64 rpm opensuse Release Asset 251 | uses: actions/upload-release-asset@v1 252 | env: 253 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 254 | with: 255 | upload_url: ${{ steps.create_release.outputs.upload_url }} 256 | asset_path: ./opensuserpm/ebbflow-${{needs.quickcheck.outputs.version}}-1.x86_64.rpm 257 | asset_name: 
'ebbflow_${{needs.quickcheck.outputs.version}}-1_amd64_opensuseleap.rpm' 258 | asset_content_type: application/octet-stream 259 | - name: Upload Windows Release Asset 260 | uses: actions/upload-release-asset@v1 261 | env: 262 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 263 | with: 264 | upload_url: ${{ steps.create_release.outputs.upload_url }} 265 | asset_path: ./windows/ebbflow-${{needs.quickcheck.outputs.version}}-x86_64.msi 266 | asset_name: 'ebbflow_${{needs.quickcheck.outputs.version}}.msi' 267 | asset_content_type: application/octet-stream 268 | - name: Upload Macos Release Asset 269 | uses: actions/upload-release-asset@v1 270 | env: 271 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 272 | with: 273 | upload_url: ${{ steps.create_release.outputs.upload_url }} 274 | asset_path: ./macos/ebbflow_${{needs.quickcheck.outputs.version}}_macos.zip 275 | asset_name: 'ebbflow_${{needs.quickcheck.outputs.version}}_macos.zip' 276 | asset_content_type: application/zip 277 | - name: Upload Armv7 Static Binary Zip Release Asset 278 | uses: actions/upload-release-asset@v1 279 | env: 280 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 281 | with: 282 | upload_url: ${{ steps.create_release.outputs.upload_url }} 283 | asset_path: ./armv7binaries/armv7binaries.zip 284 | asset_name: 'ebbflow_${{needs.quickcheck.outputs.version}}_linux_armv7_binaries.zip' 285 | asset_content_type: application/zip 286 | - name: Upload Amd64 Static Binary Zip Release Asset 287 | uses: actions/upload-release-asset@v1 288 | env: 289 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 290 | with: 291 | upload_url: ${{ steps.create_release.outputs.upload_url }} 292 | asset_path: ./amd64binaries/amd64binaries.zip 293 | asset_name: 'ebbflow_${{needs.quickcheck.outputs.version}}_linux_amd64_binaries.zip' 294 | asset_content_type: application/zip 295 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | *.swp 4 | -------------------------------------------------------------------------------- /.rpm/.emptydaemonaddr: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ebbflow-io/ebbflow/0f1bfc988bedba7dee93e90e523cdacf1fb13ccb/.rpm/.emptydaemonaddr -------------------------------------------------------------------------------- /.rpm/ebbflow.spec: -------------------------------------------------------------------------------- 1 | %define __spec_install_post %{nil} 2 | %define __os_install_post %{_dbpath}/brp-compress 3 | %define debug_package %{nil} 4 | 5 | Name: ebbflow 6 | Summary: The on-host executable client for interacting with Ebbflow.io 7 | Version: @@VERSION@@ 8 | Release: @@RELEASE@@ 9 | License: University of Illinois/NCSA Open Source License Copyright (c) All rights reserved. 10 | Group: System Environment/Daemons 11 | Source0: %{name}-%{version}.tar.gz 12 | URL: https://ebbflow.io 13 | 14 | BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root 15 | BuildRequires: systemd 16 | 17 | Requires(post): systemd 18 | Requires(preun): systemd 19 | Requires(postun): systemd 20 | 21 | %description 22 | %{summary} 23 | 24 | %prep 25 | %setup -q 26 | 27 | %install 28 | rm -rf %{buildroot} 29 | mkdir -p %{buildroot} 30 | cp -a * %{buildroot} 31 | 32 | %clean 33 | rm -rf %{buildroot} 34 | 35 | %systemd_post ebbflowd.service 36 | 37 | %preun 38 | %systemd_preun ebbflowd.service 39 | 40 | %postun 41 | %systemd_postun_with_restart ebbflowd.service 42 | 43 | %files 44 | %defattr(-,root,root,-) 45 | %{_bindir}/* 46 | %{_sbindir}/* 47 | %{_unitdir}/ebbflowd.service 48 | %attr(0644,root,root) %config(noreplace) /etc/ebbflow/config.yaml 49 | %attr(0644,root,root) %config(noreplace) /etc/ebbflow/.daemonaddr 50 | %attr(0600,root,root) %config(noreplace) /etc/ebbflow/host.key 51 | 
-------------------------------------------------------------------------------- /.rpm/ebbflowd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Ebbflow Client Service 3 | After=network.target 4 | StartLimitIntervalSec=0 5 | 6 | [Service] 7 | ExecStart=/usr/sbin/ebbflowd 8 | Restart=always 9 | RestartSec=1 10 | Type=simple 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /.rpm/empty.config.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ebbflow-io/ebbflow/0f1bfc988bedba7dee93e90e523cdacf1fb13ccb/.rpm/empty.config.yaml -------------------------------------------------------------------------------- /.rpm/empty.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ebbflow-io/ebbflow/0f1bfc988bedba7dee93e90e523cdacf1fb13ccb/.rpm/empty.key -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ebbflow" 3 | version = "1.1.0" 4 | authors = ["Ryan Gorup "] 5 | edition = "2018" 6 | readme = "README.md" 7 | keywords = ["ssh", "proxy", "loadbalancer", "tcp", "ebbflow"] 8 | maintenance = { status = "actively-developed" } 9 | license-file = "LICENSE" 10 | homepage = "https://ebbflow.io" 11 | description = "The on-host executable client for interacting with Ebbflow.io" 12 | categories = ["network-programming", "command-line-utilities", "web-programming"] 13 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 14 | 15 | [[bin]] 16 | name = "ebbflow" 17 | path = "src/ebbflow.rs" 18 | 19 | [[bin]] 20 | name = "ebbflowd" 21 | path = "src/ebbflowd.rs" 22 | 23 | [dependencies] 24 | bitflags = "1.2" 25 | 
chrono = "0.4" 26 | clap = "3.0.0-beta.1" 27 | ebbflow-api = { git = "https://github.com/ebbflow-io/ebbflow-api" } 28 | env_logger = "0.7.1" 29 | futures = "0.3.5" 30 | hostname = "0.3" 31 | hyper = "0.13" 32 | hyper-rustls = "0.21.0" 33 | lazy_static = "1.4" 34 | log = "0.4.11" 35 | notify = "5.0.0-pre.2" 36 | parking_lot = "0.11" 37 | rand = { version = "0.7.3", features = ["small_rng"] } 38 | regex = "1.3" 39 | reqwest = { version = "0.10", features = ["json", "rustls-tls"], default-features = false } 40 | rustls = "0.18" 41 | serde = { version = "1.0", features = ["derive"] } 42 | serde_cbor = "0.11.1" 43 | serde_json = "1.0" 44 | serde_yaml = "0.8" 45 | tokio = { version = "0.2", features = ["full"] } 46 | tokio-rustls = "0.14" 47 | trust-dns-resolver = "0.19.5" 48 | webpki = "0.21" 49 | 50 | [target.'cfg(windows)'.dependencies] 51 | windows-service = { git = "https://github.com/mullvad/windows-service-rs" } 52 | winlog = { version = "0.2.6", features = ["env_logger"] } 53 | 54 | [dev-dependencies] 55 | hyper = "0.13" 56 | hostname = "0.3" 57 | 58 | ### 59 | ### .deb package related! 60 | ### 61 | [package.metadata.deb] 62 | name = "ebbflow" 63 | extended-description = """\ 64 | A command line utility for proxying data to Ebbflow (see https://ebbflow.io).""" 65 | priority = "optional" 66 | section = "net" 67 | depends = "$auto" 68 | conf-files = ["/etc/ebbflow/config.yaml", "/etc/ebbflow/host.key", "/etc/ebbflow/.daemonaddr"] 69 | maintainer-scripts="assets/debian" 70 | assets = [ 71 | ["assets/ebbflowd.service", "/lib/systemd/system/ebbflowd.service", "644"], 72 | ["assets/empty.config.yaml", "/etc/ebbflow/config.yaml", "644"], 73 | ["assets/empty.key", "/etc/ebbflow/host.key", "600"], 74 | ["target/release/ebbflow", "/usr/bin/", "755"], 75 | ["target/release/ebbflowd", "/usr/sbin/", "755"] 76 | ] 77 | 78 | ### 79 | ### RPM Build related! 
80 | ### 81 | [package.metadata.rpm.cargo] 82 | buildflags = ["--release"] 83 | target = "x86_64-unknown-linux-musl" 84 | 85 | [package.metadata.rpm.targets] 86 | ebbflow = { path = "/usr/bin/ebbflow" } 87 | ebbflowd = { path = "/usr/sbin/ebbflowd" } 88 | 89 | [package.metadata.rpm.files] 90 | "ebbflowd.service" = { path = "/usr/lib/systemd/system/ebbflowd.service", mode = "644" } 91 | "empty.config.yaml" = { path = "/etc/ebbflow/config.yaml", mode = "644" } 92 | "empty.key" = { path = "/etc/ebbflow/host.key", mode = "600" } 93 | ".emptydaemonaddr" = { path = "/etc/ebbflow/.daemonaddr", mode = "644" } 94 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | University of Illinois/NCSA Open Source License Copyright (c) All rights reserved. 2 | 3 | Developed by: Ryan Gorup. Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated documentation files 5 | (the "Software"), to deal with the Software without restriction, including without 6 | limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | and/or sell copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | Redistributions of source code must retain the above copyright notice, this list 11 | of conditions and the following disclaimers. Redistributions in binary form must 12 | reproduce the above copyright notice, this list of conditions and the following 13 | disclaimers in the documentation and/or other materials provided with the 14 | distribution. Neither the names of Ryan Gorup, nor the names of its contributors 15 | may be used to endorse or promote products derived from this Software without 16 | specific prior written permission. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT 17 | WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 18 | WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE 20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF 21 | CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 22 | SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ebbflow Client 2 | This is the end-host client for [ebbflow](https://ebbflow.io). This is used to proxy SSH or TCP connections between ebbflow and your local server. It typically runs as a daemon which is initiated during the install process, but can also be ran directly which suits containers. 3 | 4 | Full documentation can be found on the Ebbflow website: [Client Documentation](https://ebbflow.io/documentation#client). 5 | 6 | ``` 7 | ebbflow --help 8 | ``` 9 | 10 | ![Continuous Integration](https://github.com/ebbflow-io/ebbflow/workflows/Continuous%20Integration/badge.svg) ![Docker Image Size (tag)](https://img.shields.io/docker/image-size/ebbflow/ebbflow-client-linux-amd64/latest) 11 | 12 | ## Downloading, Updating, and Removing 13 | 14 | Please visit Ebbflow's [client documentation](https://ebbflow.io/documentation#client) for up to date instructions on installing and managing the client: 15 | 16 | - Docs: https://ebbflow.io/documentation#client 17 | - RaspberryPi Guide: https://ebbflow.io/guides/raspberrypi 18 | 19 | ## Building & Testing 20 | 21 | The client is built, tested, and packaged using the github action workflow configured in `.github/workflows`. When a release is expected, the released artifacts are downloaded to Ebbflow package servers and hosted through https://pkg.ebbflow.io. 
22 | 23 | As of now, testing is largely manual. The client is tested on various OSs & architectures before being released and vended. In the future, much of this testing could be completed in additional github workflow actions, but that is TBD. 24 | 25 | To build the client locally, you can simply fork/clone/download the repo and run `cargo build`, then continuing to execute the binaries manually. To execute with elevated privileges on linux/macos, run `sudo ./target/debug/ebbflow` or `sudo ./target/debug/ebbflowd`. 26 | 27 | To build the various packages, you need the following tools. 28 | 29 | ### Linux 30 | - Install `cargo-deb`: https://crates.io/crates/cargo-deb (Only works on debian based OSs, ubuntu, debian..) 31 | - Building: `cargo deb` 32 | - Install `cargo-rpm`: https://crates.io/crates/cargo-rpm (Only works on rpm-based architectures, fedora, opensuse, ..) 33 | - `cargo rpm build` 34 | 35 | ### MacOS 36 | - Zip `ebbflow` and `ebbflowd`, just those 2 files 37 | - Homebrew is used to distribute packages, see https://github.com/ebbflow-io/homebrew-ebbflow/ 38 | 39 | ### Windows 40 | - To build `cargo-wix`: https://crates.io/crates/cargo-wix (Only works on windows) 41 | - `cargo wix` 42 | - Chocolatey is used to distribute packages, see https://github.com/ebbflow-io/chocolatey-ebbflow 43 | - Winget is also used to distribute packages, `winget search ebbflow` 44 | 45 | ### Other Debian or RPM based 46 | - Feel free to contact ebbflow and we can work to get packages published for you! 47 | 48 | ### Building from source, *nix Based (Arch, Nix) 49 | - The simplest method is to just grab the 'ebbflow' binary, and run with `run-blocking`. This has no file-based dependencies if you pass the host key to during the command like so: `EBB_KEY=... ebbflow run-blocking --dns example.com --port 8000`. This is fully supported. 50 | - Alternatively, you can run the daemon, but it does have file/directory dependencies, and you need to configure that. 
**Note** that this is uncharted waters, and its possible that the following instructions can change; 51 | - You will need to run the `ebbflowd` binary as a background service, as in `systemd` or similar. 52 | - The client uses the `/etc/ebbflow/` dir to store 3 files 53 | - `host.key` the private host key, should be `600` permissions, should (ideally) not be transferred between hosts, and should NEVER be entered into source control (i'm looking at you, nix config) 54 | - `config.yaml` the configuration file for the daemon. This file is read from to determine how the daemon should run, what endpoints exist, etc. This file is written to when endpoints are disabled or enabled, the log level changes, and a few other commands, via `ebbflow config ..`. So it is possible to have this file static, but I wouldn't recommend it. 55 | - `.daemonaddr` is a file the daemon writes its ipv4 socket addr to for its HTTP server. Running `ebbflow status` reads this file, then makes an http request for the status. The daemon will work fine, proxy-wise, if this file cannot be written or read from, it will just slowly loop on attempts to write it. 56 | - Now, you can change the root directory of which ebbflow looks for these three files by setting the `EBB_CFG_DIR` env var. The daemon will look at this first, and fall back to `/etc/ebbflow`. So you can provide this to another directory and ebbflow will look for the three files in this directory instead. 57 | - **IMPORTANT** If you change the directory, all invocations of the `ebbflow` command should have this env var passed to it as well! `ebbflow` and `ebbflowd` communicate via these files, so if `ebbflowd` is looking in `/your/custom/dir/` and `ebbflow` is looking in `/etc/ebbflow/`, then there will be issues.. 58 | - If you package up ebbflow for another distro like nix or arch or whatever, and you want Ebbflow to take over ownership of your work, then email us at `support at ebbflow.io`. 
59 | 60 | ## Contributing 61 | 62 | Contributions are welcome! Submit a pull request and we can go from there. 63 | 64 | ## License 65 | 66 | See LICENSE 67 | -------------------------------------------------------------------------------- /assets/debian/postinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # This will only remove masks created by d-s-h on package removal. 5 | deb-systemd-helper unmask ebbflowd.service > /dev/null || true 6 | 7 | # was-enabled defaults to true, so new installations run enable. 8 | if deb-systemd-helper --quiet was-enabled ebbflowd.service 9 | then 10 | # Enables the unit on first installation, creates new 11 | # symlinks on upgrades if the unit file has changed. 12 | deb-systemd-helper enable ebbflowd.service > /dev/null || true 13 | deb-systemd-invoke start ebbflowd 14 | else 15 | # Update the statefile to add new symlinks (if any), which need to be 16 | # cleaned up on purge. Also remove old symlinks. 17 | deb-systemd-helper update-state ebbflowd.service > /dev/null || true 18 | fi -------------------------------------------------------------------------------- /assets/debian/postrm: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # In case this system is running systemd, we make systemd reload the unit files 5 | # to pick up changes. 
6 | if [ -d /run/systemd/system ] ; then 7 | systemctl --system daemon-reload >/dev/null || true 8 | fi 9 | 10 | if [ "$1" = "purge" ] || [ "$1" = "remove" ]; then 11 | if [ -x "/usr/bin/deb-systemd-helper" ]; then 12 | deb-systemd-helper purge ebbflowd.service >/dev/null || true 13 | deb-systemd-helper unmask ebbflowd.service >/dev/null 14 | fi 15 | fi 16 | 17 | rm -f /etc/ebbflow/.daemonaddr -------------------------------------------------------------------------------- /assets/debian/prerm: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | deb-systemd-invoke stop ebbflowd -------------------------------------------------------------------------------- /assets/e.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ebbflow-io/ebbflow/0f1bfc988bedba7dee93e90e523cdacf1fb13ccb/assets/e.ico -------------------------------------------------------------------------------- /assets/ebbbg.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ebbflow-io/ebbflow/0f1bfc988bedba7dee93e90e523cdacf1fb13ccb/assets/ebbbg.bmp -------------------------------------------------------------------------------- /assets/ebbbn.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ebbflow-io/ebbflow/0f1bfc988bedba7dee93e90e523cdacf1fb13ccb/assets/ebbbn.bmp -------------------------------------------------------------------------------- /assets/ebbflowd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Ebbflow Client Service 3 | After=network.target 4 | StartLimitIntervalSec=0 5 | 6 | [Service] 7 | ExecStart=/usr/sbin/ebbflowd 8 | Restart=always 9 | RestartSec=1 10 | Type=simple 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | 
-------------------------------------------------------------------------------- /assets/empty.config.yaml: -------------------------------------------------------------------------------- 1 | --- -------------------------------------------------------------------------------- /assets/empty.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ebbflow-io/ebbflow/0f1bfc988bedba7dee93e90e523cdacf1fb13ccb/assets/empty.key -------------------------------------------------------------------------------- /assets/justeblack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ebbflow-io/ebbflow/0f1bfc988bedba7dee93e90e523cdacf1fb13ccb/assets/justeblack.png -------------------------------------------------------------------------------- /assets/windowswelcome.rtf: -------------------------------------------------------------------------------- 1 | {\rtf1\ansi\deff0\nouicompat{\fonttbl{\f0\fnil\fcharset0 Courier New;}} 2 | {\colortbl ;\red0\green0\blue255;} 3 | {\*\generator Riched20 10.0.19041}\viewkind4\uc1 4 | \pard\f0\fs22\lang1033 Welcome to Ebbflow. This installer will create a Windows Service which runs in the background. More information can be found in the documentation online [1]. 
Also, please see the Terms, aka License Agreement found on Ebbflow's website [2].\par 5 | \par 6 | [1] {{\field{\*\fldinst{HYPERLINK https://ebbflow.io/documentation#client }}{\fldrslt{https://ebbflow.io/documentation#client\ul0\cf0}}}}\f0\fs22\par 7 | [2] {{\field{\*\fldinst{HYPERLINK https://ebbflow.io/terms }}{\fldrslt{https://ebbflow.io/terms\ul0\cf0}}}}\f0\fs22\par 8 | } 9 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.12.0 2 | 3 | COPY ./ebbflow / 4 | COPY ./entrypoint.sh / 5 | 6 | ENTRYPOINT [ "/entrypoint.sh" ] 7 | CMD ["run-blocking", "--help"] 8 | -------------------------------------------------------------------------------- /docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -euo pipefail 3 | 4 | /ebbflow $@ -------------------------------------------------------------------------------- /examples/server.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate env_logger; 4 | 5 | use hyper::service::{make_service_fn, service_fn}; 6 | use hyper::{Body, Error, Response, Server}; 7 | use log::LevelFilter; 8 | 9 | #[tokio::main] 10 | async fn main() { 11 | env_logger::builder().filter_level(LevelFilter::Info).init(); 12 | 13 | // Construct our SocketAddr to listen on... 14 | let addr = ([127, 0, 0, 1], 8080).into(); 15 | 16 | // And a MakeService to handle each connection... 17 | let make_service = make_service_fn(|_| async { 18 | Ok::<_, Error>(service_fn(|_req| async { 19 | info!("Received request, returning response"); 20 | Ok::<_, Error>(Response::new(Body::from(format!( 21 | "Hello, World! Hostname: {:?}\n", 22 | hostname::get().unwrap() 23 | )))) 24 | })) 25 | }); 26 | 27 | // Then bind and serve... 
28 | let server = Server::bind(&addr).serve(make_service); 29 | 30 | // Finally, spawn `server` onto an Executor... 31 | info!("Spawning server on 8080.."); 32 | if let Err(e) = server.await { 33 | error!("server error: {}", e); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/certs.rs: -------------------------------------------------------------------------------- 1 | //! Static refs to LE CA certs https://letsencrypt.org/certificates/ 2 | 3 | // Providing the IA certs is a workaround the fact that the 4 | // server does not provide IA certs. If it did, we could just verify 5 | // the roots which are few and very slow to change 6 | 7 | use rustls::RootCertStore; 8 | 9 | const LE_IA_X3: &str = r#" 10 | -----BEGIN CERTIFICATE----- 11 | MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/ 12 | MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT 13 | DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow 14 | SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT 15 | GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC 16 | AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF 17 | q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8 18 | SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0 19 | Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA 20 | a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj 21 | /PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T 22 | AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG 23 | CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv 24 | bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k 25 | c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw 26 | VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC 27 | ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz 28 | 
MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu 29 | Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF 30 | AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo 31 | uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/ 32 | wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu 33 | X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG 34 | PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6 35 | KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg== 36 | -----END CERTIFICATE----- 37 | "#; 38 | 39 | /// This is their Backup, and maybe their 'next' IA CA cert 40 | const LE_IA_X4: &str = r#" 41 | -----BEGIN CERTIFICATE----- 42 | MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc6bLEeMfizANBgkqhkiG9w0BAQsFADA/ 43 | MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT 44 | DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDEwMloXDTIxMDMxNzE2NDEwMlow 45 | SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT 46 | GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFg0MIIBIjANBgkqhkiG9w0BAQEFAAOC 47 | AQ8AMIIBCgKCAQEA4SR0Qnu3kTHZc/84qtjORFy3OQrcRK4NvUW5lzdnr71QT1/T 48 | EFRr90HajmPmbVvA6ECpjEH80QOJ/2JhCWDWBwV4mpC9GmQ+T9zPdy+Ja8tnr0FN 49 | xY0AwGv+jYTctfKVMajo9pCgQ0qTdFyzPkNpS4kiR3RRPplkw80kAfmELyh3FyKn 50 | 3cNsCExmLzd0xW+TjrBGNxZh0VCYyLAPT1hTfKz22i2WYVCtQ9wKpk+etVK5nI7v 51 | Tt9GszHcIPxpwqMgdT7sOBs2TmZm0t/1ZqSTL3umDpQ+YD1KSxxvurRNHDyRWG4v 52 | TcTacNvtATl2wEnn6TW1FAaQweWS4hD9a7m0hQIDAQABo4IBfTCCAXkwEgYDVR0T 53 | AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG 54 | CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv 55 | bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k 56 | c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw 57 | VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC 58 | ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz 59 | MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu 60 | 
Y3JsMB0GA1UdDgQWBBTFsatOTLHNZDCTfsGEmQWr5gPiJTANBgkqhkiG9w0BAQsF 61 | AAOCAQEANlaeSdstfAtqFN3jdRZJFjx9X+Ob3PIDlekPYQ1OQ1Uw43rE1FUj7hUw 62 | g2MJKfs9b7M0WoQg7C20nJY/ajsg7pWhUG3J6rlkDTfVY9faeWi0qsPYXE6BpBDr 63 | 5BrW/Xv8yT8U2BiEAmNggWq8dmFl82fghmLzHBM8X8NZ3ZwA1fGePA53AP5IoD+0 64 | ArpW8Ik1sSuQBjZ8oQLfN+G8OoY7MNRopyLyQQCNy4aWfE+xYnoVoa5+yr+aPiX0 65 | 7YQrY/cKawAn7QB4PyF5//IKSAVs7mAuB68wbMdE3FKfOHfJ24W4z/bIJTrTY8Y5 66 | Sr4AUhtzf8oVDrHZYWRrP4joIcOu/Q== 67 | -----END CERTIFICATE----- 68 | "#; 69 | 70 | const LE_IA_R3: &str = r#" 71 | -----BEGIN CERTIFICATE----- 72 | MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw 73 | TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh 74 | cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw 75 | WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg 76 | RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK 77 | AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP 78 | R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx 79 | sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm 80 | NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg 81 | Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG 82 | /kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC 83 | AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB 84 | Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA 85 | FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw 86 | AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw 87 | Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB 88 | gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W 89 | PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl 90 | ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz 91 | CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm 92 | 
lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4 93 | avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2 94 | yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O 95 | yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids 96 | hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+ 97 | HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv 98 | MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX 99 | nLRbwHOoq7hHwg== 100 | -----END CERTIFICATE----- 101 | "#; 102 | 103 | const LE_IA_R4: &str = r#" 104 | -----BEGIN CERTIFICATE----- 105 | MIIFFjCCAv6gAwIBAgIRAIp5IlCr5SxSbO7Pf8lC3WIwDQYJKoZIhvcNAQELBQAw 106 | TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh 107 | cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw 108 | WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg 109 | RW5jcnlwdDELMAkGA1UEAxMCUjQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK 110 | AoIBAQCzKNx3KdPnkb7ztwoAx/vyVQslImNTNq/pCCDfDa8oPs3Gq1e2naQlGaXS 111 | Mm1Jpgi5xy+hm5PFIEBrhDEgoo4wYCVg79kaiT8faXGy2uo/c0HEkG9m/X2eWNh3 112 | z81ZdUTJoQp7nz8bDjpmb7Z1z4vLr53AcMX/0oIKr13N4uichZSk5gA16H5OOYHH 113 | IYlgd+odlvKLg3tHxG0ywFJ+Ix5FtXHuo+8XwgOpk4nd9Z/buvHa4H6Xh3GBHhqC 114 | VuQ+fBiiCOUWX6j6qOBIUU0YFKAMo+W2yrO1VRJrcsdafzuM+efZ0Y4STTMzAyrx 115 | E+FCPMIuWWAubeAHRzNl39Jnyk2FAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC 116 | AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB 117 | Af8CAQAwHQYDVR0OBBYEFDadPuCxQPYnLHy/jZ0xivZUpkYmMB8GA1UdIwQYMBaA 118 | FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw 119 | AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw 120 | Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB 121 | gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCJbu5CalWO+H+Az0lmIG14DXmlYHQE 122 | k26umjuCyioWs2icOlZznPTcZvbfq02YPHGTCu3ctggVDULJ+fwOxKekzIqeyLNk 123 | p8dyFwSAr23DYBIVeXDpxHhShvv0MLJzqqDFBTHYe1X5X2Y7oogy+UDJxV2N24/g 124 | 
Z8lxG4Vr2/VEfUOrw4Tosl5Z+1uzOdvTyBcxD/E5rGgTLczmulctHy3IMTmdTFr0 125 | FnU0/HMQoquWQuODhFqzMqNcsdbjANUBwOEQrKI8Sy6+b84kHP7PtO+S4Ik8R2k7 126 | ZeMlE1JmxBi/PZU860YlwT8/qOYToCHVyDjhv8qutbf2QnUl3SV86th2I1QQE14s 127 | 0y7CdAHcHkw3sAEeYGkwCA74MO+VFtnYbf9B2JBOhyyWb5087rGzitu5MTAW41X9 128 | DwTeXEg+a24tAeht+Y1MionHUwa4j7FB/trN3Fnb/r90+4P66ZETVIEcjseUSMHO 129 | w6yqv10/H/dw/8r2EDUincBBX3o9DL3SadqragkKy96HtMiLcqMMGAPm0gti1b6f 130 | bnvOdr0mrIVIKX5nzOeGZORaYLoSD4C8qvFT7U+Um6DMo36cVDNsPmkF575/s3C2 131 | CxGiCPQqVxPgfNSh+2CPd2Xv04lNeuw6gG89DlOhHuoFKRlmPnom+gwqhz3ZXMfz 132 | TfmvjrBokzCICA== 133 | -----END CERTIFICATE----- 134 | "#; 135 | 136 | const LE_IA_E1: &str = r#" 137 | -----BEGIN CERTIFICATE----- 138 | MIICxjCCAk2gAwIBAgIRALO93/inhFu86QOgQTWzSkUwCgYIKoZIzj0EAwMwTzEL 139 | MAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2VhcmNo 140 | IEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDIwHhcNMjAwOTA0MDAwMDAwWhcN 141 | MjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3MgRW5j 142 | cnlwdDELMAkGA1UEAxMCRTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQkXC2iKv0c 143 | S6Zdl3MnMayyoGli72XoprDwrEuf/xwLcA/TmC9N/A8AmzfwdAVXMpcuBe8qQyWj 144 | +240JxP2T35p0wKZXuskR5LBJJvmsSGPwSSB/GjMH2m6WPUZIvd0xhajggEIMIIB 145 | BDAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMB 146 | MBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0OBBYEFFrz7Sv8NsI3eblSMOpUb89V 147 | yy6sMB8GA1UdIwQYMBaAFHxClq7eS0g7+pL4nozPbYupcjeVMDIGCCsGAQUFBwEB 148 | BCYwJDAiBggrBgEFBQcwAoYWaHR0cDovL3gyLmkubGVuY3Iub3JnLzAnBgNVHR8E 149 | IDAeMBygGqAYhhZodHRwOi8veDIuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYG 150 | Z4EMAQIBMA0GCysGAQQBgt8TAQEBMAoGCCqGSM49BAMDA2cAMGQCMHt01VITjWH+ 151 | Dbo/AwCd89eYhNlXLr3pD5xcSAQh8suzYHKOl9YST8pE9kLJ03uGqQIwWrGxtO3q 152 | YJkgsTgDyj2gJrjubi1K9sZmHzOa25JK1fUpE8ZwYii6I4zPPS/Lgul/ 153 | -----END CERTIFICATE----- 154 | "#; 155 | 156 | const LE_IA_E2: &str = r#" 157 | -----BEGIN CERTIFICATE----- 158 | MIICxjCCAkygAwIBAgIQTtI99q9+x/mwxHJv+VEqdzAKBggqhkjOPQQDAzBPMQsw 159 | 
CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg 160 | R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw0y 161 | NTA5MTUxNjAwMDBaMDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNy 162 | eXB0MQswCQYDVQQDEwJFMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABCOaLO3lixmN 163 | YVWex+ZVYOiTLgi0SgNWtU4hufk50VU4Zp/LbBVDxCsnsI7vuf4xp4Cu+ETNggGE 164 | yBqJ3j8iUwe5Yt/qfSrRf1/D5R58duaJ+IvLRXeASRqEL+VkDXrW3qOCAQgwggEE 165 | MA4GA1UdDwEB/wQEAwIBhjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEw 166 | EgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUbZkq9U0C6+MRwWC6km+NPS7x 167 | 6kQwHwYDVR0jBBgwFoAUfEKWrt5LSDv6kviejM9ti6lyN5UwMgYIKwYBBQUHAQEE 168 | JjAkMCIGCCsGAQUFBzAChhZodHRwOi8veDIuaS5sZW5jci5vcmcvMCcGA1UdHwQg 169 | MB4wHKAaoBiGFmh0dHA6Ly94Mi5jLmxlbmNyLm9yZy8wIgYDVR0gBBswGTAIBgZn 170 | gQwBAgEwDQYLKwYBBAGC3xMBAQEwCgYIKoZIzj0EAwMDaAAwZQIxAPJCN9qpyDmZ 171 | tX8K3m8UYQvK51BrXclM6WfrdeZlUBKyhTXUmFAtJw4X6A0x9mQFPAIwJa/No+KQ 172 | UAM1u34E36neL/Zba7ombkIOchSgx1iVxzqtFWGddgoG+tppRPWhuhhn 173 | -----END CERTIFICATE----- 174 | "#; 175 | 176 | const LE_ROOT: &str = r#" 177 | -----BEGIN CERTIFICATE----- 178 | MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw 179 | TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh 180 | cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 181 | WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu 182 | ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY 183 | MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc 184 | h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ 185 | 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U 186 | A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW 187 | T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH 188 | B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC 189 | B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv 190 | 
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn 191 | OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn 192 | jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw 193 | qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI 194 | rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV 195 | HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq 196 | hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL 197 | ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ 198 | 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK 199 | NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 200 | ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur 201 | TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC 202 | jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc 203 | oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq 204 | 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA 205 | mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d 206 | emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= 207 | -----END CERTIFICATE----- 208 | "#; 209 | 210 | const LE_ROOT_X2: &str = r#" 211 | -----BEGIN CERTIFICATE----- 212 | MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw 213 | CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg 214 | R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 215 | MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT 216 | ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw 217 | EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW 218 | +1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 219 | ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T 220 | AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI 221 | 
zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW 222 | tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 223 | /q4AaOeMSQ+2b1tbFfLn 224 | -----END CERTIFICATE----- 225 | "#; 226 | 227 | lazy_static! { 228 | pub static ref ROOTS: RootCertStore = { 229 | let mut roots = RootCertStore::empty(); 230 | roots.add_pem_file(&mut &LE_IA_X3.as_bytes()[..]).unwrap(); 231 | roots.add_pem_file(&mut &LE_IA_X4.as_bytes()[..]).unwrap(); 232 | roots.add_pem_file(&mut &LE_IA_R3.as_bytes()[..]).unwrap(); 233 | roots.add_pem_file(&mut &LE_IA_R4.as_bytes()[..]).unwrap(); 234 | roots.add_pem_file(&mut &LE_IA_E1.as_bytes()[..]).unwrap(); 235 | roots.add_pem_file(&mut &LE_IA_E2.as_bytes()[..]).unwrap(); 236 | roots.add_pem_file(&mut &LE_ROOT.as_bytes()[..]).unwrap(); 237 | roots.add_pem_file(&mut &LE_ROOT_X2.as_bytes()[..]).unwrap(); 238 | roots 239 | }; 240 | } 241 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use std::str::FromStr; 3 | use std::{fs::OpenOptions, net::SocketAddrV4}; 4 | use tokio::fs; 5 | use tokio::fs::OpenOptions as TokioOpenOptions; 6 | use tokio::io::Error as IoError; 7 | use tokio::io::ErrorKind; 8 | 9 | // Path to the Config file, see EbbflowDaemonConfig in the config module. 10 | #[cfg(target_os = "linux")] 11 | lazy_static! { 12 | pub static ref CONFIG_PATH: String = { 13 | match std::env::var("EBB_CFG_DIR").ok() { 14 | Some(p) => p.trim_end_matches('/').to_string(), 15 | None => "/etc/ebbflow".to_string(), 16 | } 17 | }; 18 | } 19 | #[cfg(target_os = "macos")] 20 | pub const CONFIG_PATH: &str = "/usr/local/etc/ebbflow"; 21 | #[cfg(windows)] 22 | lazy_static! 
{ 23 | pub static ref CONFIG_PATH: String = { "\\Program Files\\ebbflow".to_string() }; 24 | } 25 | 26 | pub fn config_path_root() -> String { 27 | CONFIG_PATH.to_string() 28 | } 29 | 30 | #[cfg(windows)] 31 | pub fn config_file_full() -> String { 32 | format!("{}\\{}", config_path_root(), CONFIG_FILE) 33 | } 34 | 35 | #[cfg(not(windows))] 36 | pub fn config_file_full() -> String { 37 | format!("{}/{}", config_path_root(), CONFIG_FILE) 38 | } 39 | 40 | #[cfg(windows)] 41 | pub fn key_file_full() -> String { 42 | format!("{}\\{}", config_path_root(), KEY_FILE) 43 | } 44 | 45 | #[cfg(not(windows))] 46 | pub fn key_file_full() -> String { 47 | format!("{}/{}", config_path_root(), KEY_FILE) 48 | } 49 | 50 | #[cfg(windows)] 51 | pub fn addr_file_full() -> String { 52 | format!("{}\\{}", config_path_root(), ADDR_FILE) 53 | } 54 | 55 | #[cfg(not(windows))] 56 | pub fn addr_file_full() -> String { 57 | format!("{}/{}", config_path_root(), ADDR_FILE) 58 | } 59 | 60 | pub async fn write_addr(addr: &str) -> Result<(), ConfigError> { 61 | Ok(fs::write(addr_file_full(), addr.trim()).await?) 62 | } 63 | 64 | pub async fn read_addr() -> Result { 65 | let s = fs::read_to_string(addr_file_full()).await?; 66 | if s.is_empty() { 67 | return Err(ConfigError::Empty); 68 | } 69 | 70 | Ok(s.trim() 71 | .to_string() 72 | .parse() 73 | .map_err(|_| ConfigError::Parsing)?) 
74 | } 75 | 76 | pub const CONFIG_FILE: &str = "config.yaml"; 77 | pub const KEY_FILE: &str = "host.key"; 78 | pub const ADDR_FILE: &str = ".daemonaddr"; 79 | 80 | #[derive(PartialEq, Debug)] 81 | pub enum ConfigError { 82 | Parsing, 83 | FileNotFound, 84 | FilePermissions, 85 | Empty, 86 | Unknown(String), 87 | } 88 | 89 | pub async fn getkey() -> Result { 90 | let s = fs::read_to_string(key_file_full()).await?; 91 | if s.is_empty() { 92 | return Err(ConfigError::Empty); 93 | } 94 | 95 | Ok(s.trim().to_string()) 96 | } 97 | 98 | pub async fn setkey(k: &str) -> Result<(), ConfigError> { 99 | Ok(fs::write(key_file_full(), k.trim().as_bytes()).await?) 100 | } 101 | 102 | impl From for ConfigError { 103 | fn from(ioe: IoError) -> Self { 104 | match ioe.kind() { 105 | ErrorKind::NotFound => ConfigError::FileNotFound, 106 | ErrorKind::PermissionDenied => ConfigError::FilePermissions, 107 | _ => ConfigError::Unknown(format!("Unexepected error reading config file {:?}", ioe)), 108 | } 109 | } 110 | } 111 | 112 | #[derive(Debug, Clone, Serialize, Deserialize)] 113 | #[serde(untagged)] 114 | pub enum PossiblyEmptyEbbflowDaemonConfig { 115 | Empty, 116 | EbbflowDaemonConfig(EbbflowDaemonConfig), 117 | } 118 | 119 | /// Configuration for Ebbflow. 
Will be parsed to/from a YAML file located at 120 | /// - /etc/ebbflow for Linux 121 | /// - TBD for Windows 122 | #[derive(Debug, Clone, Serialize, Deserialize)] 123 | pub struct EbbflowDaemonConfig { 124 | /// A list of endpoints to host, see Endpoint 125 | pub endpoints: Vec, 126 | /// SSH Config overrides, not needed 127 | pub ssh: Option, 128 | /// The level to log the daemon with 129 | pub loglevel: Option, 130 | } 131 | 132 | impl EbbflowDaemonConfig { 133 | pub async fn check_permissions() -> Result<(), ConfigError> { 134 | let mut std = OpenOptions::new(); 135 | std.write(true).create(true); 136 | let options = TokioOpenOptions::from(std); 137 | 138 | options.open(config_file_full()).await?; 139 | options.open(key_file_full()).await?; 140 | Ok(()) 141 | } 142 | 143 | pub async fn load_from_file_or_new() -> Result { 144 | let cfg = match Self::load_from_file().await { 145 | Ok(existing) => existing, 146 | Err(e) => match e { 147 | ConfigError::Empty | ConfigError::FileNotFound => EbbflowDaemonConfig { 148 | endpoints: vec![], 149 | ssh: None, 150 | loglevel: None, 151 | }, 152 | _ => return Err(e), 153 | }, 154 | }; 155 | Ok(cfg) 156 | } 157 | 158 | pub async fn load_from_file() -> Result { 159 | Self::load_from_file_path(&config_file_full()).await 160 | } 161 | 162 | pub async fn load_from_file_path(p: &str) -> Result { 163 | let filebytes = fs::read(p).await?; 164 | 165 | let parsed: EbbflowDaemonConfig = match serde_yaml::from_slice(&filebytes[..]) { 166 | Ok(p) => match p { 167 | PossiblyEmptyEbbflowDaemonConfig::Empty => return Err(ConfigError::Empty), 168 | PossiblyEmptyEbbflowDaemonConfig::EbbflowDaemonConfig(c) => c, 169 | }, 170 | Err(_e) => { 171 | info!("Error parsing configuration file"); 172 | return Err(ConfigError::Parsing); 173 | } 174 | }; 175 | 176 | Ok(parsed) 177 | } 178 | 179 | pub async fn save_to_file(&self) -> Result<(), ConfigError> { 180 | let b: String = match serde_yaml::to_string(self) { 181 | Ok(s) => s, 182 | Err(_e) => { 183 
| info!("Error parsing current configuration into a YAML file"); 184 | return Err(ConfigError::Parsing); 185 | } 186 | }; 187 | 188 | Ok(fs::write(config_file_full(), b.as_bytes()).await?) 189 | } 190 | } 191 | 192 | /// An Endpoint to host. Provide the DNS name, and the local port. 193 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 194 | pub struct Endpoint { 195 | /// The port your application runs on 196 | pub port: u16, 197 | /// The DNS name of the endpoint being hosted 198 | pub dns: String, 199 | /// the maximum amount of open connections, defaults to 200 200 | pub maxconns: u16, 201 | /// the maxmimum amount of idle connections to Ebbflow, will be capped at X 202 | pub maxidle: u16, 203 | /// Is this endpoint enabled or disabled? 204 | pub enabled: bool, 205 | /// Health Check, will automatically Enable and Disable based on pass/fail 206 | pub healthcheck: Option, 207 | } 208 | 209 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 210 | pub struct HealthCheck { 211 | /// The port to health check, defaults to the Endpoint's port. 212 | pub port: Option, 213 | /// How many successes before we consider this healthy? Defaults to 3. 214 | pub consider_healthy_threshold: Option, 215 | /// How many failures until we consider this unhealthy? Defaults to 3. 216 | pub consider_unhealthy_threshold: Option, 217 | /// The type of HealthCheck, only `TCP` is available now. 218 | pub r#type: HealthCheckType, 219 | /// How often (in seconds) the health check should be evaluated. Defaults to 5. 
220 | pub frequency_secs: Option, 221 | } 222 | 223 | pub struct ConcreteHealthCheck { 224 | pub port: u16, 225 | pub consider_healthy_threshold: u16, 226 | pub consider_unhealthy_threshold: u16, 227 | pub r#type: HealthCheckType, 228 | pub frequency_secs: u16, 229 | } 230 | 231 | impl ConcreteHealthCheck { 232 | pub fn new(default_port: u16, hc: &HealthCheck) -> Self { 233 | Self { 234 | port: hc.port.unwrap_or(default_port), 235 | consider_healthy_threshold: hc.consider_healthy_threshold.unwrap_or(3), 236 | consider_unhealthy_threshold: hc.consider_unhealthy_threshold.unwrap_or(3), 237 | frequency_secs: hc.frequency_secs.unwrap_or(5), 238 | r#type: hc.r#type.clone(), 239 | } 240 | } 241 | } 242 | 243 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 244 | pub enum HealthCheckType { 245 | /// Just a simple TCP Connection, no data transfer only connect 246 | TCP, 247 | } 248 | 249 | impl FromStr for HealthCheckType { 250 | type Err = String; 251 | fn from_str(s: &str) -> Result { 252 | match s.to_lowercase().trim() { 253 | "tcp" => Ok(HealthCheckType::TCP), 254 | _ => Err(format!("Could not parse {} into a HealthCheckType", s)), 255 | } 256 | } 257 | } 258 | 259 | #[derive(Debug, Clone, Serialize, Deserialize)] 260 | pub struct Ssh { 261 | /// the maximum amount of open connections 262 | pub maxconns: u16, 263 | /// The local port, defaults to 22 264 | pub port: u16, 265 | /// Is SSH enabled? 
266 | pub enabled: bool, 267 | /// the maxmimum amount of idle connections to Ebbflow, will be capped at X 268 | pub maxidle: u16, 269 | /// The hostname to use as the target, defaults the OS provided Hostname 270 | pub hostname_override: Option, 271 | } 272 | 273 | impl Ssh { 274 | pub fn new(enabled: bool, hostname: Option) -> Ssh { 275 | Self { 276 | maxconns: 20, 277 | port: 22, 278 | enabled, 279 | hostname_override: hostname, 280 | maxidle: 5, 281 | } 282 | } 283 | } 284 | -------------------------------------------------------------------------------- /src/daemon/connection.rs: -------------------------------------------------------------------------------- 1 | use crate::messaging::{ 2 | HelloResponseIssue, HelloV0, Message, MessageError, StartTrafficResponseV0, 3 | }; 4 | use crate::{messagequeue::MessageQueue, signal::SignalReceiver}; 5 | use futures::future::{Either, Future}; 6 | use rand::rngs::SmallRng; 7 | use rand::Rng; 8 | use rand::SeedableRng; 9 | use std::io::Error as IoError; 10 | use std::net::{SocketAddr, SocketAddrV4}; 11 | use std::sync::Arc; 12 | use std::time::Duration; 13 | use std::time::Instant; 14 | use tokio::io::{AsyncRead, AsyncWrite}; 15 | use tokio::net::TcpStream; 16 | use tokio::prelude::*; 17 | use tokio::time::timeout as tokiotimeout; 18 | use tokio::time::timeout; 19 | use tokio_rustls::client::TlsStream; 20 | use tokio_rustls::TlsConnector; 21 | 22 | const LONG_TIMEOUT: Duration = Duration::from_secs(3); 23 | const SHORT_TIMEOUT: Duration = Duration::from_millis(1_000); 24 | const SUPER_SHORT_TIMEOUT: Duration = Duration::from_millis(300); 25 | const KILL_ACTIVE_DELAY: Duration = Duration::from_secs(60); 26 | const BAD_ERROR_DELAY: Duration = Duration::from_secs(55); 27 | const MIN_EBBFLOW_ERROR_DELAY: Duration = Duration::from_secs(5); 28 | const MAX_IDLE_CONNETION_TIME: Duration = Duration::from_secs(60 * 62); // at least an hour 29 | 30 | #[derive(Debug)] 31 | enum ConnectionError { 32 | Io(IoError), 33 | Timeout(&'static 
str), 34 | Messaging(MessageError), 35 | BadRequest, 36 | UnexpectedMessage, 37 | Forbidden, 38 | NotFound, 39 | Shutdown, 40 | Multiple(String), 41 | } 42 | 43 | impl From for ConnectionError { 44 | fn from(v: MessageError) -> Self { 45 | ConnectionError::Messaging(v) 46 | } 47 | } 48 | 49 | impl From for ConnectionError { 50 | fn from(v: IoError) -> Self { 51 | ConnectionError::Io(v) 52 | } 53 | } 54 | 55 | pub struct EndpointConnectionArgs { 56 | pub endpoint: String, 57 | pub key: String, 58 | pub addrs: Vec, 59 | pub ctype: EndpointConnectionType, 60 | pub connector: TlsConnector, 61 | pub ebbflow_addr: SocketAddrV4, 62 | pub ebbflow_dns: webpki::DNSName, 63 | } 64 | 65 | #[derive(Debug, Clone, Copy, PartialEq)] 66 | pub enum EndpointConnectionType { 67 | Ssh, 68 | Tls, 69 | } 70 | 71 | /// Entry point for establishing an individual connection to Ebbflow, awaiting a connection, and then proxying data between the two. 72 | pub async fn run_connection( 73 | receiver: SignalReceiver, 74 | args: EndpointConnectionArgs, 75 | idle_permit: tokio::sync::OwnedSemaphorePermit, 76 | meta: Arc, 77 | message: Arc, 78 | ) { 79 | meta.add_idle(); 80 | let getstreamresult = timeout( 81 | MAX_IDLE_CONNETION_TIME, 82 | establish_ebbflow_connection_and_await_traffic_signal(receiver.clone(), &args), 83 | ) 84 | .await; 85 | // Important: Release any idle connections ASAP so we can start another connection 86 | meta.remove_idle(); 87 | 88 | let stream = match getstreamresult { 89 | Ok(Ok(stream)) => stream, 90 | Ok(Err(e)) => { 91 | match e { 92 | ConnectionError::Forbidden 93 | | ConnectionError::NotFound 94 | | ConnectionError::BadRequest => { 95 | let s = format!( 96 | "A connection for endpoint {} failed due to {:?}", 97 | args.endpoint, e 98 | ); 99 | warn!("{}", s); 100 | message.add_message(s); 101 | trace!("Bad error delay"); 102 | jittersleep1p5(BAD_ERROR_DELAY).await; 103 | } 104 | _ => { 105 | let s = format!( 106 | "Failed to connect to Ebbflow for endpoint {} failed 
due to {:?}", 107 | args.endpoint, e 108 | ); 109 | warn!("{}", s); 110 | message.add_message(s); 111 | } 112 | } 113 | trace!("Minimum Delay"); 114 | jittersleep1p5(MIN_EBBFLOW_ERROR_DELAY).await; 115 | 116 | return; 117 | } 118 | Err(_e) => { 119 | debug!( 120 | "Timed out connecting to Ebbflow {:?}", 121 | MAX_IDLE_CONNETION_TIME 122 | ); 123 | return; 124 | } 125 | }; 126 | drop(idle_permit); 127 | 128 | let (ebbstream, localtcp, _now) = 129 | match connect_local_with_ebbflow_communication(stream, &args, message).await { 130 | Ok(triple) => triple, 131 | Err(_e) => { 132 | trace!("Minimum Delay"); 133 | jittersleep1p5(MIN_EBBFLOW_ERROR_DELAY).await; 134 | return; 135 | } 136 | }; 137 | trace!( 138 | "Connection handshake complete and a local connection has been established, proxying data" 139 | ); 140 | 141 | // We have two connections that are ready to be proxied, lesgo 142 | meta.add_active(); 143 | let r = proxy_data(ebbstream, localtcp, &args, receiver).await; 144 | meta.remove_active(); 145 | match r { 146 | Ok(_) => {} 147 | Err(e) => { 148 | debug!("error from proxy_data {:?}", e); 149 | } 150 | } 151 | } 152 | 153 | /// This waits for a connection to Ebbflow, and only returns with a valid connection to the local server. 154 | /// 155 | /// Specifically, this will inform Ebbflow if the local connection is ready or not after it connects locally. 
156 | async fn establish_ebbflow_connection_and_await_traffic_signal( 157 | mut receiver: SignalReceiver, 158 | args: &EndpointConnectionArgs, 159 | ) -> Result, ConnectionError> { 160 | // Connect to Ebbflow 161 | let mut tlsstream = connect_ebbflow(args).await?; 162 | 163 | let receiverfut = Box::pin(async move { 164 | receiver.wait().await; 165 | trace!("Receiver told to stop"); 166 | }); 167 | 168 | // Say Hello 169 | let hello = create_hello(args)?; 170 | tlsstream.write_all(&hello[..]).await?; 171 | tlsstream.flush().await?; 172 | 173 | // Receive Response, timed out as Ebbflow should be quick 174 | let message = tos( 175 | await_message(&mut tlsstream), 176 | "error waiting for hello response", 177 | ) 178 | .await??; 179 | match message { 180 | Message::HelloResponseV0(hr) => match hr.issue { 181 | Some(HelloResponseIssue::Forbidden) => return Err(ConnectionError::Forbidden), 182 | Some(HelloResponseIssue::NotFound) => return Err(ConnectionError::NotFound), 183 | Some(HelloResponseIssue::BadRequest) => return Err(ConnectionError::BadRequest), 184 | None => {} // Yay, we can continue! 
185 | }, 186 | _ => return Err(ConnectionError::UnexpectedMessage), 187 | } 188 | debug!("Awaiting TrafficStart ({})", args.endpoint,); 189 | 190 | // Await Connection 191 | let stream = match futures::future::select( 192 | receiverfut, 193 | Box::pin(async move { await_traffic_start(tlsstream).await }), // No timeout, as the connection can be idle for a while 194 | ) 195 | .await 196 | { 197 | Either::Left((_, readf)) => { 198 | trace!("Stopping connection await due to signal stop"); 199 | drop(readf); 200 | return Err(ConnectionError::Shutdown); 201 | } 202 | Either::Right((readresult, _r)) => readresult?, 203 | }; 204 | Ok(stream) 205 | } 206 | 207 | async fn connect_local_with_ebbflow_communication( 208 | mut stream: TlsStream, 209 | args: &EndpointConnectionArgs, 210 | message: Arc, 211 | ) -> Result<(TlsStream, TcpStream, Instant), ConnectionError> { 212 | let now = Instant::now(); 213 | // Traffic start, connect local real quick 214 | let local = match connect_local(&args.addrs).await { 215 | Ok(localstream) => { 216 | trace!( 217 | "Connected to local addr {:?} for {}", 218 | args.addrs, 219 | args.endpoint 220 | ); 221 | let response = starttrafficresponse(true)?; 222 | stream.write_all(&response[..]).await?; 223 | localstream 224 | } 225 | Err(e) => { 226 | let s = format!( 227 | "ERROR: Received traffic but could not connect to local host addr {:?} for {} {:?}", 228 | args.addrs, args.endpoint, e 229 | ); 230 | warn!("{}", s); 231 | message.add_message(s); 232 | let response = starttrafficresponse(false)?; 233 | stream.write_all(&response[..]).await?; 234 | return Err(e); 235 | } 236 | }; 237 | 238 | Ok((stream, local, now)) 239 | } 240 | 241 | async fn proxy_data( 242 | mut tlsstream: TlsStream, 243 | mut local: TcpStream, 244 | _args: &EndpointConnectionArgs, 245 | mut receiver: SignalReceiver, 246 | ) -> Result<(), ConnectionError> { 247 | // Now we have both, let's create the proxy future, which we can hard-abort 248 | let (proxyabortable, handle) = 
249 | futures::future::abortable(Box::pin( 250 | async move { proxy(&mut local, &mut tlsstream).await }, 251 | )); 252 | 253 | match futures::future::select( 254 | Box::pin(async move { receiver.wait().await }), 255 | proxyabortable, 256 | ) 257 | .await 258 | { 259 | // If the same receiver from above fires, let's sleep, then kill the connection 260 | Either::Left((_, readf)) => { 261 | tokio::spawn(readf); // This lets the future continue running until we kill it 262 | jittersleep1p5(KILL_ACTIVE_DELAY).await; 263 | handle.abort(); 264 | Ok(()) 265 | } 266 | // The connection ran its course, but remember it was an abortable future so we need to look at that result first 267 | Either::Right((proxyresult, _r)) => { 268 | match proxyresult { 269 | Ok(innerresult) => innerresult, 270 | Err(_) => { 271 | // This seems unreachable? The abort future should Err only if we called .abort which only happens if the Either::Left wins... But let's handle it anyways.. 272 | Err(ConnectionError::Timeout("unreachable segment, terminated")) 273 | } 274 | } 275 | } 276 | } 277 | } 278 | 279 | async fn connect_ebbflow( 280 | args: &EndpointConnectionArgs, 281 | ) -> Result, ConnectionError> { 282 | let tcpstream = tol( 283 | TcpStream::connect(args.ebbflow_addr), 284 | "connecting to ebbflow", 285 | ) 286 | .await??; 287 | tcpstream.set_keepalive(Some(Duration::from_secs(1)))?; 288 | tcpstream.set_nodelay(true)?; 289 | Ok(args 290 | .connector 291 | .connect(args.ebbflow_dns.as_ref(), tcpstream) 292 | .await?) 
293 | } 294 | 295 | async fn await_message(tlsstream: &mut TlsStream) -> Result { 296 | let mut lenbuf: [u8; 4] = [0; 4]; 297 | tlsstream.read_exact(&mut lenbuf[..]).await?; 298 | let len = u32::from_be_bytes(lenbuf) as usize; 299 | 300 | if len > 200 * 1024 { 301 | return Err(ConnectionError::Messaging(MessageError::Internal( 302 | "message too big safeguard", 303 | ))); 304 | } 305 | 306 | let mut msgbuf = vec![0; len]; 307 | tlsstream.read_exact(&mut msgbuf[..]).await?; 308 | Ok(Message::from_wire_without_the_length_prefix(&msgbuf[..])?) 309 | } 310 | 311 | async fn connect_local(addrs: &[SocketAddr]) -> Result { 312 | let mut errors = Vec::new(); 313 | for a in addrs { 314 | match toss(TcpStream::connect(a), "connecting to local host").await { 315 | Ok(Ok(tcpstream)) => { 316 | tcpstream.set_keepalive(Some(Duration::from_secs(1)))?; 317 | tcpstream.set_nodelay(true)?; 318 | return Ok(tcpstream); 319 | } 320 | Ok(Err(e)) => { 321 | errors.push((a.clone(), e.into())); 322 | } 323 | Err(e) => { 324 | errors.push((a.clone(), e)); 325 | } 326 | } 327 | } 328 | Err(ConnectionError::Multiple(format!( 329 | "multiple {}", 330 | errors 331 | .iter() 332 | .map(|(a, e)| format!("{:?} - {:?}", a, e)) 333 | .collect::>() 334 | .join(", ") 335 | ))) 336 | } 337 | 338 | async fn await_traffic_start( 339 | mut tlsstream: TlsStream, 340 | ) -> Result, ConnectionError> { 341 | let message = await_message(&mut tlsstream).await?; 342 | match message { 343 | Message::StartTrafficV0 => Ok(tlsstream), 344 | _ => Err(ConnectionError::UnexpectedMessage), 345 | } 346 | } 347 | 348 | fn create_hello(args: &EndpointConnectionArgs) -> Result, ConnectionError> { 349 | let t = match args.ctype { 350 | EndpointConnectionType::Ssh => crate::messaging::EndpointType::Ssh, 351 | EndpointConnectionType::Tls => crate::messaging::EndpointType::Tls, 352 | }; 353 | let hello = HelloV0::new(args.key.clone(), t, args.endpoint.clone()); 354 | let message = Message::HelloV0(hello); 355 | 
Ok(message.to_wire_message()?) 356 | } 357 | 358 | fn starttrafficresponse(good: bool) -> Result, ConnectionError> { 359 | let message = Message::StartTrafficResponseV0(StartTrafficResponseV0 { 360 | open_local_success_ready: good, 361 | }); 362 | Ok(message.to_wire_message()?) 363 | } 364 | 365 | /// Actually transfer the bytes between the two parties 366 | async fn proxy( 367 | local: &mut TcpStream, 368 | ebbflow: &mut TlsStream, 369 | ) -> Result<(), ConnectionError> { 370 | let (mut localreader, mut localwriter) = tokio::io::split(local); 371 | let (mut ebbflowreader, mut ebbflowwriter) = tokio::io::split(ebbflow); 372 | 373 | let local2ebb = 374 | Box::pin(async move { copy_bytes_ez(&mut localreader, &mut ebbflowwriter).await }); 375 | let ebb2local = 376 | Box::pin(async move { copy_bytes_ez(&mut ebbflowreader, &mut localwriter).await }); 377 | 378 | match futures::future::select(local2ebb, ebb2local).await { 379 | Either::Left((_server_read_res, _c2s_future)) => (), 380 | Either::Right((_client_read_res, _s2c_future)) => (), 381 | } 382 | Ok(()) 383 | } 384 | 385 | // ezpzlemonsqueezy 386 | async fn copy_bytes_ez(r: &mut R, w: &mut W) -> Result<(), ConnectionError> 387 | where 388 | R: AsyncRead + Unpin + Send, 389 | W: AsyncWrite + Unpin + Send, 390 | { 391 | let mut buf = vec![0; 10 * 1024]; 392 | let mut first = true; 393 | loop { 394 | let n = r.read(&mut buf[0..]).await?; 395 | if first { 396 | first = false; 397 | } 398 | 399 | if n == 0 { 400 | return Ok(()); 401 | } 402 | w.write_all(&buf[0..n]).await?; 403 | w.flush().await?; 404 | } 405 | } 406 | 407 | /// long timeout 408 | async fn tol(future: T, msg: &'static str) -> Result 409 | where 410 | T: Future, 411 | { 412 | match tokiotimeout(LONG_TIMEOUT, future).await { 413 | Ok(r) => Ok(r), 414 | Err(_) => Err(ConnectionError::Timeout(msg)), 415 | } 416 | } 417 | 418 | /// super short timeout 419 | async fn toss(future: T, msg: &'static str) -> Result 420 | where 421 | T: Future, 422 | { 423 | match 
tokiotimeout(SUPER_SHORT_TIMEOUT, future).await { 424 | Ok(r) => Ok(r), 425 | Err(_) => Err(ConnectionError::Timeout(msg)), 426 | } 427 | } 428 | 429 | /// short timeout 430 | async fn tos(future: T, msg: &'static str) -> Result 431 | where 432 | T: Future, 433 | { 434 | match tokiotimeout(SHORT_TIMEOUT, future).await { 435 | Ok(r) => Ok(r), 436 | Err(_) => Err(ConnectionError::Timeout(msg)), 437 | } 438 | } 439 | 440 | // Sleeps somewhere between 1 and 1.5 of the `dur` 441 | async fn jittersleep1p5(dur: Duration) { 442 | let mut small_rng = SmallRng::from_entropy(); 443 | let max = dur.mul_f32(1.5); 444 | let millis = small_rng.gen_range(dur.as_millis(), max.as_millis()); 445 | trace!("jittersleep {}ms", millis); 446 | tokio::time::delay_for(Duration::from_millis(millis as u64)).await; 447 | } 448 | -------------------------------------------------------------------------------- /src/daemon/health.rs: -------------------------------------------------------------------------------- 1 | use crate::config::ConcreteHealthCheck; 2 | use parking_lot::Mutex; 3 | use std::{collections::VecDeque, sync::Arc}; 4 | 5 | pub struct HealthData {} 6 | 7 | pub struct HealthMaster { 8 | inner: Arc>, 9 | pub cfg: ConcreteHealthCheck, 10 | } 11 | 12 | impl HealthMaster { 13 | pub fn new(cfg: ConcreteHealthCheck) -> Self { 14 | Self { 15 | inner: Arc::new(Mutex::new(HealthMasterInner::new())), 16 | cfg, 17 | } 18 | } 19 | 20 | pub fn report_check_result(&self, result: bool) -> bool { 21 | self.inner.lock().report_check_result(result, &self.cfg) 22 | } 23 | 24 | pub fn data(&self) -> (bool, VecDeque<(bool, u128)>) { 25 | self.inner.lock().data() 26 | } 27 | } 28 | 29 | pub struct HealthMasterInner { 30 | healthy: bool, 31 | recent: VecDeque<(bool, u128)>, 32 | } 33 | 34 | impl HealthMasterInner { 35 | pub fn new() -> Self { 36 | Self { 37 | healthy: false, 38 | recent: VecDeque::new(), 39 | } 40 | } 41 | 42 | pub fn report_check_result(&mut self, result: bool, cfg: 
&ConcreteHealthCheck) -> bool { 43 | use std::time::SystemTime; 44 | 45 | let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) { 46 | Ok(n) => n.as_millis(), 47 | Err(_) => 0, 48 | }; 49 | self.recent.push_front((result, t)); 50 | self.status(cfg) 51 | } 52 | 53 | pub fn data(&self) -> (bool, VecDeque<(bool, u128)>) { 54 | (self.healthy, self.recent.clone()) 55 | } 56 | 57 | pub fn status(&mut self, cfg: &ConcreteHealthCheck) -> bool { 58 | while self.recent.len() 59 | > std::cmp::max( 60 | cfg.consider_healthy_threshold as usize, 61 | cfg.consider_unhealthy_threshold as usize, 62 | ) 63 | { 64 | self.recent.pop_back(); 65 | } 66 | 67 | if self.healthy { 68 | // we need X consecutive unhealthies 69 | let mut unhealthies = 0; 70 | for (check, _time) in self.recent.iter() { 71 | if *check { 72 | break; 73 | } else { 74 | unhealthies += 1; 75 | } 76 | } 77 | // We were healthy, but had `unhealthies` consecutive unhealthies, so we are now unhealthy 78 | if unhealthies >= cfg.consider_unhealthy_threshold { 79 | self.healthy = false; 80 | self.healthy 81 | // We are healthy, but did not make the threshold, so we are healthy 82 | } else { 83 | self.healthy = true; 84 | self.healthy 85 | } 86 | } else { 87 | // we need X consecutive healthies 88 | let mut healthies = 0; 89 | for (check, _time) in self.recent.iter() { 90 | if *check { 91 | healthies += 1; 92 | } else { 93 | break; 94 | } 95 | } 96 | // We were unhealthy, but had `healthies` consecutive healthies, so we are now healthy 97 | if healthies >= cfg.consider_healthy_threshold { 98 | self.healthy = true; 99 | self.healthy 100 | // We are unhealthy, but did not make the threshold, so we are unhealthy 101 | } else { 102 | self.healthy = false; 103 | self.healthy 104 | } 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/daemon/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod 
connection; 2 | pub mod health; 3 | 4 | use crate::daemon::connection::{run_connection, EndpointConnectionArgs, EndpointConnectionType}; 5 | use crate::dns::DnsResolver; 6 | use crate::{ 7 | certs::ROOTS, 8 | config::{ConcreteHealthCheck, HealthCheck, HealthCheckType}, 9 | messagequeue::MessageQueue, 10 | signal::{SignalReceiver, SignalSender}, 11 | }; 12 | use futures::future::select; 13 | use futures::future::Either; 14 | use health::HealthMaster; 15 | use parking_lot::Mutex; 16 | use rand::rngs::SmallRng; 17 | use rand::seq::SliceRandom; 18 | use rand::SeedableRng; 19 | use rustls::{ClientConfig, RootCertStore}; 20 | use std::net::{SocketAddr, SocketAddrV4}; 21 | use std::sync::atomic::{AtomicUsize, Ordering}; 22 | use std::sync::Arc; 23 | use std::{collections::VecDeque, time::Duration}; 24 | use tokio::{ 25 | sync::Semaphore, 26 | time::{delay_for, timeout}, 27 | }; 28 | use tokio_rustls::TlsConnector; 29 | 30 | const EBBFLOW_DNS: &str = "s.ebbflow.io"; 31 | const EBBFLOW_PORT: u16 = 443; 32 | const MAX_IDLE: usize = 1000; 33 | 34 | pub struct SharedInfo { 35 | dns: DnsResolver, 36 | key: Mutex>, 37 | roots: RootCertStore, 38 | hardcoded_ebbflow_addr: Option, 39 | hardcoded_ebbflow_dns: Option, 40 | } 41 | 42 | impl SharedInfo { 43 | pub async fn new() -> Result { 44 | Self::innernew(None, None, ROOTS.clone()).await 45 | } 46 | 47 | pub async fn new_with_ebbflow_overrides( 48 | hardcoded_ebbflow_addr: SocketAddrV4, 49 | hardcoded_ebbflow_dns: String, 50 | roots: RootCertStore, 51 | ) -> Result { 52 | Self::innernew( 53 | Some(hardcoded_ebbflow_addr), 54 | Some(hardcoded_ebbflow_dns), 55 | roots, 56 | ) 57 | .await 58 | } 59 | 60 | async fn innernew( 61 | overriddenmaybe: Option, 62 | overridedns: Option, 63 | roots: RootCertStore, 64 | ) -> Result { 65 | Ok(Self { 66 | dns: DnsResolver::new().await?, 67 | key: Mutex::new(None), 68 | roots, 69 | hardcoded_ebbflow_addr: overriddenmaybe, 70 | hardcoded_ebbflow_dns: overridedns, 71 | }) 72 | } 73 | 74 | pub fn 
update_key(&self, newkey: String) { 75 | let mut key = self.key.lock(); 76 | *key = Some(newkey); 77 | } 78 | 79 | pub fn key(&self) -> Option { 80 | self.key.lock().clone() 81 | } 82 | 83 | pub fn roots(&self) -> RootCertStore { 84 | self.roots.clone() 85 | } 86 | 87 | pub async fn ebbflow_addr(&self) -> SocketAddrV4 { 88 | if let Some(overridden) = self.hardcoded_ebbflow_addr { 89 | return overridden; 90 | } 91 | let ips = self.dns.ips(EBBFLOW_DNS).await.unwrap_or_else(|_| { 92 | vec![ 93 | "75.2.87.195".parse().unwrap(), 94 | "99.83.181.168".parse().unwrap(), 95 | "34.120.207.167".parse().unwrap(), 96 | ] 97 | }); // TODO: Update fallbacks 98 | 99 | let mut small_rng = SmallRng::from_entropy(); 100 | let chosen = ips[..].choose(&mut small_rng); 101 | SocketAddrV4::new(*chosen.unwrap(), EBBFLOW_PORT) 102 | } 103 | 104 | pub fn ebbflow_dns(&self) -> webpki::DNSName { 105 | if let Some(overridden) = &self.hardcoded_ebbflow_dns { 106 | webpki::DNSNameRef::try_from_ascii_str(&overridden) 107 | .unwrap() 108 | .to_owned() 109 | } else { 110 | webpki::DNSNameRef::try_from_ascii_str(EBBFLOW_DNS) 111 | .unwrap() 112 | .to_owned() 113 | } 114 | } 115 | } 116 | 117 | #[derive(Debug, Clone)] 118 | pub struct EndpointArgs { 119 | pub ctype: EndpointConnectionType, 120 | pub idleconns: usize, 121 | pub maxconns: usize, 122 | pub endpoint: String, 123 | pub port: u16, 124 | pub message_queue: Arc, 125 | pub healthcheck: Option, 126 | } 127 | 128 | pub struct EndpointMeta { 129 | idle: AtomicUsize, 130 | active: AtomicUsize, 131 | stopper: SignalSender, 132 | healthdata: Option>, 133 | } 134 | use serde::{Deserialize, Serialize}; 135 | 136 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 137 | pub enum HealthOverall { 138 | NOT_CONFIGURED, 139 | HEALTHY(VecDeque<(bool, u128)>), 140 | UNHEALTHY(VecDeque<(bool, u128)>), 141 | } 142 | 143 | impl EndpointMeta { 144 | pub fn new(stopper: SignalSender, hm: Option>) -> Self { 145 | Self { 146 | idle: AtomicUsize::new(0), 
147 | active: AtomicUsize::new(0), 148 | healthdata: hm, 149 | stopper, 150 | } 151 | } 152 | 153 | pub fn num_active(&self) -> usize { 154 | self.active.load(Ordering::SeqCst) 155 | } 156 | 157 | pub fn num_idle(&self) -> usize { 158 | self.idle.load(Ordering::SeqCst) 159 | } 160 | 161 | pub fn stop(&self) { 162 | self.stopper.send_signal(); 163 | } 164 | 165 | pub fn health(&self) -> HealthOverall { 166 | match &self.healthdata { 167 | None => HealthOverall::NOT_CONFIGURED, 168 | Some(hm) => { 169 | let (status, datapoints) = hm.data(); 170 | if status { 171 | HealthOverall::HEALTHY(datapoints) 172 | } else { 173 | HealthOverall::UNHEALTHY(datapoints) 174 | } 175 | } 176 | } 177 | } 178 | 179 | fn add_idle(&self) { 180 | self.idle.fetch_add(1, Ordering::SeqCst); 181 | } 182 | 183 | fn remove_idle(&self) { 184 | self.idle.fetch_sub(1, Ordering::SeqCst); 185 | } 186 | 187 | fn add_active(&self) { 188 | self.active.fetch_add(1, Ordering::SeqCst); 189 | } 190 | 191 | fn remove_active(&self) { 192 | self.active.fetch_sub(1, Ordering::SeqCst); 193 | } 194 | } 195 | 196 | /// This runs this endpoint. To stop it, SEND THE SIGNAL. 
197 | pub async fn spawn_endpoint(info: Arc, mut args: EndpointArgs) -> Arc { 198 | // This sender and receiver are the 'master' sender and receivers that are sent 199 | // to us when 'enabled' or 'disabled' is set on the endpoint in the config 200 | let sender = SignalSender::new(); 201 | let receiver = sender.new_receiver(); 202 | 203 | let maybe_hc = match &args.healthcheck { 204 | None => None, 205 | Some(hc) => { 206 | let chc = ConcreteHealthCheck::new(args.port, &hc); 207 | Some(Arc::new(HealthMaster::new(chc))) 208 | } 209 | }; 210 | 211 | let meta = Arc::new(EndpointMeta::new(sender, maybe_hc.clone())); 212 | let metac1 = meta.clone(); 213 | let endpoint = args.endpoint.clone(); 214 | 215 | let message_queue = args.message_queue.clone(); 216 | 217 | args.idleconns = std::cmp::min(args.idleconns, MAX_IDLE); 218 | 219 | let m = format!("Endpoint {} is enabled", endpoint); 220 | info!("{}", m); 221 | message_queue.add_message(m); 222 | 223 | let conn_addrs = get_addrs(args.port, args.message_queue.clone()).await; 224 | let hc_info = match maybe_hc { 225 | None => None, 226 | Some(hc) => Some(( 227 | hc.clone(), 228 | get_addrs(hc.cfg.port, args.message_queue.clone()).await, 229 | )), 230 | }; 231 | 232 | let e = endpoint.clone(); 233 | tokio::spawn(async move { 234 | loop { 235 | // See if we go unhealthy, or the main receiver is done first. 236 | let mut r = receiver.clone(); 237 | match select( 238 | Box::pin(async { 239 | await_healthy(&hc_info, &e, &message_queue).await; 240 | }), 241 | Box::pin(async move { r.wait().await }), 242 | ) 243 | .await 244 | { 245 | // healthy! 
246 | Either::Left(_) => (), 247 | // We were told to stop, so don't loop 248 | Either::Right(_) => { 249 | message_queue 250 | .add_message(format!("Endpoint {} was disabled, shutting down", endpoint)); 251 | break; 252 | } 253 | } 254 | 255 | let m = format!("Endpoint {} starting up", e); 256 | info!("{}", m); 257 | message_queue.add_message(m); 258 | let healthy_connection_sender = SignalSender::new(); 259 | let mut healthy_connection_receiver_conns1 = healthy_connection_sender.new_receiver(); 260 | let healthy_connection_receiver_conns2 = healthy_connection_sender.new_receiver(); 261 | 262 | let i = info.clone(); 263 | let m = metac1.clone(); 264 | let a = args.clone(); 265 | let ca = conn_addrs.clone(); 266 | let ee = e.clone(); 267 | tokio::spawn(async move { 268 | match select( 269 | Box::pin(async move { healthy_connection_receiver_conns1.wait().await }), 270 | Box::pin(async move { 271 | inner_run_endpoint(i, a.clone(), healthy_connection_receiver_conns2, m, ca) 272 | .await 273 | }), 274 | ) 275 | .await 276 | { 277 | Either::Left(_) => debug!("Endpoint {} is shutting down", &ee), 278 | Either::Right(_) => info!("Unreachable? inner_run_endpoint finished"), 279 | } 280 | }); 281 | 282 | // See if we go unhealthy, or the main receiver is done first. 283 | let mut r = receiver.clone(); 284 | match select( 285 | Box::pin(async { 286 | await_unhealthy(&hc_info, &endpoint, &message_queue).await; 287 | }), 288 | Box::pin(async move { r.wait().await }), 289 | ) 290 | .await 291 | { 292 | // Unhealthy! 
293 | Either::Left(_) => (), 294 | // We were told to stop, so don't loop 295 | Either::Right(_) => { 296 | message_queue 297 | .add_message(format!("Endpoint {} was disabled, shutting down", endpoint)); 298 | break; 299 | } 300 | } 301 | // Kill the connections, we are unhealthy 302 | healthy_connection_sender.send_signal(); 303 | 304 | let m = format!("Endpoint {} shutting down", e); 305 | info!("{}", m); 306 | message_queue.add_message(m); 307 | } 308 | let m = format!("Endpoint {} is disabled", endpoint); 309 | info!("{}", m); 310 | message_queue.add_message(m); 311 | }); 312 | 313 | meta 314 | } 315 | 316 | async fn await_healthy( 317 | hc: &Option<(Arc, Vec)>, 318 | e: &str, 319 | mq: &Arc, 320 | ) { 321 | match hc { 322 | // Start healthy if no healthcheck 323 | None => (), 324 | Some((hc, addrs)) => loop { 325 | let r = healthcheck(&hc.cfg, addrs, e).await; 326 | 327 | if hc.report_check_result(r) { 328 | let m = format!("Endpoint {} considered healthy", e); 329 | info!("{}", m); 330 | mq.add_message(m); 331 | return; 332 | } 333 | 334 | delay_for(Duration::from_secs(hc.cfg.frequency_secs as u64)).await; 335 | }, 336 | } 337 | } 338 | 339 | async fn await_unhealthy( 340 | hc: &Option<(Arc, Vec)>, 341 | e: &str, 342 | mq: &Arc, 343 | ) { 344 | match hc { 345 | // Never consider unhealthy if no healthcheck 346 | None => futures::future::pending::<()>().await, 347 | Some((hc, addrs)) => loop { 348 | let r = healthcheck(&hc.cfg, addrs, e).await; 349 | 350 | if !hc.report_check_result(r) { 351 | let m = format!("Endpoint {} considered unhealthy", e); 352 | info!("{}", m); 353 | mq.add_message(m); 354 | return; 355 | } 356 | 357 | delay_for(Duration::from_secs(hc.cfg.frequency_secs as u64)).await; 358 | }, 359 | } 360 | } 361 | 362 | // Checks both ipv4 and ipv6 addr, if can connect returns true 363 | async fn healthcheck(hc: &ConcreteHealthCheck, addrs: &Vec, e: &str) -> bool { 364 | match timeout(Duration::from_secs(1), inner_healthcheck(hc, addrs)).await { 365 
| Ok(r) => { 366 | debug!("{} health check result: {}", e, r); 367 | r 368 | } 369 | Err(_) => { 370 | debug!("{} health check timed out and was considered a failure", e); 371 | false 372 | } 373 | } 374 | } 375 | 376 | async fn inner_healthcheck(hc: &ConcreteHealthCheck, addrs: &Vec) -> bool { 377 | match hc.r#type { 378 | HealthCheckType::TCP => { 379 | for addr in addrs { 380 | if tokio::net::TcpStream::connect(addr).await.is_ok() { 381 | return true; 382 | } 383 | } 384 | false 385 | } 386 | } 387 | } 388 | 389 | async fn get_addrs(port: u16, mq: Arc) -> Vec { 390 | loop { 391 | match tokio::net::lookup_host(format!("localhost:{}", port)).await { 392 | Ok(i) => { 393 | let mut addrs: Vec = i.collect(); 394 | // prioritize ipv4 addrs 395 | addrs.sort_by(|l, r| match (l.is_ipv4(), r.is_ipv4()) { 396 | (true, false) => std::cmp::Ordering::Less, 397 | (false, true) => std::cmp::Ordering::Greater, 398 | _ => std::cmp::Ordering::Equal, 399 | }); 400 | break addrs; 401 | } 402 | Err(e) => { 403 | let msg = format!("Extremely unexpected error, could not resolve DNS of localhost, will try again in a second {:?}", e); 404 | error!("{}", msg); 405 | mq.add_message(msg); 406 | tokio::time::delay_for(Duration::from_secs(1)).await; 407 | } 408 | } 409 | } 410 | } 411 | 412 | async fn inner_run_endpoint( 413 | info: Arc, 414 | args: EndpointArgs, 415 | receiver: SignalReceiver, 416 | meta: Arc, 417 | addrs: Vec, 418 | ) { 419 | let mut ccfg = ClientConfig::new(); 420 | ccfg.root_store = info.roots(); 421 | let idlesem = Arc::new(Semaphore::new(args.idleconns)); 422 | let maxsem = Arc::new(Semaphore::new(args.maxconns)); 423 | let ccfg = Arc::new(ccfg); 424 | 425 | loop { 426 | let idlesemc = idlesem.clone(); 427 | let maxsemc = maxsem.clone(); 428 | 429 | // We can never have more than MAX permits out, we must have one to have a connection. 
430 | let maxpermit = maxsemc.acquire_owned().await; 431 | trace!( 432 | "acquired max permit i{} a{}", 433 | meta.num_idle(), 434 | meta.num_active() 435 | ); 436 | // Once we are OK with our max, we must have an IDLE connection available 437 | let idlepermit = idlesemc.acquire_owned().await; 438 | trace!("acquired idle permit"); 439 | 440 | // We have a permit, start a connection 441 | let receiverc = receiver.clone(); 442 | let messageq = args.message_queue.clone(); 443 | let args = create_args(&info, &args, &addrs[..], ccfg.clone()).await; 444 | debug!( 445 | "Creating new connection runner for {} (localaddr: {:?})", 446 | args.endpoint, args.addrs 447 | ); 448 | let m = meta.clone(); 449 | trace!( 450 | "ebbflow addrs {:?} dns {:?}", 451 | args.ebbflow_addr, 452 | args.ebbflow_dns 453 | ); 454 | tokio::spawn(async move { 455 | run_connection(receiverc, args, idlepermit, m.clone(), messageq).await; 456 | debug!("Connection ended i{} a{}", m.num_idle(), m.num_active()); 457 | drop(maxpermit); 458 | }); 459 | } 460 | } 461 | 462 | async fn create_args( 463 | info: &Arc, 464 | args: &EndpointArgs, 465 | addrs: &[SocketAddr], 466 | ccfg: Arc, 467 | ) -> EndpointConnectionArgs { 468 | let connector = TlsConnector::from(ccfg); 469 | 470 | EndpointConnectionArgs { 471 | endpoint: args.endpoint.clone(), 472 | key: info.key().unwrap_or_else(|| "unset".to_string()), 473 | addrs: addrs.to_vec(), 474 | ctype: args.ctype, 475 | ebbflow_addr: info.ebbflow_addr().await, 476 | ebbflow_dns: info.ebbflow_dns(), 477 | connector, 478 | } 479 | } 480 | -------------------------------------------------------------------------------- /src/dns.rs: -------------------------------------------------------------------------------- 1 | use std::net::{IpAddr, Ipv4Addr}; 2 | use trust_dns_resolver::config::NameServerConfigGroup; 3 | use trust_dns_resolver::config::ResolverConfig; 4 | use trust_dns_resolver::config::ResolverOpts; 5 | use trust_dns_resolver::TokioAsyncResolver; 6 | 7 | pub 
struct DnsResolver { 8 | trust: TokioAsyncResolver, 9 | } 10 | use std::time::Duration; 11 | const TIMEOUT: Duration = Duration::from_secs(1); 12 | const TTL: Duration = Duration::from_secs(60 * 60); 13 | 14 | impl DnsResolver { 15 | pub async fn new() -> Result { 16 | let mut opts = ResolverOpts::default(); 17 | opts.positive_max_ttl = Some(TTL); 18 | opts.negative_max_ttl = Some(TTL); 19 | 20 | let group = NameServerConfigGroup::from_ips_clear( 21 | &[ 22 | IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)), 23 | IpAddr::V4(Ipv4Addr::new(1, 0, 0, 1)), 24 | IpAddr::V4(Ipv4Addr::new(9, 9, 9, 9)), 25 | IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)), 26 | IpAddr::V4(Ipv4Addr::new(8, 8, 4, 4)), 27 | ], 28 | 53, 29 | ); 30 | let config = ResolverConfig::from_parts(None, vec![], group); 31 | 32 | let r = TokioAsyncResolver::tokio(config, opts) 33 | .await 34 | .map_err(|_e| ())?; 35 | Ok(Self { trust: r }) 36 | } 37 | 38 | pub async fn ips(&self, domain: &str) -> Result, ()> { 39 | let ips = match tokio::time::timeout(TIMEOUT, self.trust.ipv4_lookup(domain)) 40 | .await 41 | .map_err(|_| ())? 
42 | { 43 | Err(e) => { 44 | if let trust_dns_resolver::error::ResolveErrorKind::NoRecordsFound { 45 | query: _, 46 | valid_until: _, 47 | } = e.kind() 48 | { 49 | debug!("No records for domain {}", domain); 50 | return Ok(vec![]); 51 | } 52 | return Err(()); 53 | } 54 | Ok(ips) => ips, 55 | }; 56 | 57 | Ok(ips.iter().cloned().collect()) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/ebbflowd.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | 4 | use ebbflow::config::{ 5 | config_file_full, config_path_root, getkey, key_file_full, ConfigError, EbbflowDaemonConfig, 6 | }; 7 | use ebbflow::daemon::SharedInfo; 8 | use ebbflow::run_daemon; 9 | use ebbflow::signal::SignalReceiver; 10 | #[allow(unused)] 11 | use ebbflow::{certs::ROOTS, infoserver::run_info_server, signal::SignalSender}; 12 | use futures::future::BoxFuture; 13 | use log::LevelFilter; 14 | use notify::{event::Event, event::EventKind, Config, RecommendedWatcher, RecursiveMode, Watcher}; 15 | use std::sync::Arc; 16 | use tokio::sync::Notify; 17 | 18 | const DEFAULT_LEVEL: LevelFilter = LevelFilter::Warn; 19 | 20 | #[cfg(windows)] 21 | fn main() { 22 | let _ = windows::run(); 23 | } 24 | 25 | #[cfg(windows)] 26 | mod windows { 27 | use ebbflow::signal::SignalSender; 28 | use std::{ffi::OsString, time::Duration}; 29 | use windows_service::{ 30 | define_windows_service, 31 | service::{ 32 | ServiceControl, ServiceControlAccept, ServiceExitCode, ServiceState, ServiceStatus, 33 | ServiceType, 34 | }, 35 | service_control_handler::{self, ServiceControlHandlerResult}, 36 | service_dispatcher, Result, 37 | }; 38 | 39 | const SERVICE_NAME: &str = "ebbflowClientService"; 40 | const SERVICE_TYPE: ServiceType = ServiceType::OWN_PROCESS; 41 | 42 | pub fn run() -> Result<()> { 43 | winlog::try_register("Ebbflow Service Log").unwrap(); 44 | 45 | // Register generated `ffi_service_main` with 
the system and start the service, blocking 46 | // this thread until the service is stopped. 47 | service_dispatcher::start(SERVICE_NAME, ffi_service_main) 48 | } 49 | 50 | // Generate the windows service boilerplate. 51 | // The boilerplate contains the low-level service entry function (ffi_service_main) that parses 52 | // incoming service arguments into Vec and passes them to user defined service 53 | // entry (my_service_main). 54 | define_windows_service!(ffi_service_main, my_service_main); 55 | 56 | pub fn my_service_main(_arguments: Vec) { 57 | if let Err(_e) = run_service() { 58 | // Handle the error, by logging or something. 59 | } 60 | } 61 | 62 | pub fn run_service() -> Result<()> { 63 | // Create a channel to be able to poll a stop event from the service worker loop. 64 | let sender = SignalSender::new(); 65 | let r = sender.new_receiver(); 66 | 67 | // Define system service event handler that will be receiving service events. 68 | let event_handler = move |control_event| -> ServiceControlHandlerResult { 69 | match control_event { 70 | // Notifies a service to report its current status information to the service 71 | // control manager. Always return NoError even if not implemented. 72 | ServiceControl::Interrogate => ServiceControlHandlerResult::NoError, 73 | 74 | // Handle stop 75 | ServiceControl::Stop => { 76 | sender.send_signal(); 77 | ServiceControlHandlerResult::NoError 78 | } 79 | 80 | _ => ServiceControlHandlerResult::NotImplemented, 81 | } 82 | }; 83 | 84 | // Register system service event handler. 85 | // The returned status handle should be used to report service status changes to the system. 
86 | let status_handle = service_control_handler::register(SERVICE_NAME, event_handler)?; 87 | 88 | // Tell the system that service is running 89 | status_handle.set_service_status(ServiceStatus { 90 | service_type: SERVICE_TYPE, 91 | current_state: ServiceState::Running, 92 | controls_accepted: ServiceControlAccept::STOP, 93 | exit_code: ServiceExitCode::Win32(0), 94 | checkpoint: 0, 95 | wait_hint: Duration::from_secs(60), 96 | process_id: None, 97 | })?; 98 | 99 | let mut rt = tokio::runtime::Runtime::new().unwrap(); 100 | 101 | let loglevel = rt.block_on(crate::determine_log_level()); 102 | std::env::set_var("RUST_LOG", &loglevel.to_string()); 103 | winlog::init("Ebbflow Service Log").unwrap(); 104 | 105 | let existcode = rt.block_on(async move { 106 | match super::realmain(r).await { 107 | Ok(()) => { 108 | info!("Daemon shutting down"); 109 | 0 110 | } 111 | Err(e) => { 112 | error!("Error in daemon {}", e); 113 | 1 114 | } 115 | } 116 | }); 117 | 118 | // loop { 119 | // // Poll shutdown event. 120 | // match shutdown_rx.recv_timeout(Duration::from_secs(1)) { 121 | // // Break the loop either upon stop or channel disconnect 122 | // Ok(_) | Err(mpsc::RecvTimeoutError::Disconnected) => break, 123 | 124 | // // Continue work if no events were received within the timeout 125 | // Err(mpsc::RecvTimeoutError::Timeout) => (), 126 | // }; 127 | // } 128 | 129 | // Tell the system that service has stopped. 
130 | status_handle.set_service_status(ServiceStatus { 131 | service_type: SERVICE_TYPE, 132 | current_state: ServiceState::Stopped, 133 | controls_accepted: ServiceControlAccept::empty(), 134 | exit_code: ServiceExitCode::Win32(existcode), 135 | checkpoint: 0, 136 | wait_hint: Duration::default(), 137 | process_id: None, 138 | })?; 139 | 140 | Ok(()) 141 | } 142 | } 143 | 144 | #[cfg(not(windows))] 145 | #[tokio::main] 146 | async fn main() -> Result<(), ()> { 147 | let loglevel = crate::determine_log_level().await; 148 | 149 | env_logger::builder() 150 | .filter_level(loglevel) 151 | .filter_module("rustls", log::LevelFilter::Error) // This baby gets noisy at lower levels 152 | .init(); 153 | 154 | let sender = SignalSender::new(); 155 | let r = sender.new_receiver(); 156 | 157 | match realmain(r).await { 158 | Ok(_) => { 159 | info!("Daemon exiting"); 160 | Ok(()) 161 | } 162 | Err(e) => { 163 | warn!("Daemon exited with error: {:?}", e); 164 | eprintln!("Daemon exited with error: {:?}", e); 165 | Err(()) 166 | } 167 | } 168 | } 169 | 170 | async fn determine_log_level() -> LevelFilter { 171 | let leveloption = match std::env::var("EBB_LOG_LEVEL").ok() { 172 | Some(l) => Some(l), 173 | None => match config_reload().await { 174 | Ok((Some(cfg), _)) => cfg.loglevel, 175 | _ => None, 176 | }, 177 | }; 178 | use std::str::FromStr; 179 | 180 | match leveloption { 181 | Some(s) => match LevelFilter::from_str(&s) { 182 | Ok(lf) => lf, 183 | Err(_) => crate::DEFAULT_LEVEL, 184 | }, 185 | None => crate::DEFAULT_LEVEL, 186 | } 187 | } 188 | 189 | async fn realmain(mut wait: SignalReceiver) -> Result<(), String> { 190 | info!("Config file dir {}", config_path_root()); 191 | 192 | let notify = Arc::new(Notify::new()); 193 | let notifyc = notify.clone(); 194 | let mut watcher: RecommendedWatcher = 195 | match Watcher::new_immediate(move |res: Result| { 196 | trace!("Received a notification"); 197 | match res { 198 | Ok(event) => match event.kind { 199 | EventKind::Create(_) 
| EventKind::Modify(_) => { 200 | debug!("Received a notication for create or modify"); 201 | notifyc.notify(); 202 | } 203 | _ => { 204 | trace!( 205 | "Received notication for an event that we don't care about {:#?}", 206 | event 207 | ); 208 | } 209 | }, 210 | Err(e) => { 211 | panic!("Error listening for file events {:?}", e); 212 | } 213 | } 214 | }) { 215 | Ok(x) => x, 216 | Err(e) => return Err(format!("Error creating new file watcher {:?}", e)), 217 | }; 218 | 219 | // We only care about mutations 220 | if let Err(e) = watcher.configure(Config::PreciseEvents(true)) { 221 | return Err(format!( 222 | "Unable to set file event configuration options (precise) {:?}", 223 | e 224 | )); 225 | } 226 | 227 | // Add a path to be watched. All files and directories at that path and 228 | // below will be monitored for changes. 229 | if let Err(e) = watcher.watch(config_path_root(), RecursiveMode::Recursive) { 230 | return Err(format!( 231 | "Unable to set file event configuration options {:?}", 232 | e 233 | )); 234 | } 235 | 236 | let sharedinfo = match ( 237 | std::env::var("UNSTABLE_EBB_ADDR").ok(), 238 | std::env::var("UNSTABLE_EBB_DNS").ok(), 239 | ) { 240 | (Some(addr), Some(dns)) => { 241 | let addr = match addr.parse() { 242 | Ok(x) => x, 243 | Err(e) => { 244 | return Err(format!( 245 | "Error creating ipv4 addr from overriden value {} {:?}", 246 | addr, e 247 | )) 248 | } 249 | }; 250 | Arc::new( 251 | match SharedInfo::new_with_ebbflow_overrides(addr, dns.to_string(), ROOTS.clone()) 252 | .await 253 | { 254 | Ok(x) => x, 255 | Err(e) => return Err(format!("Error creating daemon settings {:?}", e)), 256 | }, 257 | ) 258 | } 259 | _ => Arc::new(match SharedInfo::new().await { 260 | Ok(x) => x, 261 | Err(e) => return Err(format!("Error creating daemon settings {:?}", e)), 262 | }), 263 | }; 264 | 265 | let runner = run_daemon(sharedinfo, Box::pin(config_reload), notify).await; 266 | let runnerc = runner.clone(); 267 | 268 | // Spawn the server that produces 
info about the daemon 269 | tokio::spawn(run_info_server(runnerc)); 270 | 271 | wait.wait().await; 272 | Ok(()) 273 | } 274 | 275 | pub fn config_reload( 276 | ) -> BoxFuture<'static, Result<(Option, Option), ConfigError>> { 277 | Box::pin(async { 278 | debug!("Will read config file, {}", config_file_full()); 279 | let cfg = match EbbflowDaemonConfig::load_from_file().await { 280 | Ok(c) => Some(c), 281 | Err(ref e) if &ConfigError::Empty == e => None, 282 | Err(e) => return Err(e), 283 | }; 284 | debug!( 285 | "Config file parsed successfully, now trying key file {}", 286 | key_file_full() 287 | ); 288 | let key = match getkey().await { 289 | Ok(k) => Some(k), 290 | Err(e) => match e { 291 | ConfigError::Empty | ConfigError::FileNotFound => None, 292 | _ => return Err(e), 293 | }, 294 | }; 295 | debug!("Found key: {}", key.is_some()); 296 | Ok((cfg, key)) 297 | }) 298 | } 299 | -------------------------------------------------------------------------------- /src/infoserver.rs: -------------------------------------------------------------------------------- 1 | use crate::{config::addr_file_full, DaemonRunner}; 2 | use hyper::service::{make_service_fn, service_fn}; 3 | use hyper::{Body, Request, Response, Server}; 4 | use std::sync::Arc; 5 | use std::{convert::Infallible, net::SocketAddr, time::Duration}; 6 | 7 | pub async fn run_info_server(runner: Arc) { 8 | loop { 9 | debug!("Starting info server.."); 10 | // Let the OS pick an address 11 | let addr = SocketAddr::from(([127, 0, 0, 1], 0)); 12 | // And a MakeService to handle each connection... 13 | let runnerc = runner.clone(); 14 | let make_service = make_service_fn(move |_conn| { 15 | let runnerc = runnerc.clone(); 16 | async move { 17 | Ok::<_, Infallible>(service_fn(move |req| { 18 | let runnerccc = runnerc.clone(); 19 | async move { handle(req, runnerccc).await } 20 | })) 21 | } 22 | }); 23 | 24 | // Then bind and serve... 
25 | let addr = match Server::try_bind(&addr) { 26 | Ok(r) => r, 27 | Err(e) => { 28 | error!("Error spawning info server {:?}", e); 29 | tokio::time::delay_for(Duration::from_millis(5_000)).await; 30 | continue; 31 | } 32 | }; 33 | let server = addr.serve(make_service); 34 | error!("Server spawning on addr {}", server.local_addr()); 35 | 36 | if let Err(e) = crate::config::write_addr(&server.local_addr().to_string()).await { 37 | error!( 38 | "Could not write the address of the daemon info server to file {} {:?}", 39 | addr_file_full(), 40 | e 41 | ); 42 | tokio::time::delay_for(Duration::from_millis(5_000)).await; 43 | continue; 44 | } 45 | 46 | // And run forever... 47 | if let Err(e) = server.await { 48 | eprintln!("server error: {}", e); 49 | } 50 | } 51 | } 52 | 53 | async fn handle( 54 | req: Request, 55 | runner: Arc, 56 | ) -> Result, Infallible> { 57 | Ok(match req.uri().path() { 58 | "/unstable_status" => { 59 | let status = runner.status().await; 60 | match serde_json::to_string_pretty(&status) { 61 | Ok(serialized) => Response::new(Body::from(serialized)), 62 | Err(e) => { 63 | warn!( 64 | "Could not serialize the status from the runner, status {:?} e {:?}", 65 | status, e 66 | ); 67 | Response::builder() 68 | .status(500) 69 | .body(Body::from("Error getting status")) 70 | .unwrap() 71 | } 72 | } 73 | } 74 | _ => Response::builder() 75 | .status(400) 76 | .body(Body::from("Invalid Request")) 77 | .unwrap(), 78 | }) 79 | } 80 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | #[macro_use] 4 | extern crate lazy_static; 5 | 6 | use crate::config::{ConfigError, EbbflowDaemonConfig, Endpoint}; 7 | use crate::daemon::connection::EndpointConnectionType; 8 | use crate::daemon::EndpointMeta; 9 | use crate::daemon::{spawn_endpoint, EndpointArgs, SharedInfo}; 10 | use daemon::HealthOverall; 11 | 
use futures::future::BoxFuture; 12 | use messagequeue::MessageQueue; 13 | use serde::{Deserialize, Serialize}; 14 | use std::collections::hash_map::Entry; 15 | use std::collections::HashMap; 16 | use std::collections::HashSet; 17 | use std::pin::Pin; 18 | use std::sync::Arc; 19 | use tokio::sync::Mutex; 20 | use tokio::sync::Notify; 21 | 22 | pub const MAX_MAX_IDLE: usize = 1000; 23 | 24 | pub mod certs; 25 | pub mod config; 26 | pub mod daemon; 27 | pub mod dns; 28 | pub mod infoserver; 29 | pub mod messagequeue; 30 | pub mod messaging; 31 | pub mod signal; 32 | 33 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 34 | pub enum DaemonStatusMeta { 35 | Uninitialized, 36 | Good, 37 | } 38 | 39 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 40 | pub struct DaemonStatus { 41 | pub meta: DaemonStatusMeta, 42 | pub endpoints: Vec<(String, DaemonEndpointStatus)>, 43 | pub ssh: Option<(String, DaemonEndpointStatus)>, 44 | pub messages: Vec<(String, String)>, 45 | } 46 | 47 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 48 | pub enum DaemonEndpointStatus { 49 | Disabled, 50 | Enabled { 51 | active: usize, 52 | idle: usize, 53 | health: Option, 54 | }, 55 | } 56 | 57 | impl DaemonEndpointStatus { 58 | fn from_ref(ed: &EnabledDisabled) -> Self { 59 | match ed { 60 | EnabledDisabled::Disabled => DaemonEndpointStatus::Disabled, 61 | EnabledDisabled::Enabled(meta) => DaemonEndpointStatus::Enabled { 62 | active: meta.num_active(), 63 | idle: meta.num_idle(), 64 | health: Some(meta.health()), 65 | }, 66 | } 67 | } 68 | } 69 | 70 | /// terribly named but its getting tough to think of words 71 | enum EnabledDisabled { 72 | Enabled(Arc), 73 | Disabled, 74 | } 75 | 76 | impl EnabledDisabled { 77 | pub fn stop(&mut self) { 78 | if let EnabledDisabled::Enabled(meta) = self { 79 | debug!("Sending signal to stop"); 80 | meta.stop(); 81 | } 82 | *self = EnabledDisabled::Disabled; 83 | } 84 | } 85 | 86 | struct EndpointInstance { 87 | 
enabledisable: EnabledDisabled, 88 | existing_config: Endpoint, 89 | } 90 | 91 | struct SshInstance { 92 | existing_config: SshConfiguration, 93 | enabledisable: EnabledDisabled, 94 | } 95 | 96 | #[derive(Debug, PartialEq)] 97 | struct SshConfiguration { 98 | port: u16, 99 | max: usize, 100 | hostname: String, 101 | enabled: bool, 102 | maxidle: usize, 103 | } 104 | 105 | pub enum EnableDisableTarget { 106 | All, 107 | Ssh, 108 | Endpoint(String), 109 | } 110 | 111 | pub struct DaemonRunner { 112 | inner: Mutex, 113 | } 114 | 115 | impl DaemonRunner { 116 | pub fn new(info: Arc) -> Self { 117 | Self { 118 | inner: Mutex::new(InnerDaemonRunner::new(info)), 119 | } 120 | } 121 | 122 | pub async fn update_config(&self, config: Option, key: Option) { 123 | let mut inner = self.inner.lock().await; 124 | inner.update_config(config, key).await; 125 | } 126 | 127 | pub async fn status(&self) -> DaemonStatus { 128 | let inner = self.inner.lock().await; 129 | inner.status() 130 | } 131 | 132 | pub async fn submit_error_message(&self, message: String) { 133 | self.inner.lock().await.submit_error_message(message); 134 | } 135 | } 136 | 137 | struct InnerDaemonRunner { 138 | endpoints: HashMap, 139 | statusmeta: DaemonStatusMeta, 140 | ssh: Option, 141 | info: Arc, 142 | message_queue: Arc, 143 | } 144 | 145 | impl InnerDaemonRunner { 146 | pub fn new(info: Arc) -> Self { 147 | Self { 148 | endpoints: HashMap::new(), 149 | ssh: None, 150 | statusmeta: DaemonStatusMeta::Uninitialized, 151 | info, 152 | message_queue: Arc::new(MessageQueue::new()), 153 | } 154 | } 155 | 156 | pub fn submit_error_message(&self, message: String) { 157 | self.message_queue.add_message(message); 158 | } 159 | 160 | pub async fn update_config( 161 | &mut self, 162 | config: Option, 163 | key: Option, 164 | ) { 165 | // Update the key 166 | if let Some(k) = key { 167 | self.info.update_key(k); 168 | } 169 | 170 | // We do this so we can later info.key().unwrap(). 
(defense in depth) 171 | if self.info.key().is_none() { 172 | self.statusmeta = DaemonStatusMeta::Uninitialized; 173 | let e = format!( 174 | "INFO: Key not set, doing nothing (cfg present {})", 175 | config.is_some() 176 | ); 177 | self.submit_error_message(e.to_string()); 178 | error!("{}", e); 179 | return; 180 | } 181 | self.statusmeta = DaemonStatusMeta::Good; 182 | 183 | let mut config = match config { 184 | Some(c) => c, 185 | None => return, 186 | }; 187 | 188 | let mut set = HashSet::with_capacity(config.endpoints.len()); 189 | trace!("Config updating, {} endpoints", config.endpoints.len()); 190 | for e in config.endpoints.iter() { 191 | debug!("reading config endpoint {} enabled {}", e.dns, e.enabled); 192 | set.insert(e.dns.clone()); 193 | } 194 | 195 | self.endpoints.retain(|existing_dns, existing_instance| { 196 | if set.contains(existing_dns) { 197 | trace!("set compare had existing {}", existing_dns); 198 | true 199 | } else { 200 | debug!("set compare no longer had existing entry {}", existing_dns); 201 | // The set of configs does NOT have this endpoint, we need to stop it. 202 | existing_instance.enabledisable.stop(); 203 | // It will stop, new we return false to state we should remove this entry. 204 | false 205 | } 206 | }); 207 | 208 | for endpoint in config.endpoints.drain(..) 
{ 209 | match self.endpoints.entry(endpoint.dns.clone()) { 210 | Entry::Occupied(mut oe) => { 211 | // if the same, do nothing 212 | let current_instance = oe.get_mut(); 213 | 214 | if current_instance.existing_config == endpoint { 215 | debug!( 216 | "Configuration for an endpoint did not change, doing nothing {}", 217 | endpoint.dns 218 | ); 219 | // do nothing!!s 220 | } else { 221 | debug!("Configuration for an endpoint CHANGED, will stop existing and start new one {}", endpoint.dns); 222 | 223 | let newenabledisable = if endpoint.enabled { 224 | debug!("New configuration is enabled, stopping existing one and setting new one to enabled"); 225 | // Stop the existing one (may not be running anways) 226 | current_instance.enabledisable.stop(); 227 | // Create a new one 228 | let meta = spawn_endpointasdfsfa( 229 | endpoint.clone(), 230 | self.info.clone(), 231 | self.message_queue.clone(), 232 | ) 233 | .await; 234 | EnabledDisabled::Enabled(meta) 235 | } else { 236 | debug!("New configuration is DISABLED, stopping existing one and setting new one to enabled"); 237 | // stop the current one. If it wasn't running anways, then this is still OK. 
238 | current_instance.enabledisable.stop(); 239 | // we weren't running, so just return disabled 240 | EnabledDisabled::Disabled 241 | }; 242 | 243 | // Insert this new config 244 | oe.insert(EndpointInstance { 245 | enabledisable: newenabledisable, 246 | existing_config: endpoint, 247 | }); 248 | } 249 | } 250 | Entry::Vacant(ve) => { 251 | debug!("Configuration for an endpoint that did NOT previously exist found, will create it {}", endpoint.dns); 252 | let enabledisable = if endpoint.enabled { 253 | EnabledDisabled::Enabled( 254 | spawn_endpointasdfsfa( 255 | endpoint.clone(), 256 | self.info.clone(), 257 | self.message_queue.clone(), 258 | ) 259 | .await, 260 | ) 261 | } else { 262 | EnabledDisabled::Disabled 263 | }; 264 | ve.insert(EndpointInstance { 265 | enabledisable, 266 | existing_config: endpoint, 267 | }); 268 | } 269 | } 270 | } 271 | 272 | match (config.ssh, &mut self.ssh) { 273 | (Some(newcfg), ssh) => { 274 | let newconfig = SshConfiguration { 275 | port: newcfg.port, 276 | max: newcfg.maxconns as usize, 277 | hostname: newcfg.hostname_override.unwrap_or_else(hostname_or_die), 278 | enabled: newcfg.enabled, 279 | maxidle: newcfg.maxidle as usize, 280 | }; 281 | 282 | if ssh.is_none() || newconfig != ssh.as_ref().unwrap().existing_config { 283 | if let Some(instance) = ssh { 284 | instance.enabledisable.stop(); 285 | } 286 | let enabledisabled = if newconfig.enabled { 287 | // start the new one and set it 288 | let args = EndpointArgs { 289 | ctype: EndpointConnectionType::Ssh, 290 | idleconns: newconfig.maxidle, 291 | maxconns: newconfig.max, 292 | endpoint: newconfig.hostname.clone(), 293 | port: newconfig.port, 294 | message_queue: self.message_queue.clone(), 295 | healthcheck: None, 296 | }; 297 | 298 | let meta = spawn_endpoint(self.info.clone(), args).await; 299 | EnabledDisabled::Enabled(meta) 300 | } else { 301 | EnabledDisabled::Disabled 302 | }; 303 | 304 | self.ssh = Some(SshInstance { 305 | existing_config: newconfig, 306 | 
enabledisable: enabledisabled, 307 | }); 308 | } 309 | // else they are equal so do nothing 310 | } 311 | (None, Some(oldcfg)) => { 312 | oldcfg.enabledisable.stop(); 313 | self.ssh = None; 314 | } 315 | (None, None) => {} 316 | } 317 | } 318 | 319 | pub fn status(&self) -> DaemonStatus { 320 | let ssh = match &self.ssh { 321 | Some(sshinstance) => Some(( 322 | sshinstance.existing_config.hostname.clone(), 323 | DaemonEndpointStatus::from_ref(&sshinstance.enabledisable), 324 | )), 325 | None => None, 326 | }; 327 | let e = self 328 | .endpoints 329 | .iter() 330 | .map(|(s, e)| (s.clone(), DaemonEndpointStatus::from_ref(&e.enabledisable))) 331 | .collect(); 332 | 333 | DaemonStatus { 334 | meta: self.statusmeta, 335 | endpoints: e, 336 | ssh, 337 | messages: self.message_queue.get_messages(), 338 | } 339 | } 340 | } 341 | 342 | pub async fn spawn_endpointasdfsfa( 343 | e: crate::config::Endpoint, 344 | info: Arc, 345 | message_queue: Arc, 346 | ) -> Arc { 347 | let port = e.port; 348 | 349 | let idle = e.maxidle; 350 | let idle = std::cmp::min(idle, MAX_MAX_IDLE as u16); 351 | 352 | let args = EndpointArgs { 353 | ctype: EndpointConnectionType::Tls, 354 | idleconns: idle as usize, 355 | maxconns: e.maxconns as usize, 356 | endpoint: e.dns, 357 | port, 358 | message_queue, 359 | healthcheck: e.healthcheck, 360 | }; 361 | 362 | spawn_endpoint(info, args).await 363 | } 364 | 365 | #[allow(clippy::type_complexity)] 366 | pub async fn run_daemon( 367 | info: Arc, 368 | cfg_reload: Pin< 369 | Box< 370 | dyn Fn() -> BoxFuture< 371 | 'static, 372 | Result<(Option, Option), ConfigError>, 373 | > + Send 374 | + Sync 375 | + 'static, 376 | >, 377 | >, 378 | cfg_notifier: Arc, 379 | ) -> Arc { 380 | let runner = Arc::new(DaemonRunner::new(info)); 381 | let runnerc = runner.clone(); 382 | 383 | let cfgrealoadfn = cfg_reload; 384 | 385 | tokio::spawn(async move { 386 | loop { 387 | match cfgrealoadfn().await { 388 | Ok((newconfig, newkey)) => { 389 | debug!("New config loaded 
successfully"); 390 | runnerc.update_config(newconfig, newkey).await; 391 | debug!("New config applied"); 392 | } 393 | Err(e) => { 394 | let e = format!("Error reading new configuration {:?}", e); 395 | warn!("{}", e); 396 | runnerc.submit_error_message(e).await; 397 | } 398 | } 399 | trace!("Now waiting for notification"); 400 | cfg_notifier.notified().await; 401 | trace!("Got a notification"); 402 | } 403 | }); 404 | 405 | runner 406 | } 407 | 408 | pub fn hostname_or_die() -> String { 409 | match hostname::get() { 410 | Ok(s) => { 411 | match s.to_str() { 412 | Some(s) => s.to_string(), 413 | None => { 414 | eprintln!("Error retrieving the hostname from the OS, could not turn {:?} into String", s); 415 | error!("Error retrieving the hostname from the OS, could not turn {:?} into String", s); 416 | std::process::exit(1); 417 | } 418 | } 419 | } 420 | Err(e) => { 421 | eprintln!("Error retrieving the hostname from the OS {:?}", e); 422 | error!("Error retrieving the hostname from the OS {:?}", e); 423 | std::process::exit(1); 424 | } 425 | } 426 | } 427 | -------------------------------------------------------------------------------- /src/messagequeue.rs: -------------------------------------------------------------------------------- 1 | use parking_lot::RwLock; 2 | use std::collections::VecDeque; 3 | 4 | #[derive(Debug)] 5 | pub struct MessageQueue { 6 | q: RwLock>, 7 | } 8 | 9 | impl MessageQueue { 10 | pub fn new() -> Self { 11 | Self { 12 | q: RwLock::new(VecDeque::new()), 13 | } 14 | } 15 | 16 | pub fn now() -> String { 17 | let now = chrono::offset::Utc::now(); 18 | let formatted = now.format("%v %T %Z"); 19 | formatted.to_string() 20 | } 21 | 22 | pub fn add_message(&self, m: String) { 23 | let mut qq = self.q.write(); 24 | qq.push_front((Self::now(), m)); 25 | qq.truncate(25); 26 | } 27 | 28 | pub fn get_messages(&self) -> Vec<(String, String)> { 29 | let mut cloned = { 30 | let qq = self.q.read(); 31 | qq.clone() 32 | }; 33 | 34 | 
cloned.drain(..).collect()
    }
}
-------------------------------------------------------------------------------- /src/messaging.rs: --------------------------------------------------------------------------------
use std::collections::HashMap;

/// Daemon version, stamped into every `HelloV0` so the server can see it.
/// (`'static` is implied on consts — clippy::redundant_static_lifetimes.)
const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Errors produced while encoding or decoding wire messages.
#[derive(Debug)]
pub enum MessageError {
    /// The payload could not be (de)serialized.
    Parse,
    /// The message id on the wire is not one we recognize.
    UnknownMessage,
    Internal(&'static str),
}

/// Every message exchanged between the client daemon and Ebbflow.
#[derive(Debug, PartialEq)]
pub enum Message {
    /// Client -> Ebbflow - Hi, this connection is for this endpoint (TLS or SSH)
    HelloV0(HelloV0),
    StatusRequestV0,
    StatusResponseV0(StatusResponseV0),
    EnableDisableRequestV0(EnableDisableRequestV0),
    EnableDisableResponseV0,
    HelloResponseV0(HelloResponseV0),
    StartTrafficV0,
    StartTrafficResponseV0(StartTrafficResponseV0),
}

impl From<serde_cbor::Error> for MessageError {
    fn from(_e: serde_cbor::Error) -> Self {
        MessageError::Parse
    }
}

impl Message {
    /// Serializes to the wire format:
    /// length of message and payload u32 (4 + payload len)
    /// message id u32
    /// payload
    pub fn to_wire_message(&self) -> Result<Vec<u8>, MessageError> {
        let mut message_id = self.message_id().to_be_bytes().to_vec();
        let mut payload = self.payload()?;
        let len: u32 = message_id.len() as u32 + payload.len() as u32;

        let mut message = len.to_be_bytes().to_vec();
        message.append(&mut message_id);
        message.append(&mut payload);

        Ok(message)
    }

    /// Give this the buffer WITHOUT the length prefix, e.g. message id then payload.
    pub fn from_wire_without_the_length_prefix(buf: &[u8]) -> Result<Message, MessageError> {
        // Need at least the 4-byte message id.
        if buf.len() < 4 {
            return Err(MessageError::Parse);
        }

        let mut messagebuf: [u8; 4] = [0; 4];
        messagebuf.copy_from_slice(&buf[0..4]);

        let messageid: u32 = u32::from_be_bytes(messagebuf);

        let payload = &buf[4..];

        // Ids must stay in sync with `message_id` below.
        match messageid {
            1 => Ok(Message::HelloV0(serde_cbor::from_slice(payload)?)),
            2 => Ok(Message::StatusRequestV0),
            3 => Ok(Message::StatusResponseV0(serde_cbor::from_slice(payload)?)),
            4 => Ok(Message::EnableDisableRequestV0(serde_cbor::from_slice(
                payload,
            )?)),
            5 => Ok(Message::EnableDisableResponseV0),
            6 => Ok(Message::HelloResponseV0(serde_cbor::from_slice(payload)?)),
            7 => Ok(Message::StartTrafficV0),
            8 => Ok(Message::StartTrafficResponseV0(serde_cbor::from_slice(
                payload,
            )?)),
            _ => Err(MessageError::UnknownMessage),
        }
    }

    /// CBOR-encodes this variant's payload; empty for payload-less messages.
    fn payload(&self) -> Result<Vec<u8>, MessageError> {
        use Message::*;
        Ok(match self {
            HelloV0(x) => serde_cbor::to_vec(&x)?,
            StatusRequestV0 => vec![],
            StatusResponseV0(x) => serde_cbor::to_vec(&x)?,
            EnableDisableRequestV0(x) => serde_cbor::to_vec(&x)?,
            EnableDisableResponseV0 => vec![],
            HelloResponseV0(x) => serde_cbor::to_vec(&x)?,
            StartTrafficV0 => vec![],
            StartTrafficResponseV0(x) => serde_cbor::to_vec(&x)?,
        })
    }

    /// Stable numeric id written to the wire; must stay in sync with
    /// `from_wire_without_the_length_prefix`.
    fn message_id(&self) -> u32 {
        use Message::*;
        match self {
            HelloV0(_) => 1,
            StatusRequestV0 => 2,
            StatusResponseV0(_) => 3,
            EnableDisableRequestV0(_) => 4,
            EnableDisableResponseV0 => 5,
            HelloResponseV0(_) => 6,
            StartTrafficV0 => 7,
            StartTrafficResponseV0(_) => 8,
        }
    }
}

/// Which kind of endpoint a connection serves.
#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, PartialEq)]
pub enum EndpointType {
    Ssh,
    Tls,
}

/// This message is a
ServerHello v0. 113 | #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)] 114 | pub struct HelloV0 { 115 | /// The key 116 | pub key: String, 117 | /// The type of the endpoint for this connection 118 | pub endpoint_type: EndpointType, 119 | /// e.g. test.mywebsite.com if Tls, or my-hostname if Ssh 120 | pub endpoint_value: String, 121 | /// random KV pairs 122 | pub meta: HashMap, 123 | } 124 | 125 | impl HelloV0 { 126 | pub fn new(key: String, endpoint: EndpointType, e: String) -> HelloV0 { 127 | HelloV0 { 128 | key, 129 | endpoint_type: endpoint, 130 | endpoint_value: e, 131 | // Important: Never add customer-identifying data or here, privacy is extremely important 132 | meta: { 133 | let mut map = HashMap::new(); 134 | map.insert("version".to_string(), VERSION.to_string()); 135 | map 136 | }, 137 | } 138 | } 139 | } 140 | 141 | #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)] 142 | pub enum EnableDisableTarget { 143 | AllEverything, 144 | AllEndpoints, 145 | Ssh, 146 | SpecificEndpoints(Vec), 147 | } 148 | 149 | /// `should_be_running` dictates what this message should do, and then the rest of the args 150 | /// specify which endpoints are targeted by the request. 151 | #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)] 152 | pub struct EnableDisableRequestV0 { 153 | pub should_be_running: bool, 154 | pub target: EnableDisableTarget, 155 | } 156 | 157 | #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)] 158 | pub struct StatusResponseV0 { 159 | pub statuses: Vec, 160 | } 161 | 162 | #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)] 163 | pub struct StatusV0 { 164 | /// ssh or tls 165 | pub endpoint_type: EndpointType, 166 | /// e.g. myhost or test.mysite.com 167 | pub endpoint_value: String, 168 | /// is this bad boi enabled? 
169 | pub enabled: bool, 170 | } 171 | 172 | #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)] 173 | pub enum HelloResponseIssue { 174 | NotFound, 175 | Forbidden, 176 | BadRequest, 177 | } 178 | 179 | #[derive(Debug, serde::Serialize, serde::Deserialize, PartialEq)] 180 | pub struct HelloResponseV0 { 181 | pub issue: Option, 182 | } 183 | 184 | #[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, PartialEq)] 185 | pub struct StartTrafficResponseV0 { 186 | pub open_local_success_ready: bool, 187 | } 188 | 189 | #[cfg(test)] 190 | mod tests { 191 | // Note this useful idiom: importing names from outer (for mod tests) scope. 192 | use super::*; 193 | 194 | #[test] 195 | fn to_from_wire() { 196 | let key = "testkey"; 197 | let etype = EndpointType::Ssh; 198 | let endpoint = "myhostname"; 199 | let meta = HashMap::new(); 200 | 201 | let hv0 = HelloV0 { 202 | key: key.to_string(), 203 | endpoint_type: etype, 204 | endpoint_value: endpoint.to_string(), 205 | meta, 206 | }; 207 | 208 | let message = Message::HelloV0(hv0); 209 | let wire_message = message.to_wire_message().unwrap(); 210 | 211 | let mut lenbuf = [0; 4]; 212 | lenbuf.copy_from_slice(&wire_message[0..4]); 213 | let len: u32 = u32::from_be_bytes(lenbuf); 214 | 215 | // Verify the length we parsed from the wire_message is equal to the wire_message's buf len minus 4. 216 | assert_eq!(wire_message.len() - 4, len as usize); 217 | 218 | let parsed_from_wire = 219 | Message::from_wire_without_the_length_prefix(&wire_message[4..]).unwrap(); 220 | 221 | // The two messages should be equal! 
222 | assert_eq!(message, parsed_from_wire); 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /src/signal.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use tokio::sync::watch::{channel, Receiver, Sender}; 3 | 4 | #[derive(Clone)] 5 | pub struct SignalSender { 6 | sender: Arc>, 7 | receiver: Receiver, 8 | } 9 | 10 | impl SignalSender { 11 | #[allow(clippy::new_without_default)] 12 | pub fn new() -> Self { 13 | let (s, r) = channel(false); 14 | Self { 15 | sender: Arc::new(s), 16 | receiver: r, 17 | } 18 | } 19 | 20 | pub fn new_receiver(&self) -> SignalReceiver { 21 | SignalReceiver::new(self.receiver.clone()) 22 | } 23 | 24 | pub fn send_signal(&self) { 25 | let _ = self.sender.broadcast(true); 26 | } 27 | } 28 | 29 | #[derive(Clone)] 30 | pub struct SignalReceiver { 31 | receiver: Receiver, 32 | } 33 | 34 | impl SignalReceiver { 35 | fn new(receiver: Receiver) -> Self { 36 | Self { receiver } 37 | } 38 | 39 | pub async fn wait(&mut self) { 40 | loop { 41 | let r = self.receiver.recv().await; 42 | match r { 43 | None => break, 44 | Some(b) => { 45 | if b { 46 | break; 47 | } 48 | } 49 | } 50 | } 51 | } 52 | } 53 | 54 | #[cfg(test)] 55 | mod signaltests { 56 | use super::SignalSender; 57 | use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; 58 | use std::sync::Arc; 59 | use std::time::Duration; 60 | use tokio::time::delay_for; 61 | 62 | #[tokio::test] 63 | async fn basic() { 64 | let s = SignalSender::new(); 65 | let mut r = s.new_receiver(); 66 | 67 | let flipper = Arc::new(AtomicBool::new(false)); 68 | let flipperc = flipper.clone(); 69 | 70 | let _ = tokio::spawn(async move { 71 | r.wait().await; 72 | flipperc.store(true, Ordering::SeqCst); 73 | }); 74 | 75 | delay_for(Duration::from_millis(50)).await; 76 | assert!(!flipper.load(Ordering::SeqCst)); 77 | s.send_signal(); 78 | delay_for(Duration::from_millis(50)).await; 79 | 
assert!(flipper.load(Ordering::SeqCst)); 80 | } 81 | 82 | #[tokio::test] 83 | async fn basic_cloned_receivers() { 84 | let s = SignalSender::new(); 85 | let r = s.new_receiver(); 86 | 87 | let n = 4; 88 | 89 | let flipper = Arc::new(AtomicUsize::new(0)); 90 | 91 | for _i in 0..n { 92 | let flipperc = flipper.clone(); 93 | let mut rc = r.clone(); 94 | tokio::spawn(async move { 95 | rc.wait().await; 96 | flipperc.fetch_add(1, Ordering::SeqCst); 97 | }); 98 | } 99 | 100 | delay_for(Duration::from_millis(50)).await; 101 | assert_eq!(0, flipper.load(Ordering::SeqCst)); 102 | s.send_signal(); 103 | delay_for(Duration::from_millis(50)).await; 104 | assert_eq!(n, flipper.load(Ordering::SeqCst)); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /tests/basic.rs: -------------------------------------------------------------------------------- 1 | mod mockebb; 2 | #[macro_use] 3 | extern crate log; 4 | 5 | #[cfg(test)] 6 | mod basic_tests_v0 { 7 | use crate::mockebb::listen_and_process; 8 | use crate::mockebb::load_root; 9 | use ebbflow::{ 10 | config::{ConfigError, EbbflowDaemonConfig, Endpoint, HealthCheck, HealthCheckType, Ssh}, 11 | daemon::{HealthOverall, SharedInfo}, 12 | run_daemon, DaemonEndpointStatus, DaemonRunner, DaemonStatusMeta, 13 | }; 14 | use futures::future::BoxFuture; 15 | use std::sync::Arc; 16 | use std::time::Duration; 17 | use tokio::net::TcpListener; 18 | use tokio::net::TcpStream; 19 | use tokio::prelude::*; 20 | use tokio::sync::Mutex; 21 | use tokio::{sync::Notify, time::delay_for}; 22 | 23 | const MOCKEBBSPAWNDELAY: Duration = Duration::from_millis(100); 24 | 25 | #[tokio::test] 26 | async fn basic_bytes() { 27 | // logger(); 28 | let testclientport = 49193; 29 | let customerport = 49194; 30 | let serverport = 49195; 31 | 32 | tokio::spawn(listen_and_process(customerport, testclientport)); 33 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 34 | info!("Spawned ebb"); 35 | 36 | let (_notify, 
_arcmutex, _) = 37 | start_basic_daemon(testclientport, ezconfigendpoitnonly(serverport as u16)).await; 38 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 39 | info!("Spawned daemon"); 40 | 41 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport)); 42 | info!("Spawned server"); 43 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 44 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 45 | .await 46 | .unwrap(); 47 | info!("Connected"); 48 | 49 | let mut server = serverconnhandle.await.unwrap().unwrap(); 50 | 51 | // at this point, we have the customer conn and server conn, let's send some bytes. 52 | let writeme: [u8; 102] = [1; 102]; 53 | customer.write_all(&writeme[..]).await.unwrap(); 54 | info!("Wrote Customer Stuff"); 55 | 56 | let mut readme: [u8; 102] = [0; 102]; 57 | server.read_exact(&mut readme[..]).await.unwrap(); 58 | info!("Read Server Stuff"); 59 | 60 | assert_eq!(readme[..], writeme[..]); 61 | } 62 | 63 | #[allow(unused)] 64 | fn logger() { 65 | env_logger::builder() 66 | .filter_level(log::LevelFilter::Debug) 67 | .init(); 68 | } 69 | 70 | #[tokio::test] 71 | async fn two_connections() { 72 | //logger(); 73 | let testclientport = 49196; 74 | let customerport = 49197; 75 | let serverport = 49198; 76 | 77 | tokio::spawn(listen_and_process(customerport, testclientport)); 78 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 79 | info!("Spawned ebb"); 80 | 81 | let (_notify, _arcmutex, _) = 82 | start_basic_daemon(testclientport, ezconfigendpoitnonly(serverport as u16)).await; 83 | info!("Spawned daemon"); 84 | 85 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport)); 86 | info!("Spawned server"); 87 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 88 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 89 | .await 90 | .unwrap(); 91 | info!("Connected"); 92 | 93 | let mut server = serverconnhandle.await.unwrap().unwrap(); 94 | 95 | // at this 
point, we have the customer conn and server conn, let's send some bytes. 96 | let writeme: [u8; 102] = [1; 102]; 97 | customer.write_all(&writeme[..]).await.unwrap(); 98 | info!("Wrote Customer Stuff"); 99 | 100 | let mut readme: [u8; 102] = [0; 102]; 101 | server.read_exact(&mut readme[..]).await.unwrap(); 102 | info!("Read Server Stuff"); 103 | 104 | assert_eq!(readme[..], writeme[..]); 105 | 106 | let serverconnhandle2 = tokio::spawn(get_one_proxied_connection(serverport)); 107 | info!("spawned second server"); 108 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 109 | let mut customer2 = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 110 | .await 111 | .unwrap(); 112 | info!("Connected 2"); 113 | let mut server2 = serverconnhandle2.await.unwrap().unwrap(); 114 | info!("both now ready"); 115 | 116 | let writeme: [u8; 102] = [1; 102]; 117 | customer2.write_all(&writeme[..]).await.unwrap(); 118 | info!("Wrote Customer2 Stuff"); 119 | 120 | let mut readme: [u8; 102] = [0; 102]; 121 | server2.read_exact(&mut readme[..]).await.unwrap(); 122 | info!("Read Server2 Stuff"); 123 | assert_eq!(readme[..], writeme[..]); 124 | 125 | // Test the other stuff is still going. 
126 | let writeme: [u8; 102] = [1; 102]; 127 | customer.write_all(&writeme[..]).await.unwrap(); 128 | info!("Wrote Customer Stuff again"); 129 | 130 | let mut readme: [u8; 102] = [0; 102]; 131 | server.read_exact(&mut readme[..]).await.unwrap(); 132 | info!("Read Server Stuff again"); 133 | 134 | assert_eq!(readme[..], writeme[..]); 135 | } 136 | 137 | #[tokio::test] 138 | async fn basic_bytes_ssh() { 139 | // logger(); 140 | let testclientport = 49183; 141 | let customerport = 49184; 142 | let serverport = 49185; 143 | 144 | tokio::spawn(listen_and_process(customerport, testclientport)); 145 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 146 | info!("Spawned ebb"); 147 | 148 | let x = "testhostname.isawaseoma-fasdf.adf1".to_string(); 149 | 150 | let cfg = EbbflowDaemonConfig { 151 | endpoints: vec![], 152 | ssh: Some(Ssh { 153 | port: serverport, // We need to override the SSH port or else it will hit the actual ssh server the host 154 | hostname_override: Some(x), 155 | enabled: true, 156 | maxconns: 100, 157 | maxidle: 2, 158 | }), 159 | loglevel: None, 160 | }; 161 | 162 | let (_notify, _arcmutex, _) = start_basic_daemon(testclientport, cfg).await; 163 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 164 | info!("Spawned daemon"); 165 | 166 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport as usize)); 167 | info!("Spawned server"); 168 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 169 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 170 | .await 171 | .unwrap(); 172 | info!("Connected"); 173 | 174 | let mut server = serverconnhandle.await.unwrap().unwrap(); 175 | 176 | // at this point, we have the customer conn and server conn, let's send some bytes. 
177 | let writeme: [u8; 102] = [1; 102]; 178 | customer.write_all(&writeme[..]).await.unwrap(); 179 | info!("Wrote Customer Stuff"); 180 | 181 | let mut readme: [u8; 102] = [0; 102]; 182 | server.read_exact(&mut readme[..]).await.unwrap(); 183 | info!("Read Server Stuff"); 184 | 185 | assert_eq!(readme[..], writeme[..]); 186 | } 187 | 188 | #[tokio::test] 189 | async fn endpoint_start_disabled_fails_then_enable_works() { 190 | // logger(); 191 | let testclientport = 49543; 192 | let customerport = 49544; 193 | let serverport = 49545; 194 | 195 | tokio::spawn(listen_and_process(customerport, testclientport)); 196 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 197 | info!("Spawned ebb"); 198 | 199 | let mut initial_endpoint = ezconfigendpoitnonly(serverport as u16); 200 | initial_endpoint.endpoints.get_mut(0).unwrap().enabled = false; 201 | 202 | let (notify, arcmutex, _) = start_basic_daemon(testclientport, initial_endpoint).await; 203 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 204 | info!("Spawned daemon"); 205 | 206 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport)); 207 | info!("Spawned server early that should return successfully later"); 208 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 209 | 210 | // We should not be able to connect 211 | let should_err = { 212 | match TcpStream::connect(format!("127.0.0.1:{}", customerport)).await { 213 | Ok(mut s) => { 214 | info!("Connected Client"); 215 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 216 | // We should be disconnected soon, or not be able to write 217 | let _r = s.write(&[0; 4][..]).await; 218 | info!("Wrote Customer Stuff"); 219 | let mut buf = vec![0; 10]; 220 | let r = s.read(&mut buf[..]).await; 221 | info!("Read Customer Stuff {:?}", r); 222 | if let Ok(0) = r { 223 | Err(std::io::Error::from(std::io::ErrorKind::NotConnected)) 224 | } else { 225 | r 226 | } 227 | } 228 | Err(e) => Err(e), 229 | } 230 | }; 231 | assert!(should_err.is_err()); 232 | 233 | 
let mut cfg = ezconfigendpoitnonly(serverport as u16); 234 | cfg.endpoints.get_mut(0).unwrap().enabled = true; 235 | { 236 | let mut x = arcmutex.lock().await; 237 | *x = cfg; 238 | } 239 | notify.notify(); 240 | tokio::time::delay_for(MOCKEBBSPAWNDELAY * 2).await; 241 | 242 | // at this point, we have the customer conn and server conn, let's send some bytes. 243 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 244 | .await 245 | .unwrap(); 246 | info!("Connected Client that should succeed"); 247 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 248 | let mut server = serverconnhandle.await.unwrap().unwrap(); // we spawned ONE Accept thing earlier, it should have NOT resolved and only NOW resolves once we connect again 249 | let writeme: [u8; 10212] = [5; 10212]; 250 | customer.write_all(&writeme[..]).await.unwrap(); 251 | info!("Wrote Customer Stuff should succeed"); 252 | let mut readme: [u8; 10212] = [0; 10212]; 253 | server.read_exact(&mut readme[..]).await.unwrap(); 254 | info!("Read Server Stuff should succeed"); 255 | assert_eq!(readme[..], writeme[..]); 256 | } 257 | 258 | #[tokio::test] 259 | async fn endpoint_start_enabled_disable_reenable() { 260 | // logger(); 261 | 262 | let testclientport = 49143; 263 | let customerport = 49144; 264 | let serverport = 49145; 265 | 266 | tokio::spawn(listen_and_process(customerport, testclientport)); 267 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 268 | info!("Spawned ebb"); 269 | 270 | let (notify, arcmutex, _) = 271 | start_basic_daemon(testclientport, ezconfigendpoitnonly(serverport as u16)).await; 272 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 273 | info!("Spawned daemon"); 274 | 275 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport)); 276 | info!("Spawned server"); 277 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 278 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 279 | .await 280 | .unwrap(); 281 | 
info!("Connected I"); 282 | 283 | let mut server = serverconnhandle.await.unwrap().unwrap(); 284 | 285 | // at this point, we have the customer conn and server conn, let's send some bytes. 286 | let writeme: [u8; 102] = [1; 102]; 287 | customer.write_all(&writeme[..]).await.unwrap(); 288 | info!("Wrote Customer Stuff I"); 289 | 290 | let mut readme: [u8; 102] = [0; 102]; 291 | server.read_exact(&mut readme[..]).await.unwrap(); 292 | info!("Read Server Stuff I"); 293 | 294 | assert_eq!(readme[..], writeme[..]); 295 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport)); 296 | 297 | // Now we shut it off and assume we cannot connect 298 | let mut cfg = ezconfigendpoitnonly(serverport as u16); 299 | cfg.endpoints.get_mut(0).unwrap().enabled = false; 300 | { 301 | let mut x = arcmutex.lock().await; 302 | *x = cfg; 303 | } 304 | notify.notify(); 305 | tokio::time::delay_for(MOCKEBBSPAWNDELAY * 2).await; 306 | 307 | // we have something ready 308 | // We should not be able to connect 309 | let should_err = { 310 | match TcpStream::connect(format!("127.0.0.1:{}", customerport)).await { 311 | Ok(mut s) => { 312 | info!("Connected Client II"); 313 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 314 | // We should be disconnected soon, or not be able to write 315 | let _r = s.write(&[0; 4][..]).await; 316 | info!("Wrote Customer Stuff II"); 317 | let mut buf = vec![0; 10]; 318 | let r = s.read(&mut buf[..]).await; 319 | info!("Read Customer Stuff II {:?}", r); 320 | if let Ok(0) = r { 321 | Err(std::io::Error::from(std::io::ErrorKind::NotConnected)) 322 | } else { 323 | r 324 | } 325 | } 326 | Err(e) => Err(e), 327 | } 328 | }; 329 | assert!(should_err.is_err()); 330 | 331 | let mut cfg = ezconfigendpoitnonly(serverport as u16); 332 | cfg.endpoints.get_mut(0).unwrap().enabled = true; 333 | { 334 | let mut x = arcmutex.lock().await; 335 | *x = cfg; 336 | } 337 | notify.notify(); 338 | tokio::time::delay_for(MOCKEBBSPAWNDELAY * 2).await; 339 | 340 | 
// at this point, we have the customer conn and server conn, let's send some bytes. 341 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 342 | .await 343 | .unwrap(); 344 | info!("Connected Client III"); 345 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 346 | let mut server = serverconnhandle.await.unwrap().unwrap(); // we spawned ONE Accept thing earlier, it should have NOT resolved and only NOW resolves once we connect again 347 | let writeme: [u8; 10212] = [5; 10212]; 348 | customer.write_all(&writeme[..]).await.unwrap(); 349 | info!("Wrote Customer Stuff III"); 350 | let mut readme: [u8; 10212] = [0; 10212]; 351 | server.read_exact(&mut readme[..]).await.unwrap(); 352 | info!("Read Server Stuff III"); 353 | assert_eq!(readme[..], writeme[..]); 354 | } 355 | 356 | #[tokio::test] 357 | async fn ssh_start_enabled_disable_reenable() { 358 | // logger(); 359 | let testclientport = 43143; 360 | let customerport = 43144; 361 | let serverport = 43145; 362 | 363 | tokio::spawn(listen_and_process(customerport, testclientport)); 364 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 365 | info!("Spawned ebb"); 366 | 367 | let hostname = "asdf31".to_string(); 368 | let mut cfg = EbbflowDaemonConfig { 369 | endpoints: vec![], 370 | ssh: Some(Ssh { 371 | port: serverport, // We need to override the SSH port or else it will hit the actual ssh server the host 372 | hostname_override: Some(hostname), 373 | enabled: true, 374 | maxconns: 100, 375 | maxidle: 2, 376 | }), 377 | loglevel: None, 378 | }; 379 | 380 | let (notify, arcmutex, _) = start_basic_daemon(testclientport, cfg.clone()).await; 381 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 382 | info!("Spawned daemon"); 383 | 384 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport as usize)); 385 | info!("Spawned server"); 386 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 387 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 388 
| .await 389 | .unwrap(); 390 | info!("Connected I"); 391 | 392 | let mut server = serverconnhandle.await.unwrap().unwrap(); 393 | 394 | // at this point, we have the customer conn and server conn, let's send some bytes. 395 | let writeme: [u8; 102] = [1; 102]; 396 | customer.write_all(&writeme[..]).await.unwrap(); 397 | info!("Wrote Customer Stuff I"); 398 | 399 | let mut readme: [u8; 102] = [0; 102]; 400 | server.read_exact(&mut readme[..]).await.unwrap(); 401 | info!("Read Server Stuff I"); 402 | 403 | assert_eq!(readme[..], writeme[..]); 404 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport as usize)); 405 | 406 | // Now we shut it off and assume we cannot connect 407 | cfg.ssh.as_mut().unwrap().enabled = false; 408 | { 409 | let mut x = arcmutex.lock().await; 410 | *x = cfg.clone(); 411 | } 412 | notify.notify(); 413 | tokio::time::delay_for(MOCKEBBSPAWNDELAY * 2).await; 414 | 415 | // we have something ready 416 | // We should not be able to connect 417 | let should_err = { 418 | match TcpStream::connect(format!("127.0.0.1:{}", customerport)).await { 419 | Ok(mut s) => { 420 | info!("Connected Client II"); 421 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 422 | // We should be disconnected soon, or not be able to write 423 | let _r = s.write(&[0; 4][..]).await; 424 | info!("Wrote Customer Stuff II"); 425 | let mut buf = vec![0; 10]; 426 | let r = s.read(&mut buf[..]).await; 427 | info!("Read Customer Stuff II {:?}", r); 428 | if let Ok(0) = r { 429 | Err(std::io::Error::from(std::io::ErrorKind::NotConnected)) 430 | } else { 431 | r 432 | } 433 | } 434 | Err(e) => Err(e), 435 | } 436 | }; 437 | assert!(should_err.is_err()); 438 | 439 | cfg.ssh.as_mut().unwrap().enabled = true; 440 | { 441 | let mut x = arcmutex.lock().await; 442 | *x = cfg; 443 | } 444 | notify.notify(); 445 | tokio::time::delay_for(MOCKEBBSPAWNDELAY * 2).await; 446 | 447 | // at this point, we have the customer conn and server conn, let's send some bytes. 
448 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 449 | .await 450 | .unwrap(); 451 | info!("Connected Client III"); 452 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 453 | let mut server = serverconnhandle.await.unwrap().unwrap(); // we spawned ONE Accept thing earlier, it should have NOT resolved and only NOW resolves once we connect again 454 | let writeme: [u8; 10212] = [5; 10212]; 455 | customer.write_all(&writeme[..]).await.unwrap(); 456 | info!("Wrote Customer Stuff III"); 457 | let mut readme: [u8; 10212] = [0; 10212]; 458 | server.read_exact(&mut readme[..]).await.unwrap(); 459 | info!("Read Server Stuff III"); 460 | assert_eq!(readme[..], writeme[..]); 461 | } 462 | 463 | #[tokio::test] 464 | async fn ssh_start_disabled_fails_then_enable_works() { 465 | // logger(); 466 | let testclientport = 49443; 467 | let customerport = 49444; 468 | let serverport = 49445; 469 | 470 | tokio::spawn(listen_and_process(customerport, testclientport)); 471 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 472 | info!("Spawned ebb"); 473 | 474 | let hostname = "asdf31".to_string(); 475 | let mut cfg = EbbflowDaemonConfig { 476 | endpoints: vec![], 477 | ssh: Some(Ssh { 478 | port: serverport, // We need to override the SSH port or else it will hit the actual ssh server the host 479 | hostname_override: Some(hostname), 480 | enabled: false, 481 | maxconns: 100, 482 | maxidle: 2, 483 | }), 484 | loglevel: None, 485 | }; 486 | 487 | let (notify, arcmutex, _) = start_basic_daemon(testclientport, cfg.clone()).await; 488 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 489 | info!("Spawned daemon"); 490 | 491 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport as usize)); 492 | info!("Spawned server early that should return successfully later"); 493 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 494 | 495 | // We should not be able to connect 496 | let should_err = { 497 | match 
TcpStream::connect(format!("127.0.0.1:{}", customerport)).await { 498 | Ok(mut s) => { 499 | info!("Connected Client"); 500 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 501 | // We should be disconnected soon, or not be able to write 502 | let _r = s.write(&[0; 4][..]).await; 503 | info!("Wrote Customer Stuff"); 504 | let mut buf = vec![0; 10]; 505 | let r = s.read(&mut buf[..]).await; 506 | info!("Read Customer Stuff {:?}", r); 507 | if let Ok(0) = r { 508 | Err(std::io::Error::from(std::io::ErrorKind::NotConnected)) 509 | } else { 510 | r 511 | } 512 | } 513 | Err(e) => Err(e), 514 | } 515 | }; 516 | assert!(should_err.is_err()); 517 | 518 | cfg.ssh.as_mut().unwrap().enabled = true; 519 | { 520 | let mut x = arcmutex.lock().await; 521 | *x = cfg; 522 | } 523 | notify.notify(); 524 | tokio::time::delay_for(MOCKEBBSPAWNDELAY * 2).await; 525 | 526 | // at this point, we have the customer conn and server conn, let's send some bytes. 527 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 528 | .await 529 | .unwrap(); 530 | info!("Connected Client that should succeed"); 531 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 532 | let mut server = serverconnhandle.await.unwrap().unwrap(); // we spawned ONE Accept thing earlier, it should have NOT resolved and only NOW resolves once we connect again 533 | let writeme: [u8; 10212] = [5; 10212]; 534 | customer.write_all(&writeme[..]).await.unwrap(); 535 | info!("Wrote Customer Stuff should succeed"); 536 | let mut readme: [u8; 10212] = [0; 10212]; 537 | server.read_exact(&mut readme[..]).await.unwrap(); 538 | info!("Read Server Stuff should succeed"); 539 | assert_eq!(readme[..], writeme[..]); 540 | } 541 | 542 | #[tokio::test] 543 | async fn just_status_check() { 544 | let testclientport = 49155; 545 | let customerport = 49156; 546 | 547 | let e0 = "mysite.com".to_string(); 548 | let ep0 = 13413; 549 | let e1 = "othersite.com".to_string(); 550 | let ep1 = 12341; 551 | let hn = 
"hostname".to_string(); 552 | let sshp = 131; 553 | 554 | let cfg = EbbflowDaemonConfig { 555 | endpoints: vec![ 556 | Endpoint { 557 | port: ep0, 558 | dns: e0.clone(), 559 | maxconns: 1000, 560 | maxidle: 2, 561 | enabled: true, 562 | healthcheck: None, 563 | }, 564 | Endpoint { 565 | port: ep1, 566 | dns: e1.clone(), 567 | maxconns: 1000, 568 | maxidle: 3, 569 | enabled: true, 570 | healthcheck: None, 571 | }, 572 | ], 573 | ssh: Some(Ssh { 574 | maxconns: 1, 575 | port: sshp, 576 | enabled: true, 577 | maxidle: 1, 578 | hostname_override: Some(hn.clone()), 579 | }), 580 | loglevel: None, 581 | }; 582 | 583 | tokio::spawn(listen_and_process(customerport, testclientport)); 584 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 585 | info!("Spawned ebb"); 586 | 587 | let (_notify, _arcmutex, daemon) = start_basic_daemon(testclientport, cfg).await; 588 | tokio::time::delay_for(MOCKEBBSPAWNDELAY * 3).await; 589 | info!("Spawned daemon"); 590 | 591 | let status = daemon.status().await; 592 | 593 | assert_eq!(DaemonStatusMeta::Good, status.meta); 594 | assert_eq!( 595 | Some(( 596 | hn, 597 | DaemonEndpointStatus::Enabled { 598 | active: 0, 599 | idle: 1, 600 | health: Some(HealthOverall::NOT_CONFIGURED), 601 | } 602 | )), 603 | status.ssh 604 | ); 605 | 606 | assert_eq!(2, status.endpoints.len()); 607 | 608 | for (endpoint, status) in status.endpoints.iter() { 609 | println!("e {} s {:?}", endpoint, status); 610 | if e0 == endpoint.as_str() { 611 | assert_eq!( 612 | &DaemonEndpointStatus::Enabled { 613 | active: 0, 614 | idle: 2, 615 | health: Some(HealthOverall::NOT_CONFIGURED), 616 | }, 617 | status 618 | ); 619 | } else if e1 == endpoint.as_str() { 620 | assert_eq!( 621 | &DaemonEndpointStatus::Enabled { 622 | active: 0, 623 | idle: 3, 624 | health: Some(HealthOverall::NOT_CONFIGURED), 625 | }, 626 | status 627 | ); 628 | } else { 629 | panic!("unexpected") 630 | } 631 | } 632 | } 633 | 634 | #[tokio::test] 635 | async fn 
health_check_starts_up_single_connection_different_port() { 636 | // logger(); 637 | let testclientport = 35001; 638 | let customerport = 35002; 639 | let serverport: usize = 35003; 640 | let healthcheckport: usize = 35004; 641 | 642 | tokio::spawn(listen_and_process(customerport, testclientport)); 643 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 644 | info!("Spawned ebb"); 645 | 646 | let (_notify, _arcmutex, _) = start_basic_daemon( 647 | testclientport, 648 | EbbflowDaemonConfig { 649 | endpoints: vec![Endpoint { 650 | port: serverport as u16, 651 | dns: "ebbflow.io".to_string(), 652 | maxconns: 1000, 653 | maxidle: 1, 654 | enabled: true, 655 | healthcheck: Some(HealthCheck { 656 | port: Some(healthcheckport as u16), 657 | consider_healthy_threshold: None, 658 | consider_unhealthy_threshold: None, 659 | r#type: HealthCheckType::TCP, 660 | frequency_secs: Some(2), 661 | }), 662 | }], 663 | ssh: None, 664 | loglevel: None, 665 | }, 666 | ) 667 | .await; 668 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 669 | info!("Spawned daemon"); 670 | 671 | // the health check server is a diff port to make it easy 672 | tokio::spawn(async move { 673 | spawn_healthy_tcplistener(healthcheckport).await.unwrap(); 674 | }); 675 | 676 | let serverconnhandle = tokio::spawn(get_one_proxied_connection(serverport)); 677 | info!("Spawned server"); 678 | 679 | tokio::time::delay_for(Duration::from_secs(3)).await; 680 | 681 | // Here we should fail for a few seconds (6) 682 | // We should not be able to connect 683 | let should_err = { 684 | match TcpStream::connect(format!("127.0.0.1:{}", customerport)).await { 685 | Ok(mut s) => { 686 | info!("Connected Client"); 687 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 688 | // We should be disconnected soon, or not be able to write 689 | let _r = s.write(&[0; 4][..]).await; 690 | info!("Wrote Customer Stuff"); 691 | let mut buf = vec![0; 10]; 692 | let r = s.read(&mut buf[..]).await; 693 | info!("Read Customer Stuff {:?}", r); 
694 | if let Ok(0) = r { 695 | Err(std::io::Error::from(std::io::ErrorKind::NotConnected)) 696 | } else { 697 | r 698 | } 699 | } 700 | Err(e) => Err(e), 701 | } 702 | }; 703 | assert!(should_err.is_err()); 704 | 705 | // Now we should be good! 706 | tokio::time::delay_for(Duration::from_secs(7)).await; 707 | 708 | tokio::time::delay_for(MOCKEBBSPAWNDELAY).await; 709 | let mut customer = TcpStream::connect(format!("127.0.0.1:{}", customerport)) 710 | .await 711 | .unwrap(); 712 | info!("Connected"); 713 | 714 | let mut server = serverconnhandle.await.unwrap().unwrap(); 715 | 716 | // at this point, we have the customer conn and server conn, let's send some bytes. 717 | let writeme: [u8; 102] = [1; 102]; 718 | customer.write_all(&writeme[..]).await.unwrap(); 719 | info!("Wrote Customer Stuff"); 720 | 721 | let mut readme: [u8; 102] = [0; 102]; 722 | server.read_exact(&mut readme[..]).await.unwrap(); 723 | info!("Read Server Stuff"); 724 | 725 | assert_eq!(readme[..], writeme[..]); 726 | } 727 | 728 | async fn spawn_healthy_tcplistener(port: usize) -> Result { 729 | let mut listener = TcpListener::bind(format!("127.0.0.1:{}", port)) 730 | .await 731 | .unwrap(); 732 | 733 | loop { 734 | let (socket, _) = listener.accept().await?; 735 | info!("Got a connection, shoving it to another task for a second then killing it"); 736 | tokio::spawn(async move { 737 | delay_for(Duration::from_secs(1)).await; 738 | drop(socket); 739 | }); 740 | } 741 | } 742 | 743 | async fn get_one_proxied_connection(port: usize) -> Result { 744 | let mut listener = TcpListener::bind(format!("127.0.0.1:{}", port)) 745 | .await 746 | .unwrap(); 747 | 748 | let (socket, _) = listener.accept().await?; 749 | info!("Got a proxied connection to the server, giving back"); 750 | Ok(socket) 751 | } 752 | 753 | async fn start_basic_daemon( 754 | ebbport: usize, 755 | cfg: EbbflowDaemonConfig, 756 | ) -> ( 757 | Arc, 758 | Arc>, 759 | Arc, 760 | ) { 761 | let info = SharedInfo::new_with_ebbflow_overrides( 
762 | format!("127.0.0.1:{}", ebbport).parse().unwrap(), 763 | "preview.ebbflow.io".to_string(), 764 | load_root(), 765 | ) 766 | .await 767 | .unwrap(); 768 | 769 | let (am, f) = config_with_reloading(cfg); 770 | 771 | let nc = Arc::new(Notify::new()); 772 | let n = nc.clone(); 773 | 774 | let m = run_daemon(Arc::new(info), f, n.clone()).await; 775 | 776 | (nc, am, m) 777 | } 778 | 779 | fn ezconfigendpoitnonly(port: u16) -> EbbflowDaemonConfig { 780 | EbbflowDaemonConfig { 781 | endpoints: vec![Endpoint { 782 | port, 783 | dns: "ebbflow.io".to_string(), 784 | maxconns: 1000, 785 | maxidle: 1, 786 | enabled: true, 787 | healthcheck: None, 788 | }], 789 | ssh: None, 790 | loglevel: None, 791 | } 792 | } 793 | 794 | fn config_with_reloading( 795 | cfg: EbbflowDaemonConfig, 796 | ) -> ( 797 | Arc>, 798 | std::pin::Pin< 799 | Box< 800 | dyn Fn() -> BoxFuture< 801 | 'static, 802 | Result<(Option, Option), ConfigError>, 803 | > + Send 804 | + Sync 805 | + 'static, 806 | >, 807 | >, 808 | ) { 809 | let cfg: Arc> = Arc::new(Mutex::new(cfg)); 810 | 811 | let c = cfg.clone(); 812 | ( 813 | cfg, 814 | Box::pin(move || { 815 | let cc = c.clone(); 816 | Box::pin( 817 | async move { Ok((Some(cc.lock().await.clone()), Some("asdf".to_string()))) }, 818 | ) 819 | }), 820 | ) 821 | } 822 | } 823 | -------------------------------------------------------------------------------- /tests/certs/myCA.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | Proc-Type: 4,ENCRYPTED 3 | DEK-Info: DES-EDE3-CBC,3E790E109F1F95AF 4 | 5 | lFMNtofLLfEBDxqiIspDig9xqTVdRUyB/C34cTq+JriaFZeeIcl+CCGVvnde7Dsg 6 | BKUzm7l0xvDXFmIqg3cRkdXhLvZfqtuB5okHNbIc8CXYdgCiEAVWYEl3sKiRAsWN 7 | RogHcshBRX+o/8sf+ZjDuU2oBhiOrZMTCa/z6SkAUN1C21Sgiw4sqnvSJrVXQTGt 8 | pkiMY8nx4BKvNN+zDaj69AmJquK9tLFOTkF3XU02etn6k1IqCcH1GIuXXsBoTYNn 9 | wgxs5RWlTVRl9NQmrNnZ3U+L/0buKAZGFysj/gLEThCsPriGU6nsAihUpg4CySWx 10 | 
6q9Y8DQrUjxHYLKj7iwSXh6F75pZR4OjSNIS7XlXn4YwRvawV6pKS3gEkS+oI8xz 11 | jhD3UgnCAlny5wsaCKz7B38HIURq5qUTHQIYfyhN09lPGkrSf+B97Ix7hzc2IejM 12 | Ig+N169FqhfYUneoFz0+67bZfxFPf9igoE3eMxmM8qtNzZyiPA7DkbbTVRLZbLzG 13 | cnjvOuSlJnYzUAU63CHDuBRF1UcgTF3AHLksxBgOaSwyzKpf2Gl0eZVfi23/XMMK 14 | wYkGWJ9GpFbeSZBGGfBaSA4TArJkqUcgwiOg5HCtkaw7n5Y0hmsOfgQGrwMWT9RB 15 | E33UgZ3WW9Bh3w6LTqodvm517JN7Xb/8/NBj1rOJ9MWzNiWq/darD9VN913GYD1b 16 | S85DgvXJsed4yXbd0NB0iMXW0Sd545TYj2FIYuGygNHSQG/Mrkchv6G+DWnC/KbU 17 | SmeSDqk4T+vqjobjmnEi+rPdNF0G5yq4KbALrXIDrf6vnfg+2pIUWmF19V2n9EP+ 18 | 8koSDyESnKD4xoV6fRl2uYPt9TXhSpMtXTePOMwbouVu7Th/ZwQ3CtjDCZJSddsy 19 | ZaTI61vKhbEvEfJqgGq/oKg+s4mnaBjYIzVyFBNNCldIheUAgwtKBmPD4aV/RfjL 20 | vHc0uZKkFL4/vfP2F/w2/TsNUadrdsKZtVD0HwNbS/zLP9e4SjjeznZbCNam8vy9 21 | orIHRPLFd9M/GhtMBJCk2RjiEQwPBkKAuvKO5yn33Z/XAYVG83wfSRvl4UXx9f0C 22 | A+kgPHJo+RdVoCfJJngCBPPoKdJOjm/jDDFpOhK3z4T2hEosonUGJlblvoWkrm2A 23 | dW33YWyzRfFybRjckqS0OkWwJHq95QTYWqEf3EfqdTZaPXBxQbc+42cb0W6D5vxh 24 | yVnR/vxWcMgaUvKZmr+ojQhHqgOcr368VKucvjUpV9Kp87Z/epP1w83MeI5I76aj 25 | yTKqZtuV5QU/sM0VhHQLjr87hIKj9lGXobLzcrOe+HCxP7CqoHQ/PJqShbrADEJp 26 | fvw7N/OoxoH/pgoniqpv+cpZpohn4KZkTJpjavLVFSHC8QpreSQHRHIubgQ6R3Bq 27 | EVb+WM7lBwXZu7D2udhYWL/gitd5kW84fuSwVciXmUojuV6zrFAtMhE/m9h8gEJi 28 | ZQEo9TqEELvF15AledqDLnbS2BCIrAzPIB475+FxseWwwkkRDUhH+HTOvi3e0ooi 29 | 3Bsns34IXfeB688vhsfBHTtLg0++g9zISg+NC5QVNWfBcpMx+gDvDQ== 30 | -----END RSA PRIVATE KEY----- 31 | -------------------------------------------------------------------------------- /tests/certs/myCA.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDEDCCAfgCCQDQ/SjS1+pBizANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQGEwJV 3 | UzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDTALBgNVBAoMBHRlc3Qx 4 | DTALBgNVBAsMBHRlc3QwHhcNMTkwODIwMTYzMDU5WhcNMjQwODE4MTYzMDU5WjBK 5 | MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDTAL 6 | BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB 7 | 
DwAwggEKAoIBAQCm3WLJ0qwqjIbNrmC5xFDynparBRVLRbE4BrkWdAWJJ5hgBrxZ 8 | 0mg8U+eauEaWyBEx3f/kbzB3iWQiSa3Hkp1G9/C78EZ9OVLcbcpmWZ6ofY4ALxcV 9 | rvluPBXFw6Dl6+mDQHR5svrIPuAsqFZuApn1mllkxqk7oycg0pDUeyZNDSDHfS59 10 | 510hCGpZ6Mc6nkUrGOLVGPraTqviQDSs/5PdIV30C2IIKIKqvHfOsCLcxCTXhv2m 11 | UvvJYuFylw445B4UgkXbN1w6B9bEjLduL1ogSeVMr7LMMKGP5alnrC4ubKDnnUdP 12 | lE3eBflK+6zpSqYZ3+Y6YweBWmMKT2QOtHS1AgMBAAEwDQYJKoZIhvcNAQELBQAD 13 | ggEBAA1SYwAgeRia3D/FfvOOaybb8J4FYCg0Iz1kgp5QUAVEWDmQx0OU+gk/MpcI 14 | SoO+bo51rNVhOAWBXV+x3aSIIr0Yu48wqJFX1iSCrdLEkR2KPDN4pNvYjMmyrBVr 15 | h6QNRkBRDm0tM91DatGGymj0xCuO8udifK9mvAgFnmU3hOfCQOgSfhhltDCE2aG+ 16 | TvOO8QcYeTpsDjDZCtZrVCFhDJ4L7rvjgu0MjfY5ZTmo20EK3+53hDCWXRPytMsn 17 | 900+RQ70WSIHB7I633C9NFHVDWmRJh/TN7pvtp2lDELs6HbVNL/4/0+Q1Ost8VsZ 18 | mwjQfTXKWLjWGhREmHMrEAIBI0k= 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /tests/certs/test.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIECjCCAvKgAwIBAgIJAKzm03LQ8Yd/MA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV 3 | BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTENMAsGA1UECgwE 4 | dGVzdDENMAsGA1UECwwEdGVzdDAeFw0xOTEwMDgwMjA2MTRaFw0yNDEwMDYwMjA2 5 | MTRaMEoxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRs 6 | ZTENMAsGA1UECgwEdGVzdDENMAsGA1UECwwEdGVzdDCCASIwDQYJKoZIhvcNAQEB 7 | BQADggEPADCCAQoCggEBAOVNivygjYFV1CV186qzZSXnCfwaPKQAAydwB65QsYx6 8 | fu+MNjlB1nx0WNohxfA8d+FpkUU39xPNE/GCb1MqZZNHD1eeSjyZMG8F+nyNy2n2 9 | GIST0cu41gxpb7JvWeigxdbmoxbfOXeHv4JDk6w3U4ctcbfVj7qxg9cTkgd7taL+ 10 | LylYca06bCYQAThDsG0UW9a4ETs7Iek6zV2br7aGfAfpdy+IjcwIzoaBSZLsw4/c 11 | J/Ep65r0B/82tLkaIY/lJwehNb2d4MbbQMQGvXcnNpJEuLkxoB1K5rbcKtdhmDYj 12 | 4Ba2EcbhFVkpde+0x+qwAHYqfSdStVy+X9MYz3t1+CMCAwEAAaOB8jCB7zBkBgNV 13 | HSMEXTBboU6kTDBKMQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcM 14 | B1NlYXR0bGUxDTALBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3SCCQDQ/SjS1+pB 15 | 
izAJBgNVHRMEAjAAMAsGA1UdDwQEAwIE8DBvBgNVHREEaDBmgg90ZXN0LmViYmZs 16 | b3cuaW+CDHMuZWJiZmxvdy5pb4IMaC5lYmJmbG93LmlvggxlLmViYmZsb3cuaW+C 17 | D3RvYnkuZWJiZmxvdy5pb4IMKi5lYmJmbG93LmlvggplYmJmbG93LmlvMA0GCSqG 18 | SIb3DQEBCwUAA4IBAQA7oO5IbtR23eZwWey97fd5TUo0cxdM4uWrI6FX7shF8YP0 19 | BXUhLfmG5KX/4GUST1wZ5qO814EDuY6dmkDCHUPp+O8qGMOx+na5maql49nE7Mkh 20 | 6UbETqzLiULERc4MgpGWHuGYnq82zIlOCKW9MKmxHKiMCnGU/k+dsg8MKFQTSpeV 21 | svXvOSL83pz2jOvQA67S+f/eEwHmtm7UKYUqw7I8W56oNwqmaDgiafxucrFjMPdR 22 | KjueyS3p+85XEnUbMG/u9X2VLlDQlKz7eImIGVBPWoUXaLzyFIHlnyDz9y8DIfrp 23 | FI1dG+u40Ed8PDyPetq9pvLgNDtIr7hFz9cqNXIE 24 | -----END CERTIFICATE----- 25 | -------------------------------------------------------------------------------- /tests/certs/test.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEA5U2K/KCNgVXUJXXzqrNlJecJ/Bo8pAADJ3AHrlCxjHp+74w2 3 | OUHWfHRY2iHF8Dx34WmRRTf3E80T8YJvUyplk0cPV55KPJkwbwX6fI3LafYYhJPR 4 | y7jWDGlvsm9Z6KDF1uajFt85d4e/gkOTrDdThy1xt9WPurGD1xOSB3u1ov4vKVhx 5 | rTpsJhABOEOwbRRb1rgROzsh6TrNXZuvtoZ8B+l3L4iNzAjOhoFJkuzDj9wn8Snr 6 | mvQH/za0uRohj+UnB6E1vZ3gxttAxAa9dyc2kkS4uTGgHUrmttwq12GYNiPgFrYR 7 | xuEVWSl177TH6rAAdip9J1K1XL5f0xjPe3X4IwIDAQABAoIBACbFQq/OUb7BjVo8 8 | JFQDeyG6e9S1dvVfzyi254WQvUR8XcQRGAX8XNC0N2XCeh5Riw+A8wAaI1ukvC+E 9 | l2Afdgc955kv+Wtfl3HeN+khbHhhFOotuDZS7VYx6aHo8/HCZVEdtAuk+Kl+OWJE 10 | HM7sl+HuTCECKgguBZWzCGjdgOuq7suvpfg7jFFABp79LZGHo/mwLkZrvJUaSCyJ 11 | 0aTvpunKfWXrQTRcsJ8sKNHZAlp7DLPMIU8QOZrFtOT9ayk9qZjOqThuo1cxnANR 12 | heIhVS1zIOLKf8C5xSlYD6Im/7VCgVNYohT/Ze6uP2Z6BbZYoAEFhaDcKcYukOhY 13 | OI5Lr9ECgYEA9cup+JiJT9MIpXUXUgqI9m70MKcia3V5qF7flBYxdtx7EZghnqrJ 14 | H9DQqxhzdt2giyPxWuKIMrV7650gIch4RPIzVckLzCRg2Ydw5ve8AzKqgbS7iylR 15 | 1uezGubRdkWK9bPcx5C/KV2CynyHgYsNFmtlHLCYQbwVNHoStYlSpYkCgYEA7tKX 16 | 9LSkcA2HNhTqPHQ2kyQSJlWgdqtpauJ1FhZbQummF8W7GBOY+XMo0nWIqH6iq0R1 17 | F3kzPUvHUJzzgPoGLDWJLnSzci5XrN6dV3D/a36lLk9CNDSl6wb4ndqlqtzunNty 18 | 
Aub4zQAm4ygbUd7DIl/Yg4w93iVO4dxES74+cUsCgYEAz5+E6Q/4VSxfDlx1IYDy 19 | pq+cp4hWo8zswhO56j5A9DVT483NAFklYTGjQhN6XTrixQXMd0gJYkQ49BKatM7B 20 | AQRR5rHXvE4yXS1yajdzYoQm0T+Yd/mkPtMjUKMO5kfiFKAk65JSV5+8vkworn2F 21 | xZ1cILrvMf+tLdG8AgqIfHkCgYB2GeyNVZuP0hDQXM1s8edfIlJbZLLCnIe4FJ7k 22 | OlANKfA6B007Xc2jZiCE2xymfmFBIsxovEXrhQLeyY7ddEcDK8JYF9v5lhNDaJ8U 23 | c8vLw8g5u3Y4zORT8pGVtl+AvFq/dQHPj5rv/ag9Y0MHXIrQ/+bZPswFhBubM8sO 24 | TdkhOwKBgQCBPp7gaBHnu8DW7k4RZEuGTMEdFBgFo3JPYM9h64RjKnnfoIQxpC5M 25 | SSoW444lUax5bAL8wH+6cfAQ7sDCTvfEacXpHyb0jUi8YvPHiug5FYvOvG0xCENl 26 | n6gZ2SbXPSga1hxNmtjHkz1OUa6Z3iWerU6toB0Ozq6WjeQ9gMiKQQ== 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /tests/mockebb.rs: -------------------------------------------------------------------------------- 1 | //! This is a mock version of Ebbflow to be used for testing 2 | use ebbflow::messaging::*; 3 | use log::info; 4 | use parking_lot::Mutex; 5 | use rustls::RootCertStore; 6 | use rustls::ServerConfig; 7 | use rustls::{Certificate, PrivateKey}; 8 | use std::fs; 9 | use std::io; 10 | use std::io::BufReader; 11 | use std::net::Shutdown; 12 | use std::sync::Arc; 13 | use tokio::io::AsyncRead; 14 | use tokio::io::AsyncWrite; 15 | use tokio::net::TcpListener; 16 | use tokio::net::TcpStream; 17 | use tokio::prelude::*; 18 | use tokio_rustls::server::TlsStream; 19 | use tokio_rustls::TlsAcceptor; 20 | 21 | pub async fn listen_and_process( 22 | port_for_customers: usize, 23 | port_for_tested_client: usize, 24 | ) -> Result<(), io::Error> { 25 | let tested_clients = Arc::new(Mutex::new(Vec::new())); 26 | let mut scfg = ServerConfig::new(rustls::NoClientAuth::new()); 27 | scfg.set_single_cert( 28 | load_certs("tests/certs/test.crt"), 29 | load_private_key("tests/certs/test.key"), 30 | ) 31 | .unwrap(); 32 | let scfg = Arc::new(scfg); 33 | let acceptor = TlsAcceptor::from(scfg.clone()); 34 | let mut listener = TcpListener::bind(format!("127.0.0.1:{}", 
port_for_tested_client)).await?; 35 | 36 | let tc = tested_clients.clone(); 37 | tokio::spawn(async move { 38 | loop { 39 | let (socket, _) = listener.accept().await.unwrap(); 40 | let mut tlsstream = acceptor.accept(socket).await.unwrap(); 41 | 42 | // receive message 43 | let mut lenbuf: [u8; 4] = [0; 4]; 44 | tlsstream.read_exact(&mut lenbuf[..]).await.unwrap(); 45 | let len = u32::from_be_bytes(lenbuf); 46 | 47 | let mut msgbuf = vec![0; len as usize]; 48 | tlsstream.read_exact(&mut msgbuf[..]).await.unwrap(); 49 | 50 | let msg = Message::from_wire_without_the_length_prefix(&msgbuf[..]).unwrap(); 51 | if let Message::HelloV0(_) = msg { 52 | info!("Got message from daemon {:?}", msg); 53 | } else { 54 | panic!("asdfas"); 55 | } 56 | 57 | // send message 58 | let msg = Message::HelloResponseV0(HelloResponseV0 { issue: None }); 59 | let msgvec = msg.to_wire_message().unwrap(); 60 | tlsstream.write_all(&msgvec[..]).await.unwrap(); 61 | tlsstream.flush().await.unwrap(); 62 | info!("Wrote message to {:?}", msg); 63 | 64 | tc.lock().push(tlsstream); 65 | } 66 | }); 67 | 68 | tokio::spawn(async move { 69 | let mut listener = TcpListener::bind(format!("127.0.0.1:{}", port_for_customers)) 70 | .await 71 | .unwrap(); 72 | 73 | let tc = tested_clients.clone(); 74 | loop { 75 | let (socket, _) = listener.accept().await.unwrap(); 76 | let x = tc.clone(); 77 | tokio::spawn(async move { 78 | let r = handleconn(socket, x).await; 79 | info!("Connection finished, result {:?}", r); 80 | }); 81 | } 82 | }); 83 | futures::future::pending::<()>().await; 84 | Ok(()) 85 | } 86 | 87 | async fn handleconn( 88 | socket: TcpStream, 89 | tc: Arc>>>, 90 | ) -> Result<(), std::io::Error> { 91 | info!("Got connection on customer server"); 92 | 93 | let mut clienttestedstream = match tc.lock().pop() { 94 | Some(x) => x, 95 | None => { 96 | info!("Did NOT find server for customer, disconnecting customer"); 97 | // No server, disconnect client 98 | let _ = socket.shutdown(Shutdown::Both); 99 | 
return Ok(()); 100 | } 101 | }; 102 | info!("Found local connection for customer connection"); 103 | 104 | // send message 105 | let msg = Message::StartTrafficV0; 106 | let msgvec = msg.to_wire_message().unwrap(); 107 | clienttestedstream.write_all(&msgvec[..]).await?; 108 | clienttestedstream.flush().await?; 109 | info!("Wrote message to {:?}", msg); 110 | 111 | // receive message 112 | let mut lenbuf: [u8; 4] = [0; 4]; 113 | clienttestedstream.read_exact(&mut lenbuf[..]).await?; 114 | let len = u32::from_be_bytes(lenbuf); 115 | 116 | let mut msgbuf = vec![0; len as usize]; 117 | clienttestedstream.read_exact(&mut msgbuf[..]).await?; 118 | 119 | let msg = Message::from_wire_without_the_length_prefix(&msgbuf[..]).unwrap(); 120 | if let Message::StartTrafficResponseV0(_inner) = msg { 121 | info!("Got message from daemon {:?}", msg); 122 | } else { 123 | panic!("asdfas"); 124 | } 125 | 126 | tokio::spawn(copyezcopy(clienttestedstream, socket)); 127 | 128 | Ok(()) 129 | } 130 | 131 | pub async fn copyezcopy(stream1: TlsStream, stream2: TcpStream) { 132 | info!("Will proxy data"); 133 | let (mut s1r, mut s1w) = tokio::io::split(stream1); 134 | let (mut s2r, mut s2w) = tokio::io::split(stream2); 135 | 136 | tokio::spawn(async move { 137 | copy_bytes_ez(&mut s1r, &mut s2w).await.unwrap(); 138 | }); 139 | tokio::spawn(async move { 140 | copy_bytes_ez(&mut s2r, &mut s1w).await.unwrap(); 141 | }); 142 | } 143 | 144 | async fn copy_bytes_ez(r: &mut R, w: &mut W) -> Result<(), std::io::Error> 145 | where 146 | R: AsyncRead + Unpin + Send, 147 | W: AsyncWrite + Unpin + Send, 148 | { 149 | let mut buf = [0; 10 * 1024]; 150 | 151 | loop { 152 | let n = r.read(&mut buf[0..]).await?; 153 | 154 | if n == 0 { 155 | return Ok(()); 156 | } 157 | w.write_all(&buf[0..n]).await?; 158 | w.flush().await?; 159 | } 160 | } 161 | 162 | pub fn load_root() -> RootCertStore { 163 | let mut store = RootCertStore::empty(); 164 | for cert in load_certs("tests/certs/myCA.pem") { 165 | 
store.add(&cert).unwrap(); 166 | } 167 | store 168 | } 169 | 170 | /// PANICS!!!! 171 | pub fn load_certs(filename: &str) -> Vec { 172 | let certfile = fs::File::open(filename).expect("cannot open certificate file"); 173 | let mut reader = BufReader::new(certfile); 174 | rustls::internal::pemfile::certs(&mut reader).unwrap() 175 | } 176 | 177 | /// PANICS!! 178 | pub fn load_private_key(filename: &str) -> PrivateKey { 179 | let rsa_keys = { 180 | let keyfile = fs::File::open(filename).expect("cannot open private key file"); 181 | let mut reader = BufReader::new(keyfile); 182 | rustls::internal::pemfile::rsa_private_keys(&mut reader) 183 | .expect("file contains invalid rsa private key") 184 | }; 185 | 186 | let pkcs8_keys = { 187 | let keyfile = fs::File::open(filename).expect("cannot open private key file"); 188 | let mut reader = BufReader::new(keyfile); 189 | rustls::internal::pemfile::pkcs8_private_keys(&mut reader) 190 | .expect("file contains invalid pkcs8 private key (encrypted keys not supported)") 191 | }; 192 | 193 | // prefer to load pkcs8 keys 194 | if !pkcs8_keys.is_empty() { 195 | pkcs8_keys[0].clone() 196 | } else { 197 | assert!(!rsa_keys.is_empty()); 198 | rsa_keys[0].clone() 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /wix/main.wxs: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 34 | 35 | 36 | 44 | 45 | 57 | 58 | 61 | 62 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 79 | 80 | 85 | 86 | 87 | 117 | 118 | 125 | 136 | 147 | 148 | 149 | 153 | 158 | 159 | 160 | 161 | 162 | 170 | 171 | 172 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 193 | 197 | 198 | 199 | 202 | 203 | 204 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 245 | 246 | 247 | 252 | 253 | 254 | 255 | 263 | 264 | 265 | 266 | 274 | 275 | 276 | 277 | 278 | 
--------------------------------------------------------------------------------