├── .cargo └── config ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── build_image.yml │ └── unit_tests.yml ├── .gitignore ├── .releaserc.yml ├── ARCHITECTURE.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── Dockerfile.arm64 ├── Dockerfile.armv7 ├── LICENSE ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── client ├── Cargo.toml ├── src │ ├── client.rs │ ├── error.rs │ ├── lib.rs │ └── proxy.rs └── tests │ └── client.rs ├── core ├── Cargo.toml └── src │ ├── error.rs │ ├── lib.rs │ ├── link.rs │ ├── proxy.rs │ ├── signal.rs │ ├── socket.rs │ ├── state.rs │ ├── stream.rs │ ├── tests │ ├── mod.rs │ ├── proxy.rs │ └── socket_mocks.rs │ ├── toxic.rs │ └── toxics │ ├── bandwidth.rs │ ├── latency.rs │ ├── limit_data.rs │ ├── mod.rs │ ├── noop.rs │ ├── slicer.rs │ ├── slow_close.rs │ ├── test_utils.rs │ └── timeout.rs ├── scripts ├── bin │ ├── semantic-release-rust │ └── set-cargo-version ├── flamegraph.sh ├── install-semantic-release.sh ├── publish-image.sh └── run-coverage.sh └── server ├── Cargo.toml └── src ├── api ├── filters.rs ├── handlers.rs └── mod.rs ├── args.rs ├── error.rs ├── file.rs ├── main.rs ├── store.rs └── util.rs /.cargo/config: -------------------------------------------------------------------------------- 1 | [target.armv7-unknown-linux-gnueabihf] 2 | linker = "arm-linux-gnueabihf-gcc" 3 | 4 | [target.aarch64-unknown-linux-gnu] 5 | linker = "aarch64-linux-gnu-gcc" 6 | 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 
12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Additional context** 20 | Add any other context about the problem here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/workflows/build_image.yml: -------------------------------------------------------------------------------- 1 | on: 2 | repository_dispatch: 3 | types: [build-artifacts] 4 | 5 | name: Build Artifacts 6 | 7 | jobs: 8 | build_image_and_publish: 9 | strategy: 10 | fail-fast: true 11 | matrix: 12 | include: 13 | - label: x86_64 14 | suffix: amd64 15 | platform: linux/amd64 16 | dockerfile: Dockerfile 17 | - label: ARM64 18 | suffix: arm64 19 | platform: linux/arm64 20 | dockerfile: Dockerfile.arm64 21 | - label: ARMv7 22 | suffix: armv7 23 | platform: linux/arm/v7 24 | dockerfile: Dockerfile.armv7 25 | name: "${{ matrix.label }}: Build and Publish Docker Image" 26 | if: github.event.client_payload.tag != '' 27 | runs-on: ubuntu-latest 28 | steps: 29 | - uses: actions/checkout@v2 30 | with: 31 | ref: 'refs/tags/${{ github.event.client_payload.tag }}' 32 | 33 | - name: Set up Docker Buildx 34 | uses: docker/setup-buildx-action@v1 35 | 36 | - name: Login to DockerHub 37 | uses: docker/login-action@v1 38 | with: 39 | username: ${{ secrets.DOCKERHUB_USERNAME }} 40 | password: ${{ secrets.DOCKERHUB_TOKEN }} 41 | 42 | - name: Login to GitHub Container Registry 43 | uses: docker/login-action@v1 44 | with: 45 | registry: ghcr.io 46 | username: ${{ github.repository_owner }} 47 | password: ${{ secrets.GITHUB_TOKEN }} 48 | 49 | - name: Build and push 50 | uses: docker/build-push-action@v2 51 | with: 52 | context: . 
53 | platforms: ${{ matrix.platform }} 54 | push: true 55 | file: ${{ matrix.dockerfile }} 56 | tags: | 57 | oguzbilgener/noxious:latest-${{ matrix.suffix }} 58 | oguzbilgener/noxious:${{ github.event.client_payload.version }}-${{ matrix.suffix }} 59 | ghcr.io/oguzbilgener/noxious:latest-${{ matrix.suffix }} 60 | ghcr.io/oguzbilgener/noxious:${{ github.event.client_payload.version }}-${{ matrix.suffix }} 61 | 62 | publish_multi_arch_image: 63 | name: Publish Multi-Arch Image 64 | if: github.event.client_payload.tag != '' 65 | runs-on: ubuntu-latest 66 | needs: 67 | - build_image_and_publish 68 | steps: 69 | - uses: actions/checkout@v2 70 | 71 | - name: Login to DockerHub 72 | uses: docker/login-action@v1 73 | with: 74 | username: ${{ secrets.DOCKERHUB_USERNAME }} 75 | password: ${{ secrets.DOCKERHUB_TOKEN }} 76 | 77 | - name: Login to GitHub Container Registry 78 | uses: docker/login-action@v1 79 | with: 80 | registry: ghcr.io 81 | username: ${{ github.repository_owner }} 82 | password: ${{ secrets.GITHUB_TOKEN }} 83 | 84 | - name: Install manifest-tool 85 | run: | 86 | curl -fSL -o ./manifest-tool https://github.com/estesp/manifest-tool/releases/download/v1.0.2/manifest-tool-linux-amd64 87 | curl -fSL -o ./manifest-tool.asc https://github.com/estesp/manifest-tool/releases/download/v1.0.2/manifest-tool-linux-amd64.asc 88 | export GNUPGHOME="$(mktemp -d)" 89 | gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys 27F3EA268A97867EAF0BD05C0F386284C03A1162 90 | gpg --batch --verify manifest-tool.asc manifest-tool 91 | chmod +x ./manifest-tool 92 | rm manifest-tool.asc 93 | 94 | - name: Push Multi-Arch Manifest 95 | run: | 96 | 97 | ./manifest-tool push from-args \ 98 | --platforms linux/amd64,linux/arm64,linux/arm/v7 \ 99 | --template oguzbilgener/noxious:${{ github.event.client_payload.version }}-ARCHVARIANT \ 100 | --target oguzbilgener/noxious:${{ github.event.client_payload.version }} 101 | 102 | ./manifest-tool push from-args \ 103 | --platforms 
linux/amd64,linux/arm64,linux/arm/v7 \ 104 | --template ghcr.io/oguzbilgener/noxious:${{ github.event.client_payload.version }}-ARCHVARIANT \ 105 | --target ghcr.io/oguzbilgener/noxious:${{ github.event.client_payload.version }} 106 | 107 | ./manifest-tool push from-args \ 108 | --platforms linux/amd64,linux/arm64,linux/arm/v7 \ 109 | --template oguzbilgener/noxious:latest-ARCHVARIANT \ 110 | --target oguzbilgener/noxious:latest 111 | 112 | ./manifest-tool push from-args \ 113 | --platforms linux/amd64,linux/arm64,linux/arm/v7 \ 114 | --template ghcr.io/oguzbilgener/noxious:latest-ARCHVARIANT \ 115 | --target ghcr.io/oguzbilgener/noxious:latest -------------------------------------------------------------------------------- /.github/workflows/unit_tests.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | name: Test and Release 4 | 5 | jobs: 6 | clippy_check: 7 | name: Clippy 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - run: rustup component add clippy 12 | - uses: actions-rs/clippy-check@v1 13 | with: 14 | token: ${{ secrets.GITHUB_TOKEN }} 15 | args: --all-features 16 | 17 | build_and_test: 18 | name: Test Noxious 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v2 22 | - uses: actions-rs/toolchain@v1 23 | with: 24 | toolchain: stable 25 | - name: Cargo cache 26 | uses: actions/cache@v2 27 | with: 28 | path: | 29 | ~/.cargo/registry 30 | ~/.cargo/git 31 | target 32 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} 33 | - name: Build release 34 | uses: actions-rs/cargo@v1 35 | with: 36 | command: build 37 | args: --release --all-features 38 | - name: Run cargo-tarpaulin 39 | uses: actions-rs/tarpaulin@v0.1 40 | with: 41 | version: "0.16.0" 42 | args: "--ignore-tests --out Lcov -- --test-threads 1" 43 | 44 | - name: Coveralls 45 | uses: coverallsapp/github-action@master 46 | with: 47 | github-token: ${{ secrets.GITHUB_TOKEN }} 48 
| path-to-lcov: './lcov.info' 49 | 50 | - name: Archive code coverage results 51 | uses: actions/upload-artifact@v1 52 | with: 53 | name: code-coverage-report 54 | path: cobertura.xml 55 | 56 | semantic_release: 57 | name: Semantic Release 58 | needs: [build_and_test, clippy_check] 59 | if: github.ref == 'refs/heads/rc' || github.ref == 'refs/heads/main' 60 | runs-on: ubuntu-latest 61 | steps: 62 | - uses: actions/checkout@v2 63 | 64 | - uses: actions-rs/toolchain@v1 65 | with: 66 | toolchain: stable 67 | - name: Cargo cache 68 | uses: actions/cache@v2 69 | with: 70 | path: | 71 | ~/.cargo/registry 72 | ~/.cargo/git 73 | target 74 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} 75 | 76 | - name: Setup Node for semantic release 77 | uses: actions/setup-node@v2 78 | with: 79 | node-version: "14" 80 | 81 | - name: Install semantic release 82 | run: npm install -g semantic-release @semantic-release/exec @semantic-release/git 83 | 84 | - name: Run semantic release 85 | id: semantic_release 86 | shell: bash 87 | run: | 88 | semantic-release 89 | TAG=$(git --no-pager tag --points-at HEAD) 90 | echo "::set-output name=tag::$TAG" 91 | echo "::set-output name=version::${TAG:1}" 92 | env: 93 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 94 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 95 | 96 | - name: Upload executable 97 | uses: actions/upload-artifact@v2 98 | with: 99 | name: noxious-server-${{ runner.os }}-${{ github.sha }} 100 | path: ./target/release/noxious-server 101 | retention-days: 1 102 | 103 | # TODO: remove this from here and move it to the build artifacts workflow 104 | # So that we are sure that the version numbers are updated in the binary 105 | - name: Upload executable to release 106 | uses: svenstaro/upload-release-action@v1-release 107 | if: steps.semantic_release.outputs.tag != '' 108 | with: 109 | repo_token: ${{ secrets.PAT }} 110 | file: ./target/release/noxious-server 111 | asset_name: noxious-server-${{ 
steps.semantic_release.outputs.tag }}-linux-amd64 112 | tag: ${{ steps.semantic_release.outputs.tag }} 113 | overwrite: true 114 | 115 | - name: Dispatch Build Image 116 | uses: peter-evans/repository-dispatch@v1 117 | if: steps.semantic_release.outputs.tag != '' 118 | with: 119 | token: ${{ secrets.PAT }} 120 | event-type: build-artifacts 121 | client-payload: '{"tag": "${{ steps.semantic_release.outputs.tag }}", "version": "${{ steps.semantic_release.outputs.version }}" }' -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # These are backup files generated by rustfmt 6 | **/*.rs.bk 7 | 8 | .DS_Store 9 | 10 | *.code-workspace 11 | .vscode/ 12 | 13 | toxiproxy.json 14 | 15 | perf.data 16 | perf.data.old 17 | 18 | 19 | tarpaulin-report.html 20 | cobertura.xml 21 | lcov.info -------------------------------------------------------------------------------- /.releaserc.yml: -------------------------------------------------------------------------------- 1 | branches: 2 | - name: main 3 | - name: rc 4 | prerelease: true 5 | plugins: 6 | - "@semantic-release/commit-analyzer" 7 | - "@semantic-release/release-notes-generator" 8 | - - "@semantic-release/exec" 9 | - prepareCmd: "./scripts/bin/set-cargo-version ./server/Cargo.toml ${nextRelease.version} && cargo build --release --all-features" 10 | - - "@semantic-release/git" 11 | - assets: 12 | - server/Cargo.toml 13 | - core/Cargo.toml 14 | - client/Cargo.toml 15 | - Cargo.lock 16 | - "@semantic-release/github" 17 | - - "@semantic-release/exec" 18 | - verifyConditionsCmd: "./scripts/bin/semantic-release-rust verify-conditions" 19 | prepareCmd: "./scripts/bin/semantic-release-rust prepare ${nextRelease.version}" 20 | publishCmd: "./scripts/bin/semantic-release-rust publish" 21 | 
-------------------------------------------------------------------------------- /ARCHITECTURE.md: -------------------------------------------------------------------------------- 1 | # Architecture 2 | 3 | Noxious server consists of a core crate, and a server crate. 4 | 5 | The `core` crate contains the serializable data types, as well as the proxy runner, toxics and some state management. The `server` crate contains the REST API server and the `Store` which manages proxy and toxic CRUD operations. 6 | 7 | The `client` library uses the types from `core` and offers an async client to the Toxiproxy API interface. 8 | 9 | ### Link 10 | 11 | For every client connection, an upstream and a downstream `Link` are created. Each link owns a read and write handle, as well as the toxics for its direction. When a link is established, the read and write handles are connected via toxics. 12 | 13 | ### Toxic 14 | 15 | In Noxious, most toxics simply take an input `Stream` of `Bytes`, and write data to a `Sink` of `Bytes`. Data passes through a chain of toxics and each toxic can manipulate the data passed, i.e. add delay, split packets into smaller packets etc. Some toxics like `SlowClose` and `LimitData` need more information about the connection, so they also take a `Stop` signal. 16 | 17 | ### Toxic Runner 18 | 19 | Noxious has a few key differences from Toxiproxy in how it adds and executes toxics on proxy connections: 20 | 21 | 1. It does not insert a `Noop` toxic as the first toxic in the chain. 22 | 2. When the toxics are updated for a proxy, it re-creates links with new toxic chains instead of mutating the existing toxic chain, without closing the proxy connection. 23 | 3. When a proxy is updated, it drops the old proxy, causing old connections to disconnect. This is practically the same behavior as Toxiproxy, as if you update the listen address or upstream address, you must close the proxy connections. 
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = ["core", "server", "client"] 4 | 5 | [profile.dev] 6 | panic = "abort" 7 | 8 | [profile.release] 9 | panic = "abort" 10 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.51 as depbuild 2 | # This is a stage with built dependencies and a dummy server project 3 | WORKDIR /usr/src 4 | COPY ./Cargo.toml ./Cargo.toml 5 | COPY ./Cargo.lock ./Cargo.lock 6 | COPY ./core ./core 7 | # TODO: find a way to avoid adding client to the workspace 8 | COPY ./client ./client 9 | 10 | RUN cargo new --bin --name noxious-server server 11 | COPY ./server/Cargo.toml ./server/Cargo.toml 12 | 13 | RUN cargo build --release --package noxious-server 14 | 15 | RUN rm -f ./target/release/deps/noxious_server* 16 | RUN rm ./target/release/noxious-server* 17 | 18 | # ----------- 19 | 20 | FROM rust:1.51 as serverbuild 21 | WORKDIR /usr/src 22 | COPY --from=depbuild /usr/src/Cargo.toml ./Cargo.toml 23 | COPY --from=depbuild /usr/src/Cargo.lock ./Cargo.lock 24 | COPY --from=depbuild /usr/src/core ./core 25 | COPY --from=depbuild /usr/src/client ./client 26 | COPY --from=depbuild /usr/src/target ./target 27 | COPY ./server ./server 28 | 29 | RUN cargo build --release --package noxious-server 30 | 31 | # ----------- 32 | 33 | FROM debian:buster-slim as server 34 | 35 | WORKDIR /app 36 | COPY --from=serverbuild /usr/src/target/release/noxious-server ./noxious-server 37 | 38 | EXPOSE 8474 39 | 40 | ENTRYPOINT ["./noxious-server"] 41 | CMD ["--host=0.0.0.0"] -------------------------------------------------------------------------------- /Dockerfile.arm64: -------------------------------------------------------------------------------- 1 | # Because docker 
arm builds on QEMU on GitHub actions is super slow (30+ minutes slow), 2 | # We cross-compile noxious for arm64 on amd64, then add the final binary in a arm64 image 3 | FROM --platform=linux/amd64 rust:latest as serverbuild 4 | 5 | ARG TARGET="aarch64-unknown-linux-gnu" 6 | 7 | RUN rustup target install $TARGET 8 | 9 | RUN apt-get update && apt-get install -y gcc-aarch64-linux-gnu 10 | WORKDIR /usr/src 11 | 12 | WORKDIR /usr/src 13 | COPY . ./ 14 | 15 | RUN cargo build \ 16 | --release \ 17 | --target $TARGET \ 18 | --package noxious-server 19 | 20 | FROM debian:buster-slim as server 21 | ARG TARGET="aarch64-unknown-linux-gnu" 22 | WORKDIR /app 23 | COPY --from=serverbuild /usr/src/target/$TARGET/release/noxious-server ./noxious-server 24 | 25 | EXPOSE 8474 26 | 27 | ENTRYPOINT ["./noxious-server"] 28 | CMD ["--host=0.0.0.0"] 29 | -------------------------------------------------------------------------------- /Dockerfile.armv7: -------------------------------------------------------------------------------- 1 | # Because docker arm builds on QEMU on GitHub actions is super slow (30+ minutes slow), 2 | # We cross-compile noxious for armv7 on amd64, then add the final binary in a armv7 image 3 | FROM --platform=linux/amd64 rust:latest as serverbuild 4 | 5 | ARG TARGET="armv7-unknown-linux-gnueabihf" 6 | 7 | RUN rustup target install $TARGET 8 | 9 | RUN apt-get update && apt-get install -y gcc-arm-linux-gnueabihf 10 | WORKDIR /usr/src 11 | 12 | WORKDIR /usr/src 13 | COPY . 
./ 14 | 15 | RUN cargo build \ 16 | --release \ 17 | --target $TARGET \ 18 | --package noxious-server 19 | 20 | FROM debian:buster-slim as server 21 | ARG TARGET="armv7-unknown-linux-gnueabihf" 22 | WORKDIR /app 23 | COPY --from=serverbuild /usr/src/target/$TARGET/release/noxious-server ./noxious-server 24 | 25 | EXPOSE 8474 26 | 27 | ENTRYPOINT ["./noxious-server"] 28 | CMD ["--host=0.0.0.0"] 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT OR Apache-2.0 -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Oguz Bilgener 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # noxious 2 | 3 | ![Docker Image Version (latest by date)](https://img.shields.io/docker/v/oguzbilgener/noxious) 4 | [![Unit Tests](https://github.com/oguzbilgener/noxious/actions/workflows/unit_tests.yml/badge.svg)](https://github.com/oguzbilgener/noxious/actions/workflows/unit_tests.yml) 5 | [![Coverage Status](https://coveralls.io/repos/github/oguzbilgener/noxious/badge.svg?branch=main)](https://coveralls.io/github/oguzbilgener/noxious?branch=main) 6 | ![Crates.io](https://img.shields.io/crates/l/noxious) 7 | [![semantic-release](https://img.shields.io/badge/semantic--release-enabled-brightgreen?logo=semantic-release)](https://github.com/semantic-release/semantic-release) 8 | 9 | A Rust port of [Toxiproxy] server, which is a TCP proxy to simulate network and system conditions for chaos and resiliency testing. 10 | 11 | Noxious is fully compatible with Toxiproxy with the same REST API, so you can use the Toxiproxy CLI and all the existing [client libraries][clients] for Toxiproxy with noxious. 12 | 13 | An async Rust client library called [noxious-client] is also available to interact with Noxious or Toxiproxy. 14 | 15 | Also check out [ARCHITECTURE.md] for implementation details. 
16 | 17 | [Toxiproxy]: https://github.com/Shopify/toxiproxy 18 | [clients]: https://github.com/Shopify/toxiproxy#clients 19 | [noxious-client]: https://docs.rs/noxious-client 20 | [ARCHITECTURE.md]: https://github.com/oguzbilgener/noxious/blob/main/ARCHITECTURE.md 21 | 22 | ### Quick Start 23 | 24 | Noxious server is available on [Docker Hub] and [GitHub Packages] for AMD64 and ARM. You can also find the executables for linux/amd64 in the [Releases] page. 25 | 26 | Alternatively, you can build Noxious from source with [cargo]. Run the `cargo build --release` command and the executable will be available at `./target/release/noxious-server`. 27 | 28 | You can run `noxious-server --help` to get the list of arguments. By default the API server listens on port **8474**. This can be changed by providing the `--port` command line argument. You can provide a JSON config file that declares an array of proxies to be created on startup with the `--config ./path/to/file.json` argument. 29 | 30 | For an extensive guide on how to use the Toxiproxy clients, please visit the [Toxiproxy] GitHub repository. 31 | 32 | [Docker Hub]: https://hub.docker.com/repository/docker/oguzbilgener/noxious 33 | [GitHub Packages]: https://github.com/users/oguzbilgener/packages/container/package/noxious 34 | [Releases]: https://github.com/oguzbilgener/noxious/releases 35 | [cargo]: https://doc.rust-lang.org/book/ch01-01-installation.html#installation 36 | 37 | #### With Docker 38 | 39 | When running in Docker, you will need to make sure that Noxious can reach the services that you are testing, and you can reach the ports that Noxious exposes for these services. You can use docker-compose, host networking or a bridge network, as described below. 
40 | 41 | Suppose you have a web service running in the `myserver` container, connected to network `my-net`, listening on port `8000`: 42 | 43 | ```sh 44 | docker network create -d bridge my-net 45 | docker run --name myserver --rm -p 8000:8000 --network=my-net myimage:latest 46 | ``` 47 | 48 | You can start Noxious with a command like: 49 | 50 | 51 | ```sh 52 | docker run --name noxious \ 53 | --rm \ 54 | -p 8474:8474 \ 55 | -p 8001:8001 \ 56 | --network=my-net \ 57 | oguzbilgener/noxious 58 | ``` 59 | 60 | You can create the proxy by using one of the [clients] or the toxiproxy-cli, or by using cURL: 61 | 62 | ```sh 63 | curl --request POST \ 64 | --url http://localhost:8474/proxies \ 65 | --header 'Content-Type: application/json' \ 66 | --data '{ 67 | "name": "myserver", 68 | "listen": "0.0.0.0:8001", 69 | "upstream": "myserver:8000", 70 | "enabled": true 71 | }' 72 | ``` 73 | 74 | Now you should be able to access your server via Noxious at `http://localhost:8001`, or at `http://noxious:8001` from another container within the same Docker network. 
75 | 76 | You can add a latency toxic to simulate a bad network condition: 77 | 78 | ```sh 79 | curl --request POST \ 80 | --url http://localhost:8474/proxies/myserver/toxics \ 81 | --header 'Content-Type: application/json' \ 82 | --data '{ 83 | "name": "myserver_latency", 84 | "type":"latency", 85 | "toxicity": 1, 86 | "direction": "upstream", 87 | "attributes": { 88 | "latency": 200, 89 | "jitter": 50 90 | } 91 | }' 92 | ``` 93 | 94 | 95 | #### Populating Proxies 96 | 97 | In addition to the initial JSON config, you can use the CLI or the REST API clients to create proxies: 98 | 99 | ```sh 100 | toxiproxy-cli create test_redis -l localhost:26379 -u localhost:6379 101 | ``` 102 | 103 | #### Adding Toxics 104 | 105 | You can add toxics using the client libraries, or via the CLI: 106 | 107 | ```sh 108 | toxiproxy-cli toxic add test_redis -t latency -a latency=1000 109 | ``` 110 | 111 | See the [Toxiproxy README][toxics_docs] for the full documentation of toxics. 112 | 113 | [toxics_docs]: https://github.com/Shopify/toxiproxy#toxics 114 | 115 | ### License 116 | 117 | Licensed under either of Apache License, Version 2.0 or MIT license at your option. 
-------------------------------------------------------------------------------- /client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "noxious-client" 3 | version = "0.1.0" 4 | description = "An async client library for Toxiproxy and its Rust port, Noxious" 5 | authors = ["Oguz Bilgener "] 6 | edition = "2018" 7 | documentation = "https://docs.rs/noxious-client" 8 | license = "MIT OR Apache-2.0" 9 | categories = ["network-programming", "development-tools"] 10 | keywords = ["proxy", "toxiproxy", "chaos", "tcp"] 11 | 12 | include = [ 13 | "CHANGELOG.md", 14 | "LICENSE", 15 | "README.md", 16 | ".gitignore", 17 | "Cargo.toml", 18 | "src/*.rs", 19 | "src/*/*.rs", 20 | "tests/*.rs", 21 | "benches/**/*.rs", 22 | ] 23 | 24 | [badges] 25 | maintenance = { status = "actively-developed" } 26 | 27 | [dependencies] 28 | # noxious = { path = "../core" } 29 | noxious = { version = "0.1.1" } 30 | reqwest = { version = "0.11", features = ["json"] } 31 | tokio = { version = "1", features = ["macros", "sync", "net", "rt", "signal"] } 32 | thiserror = "1.0.24" 33 | serde = { version = "^1.0.123", features = ["derive"] } 34 | 35 | 36 | [dev-dependencies] 37 | tokio = { version = "1", features = [ 38 | "net", 39 | "sync", 40 | "signal", 41 | "io-util", 42 | "rt", 43 | "macros", 44 | "time", 45 | "test-util", 46 | ] } 47 | tokio-test = "0.4.0" 48 | serde_json = "^1.0.62" 49 | wiremock = "0.5" 50 | 51 | [[test]] 52 | name = "client" 53 | -------------------------------------------------------------------------------- /client/src/client.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use crate::{ 4 | error::{ApiErrorResponse, ClientError}, 5 | proxy::Proxy, 6 | }; 7 | use noxious::proxy::{ProxyConfig, ProxyWithToxics}; 8 | use reqwest::{Client as HttpClient, Response, StatusCode}; 9 | 10 | /// A client for Noxious and Toxiproxy 11 | /// It 
follows the same naming conventions for the methods. 12 | #[derive(Debug)] 13 | pub struct Client { 14 | base_url: String, 15 | } 16 | 17 | // TODO: fix the error type 18 | pub type Result = std::result::Result; 19 | 20 | impl Client { 21 | /// Create a new client 22 | /// 23 | /// Panics if the given url starts with `https://`. 24 | pub fn new(url: &str) -> Client { 25 | if url.starts_with("https://") { 26 | panic!("the toxiproxy client does not support https"); 27 | } 28 | let base_url = if !url.starts_with("http://") { 29 | format!("http://{}", url) 30 | } else { 31 | url.to_owned() 32 | }; 33 | Client { base_url } 34 | } 35 | 36 | /// Returns a proxy by name, if it already exists 37 | pub async fn proxy(&self, name: &str) -> Result { 38 | let res = HttpClient::new() 39 | .get(self.base_url.clone() + "/proxies/" + name) 40 | .send() 41 | .await?; 42 | if res.status().is_success() { 43 | Ok(Proxy::from_proxy_with_toxics( 44 | &self.base_url, 45 | res.json::().await?, 46 | )) 47 | } else { 48 | Err(get_error_body(res, StatusCode::OK).await) 49 | } 50 | } 51 | 52 | /// Returns a map with all the proxies and their toxics 53 | pub async fn proxies(&self) -> Result> { 54 | let res = HttpClient::new() 55 | .get(self.base_url.clone() + "/proxies") 56 | .send() 57 | .await?; 58 | if res.status().is_success() { 59 | Ok(res 60 | .json::>() 61 | .await? 62 | .into_iter() 63 | .map(|(name, proxy)| (name, Proxy::from_proxy_with_toxics(&self.base_url, proxy))) 64 | .collect()) 65 | } else { 66 | Err(get_error_body(res, StatusCode::OK).await) 67 | } 68 | } 69 | 70 | /// Instantiates a new proxy config, sends it to the server 71 | /// The server starts listening on the specified address 72 | pub async fn create_proxy(&self, name: &str, listen: &str, upstream: &str) -> Result { 73 | let mut proxy = Proxy::new(&self.base_url, name, listen, upstream); 74 | proxy.save().await?; 75 | Ok(proxy) 76 | } 77 | 78 | /// Create a list of proxies using a configuration list. 
If a proxy already exists, 79 | /// it will be replaced with the specified configuration. 80 | pub async fn populate(&self, proxies: &[ProxyConfig]) -> Result> { 81 | let res = HttpClient::new() 82 | .post(self.base_url.clone() + "/populate") 83 | .json(proxies) 84 | .send() 85 | .await?; 86 | if res.status().is_success() { 87 | Ok(res 88 | .json::>() 89 | .await? 90 | .into_iter() 91 | .map(|item| Proxy::from_proxy_with_toxics(&self.base_url, item)) 92 | .collect::>()) 93 | } else { 94 | Err(get_error_body(res, StatusCode::CREATED).await) 95 | } 96 | } 97 | 98 | /// Resets the state of all proxies by removing all the toxic from all proxies 99 | pub async fn reset_state(&self) -> Result<()> { 100 | let res = HttpClient::new() 101 | .post(self.base_url.clone() + "/reset") 102 | .send() 103 | .await?; 104 | if res.status().is_success() { 105 | Ok(()) 106 | } else { 107 | Err(get_error_body(res, StatusCode::NO_CONTENT).await) 108 | } 109 | } 110 | } 111 | 112 | pub(crate) async fn get_error_body(res: Response, expected_status: StatusCode) -> ClientError { 113 | let code = res.status(); 114 | if let Ok(api_error) = res.json::().await { 115 | ClientError::ApiError(api_error) 116 | } else { 117 | ClientError::UnexpectedStatusCode(code.into(), expected_status.into()) 118 | } 119 | } 120 | 121 | #[cfg(test)] 122 | mod tests { 123 | use super::*; 124 | 125 | #[test] 126 | #[should_panic] 127 | fn client_does_not_allow_https() { 128 | let _client = Client::new("https://blahblah"); 129 | } 130 | 131 | #[test] 132 | fn client_adds_protocol() { 133 | let client = Client::new("blahblah"); 134 | assert_eq!("http://blahblah", client.base_url); 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /client/src/error.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use thiserror::Error; 3 | 4 | /// The errors that the client returns 5 | #[derive(Debug, 
Clone, Error, PartialEq)] 6 | pub enum ClientError { 7 | /// An I/O error happened 8 | #[error("I/O error: {0:?}")] 9 | IoError(String), 10 | /// Server returned an error 11 | #[error("API error: {0}")] 12 | ApiError(ApiErrorResponse), 13 | /// Unexpected status code, cannot parse the response body 14 | #[error("Unexpected response code {0}, expected {1}")] 15 | UnexpectedStatusCode(u16, u16), 16 | } 17 | 18 | /// The struct that describes the error response 19 | #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] 20 | pub struct ApiErrorResponse { 21 | /// The error message 22 | #[serde(rename = "error")] 23 | pub message: String, 24 | /// The error code which is usually the same as the http status code 25 | #[serde(rename = "status")] 26 | pub status_code: u16, 27 | } 28 | 29 | impl std::fmt::Display for ApiErrorResponse { 30 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 31 | write!(f, "{}: {}", self.status_code, self.message) 32 | } 33 | } 34 | 35 | impl From for ClientError { 36 | fn from(err: reqwest::Error) -> Self { 37 | ClientError::IoError(err.to_string()) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /client/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![warn( 3 | missing_copy_implementations, 4 | missing_debug_implementations, 5 | missing_docs, 6 | trivial_numeric_casts, 7 | unused_extern_crates, 8 | unused_import_braces, 9 | unused_qualifications 10 | )] 11 | #![cfg_attr(feature = "clippy", warn(cast_possible_truncation))] 12 | #![cfg_attr(feature = "clippy", warn(cast_possible_wrap))] 13 | #![cfg_attr(feature = "clippy", warn(cast_precision_loss))] 14 | #![cfg_attr(feature = "clippy", warn(cast_sign_loss))] 15 | #![cfg_attr(feature = "clippy", warn(missing_docs_in_private_items))] 16 | #![cfg_attr(feature = "clippy", warn(mut_mut))] 17 | #![cfg_attr(feature = "clippy", 
warn(print_stdout))]
#![cfg_attr(all(not(test), feature = "clippy"), warn(result_unwrap_used))]
#![cfg_attr(feature = "clippy", warn(unseparated_literal_suffix))]
#![cfg_attr(feature = "clippy", warn(wrong_pub_self_convention))]

//! # noxious-client

mod client;
/// The errors returned by the server
pub mod error;
mod proxy;
pub use client::Client;
pub use noxious::proxy::ProxyConfig;
pub use noxious::toxic::{StreamDirection, Toxic, ToxicKind};
pub use proxy::Proxy;

// ---------------------------------------------------------------------------
// /client/src/proxy.rs
// ---------------------------------------------------------------------------

use crate::client::get_error_body;
use crate::client::Result;
use noxious::{
    proxy::{ProxyConfig, ProxyWithToxics},
    toxic::{StreamDirection, Toxic, ToxicKind},
};
use reqwest::{Client as HttpClient, StatusCode};

/// A proxy object returned by the [`Client`](Client).
/// To manipulate this proxy and manipulate the toxics, you can call methods on
/// this object.
#[derive(Debug, Clone, PartialEq)]
pub struct Proxy {
    /// Base URL of the API server this proxy was fetched from.
    base_url: String,
    /// Whether this proxy already exists on the server; decides which endpoint
    /// `save()` posts to and which success status it expects.
    created: bool,
    /// Contains the proxy listen and upstream address, name. You can mutate them
    /// and call `.save()` to update the proxy.
    pub config: ProxyConfig,
    /// NOTE(review): element type restored as `Toxic`; the dump had stripped
    /// the generic parameter (`toxics: Vec,`).
    toxics: Vec<Toxic>,
}

impl Proxy {
    /// Save saves changes to a proxy such as its enabled status or upstream port.
    /// Note: this does not update the toxics
    pub async fn save(&mut self) -> Result<()> {
        // An existing proxy is updated at its own endpoint; a brand-new one is
        // created at the collection endpoint.
        let request = if self.created {
            HttpClient::new()
                .post(self.base_url.clone() + "/proxies/" + &self.config.name)
                .json(&self.config)
        } else {
            HttpClient::new()
                .post(self.base_url.clone() + "/proxies")
                .json(&self.config)
        };
        let response = request.send().await?;
        if response.status().is_success() {
            self.created = true;
            Ok(())
        } else {
            // Update is expected to return 200 OK; creation 201 Created.
            let expected_status = if self.created {
                StatusCode::OK
            } else {
                StatusCode::CREATED
            };
            Err(get_error_body(response, expected_status).await)
        }
    }

    /// Enable a proxy again after it has been disabled
    pub async fn enable(&mut self) -> Result<()> {
        self.config.enabled = true;
        self.save().await
    }

    /// Disable a proxy so that no connections can pass through. This will drop all active connections.
    pub async fn disable(&mut self) -> Result<()> {
        self.config.enabled = false;
        self.save().await
    }

    /// Returns whether this proxy is enabled or not
    pub fn is_enabled(&self) -> bool {
        self.config.enabled
    }

    /// Give this proxy a new name, save it.
    pub async fn change_name(&mut self, new_name: &str) -> Result<()> {
        // Post to the endpoint keyed by the *old* name; the body carries the new one.
        let old_name = self.config.name.clone();
        self.config.name = new_name.to_owned();
        let res = HttpClient::new()
            .post(self.base_url.clone() + "/proxies/" + &old_name)
            .json(&self.config)
            .send()
            .await?;
        if res.status().is_success() {
            Ok(())
        } else {
            Err(get_error_body(res, StatusCode::OK).await)
        }
    }

    /// Delete a proxy completely and close all existing connections through it. All information about
    /// the proxy such as listen port and active toxics will be deleted as well. If you just wish to
    /// stop and later enable a proxy, use `enable()` and `disable()`.
    pub async fn delete(self) -> Result<()> {
        let res = HttpClient::new()
            .delete(self.base_url.clone() + "/proxies/" + &self.config.name)
            .send()
            .await?;
        if res.status().is_success() {
            Ok(())
        } else {
            Err(get_error_body(res, StatusCode::NO_CONTENT).await)
        }
    }

    /// Returns a list of all active toxics and their attributes
    pub async fn toxics(&self) -> Result<Vec<Toxic>> {
        let res = HttpClient::new()
            .get(self.base_url.clone() + "/proxies/" + &self.config.name + "/toxics")
            .send()
            .await?;

        if res.status().is_success() {
            Ok(res.json::<Vec<Toxic>>().await?)
        } else {
            Err(get_error_body(res, StatusCode::OK).await)
        }
    }

    /// Add a new toxic to this proxy.
    ///
    /// # Example
    /// ```ignore
    /// use noxious_client::{Client, Toxic, ToxicKind, StreamDirection};
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let toxic = Toxic {
    ///         kind: ToxicKind::Latency { latency: 40, jitter: 5 },
    ///         name: "myProxy_latency".to_owned(),
    ///         toxicity: 0.9,
    ///         direction: StreamDirection::Upstream,
    ///     };
    ///
    ///     let client = Client::new("127.0.0.1:8474");
    ///     let result = client.add_toxic(&toxic).await;
    /// }
    /// ```
    pub async fn add_toxic(&self, toxic: &Toxic) -> Result<Toxic> {
        let res = HttpClient::new()
            .post(self.base_url.clone() + "/proxies/" + &self.config.name + "/toxics")
            .json(toxic)
            .send()
            .await?;

        if res.status().is_success() {
            Ok(res.json::<Toxic>().await?)
        } else {
            Err(get_error_body(res, StatusCode::OK).await)
        }
    }

    /// Updates a toxic with the given name
    /// If toxicity is below zero, it will be sent as 0
    pub async fn update_toxic(
        &self,
        name: &str,
        toxicity: f32,
        kind: ToxicKind,
        direction: StreamDirection,
    ) -> Result<Toxic> {
        // Clamp negative toxicity to zero before sending.
        let toxicity: f32 = if toxicity < 0.0 { 0.0 } else { toxicity };
        let toxic = Toxic {
            kind,
            name: name.to_owned(),
            toxicity,
            direction,
        };
        let res = HttpClient::new()
            .post(self.base_url.clone() + "/proxies/" + &self.config.name + "/toxics/" + name)
            .json(&toxic)
            .send()
            .await?;

        if res.status().is_success() {
            Ok(res.json::<Toxic>().await?)
        } else {
            Err(get_error_body(res, StatusCode::OK).await)
        }
    }

    /// Removes a toxic with the given name
    pub async fn remove_toxic(&self, name: &str) -> Result<()> {
        let res = HttpClient::new()
            .delete(self.base_url.clone() + "/proxies/" + &self.config.name + "/toxics/" + name)
            .send()
            .await?;
        if res.status().is_success() {
            Ok(())
        } else {
            Err(get_error_body(res, StatusCode::NO_CONTENT).await)
        }
    }

    /// Build a local, not-yet-saved proxy handle; `save()` will create it on the server.
    pub(crate) fn new(base_url: &str, name: &str, listen: &str, upstream: &str) -> Proxy {
        Proxy {
            base_url: base_url.to_owned(),
            created: false,
            config: ProxyConfig {
                name: name.to_owned(),
                listen: listen.to_owned(),
                upstream: upstream.to_owned(),
                enabled: true,
                rand_seed: None,
            },
            toxics: Vec::new(),
        }
    }

    /// Build a handle for a proxy that already exists on the server.
    #[doc(hidden)]
    pub fn from_proxy_with_toxics(base_url: &str, obj: ProxyWithToxics) -> Proxy {
        Proxy {
            base_url: base_url.to_owned(),
            created: true,
            config: obj.proxy,
            toxics: obj.toxics,
        }
    }
}
-------------------------------------------------------------------------------- /client/tests/client.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use noxious::proxy::ProxyWithToxics; 4 | use noxious_client::{ 5 | error::{ApiErrorResponse, ClientError}, 6 | Client, Proxy, ProxyConfig, StreamDirection, Toxic, ToxicKind, 7 | }; 8 | use tokio_test::assert_ok; 9 | use wiremock::matchers::{body_json, method, path}; 10 | use wiremock::{Mock, MockServer, ResponseTemplate}; 11 | 12 | #[tokio::test] 13 | async fn get_proxies_empty() { 14 | let mock_server = MockServer::start().await; 15 | let body = serde_json::to_value(HashMap::::new()).unwrap(); 16 | Mock::given(method("GET")) 17 | .and(path("/proxies")) 18 | .respond_with(ResponseTemplate::new(200).set_body_json(body)) 19 | .mount(&mock_server) 20 | .await; 21 | 22 | let client = Client::new(&mock_server.uri()); 23 | let result = client.proxies().await; 24 | assert_eq!(Ok(HashMap::new()), result); 25 | } 26 | 27 | #[tokio::test] 28 | async fn get_proxy_not_found() { 29 | let mock_server = MockServer::start().await; 30 | let body = ApiErrorResponse { 31 | message: "proxy not found".to_owned(), 32 | status_code: 404, 33 | }; 34 | 35 | Mock::given(method("GET")) 36 | .and(path("/proxies/blah")) 37 | .respond_with(ResponseTemplate::new(404).set_body_json(&body)) 38 | .mount(&mock_server) 39 | .await; 40 | 41 | let client = Client::new(&mock_server.uri()); 42 | let result = client.proxy("blah").await; 43 | assert_eq!(Err(ClientError::ApiError(body)), result); 44 | } 45 | 46 | #[tokio::test] 47 | async fn get_proxy_found() { 48 | let mock_server = MockServer::start().await; 49 | let body = ProxyWithToxics { 50 | proxy: ProxyConfig { 51 | name: "blah".to_owned(), 52 | listen: "127.0.0.1:5555".to_owned(), 53 | upstream: "127.0.0.1:5556".to_owned(), 54 | enabled: true, 55 | rand_seed: None, 56 | }, 57 | toxics: vec![Toxic { 58 | kind: 
ToxicKind::SlowClose { delay: 1000 }, 59 | name: "t1".to_owned(), 60 | toxicity: 0.5, 61 | direction: StreamDirection::Upstream, 62 | }], 63 | }; 64 | 65 | Mock::given(method("GET")) 66 | .and(path("/proxies/blah")) 67 | .respond_with(ResponseTemplate::new(200).set_body_json(&body)) 68 | .mount(&mock_server) 69 | .await; 70 | 71 | let client = Client::new(&mock_server.uri()); 72 | let result = client.proxy("blah").await; 73 | let proxy = Proxy::from_proxy_with_toxics(&mock_server.uri(), body); 74 | assert_eq!(Ok(proxy), result); 75 | } 76 | 77 | #[tokio::test] 78 | async fn reset_state_success() { 79 | let mock_server = MockServer::start().await; 80 | 81 | Mock::given(method("POST")) 82 | .and(path("/reset")) 83 | .respond_with(ResponseTemplate::new(204)) 84 | .mount(&mock_server) 85 | .await; 86 | 87 | let client = Client::new(&mock_server.uri()); 88 | let result = client.reset_state().await; 89 | assert_eq!(Ok(()), result); 90 | } 91 | 92 | #[tokio::test] 93 | async fn populate() { 94 | let mock_server = MockServer::start().await; 95 | 96 | let config1 = ProxyConfig { 97 | name: "blah".to_owned(), 98 | listen: "127.0.0.1:5555".to_owned(), 99 | upstream: "127.0.0.1:5556".to_owned(), 100 | enabled: true, 101 | rand_seed: None, 102 | }; 103 | let config2 = ProxyConfig { 104 | name: "p2".to_owned(), 105 | listen: "127.0.0.1:5553".to_owned(), 106 | upstream: "127.0.0.1:5554".to_owned(), 107 | enabled: true, 108 | rand_seed: None, 109 | }; 110 | 111 | let p1 = ProxyWithToxics { 112 | proxy: config1.clone(), 113 | toxics: vec![Toxic { 114 | kind: ToxicKind::SlowClose { delay: 1000 }, 115 | name: "t1".to_owned(), 116 | toxicity: 0.5, 117 | direction: StreamDirection::Upstream, 118 | }], 119 | }; 120 | let p2 = ProxyWithToxics { 121 | proxy: config2.clone(), 122 | toxics: vec![Toxic { 123 | kind: ToxicKind::Latency { 124 | latency: 1000, 125 | jitter: 30, 126 | }, 127 | name: "t1".to_owned(), 128 | toxicity: 1.0, 129 | direction: StreamDirection::Downstream, 130 | }], 131 
| }; 132 | let proxies = vec![p1, p2]; 133 | let input = vec![config1, config2]; 134 | 135 | Mock::given(method("POST")) 136 | .and(path("/populate")) 137 | .respond_with(ResponseTemplate::new(201).set_body_json(&proxies)) 138 | .mount(&mock_server) 139 | .await; 140 | 141 | let client = Client::new(&mock_server.uri()); 142 | let result = client.populate(&input).await; 143 | let expected = proxies 144 | .into_iter() 145 | .map(|p| Proxy::from_proxy_with_toxics(&mock_server.uri(), p)) 146 | .collect(); 147 | assert_eq!(Ok(expected), result); 148 | } 149 | 150 | #[tokio::test] 151 | async fn populate_error() { 152 | let mock_server = MockServer::start().await; 153 | 154 | let body = ApiErrorResponse { 155 | message: "something odd happened".to_owned(), 156 | status_code: 500, 157 | }; 158 | 159 | let input = vec![]; 160 | 161 | Mock::given(method("POST")) 162 | .and(path("/populate")) 163 | .respond_with(ResponseTemplate::new(500).set_body_json(&body)) 164 | .mount(&mock_server) 165 | .await; 166 | 167 | let client = Client::new(&mock_server.uri()); 168 | let result = client.populate(&input).await; 169 | assert_eq!(Err(ClientError::ApiError(body)), result); 170 | } 171 | 172 | #[tokio::test] 173 | async fn reset_state_error() { 174 | let mock_server = MockServer::start().await; 175 | 176 | let body = ApiErrorResponse { 177 | message: "something odd happened".to_owned(), 178 | status_code: 500, 179 | }; 180 | 181 | Mock::given(method("POST")) 182 | .and(path("/reset")) 183 | .respond_with(ResponseTemplate::new(500).set_body_json(&body)) 184 | .mount(&mock_server) 185 | .await; 186 | 187 | let client = Client::new(&mock_server.uri()); 188 | let result = client.reset_state().await; 189 | assert_eq!(Err(ClientError::ApiError(body)), result); 190 | } 191 | 192 | #[tokio::test] 193 | async fn reset_state_unexpected() { 194 | let mock_server = MockServer::start().await; 195 | 196 | let body = serde_json::json!({ 197 | "thing": "asdf" 198 | }); 199 | 200 | 
Mock::given(method("POST")) 201 | .and(path("/reset")) 202 | .respond_with(ResponseTemplate::new(500).set_body_json(&body)) 203 | .mount(&mock_server) 204 | .await; 205 | 206 | let client = Client::new(&mock_server.uri()); 207 | let result = client.reset_state().await; 208 | assert_eq!(Err(ClientError::UnexpectedStatusCode(500, 204)), result); 209 | } 210 | 211 | #[tokio::test] 212 | async fn reset_state_io_error() { 213 | let client = Client::new("blah"); 214 | let result = client.reset_state().await; 215 | match result { 216 | Err(ClientError::IoError(_)) => {} 217 | _ => { 218 | #[cfg_attr(tarpaulin, ignore)] 219 | panic!("invalid error kind") 220 | } 221 | } 222 | } 223 | 224 | #[tokio::test] 225 | async fn populate_io_error() { 226 | let client = Client::new("blah"); 227 | let result = client.populate(&Vec::new()).await; 228 | match result { 229 | Err(ClientError::IoError(_)) => {} 230 | _ => { 231 | #[cfg_attr(tarpaulin, ignore)] 232 | panic!("invalid error kind") 233 | } 234 | } 235 | } 236 | 237 | #[tokio::test] 238 | async fn get_proxies_io_error() { 239 | let client = Client::new("blah"); 240 | let result = client.proxies().await; 241 | match result { 242 | Err(ClientError::IoError(_)) => {} 243 | _ => { 244 | #[cfg_attr(tarpaulin, ignore)] 245 | panic!("invalid error kind") 246 | } 247 | } 248 | } 249 | 250 | #[tokio::test] 251 | async fn get_proxy_io_error() { 252 | let client = Client::new("blah"); 253 | let result = client.proxy("asdf").await; 254 | match result { 255 | Err(ClientError::IoError(_)) => {} 256 | _ => { 257 | #[cfg_attr(tarpaulin, ignore)] 258 | panic!("invalid error kind") 259 | } 260 | } 261 | } 262 | 263 | #[tokio::test] 264 | async fn disable_proxy() { 265 | let mock_server = MockServer::start().await; 266 | let body = ProxyWithToxics { 267 | proxy: ProxyConfig { 268 | name: "blah".to_owned(), 269 | listen: "127.0.0.1:5555".to_owned(), 270 | upstream: "127.0.0.1:5556".to_owned(), 271 | enabled: true, 272 | rand_seed: None, 273 | }, 
274 | toxics: vec![Toxic { 275 | kind: ToxicKind::SlowClose { delay: 1000 }, 276 | name: "t1".to_owned(), 277 | toxicity: 0.5, 278 | direction: StreamDirection::Upstream, 279 | }], 280 | }; 281 | let mut body2 = body.clone(); 282 | body2.proxy.enabled = false; 283 | let update_payload = &body2.proxy; 284 | 285 | Mock::given(method("GET")) 286 | .and(path("/proxies/blah")) 287 | .respond_with(ResponseTemplate::new(200).set_body_json(&body)) 288 | .mount(&mock_server) 289 | .await; 290 | 291 | Mock::given(method("POST")) 292 | .and(path("/proxies/blah")) 293 | .and(body_json(update_payload)) 294 | .respond_with(ResponseTemplate::new(200).set_body_json(&body)) 295 | .mount(&mock_server) 296 | .await; 297 | 298 | let client = Client::new(&mock_server.uri()); 299 | let result = client.proxy("blah").await; 300 | let mut proxy = Proxy::from_proxy_with_toxics(&mock_server.uri(), body); 301 | assert_eq!(Ok(proxy.clone()), result); 302 | 303 | assert_ok!(proxy.disable().await); 304 | assert!(!proxy.is_enabled()); 305 | } 306 | 307 | #[tokio::test] 308 | async fn enable_proxy() { 309 | let mock_server = MockServer::start().await; 310 | let body = ProxyWithToxics { 311 | proxy: ProxyConfig { 312 | name: "blah".to_owned(), 313 | listen: "127.0.0.1:5555".to_owned(), 314 | upstream: "127.0.0.1:5556".to_owned(), 315 | enabled: false, 316 | rand_seed: None, 317 | }, 318 | toxics: vec![Toxic { 319 | kind: ToxicKind::SlowClose { delay: 1000 }, 320 | name: "t1".to_owned(), 321 | toxicity: 0.5, 322 | direction: StreamDirection::Upstream, 323 | }], 324 | }; 325 | let mut body2 = body.clone(); 326 | body2.proxy.enabled = true; 327 | let update_payload = &body2.proxy; 328 | 329 | Mock::given(method("GET")) 330 | .and(path("/proxies/blah")) 331 | .respond_with(ResponseTemplate::new(200).set_body_json(&body)) 332 | .mount(&mock_server) 333 | .await; 334 | 335 | Mock::given(method("POST")) 336 | .and(path("/proxies/blah")) 337 | .and(body_json(update_payload)) 338 | 
.respond_with(ResponseTemplate::new(200).set_body_json(&body)) 339 | .mount(&mock_server) 340 | .await; 341 | 342 | let client = Client::new(&mock_server.uri()); 343 | let result = client.proxy("blah").await; 344 | let mut proxy = Proxy::from_proxy_with_toxics(&mock_server.uri(), body); 345 | assert_eq!(Ok(proxy.clone()), result); 346 | 347 | assert_ok!(proxy.enable().await); 348 | assert!(proxy.is_enabled()); 349 | } 350 | 351 | #[tokio::test] 352 | async fn create() { 353 | let mock_server = MockServer::start().await; 354 | let body = ProxyWithToxics { 355 | proxy: ProxyConfig { 356 | name: "my_proxy".to_owned(), 357 | listen: "127.0.0.1:5556".to_owned(), 358 | upstream: "127.0.0.1:5557".to_owned(), 359 | enabled: true, 360 | rand_seed: None, 361 | }, 362 | toxics: Vec::new(), 363 | }; 364 | 365 | Mock::given(method("POST")) 366 | .and(path("/proxies")) 367 | .and(body_json(&body.proxy)) 368 | .respond_with(ResponseTemplate::new(201).set_body_json(&body)) 369 | .mount(&mock_server) 370 | .await; 371 | 372 | let client = Client::new(&mock_server.uri()); 373 | let result = client 374 | .create_proxy("my_proxy", "127.0.0.1:5556", "127.0.0.1:5557") 375 | .await; 376 | let proxy = Proxy::from_proxy_with_toxics(&mock_server.uri(), body); 377 | assert_eq!(Ok(proxy), result); 378 | } 379 | 380 | #[tokio::test] 381 | async fn create_error() { 382 | let mock_server = MockServer::start().await; 383 | 384 | let proxy = ProxyConfig { 385 | name: "my_proxy".to_owned(), 386 | listen: "127.0.0.1:5556".to_owned(), 387 | upstream: "127.0.0.1:5557".to_owned(), 388 | enabled: true, 389 | rand_seed: None, 390 | }; 391 | 392 | let body = ApiErrorResponse { 393 | message: "something odd happened".to_owned(), 394 | status_code: 500, 395 | }; 396 | 397 | Mock::given(method("POST")) 398 | .and(path("/proxies")) 399 | .and(body_json(&proxy)) 400 | .respond_with(ResponseTemplate::new(500).set_body_json(&body)) 401 | .mount(&mock_server) 402 | .await; 403 | 404 | let client = 
Client::new(&mock_server.uri()); 405 | let result = client 406 | .create_proxy("my_proxy", "127.0.0.1:5556", "127.0.0.1:5557") 407 | .await; 408 | assert_eq!(Err(ClientError::ApiError(body)), result); 409 | } 410 | 411 | #[tokio::test] 412 | async fn create_io_error() { 413 | let client = Client::new("asdf"); 414 | let result = client 415 | .create_proxy("my_proxy", "127.0.0.1:5556", "127.0.0.1:5557") 416 | .await; 417 | match result { 418 | Err(ClientError::IoError(_)) => {} 419 | _ => { 420 | #[cfg_attr(tarpaulin, ignore)] 421 | panic!("invalid error kind") 422 | } 423 | } 424 | } 425 | 426 | #[tokio::test] 427 | async fn change_proxy_name() { 428 | let mock_server = MockServer::start().await; 429 | let body = ProxyWithToxics { 430 | proxy: ProxyConfig { 431 | name: "my_proxy".to_owned(), 432 | listen: "127.0.0.1:5556".to_owned(), 433 | upstream: "127.0.0.1:5557".to_owned(), 434 | enabled: true, 435 | rand_seed: None, 436 | }, 437 | toxics: Vec::new(), 438 | }; 439 | let mut body2 = body.clone(); 440 | body2.proxy.name = "updated_proxy".to_owned(); 441 | 442 | Mock::given(method("POST")) 443 | .and(path("/proxies")) 444 | .and(body_json(&body.proxy)) 445 | .respond_with(ResponseTemplate::new(201).set_body_json(&body)) 446 | .mount(&mock_server) 447 | .await; 448 | 449 | Mock::given(method("POST")) 450 | .and(path("/proxies/my_proxy")) 451 | .and(body_json(&body2.proxy)) 452 | .respond_with(ResponseTemplate::new(200).set_body_json(&body2)) 453 | .mount(&mock_server) 454 | .await; 455 | 456 | let client = Client::new(&mock_server.uri()); 457 | let result = client 458 | .create_proxy("my_proxy", "127.0.0.1:5556", "127.0.0.1:5557") 459 | .await; 460 | let mut proxy = Proxy::from_proxy_with_toxics(&mock_server.uri(), body); 461 | assert_eq!(Ok(proxy.clone()), result); 462 | 463 | let result = proxy.change_name("updated_proxy").await; 464 | assert_eq!(Ok(()), result); 465 | } 466 | 467 | #[tokio::test] 468 | async fn get_toxics() { 469 | let mock_server = 
MockServer::start().await; 470 | let body = ProxyWithToxics { 471 | proxy: ProxyConfig { 472 | name: "my_proxy".to_owned(), 473 | listen: "127.0.0.1:5556".to_owned(), 474 | upstream: "127.0.0.1:5557".to_owned(), 475 | enabled: true, 476 | rand_seed: None, 477 | }, 478 | toxics: vec![Toxic { 479 | kind: ToxicKind::SlowClose { delay: 1000 }, 480 | name: "t1".to_owned(), 481 | toxicity: 0.5, 482 | direction: StreamDirection::Upstream, 483 | }], 484 | }; 485 | 486 | Mock::given(method("GET")) 487 | .and(path("/proxies/my_proxy")) 488 | .respond_with(ResponseTemplate::new(200).set_body_json(&body)) 489 | .mount(&mock_server) 490 | .await; 491 | 492 | Mock::given(method("GET")) 493 | .and(path("/proxies/my_proxy/toxics")) 494 | .respond_with(ResponseTemplate::new(200).set_body_json(&body.toxics)) 495 | .mount(&mock_server) 496 | .await; 497 | 498 | let client = Client::new(&mock_server.uri()); 499 | let result = client.proxy("my_proxy").await; 500 | let proxy = Proxy::from_proxy_with_toxics(&mock_server.uri(), body.clone()); 501 | assert_eq!(Ok(proxy.clone()), result); 502 | 503 | let result = proxy.toxics().await; 504 | assert_eq!(Ok(body.toxics), result); 505 | } 506 | 507 | #[tokio::test] 508 | async fn create_toxic() { 509 | let mock_server = MockServer::start().await; 510 | let body = ProxyWithToxics { 511 | proxy: ProxyConfig { 512 | name: "my_proxy".to_owned(), 513 | listen: "127.0.0.1:5556".to_owned(), 514 | upstream: "127.0.0.1:5557".to_owned(), 515 | enabled: true, 516 | rand_seed: None, 517 | }, 518 | toxics: vec![Toxic { 519 | kind: ToxicKind::SlowClose { delay: 1000 }, 520 | name: "t1".to_owned(), 521 | toxicity: 0.5, 522 | direction: StreamDirection::Upstream, 523 | }], 524 | }; 525 | let mut toxic2 = body.toxics[0].clone(); 526 | toxic2.name = "t2".to_owned(); 527 | 528 | Mock::given(method("GET")) 529 | .and(path("/proxies/my_proxy")) 530 | .respond_with(ResponseTemplate::new(200).set_body_json(&body)) 531 | .mount(&mock_server) 532 | .await; 533 | 534 | 
Mock::given(method("POST")) 535 | .and(path("/proxies/my_proxy/toxics")) 536 | .and(body_json(&toxic2)) 537 | .respond_with(ResponseTemplate::new(200).set_body_json(&toxic2)) 538 | .mount(&mock_server) 539 | .await; 540 | 541 | let client = Client::new(&mock_server.uri()); 542 | let result = client.proxy("my_proxy").await; 543 | let proxy = Proxy::from_proxy_with_toxics(&mock_server.uri(), body.clone()); 544 | assert_eq!(Ok(proxy.clone()), result); 545 | 546 | let result = proxy.add_toxic(&toxic2).await; 547 | assert_eq!(Ok(toxic2), result); 548 | } 549 | -------------------------------------------------------------------------------- /core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "noxious" 3 | description = "The core library for noxious, a Rust port of Toxiproxy" 4 | version = "0.1.1" 5 | authors = ["Oguz Bilgener "] 6 | repository = "https://github.com/oguzbilgener/noxious" 7 | edition = "2018" 8 | documentation = "https://docs.rs/noxious" 9 | license = "MIT OR Apache-2.0" 10 | categories = ["network-programming", "development-tools"] 11 | keywords = ["proxy", "toxiproxy", "chaos", "tcp"] 12 | 13 | include = [ 14 | "CHANGELOG.md", 15 | "LICENSE", 16 | "README.md", 17 | ".gitignore", 18 | "Cargo.toml", 19 | "src/*.rs", 20 | "src/*/*.rs", 21 | "tests/*.rs", 22 | "benches/**/*.rs", 23 | ] 24 | 25 | [badges] 26 | maintenance = { status = "actively-developed" } 27 | 28 | [dependencies] 29 | tokio = { version = "1", features = ["net", "sync", "signal", "io-util", "rt", "macros", "time"] } 30 | rand = { version = "0.8.3", features = ["std_rng"] } 31 | bytes = "1.0.1" 32 | serde = { version = "^1.0.123", features = ["derive"] } 33 | tokio-util = { version = "0.6.3", features = ["codec"] } 34 | thiserror = "1.0.23" 35 | futures = { version = "0.3.12" } 36 | bmrng = "0.4.0" 37 | tracing = { version = "0.1.25", features = ["log"] } 38 | async-trait = "0.1.47" 39 | mockall_double = "0.2.0" 40 | 
pin-project-lite = "0.2.6" 41 | 42 | [dev-dependencies] 43 | tokio = { version = "1", features = [ 44 | "net", 45 | "sync", 46 | "signal", 47 | "io-util", 48 | "rt", 49 | "macros", 50 | "time", 51 | "test-util", 52 | ] } 53 | mockall = "0.9.1" 54 | lazy_static = "1.4.0" 55 | serde_json = "^1.0.62" 56 | tokio-test = "0.4.0" 57 | -------------------------------------------------------------------------------- /core/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | /// Generic not found error 4 | #[derive(Error, Debug, Clone, Copy)] 5 | #[error("Item not found")] 6 | pub struct NotFoundError; 7 | 8 | /// Toxic update failed 9 | #[derive(Debug, Clone, Copy, Error, PartialEq)] 10 | pub enum ToxicUpdateError { 11 | /// No such toxic with the given name 12 | #[error("Toxic not found")] 13 | NotFound, 14 | /// Some other error 15 | #[error("Other error")] 16 | Other, 17 | } 18 | 19 | impl From for ToxicUpdateError { 20 | fn from(_: NotFoundError) -> Self { 21 | ToxicUpdateError::NotFound 22 | } 23 | } 24 | 25 | #[cfg(test)] 26 | mod tests { 27 | use super::*; 28 | 29 | #[test] 30 | fn test_not_found() { 31 | let input = NotFoundError; 32 | let _err: ToxicUpdateError = input.into(); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /core/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![warn( 3 | missing_copy_implementations, 4 | missing_debug_implementations, 5 | missing_docs, 6 | trivial_numeric_casts, 7 | unused_extern_crates, 8 | unused_import_braces, 9 | unused_qualifications 10 | )] 11 | #![cfg_attr(feature = "clippy", warn(cast_possible_truncation))] 12 | #![cfg_attr(feature = "clippy", warn(cast_possible_wrap))] 13 | #![cfg_attr(feature = "clippy", warn(cast_precision_loss))] 14 | #![cfg_attr(feature = "clippy", warn(cast_sign_loss))] 15 | #![cfg_attr(feature = 
"clippy", warn(missing_docs_in_private_items))]
#![cfg_attr(feature = "clippy", warn(mut_mut))]
#![cfg_attr(feature = "clippy", warn(print_stdout))]
#![cfg_attr(all(not(test), feature = "clippy"), warn(result_unwrap_used))]
#![cfg_attr(feature = "clippy", warn(unseparated_literal_suffix))]
#![cfg_attr(feature = "clippy", warn(wrong_pub_self_convention))]

//! # noxious

/// Contains the errors
pub mod error;
mod link;
/// Contains the proxy data types and runners
pub mod proxy;
/// Contains the Stop and Close signals
pub mod signal;
/// Contains wrappers around Tokio types to make them mockable
pub mod socket;
/// Contains the shared proxy and toxic state
pub mod state;
mod stream;
mod tests;
/// Contains the toxic data types
pub mod toxic;
mod toxics;
--------------------------------------------------------------------------------
/core/src/link.rs:
--------------------------------------------------------------------------------
use crate::{
    proxy::ProxyConfig,
    signal::{Close, Closer, Stop, Stopper},
    state::{ToxicState, ToxicStateHolder},
    stream::{forward, forward_read, forward_write, Read, Write},
    toxic::ToxicKind,
    toxic::{StreamDirection, Toxic},
    toxics,
};
use bytes::Bytes;
use futures::channel::mpsc as futures_mpsc;
use futures::StreamExt;
use futures::{Sink, Stream};
use rand::{distributions::Standard, rngs::StdRng, Rng, SeedableRng};
use std::net::SocketAddr;
use std::{io, sync::Arc};
use tokio::pin;
use tokio::sync::{oneshot, Mutex as AsyncMutex};
use tokio::task::JoinHandle;
use tracing::{debug, instrument};

/// One direction of a proxied connection (client -> upstream or
/// upstream -> client), owning the stop signals for its pipe tasks.
#[derive(Debug)]
pub(crate) struct Link {
    config: ProxyConfig,
    upstream_addr: SocketAddr,
    direction: StreamDirection,
    stop: Stop,
    stopper: Stopper,
    // NOTE(review): the extraction stripped the generic arguments here (the
    // dump read `Option>,`). Restored as `Option<oneshot::Receiver<Ends>>`,
    // matching the `oneshot::channel::<Ends>()` created in `establish` and
    // awaited in `disband`.
    disband_receiver: Option<oneshot::Receiver<Ends>>,
}

type
Ends = (Read, Write); 33 | 34 | impl Link { 35 | pub(crate) fn new( 36 | upstream_addr: SocketAddr, 37 | direction: StreamDirection, 38 | config: ProxyConfig, 39 | stop: Stop, 40 | ) -> Self { 41 | let (stop, stopper) = stop.fork(); 42 | Link { 43 | config, 44 | upstream_addr, 45 | direction, 46 | stop, 47 | stopper, 48 | disband_receiver: None, 49 | } 50 | } 51 | 52 | pub(super) fn establish( 53 | &mut self, 54 | reader: Read, 55 | writer: Write, 56 | toxics: Vec, 57 | toxic_state_holder: Option>, 58 | ) -> JoinHandle<()> { 59 | let (disband_sender, disband_receiver) = oneshot::channel::(); 60 | self.disband_receiver = Some(disband_receiver); 61 | if toxics.is_empty() { 62 | self.forward_direct(reader, writer, disband_sender) 63 | } else { 64 | self.setup_toxics(reader, writer, toxics, disband_sender, toxic_state_holder) 65 | } 66 | } 67 | 68 | #[instrument(level = "debug", skip(self, reader, writer, disband_sender))] 69 | fn forward_direct( 70 | &mut self, 71 | mut reader: Read, 72 | mut writer: Write, 73 | disband_sender: oneshot::Sender, 74 | ) -> JoinHandle<()> { 75 | let mut stop = self.stop.clone(); 76 | tokio::spawn(async move { 77 | if !stop.stop_received() { 78 | let forward_res = forward(&mut reader, &mut writer, &mut stop).await; 79 | if forward_res.is_err() { 80 | // TODO: maybe log this error in case it's a specific I/O error. 
81 | } 82 | } 83 | let _ = disband_sender.send((reader, writer)); 84 | }) 85 | } 86 | 87 | #[instrument(level = "debug", skip(self, reader, writer, disband_sender))] 88 | fn setup_toxics( 89 | &mut self, 90 | reader: Read, 91 | writer: Write, 92 | toxics: Vec, 93 | disband_sender: oneshot::Sender, 94 | toxic_state_holder: Option>, 95 | ) -> JoinHandle<()> { 96 | let mut stop = self.stop.clone(); 97 | let (left_end_tx, left_end_rx) = futures_mpsc::channel::(1); 98 | let (right_end_tx, right_end_rx) = futures_mpsc::channel::(1); 99 | 100 | let rand_gen = if let Some(seed) = self.config.rand_seed { 101 | StdRng::seed_from_u64(seed) 102 | } else { 103 | StdRng::from_entropy() 104 | }; 105 | let mut toxic_runners: Vec = toxics 106 | .into_iter() 107 | .zip(rand_gen.sample_iter(Standard)) 108 | .map(ToxicRunner::new) 109 | .collect(); 110 | 111 | let (close_read_join, close_write_join): ( 112 | JoinHandle>, 113 | JoinHandle>, 114 | ) = self.connect_pipe_ends( 115 | reader, 116 | writer, 117 | &mut toxic_runners, 118 | &mut stop, 119 | left_end_tx, 120 | right_end_rx, 121 | ); 122 | 123 | let join_handle = 124 | self.prepare_link_join_handle(close_read_join, close_write_join, disband_sender); 125 | 126 | let mut prev_pipe_read_rx = left_end_rx; 127 | 128 | for runner in toxic_runners { 129 | prev_pipe_read_rx = self.start_toxic_runner( 130 | runner, 131 | &mut stop, 132 | prev_pipe_read_rx, 133 | toxic_state_holder.clone(), 134 | ); 135 | } 136 | 137 | tokio::spawn(async move { prev_pipe_read_rx.map(Ok).forward(right_end_tx).await }); 138 | 139 | join_handle 140 | } 141 | 142 | fn start_toxic_runner( 143 | &self, 144 | mut runner: ToxicRunner, 145 | stop: &mut Stop, 146 | prev_pipe_read_rx: futures_mpsc::Receiver, 147 | toxic_state_holder: Option>, 148 | ) -> futures_mpsc::Receiver { 149 | let toxic_name = runner.toxic_name(); 150 | let toxic_state = 151 | toxic_state_holder.and_then(|holder| holder.get_state_for_toxic(toxic_name)); 152 | let mut stop = stop.clone(); 153 
| let rand_seed = self.config.rand_seed; 154 | // Get the desired channel buffer capacity for the toxic (in number of chunks) 155 | // This is 1024 for the Latency toxic and 1 for others, similar 156 | // to the original Toxiproxy implementation. 157 | let (pipe_tx, pipe_rx) = 158 | futures_mpsc::channel::(runner.toxic_kind().chunk_buffer_capacity()); 159 | tokio::spawn(async move { 160 | let maybe_res = tokio::select! { 161 | res = runner.run(prev_pipe_read_rx, pipe_tx, toxic_state, rand_seed) => Some(res), 162 | _ = stop.recv() => None, 163 | }; 164 | if let Some(Err(err)) = maybe_res { 165 | debug!("Got error from toxic runner {:?}", err); 166 | } 167 | }); 168 | pipe_rx 169 | } 170 | 171 | fn connect_pipe_ends( 172 | &self, 173 | reader: Read, 174 | writer: Write, 175 | toxic_runners: &mut [ToxicRunner], 176 | stop: &mut Stop, 177 | left_end_tx: futures_mpsc::Sender, 178 | right_end_rx: futures_mpsc::Receiver, 179 | ) -> (JoinHandle>, JoinHandle>) { 180 | let (mut stop_read, read_stopper) = stop.fork(); 181 | let (mut stop_write, write_stopper) = stop.fork(); 182 | 183 | let (override_stop_toxics, toxic_override_stopper) = stop.fork(); 184 | let toxic_override_stopper_clone = toxic_override_stopper.clone(); 185 | 186 | let wait_for_manual_close: Option = 187 | self.prepare_manual_close_signals(toxic_runners, override_stop_toxics); 188 | let wait_for_manual_close_clone = wait_for_manual_close.clone(); 189 | 190 | let close_read_join = tokio::spawn(async move { 191 | pin!(left_end_tx); 192 | let res = forward_read(reader, left_end_tx, &mut stop_read).await; 193 | // Speed up closing the underlying connection by closing the other end, 194 | // unless we should wait for a toxic to yield explicitly. 
195 | if let Some(close) = wait_for_manual_close { 196 | toxic_override_stopper.stop(); 197 | let _ = close.recv().await; 198 | } 199 | write_stopper.stop(); 200 | res 201 | }); 202 | 203 | let close_write_join = tokio::spawn(async move { 204 | pin!(right_end_rx); 205 | let res = forward_write(right_end_rx, writer, &mut stop_write).await; 206 | // Speed up closing the underlying connection by closing the other end, 207 | // unless we should wait for a toxic to yield explicitly. 208 | if let Some(close) = wait_for_manual_close_clone { 209 | toxic_override_stopper_clone.stop(); 210 | let _ = close.recv().await; 211 | } 212 | read_stopper.stop(); 213 | res 214 | }); 215 | (close_read_join, close_write_join) 216 | } 217 | 218 | fn prepare_manual_close_signals( 219 | &self, 220 | toxic_runners: &mut [ToxicRunner], 221 | override_stop_toxics: Stop, 222 | ) -> Option { 223 | let close_signals: Vec = toxic_runners 224 | .iter_mut() 225 | .filter_map(|runner| { 226 | if runner.is_active() && runner.toxic_kind().has_close_logic() { 227 | let (close, closer) = Close::new(); 228 | runner.set_closer(closer); 229 | runner.set_override_stop(override_stop_toxics.clone()); 230 | Some(close) 231 | } else { 232 | None 233 | } 234 | }) 235 | .collect(); 236 | if !close_signals.is_empty() { 237 | let (close, closer) = Close::new(); 238 | 239 | tokio::spawn(async move { 240 | for close in close_signals { 241 | let _ = close.recv().await; 242 | } 243 | let _ = closer.close(); 244 | }); 245 | Some(close) 246 | } else { 247 | None 248 | } 249 | } 250 | 251 | fn prepare_link_join_handle( 252 | &mut self, 253 | close_read_join: JoinHandle>, 254 | close_write_join: JoinHandle>, 255 | disband_sender: oneshot::Sender, 256 | ) -> JoinHandle<()> { 257 | let direction = self.direction; 258 | tokio::spawn(async move { 259 | let result: Result<(io::Result, io::Result), tokio::task::JoinError> = 260 | tokio::try_join!(close_read_join, close_write_join); 261 | match result { 262 | Ok((read_res, 
write_res)) => { 263 | if let Ok(reader) = read_res { 264 | if let Ok(writer) = write_res { 265 | let _ = disband_sender.send((reader, writer)); 266 | debug!("Joining {} task", direction); 267 | return; 268 | } 269 | } 270 | debug!("Read or write sub task failed"); 271 | } 272 | Err(err) => { 273 | debug!("Read or write sub task failed {:?}", err); 274 | } 275 | } 276 | }) 277 | } 278 | 279 | /// Cuts all the streams, stops all the ToxicRunner tasks, returns the original 280 | /// stream and the sink at the two ends. 281 | pub(super) async fn disband(self) -> io::Result<(Read, Write)> { 282 | self.stopper.stop(); 283 | let (reader, writer) = self 284 | .disband_receiver 285 | .expect("State error: Link already disbanded, or never established") 286 | .await 287 | .map_err(|_| io::Error::new(io::ErrorKind::BrokenPipe, "already closed?"))?; 288 | 289 | Ok((reader, writer)) 290 | } 291 | } 292 | 293 | impl PartialEq for Link { 294 | fn eq(&self, other: &Self) -> bool { 295 | self.upstream_addr == other.upstream_addr && self.direction == other.direction 296 | } 297 | } 298 | 299 | #[derive(Debug)] 300 | pub(crate) struct ToxicRunner { 301 | active: bool, 302 | toxic: Toxic, 303 | closer: Option, 304 | override_stop: Option, 305 | } 306 | 307 | impl ToxicRunner { 308 | pub fn new((toxic, threshold): (Toxic, f32)) -> Self { 309 | ToxicRunner { 310 | active: toxic.toxicity >= threshold, 311 | toxic, 312 | closer: None, 313 | override_stop: None, 314 | } 315 | } 316 | 317 | pub fn is_active(&self) -> bool { 318 | self.active 319 | } 320 | 321 | pub fn toxic_name(&self) -> &str { 322 | &self.toxic.name 323 | } 324 | 325 | pub fn toxic_kind(&self) -> &ToxicKind { 326 | &self.toxic.kind 327 | } 328 | 329 | pub fn set_closer(&mut self, closer: Closer) { 330 | self.closer = Some(closer); 331 | } 332 | 333 | pub fn set_override_stop(&mut self, stop: Stop) { 334 | self.override_stop = Some(stop); 335 | } 336 | 337 | fn take_override_stop(&mut self) -> Stop { 338 | 
self.override_stop
            .take()
            .expect("State error: cannot run toxic without a override stop signal")
    }

    /// Drive the toxic over the `input` -> `output` pipe until either side
    /// closes. An inactive runner (toxicity below the sampled threshold)
    /// degrades to a pass-through (`run_noop`). After the toxic returns, the
    /// optional closer is fired so peers waiting in `connect_pipe_ends` can
    /// finish closing the link.
    //
    // NOTE(review): the extraction stripped the generic arguments from this
    // signature (the dump read `impl Stream, impl Sink, Option>>, Option,`).
    // Restored as below: the element type is `Bytes` (see the
    // `futures::channel::mpsc::channel::<Bytes>` pipes in the sibling tests),
    // the state is the `Arc<AsyncMutex<ToxicState>>` produced by
    // `ToxicStateHolder::get_state_for_toxic`, and the seed feeds
    // `StdRng::seed_from_u64`, hence `u64`.
    pub async fn run(
        &mut self,
        input: impl Stream<Item = Bytes>,
        output: impl Sink<Bytes>,
        state: Option<Arc<AsyncMutex<ToxicState>>>,
        rand_seed: Option<u64>,
    ) -> io::Result<()> {
        pin!(input);
        pin!(output);
        let result = if self.active {
            match self.toxic.kind {
                ToxicKind::Noop => toxics::run_noop(input, output).await,
                ToxicKind::Latency { latency, jitter } => {
                    toxics::run_latency(input, output, latency, jitter, rand_seed).await
                }
                ToxicKind::Timeout { timeout } => toxics::run_timeout(input, output, timeout).await,
                ToxicKind::Bandwidth { rate } => toxics::run_bandwidth(input, output, rate).await,
                ToxicKind::SlowClose { delay } => {
                    // SlowClose owns close timing, so it takes the override stop.
                    let stop = self.take_override_stop();
                    toxics::run_slow_close(input, output, stop, delay).await
                }
                ToxicKind::Slicer {
                    average_size,
                    size_variation,
                    delay,
                } => {
                    toxics::run_slicer(
                        input,
                        output,
                        average_size,
                        size_variation,
                        delay,
                        rand_seed,
                    )
                    .await
                }
                ToxicKind::LimitData { bytes } => {
                    // LimitData also has close logic and per-connection state.
                    let stop = self.take_override_stop();
                    toxics::run_limit_data(input, output, stop, bytes, state).await
                }
            }
        } else {
            toxics::run_noop(input, output).await
        };
        if let Some(closer) = self.closer.take() {
            let _ = closer.close();
        }
        result
    }
}

#[cfg(test)]
mod tests {
    use futures::SinkExt;
    use tokio_test::{assert_err, assert_ok};

    use super::*;

    #[test]
    fn toxic_runner_take_override_stop() {
        let toxic = Toxic {
            name: "nop".to_owned(),
            kind: ToxicKind::Noop,
            direction: StreamDirection::Upstream,
            toxicity: 1.0,
        };
        let mut runner = ToxicRunner::new((toxic, 0.9));
        let (stop, stopper) = Stop::new();
411 | runner.set_override_stop(stop); 412 | let _stop = runner.take_override_stop(); 413 | stopper.stop(); 414 | } 415 | 416 | #[tokio::test] 417 | async fn run_slicer() { 418 | let slicer = Toxic { 419 | name: "slicer slices".to_owned(), 420 | kind: ToxicKind::Slicer { 421 | average_size: 4, 422 | size_variation: 0, 423 | delay: 0, 424 | }, 425 | direction: StreamDirection::Upstream, 426 | toxicity: 1.0, 427 | }; 428 | 429 | let mut runner = ToxicRunner::new((slicer, 1.0)); 430 | let (mut tx, rx) = futures::channel::mpsc::channel::(1); 431 | let (tx2, mut rx2) = futures::channel::mpsc::channel::(1); 432 | assert_ok!(tx.send("chop chop".into()).await); 433 | let handle = tokio::spawn(async move { 434 | let res = runner.run(rx, tx2, None, None).await; 435 | assert_ok!(res); 436 | }); 437 | assert_eq!(Some("chop".into()), rx2.next().await); 438 | assert_eq!(Some(" cho".into()), rx2.next().await); 439 | assert_eq!(Some("p".into()), rx2.next().await); 440 | drop(tx); 441 | assert_eq!(None, rx2.next().await); 442 | assert_ok!(handle.await); 443 | } 444 | 445 | #[tokio::test] 446 | async fn run_slicer_recv_drop() { 447 | let slicer = Toxic { 448 | name: "slicer slices".to_owned(), 449 | kind: ToxicKind::Slicer { 450 | average_size: 4, 451 | size_variation: 0, 452 | delay: 0, 453 | }, 454 | direction: StreamDirection::Upstream, 455 | toxicity: 1.0, 456 | }; 457 | 458 | let mut runner = ToxicRunner::new((slicer, 1.0)); 459 | let (mut tx, rx) = futures::channel::mpsc::channel::(1); 460 | let (tx2, mut rx2) = futures::channel::mpsc::channel::(1); 461 | assert_ok!(tx.send("chop chop".into()).await); 462 | let handle = tokio::spawn(async move { 463 | let res = runner.run(rx, tx2, None, None).await; 464 | assert_err!(&res); 465 | assert_eq!(std::io::ErrorKind::ConnectionReset, res.unwrap_err().kind()); 466 | }); 467 | assert_eq!(Some("chop".into()), rx2.next().await); 468 | assert_eq!(Some(" cho".into()), rx2.next().await); 469 | drop(rx2); 470 | assert_ok!(handle.await); 471 | 
} 472 | 473 | #[tokio::test] 474 | async fn run_inactive() { 475 | let slicer = Toxic { 476 | name: "slicer slices".to_owned(), 477 | kind: ToxicKind::Slicer { 478 | average_size: 4, 479 | size_variation: 0, 480 | delay: 0, 481 | }, 482 | direction: StreamDirection::Upstream, 483 | toxicity: 0.3, 484 | }; 485 | 486 | let mut runner = ToxicRunner::new((slicer, 0.9)); 487 | let (mut tx, rx) = futures::channel::mpsc::channel::(1); 488 | let (tx2, mut rx2) = futures::channel::mpsc::channel::(1); 489 | assert_ok!(tx.send("chop chop".into()).await); 490 | let handle = tokio::spawn(async move { 491 | let res = runner.run(rx, tx2, None, None).await; 492 | assert_ok!(res); 493 | }); 494 | assert_eq!(Some("chop chop".into()), rx2.next().await); 495 | drop(tx); 496 | assert_eq!(None, rx2.next().await); 497 | assert_ok!(handle.await); 498 | } 499 | 500 | #[tokio::test] 501 | async fn run_with_closer() { 502 | let slicer = Toxic { 503 | name: "slicer slices".to_owned(), 504 | kind: ToxicKind::Bandwidth { rate: 48000 }, 505 | direction: StreamDirection::Upstream, 506 | toxicity: 0.3, 507 | }; 508 | 509 | let mut runner = ToxicRunner::new((slicer, 0.9)); 510 | let (close, closer) = Close::new(); 511 | runner.set_closer(closer); 512 | let (mut tx, rx) = futures::channel::mpsc::channel::(1); 513 | let (tx2, mut rx2) = futures::channel::mpsc::channel::(1); 514 | assert_ok!(tx.send("chop chop".into()).await); 515 | let handle = tokio::spawn(async move { 516 | let res = runner.run(rx, tx2, None, None).await; 517 | assert_ok!(res); 518 | }); 519 | assert_eq!(Some("chop chop".into()), rx2.next().await); 520 | drop(tx); 521 | assert_eq!(None, rx2.next().await); 522 | assert_ok!(handle.await); 523 | assert_ok!(close.recv().await); 524 | } 525 | } 526 | -------------------------------------------------------------------------------- /core/src/proxy.rs: -------------------------------------------------------------------------------- 1 | use crate::socket::{SocketListener, SocketStream}; 2 | 
use crate::{ 3 | error::NotFoundError, 4 | link::Link, 5 | signal::{Closer, Stop}, 6 | state::{ProxyState, SharedProxyInfo, ToxicStateHolder}, 7 | stream::{Read, Write}, 8 | toxic::{update_toxic_list_in_place, StreamDirection, Toxic, ToxicEvent, ToxicEventResult}, 9 | }; 10 | use async_trait::async_trait; 11 | use bmrng::{Payload, RequestReceiver}; 12 | use futures::{stream, StreamExt}; 13 | #[cfg(test)] 14 | use mockall::automock; 15 | use serde::{Deserialize, Serialize}; 16 | use std::io; 17 | use std::net::SocketAddr; 18 | use std::sync::Arc; 19 | use thiserror::Error; 20 | use tokio_util::codec::{BytesCodec, FramedRead, FramedWrite}; 21 | use tracing::{debug, error, info, instrument}; 22 | 23 | /// The default Go io.Copy buffer size is 32K, so also use 32K buffers here to imitate Toxiproxy. 24 | const READ_BUFFER_SIZE: usize = 32768; 25 | 26 | /// The immutable configuration for a proxy 27 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 28 | pub struct ProxyConfig { 29 | /// An arbitrary name 30 | #[serde(default = "default_name")] 31 | pub name: String, 32 | /// The host name and the port the proxy listens on, like 127.0.0.1:5431 33 | pub listen: String, 34 | /// The host name and the port the proxy connects to, like 127.0.0:5432 35 | pub upstream: String, 36 | /// The client can set the enabled field to false to stop this proxy. 37 | /// Proxies are enabled by default 38 | #[serde(default = "default_enabled")] 39 | pub enabled: bool, 40 | /// A random seed. 
Not exposed in the API 41 | #[serde(skip)] 42 | pub rand_seed: Option, 43 | } 44 | 45 | fn default_name() -> String { 46 | "".to_owned() 47 | } 48 | 49 | fn default_enabled() -> bool { 50 | true 51 | } 52 | 53 | /// A holder for upstream and downstream links, as well as the per-connection state 54 | #[derive(Debug)] 55 | pub struct Links { 56 | upstream: Link, 57 | client: Link, 58 | /// Optional, connection-wide state for toxics that need such state (like LimitData) 59 | /// Toxic Name -> State 60 | state_holder: Option>, 61 | } 62 | 63 | /// Toxics applied on a proxy connection 64 | #[derive(Debug, Clone)] 65 | pub struct Toxics { 66 | /// The toxics applied on the upstream link 67 | pub upstream: Vec, 68 | /// The toxics applied on the downstream link 69 | pub downstream: Vec, 70 | } 71 | 72 | /// The serializable API response 73 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 74 | pub struct ProxyWithToxics { 75 | /// The proxy details 76 | #[serde(flatten)] 77 | pub proxy: ProxyConfig, 78 | /// Toxics installed on the proxy 79 | pub toxics: Vec, 80 | } 81 | 82 | impl ProxyConfig { 83 | /// Validate the proxy config, return `ProxyValidateError` if invalid 84 | pub fn validate(&self) -> Result<(), ProxyValidateError> { 85 | if self.name.is_empty() { 86 | Err(ProxyValidateError::MissingName) 87 | } else if self.upstream.is_empty() { 88 | Err(ProxyValidateError::MissingUpstream) 89 | } else if self.listen.is_empty() { 90 | Err(ProxyValidateError::MissingListen) 91 | } else { 92 | Ok(()) 93 | } 94 | } 95 | } 96 | 97 | impl Toxics { 98 | /// Initialize an empty set up toxics 99 | pub fn empty() -> Self { 100 | Toxics { 101 | upstream: Vec::new(), 102 | downstream: Vec::new(), 103 | } 104 | } 105 | 106 | /// Consume this Toxics struct to combine upstream and downstream toxics in a flat unordered vec 107 | pub fn into_vec(mut self) -> Vec { 108 | self.upstream.append(&mut self.downstream); 109 | self.upstream 110 | } 111 | 112 | /// Find a toxic by name 
in upstream and downstream lists 113 | pub fn find_by_name(&self, toxic_name: &str) -> Option { 114 | self.upstream 115 | .iter() 116 | .find(|toxic| toxic.name == toxic_name) 117 | .or_else(|| { 118 | self.downstream 119 | .iter() 120 | .find(|toxic| toxic.name == toxic_name) 121 | }) 122 | .map(|toxic| toxic.to_owned()) 123 | } 124 | } 125 | 126 | impl ProxyWithToxics { 127 | /// Create the full ProxyWithToxics from SharedProxyInfo 128 | pub fn from_shared_proxy_info(info: SharedProxyInfo) -> Self { 129 | let proxy_state = info.state.lock(); 130 | ProxyWithToxics { 131 | proxy: info.clone_config(), 132 | toxics: proxy_state.toxics.clone().into_vec(), 133 | } 134 | } 135 | 136 | /// Create a new ProxyWithToxics with empty toxics 137 | pub fn from_proxy_config(proxy_config: ProxyConfig) -> Self { 138 | ProxyWithToxics { 139 | proxy: proxy_config, 140 | toxics: Vec::new(), 141 | } 142 | } 143 | } 144 | 145 | struct Streams { 146 | client_read: Read, 147 | client_write: Write, 148 | upstream_read: Read, 149 | upstream_write: Write, 150 | } 151 | 152 | /// The proxy runner interface (defined for mocking, mainly) 153 | #[cfg_attr(test, automock)] 154 | #[async_trait] 155 | pub trait Runner { 156 | /// Initialize a proxy, bind to a TCP port but don't start accepting clients 157 | async fn initialize_proxy( 158 | config: ProxyConfig, 159 | initial_toxics: Toxics, 160 | ) -> io::Result<(Listener, SharedProxyInfo)> 161 | where 162 | Listener: SocketListener + 'static; 163 | 164 | /// Run the initialized proxy, accept clients, establish links 165 | async fn run_proxy( 166 | listener: Listener, 167 | proxy_info: SharedProxyInfo, 168 | receiver: RequestReceiver, 169 | mut stop: Stop, 170 | closer: Closer, 171 | ) -> io::Result<()> 172 | where 173 | Listener: SocketListener + 'static; 174 | } 175 | 176 | /// The proxy runner 177 | #[derive(Debug, Copy, Clone)] 178 | pub struct ProxyRunner; 179 | 180 | #[async_trait] 181 | impl Runner for ProxyRunner { 182 | /// Initialize a 
proxy, bind to a TCP port but don't start accepting clients 183 | #[instrument(level = "debug")] 184 | async fn initialize_proxy( 185 | config: ProxyConfig, 186 | initial_toxics: Toxics, 187 | ) -> io::Result<(Listener, SharedProxyInfo)> 188 | where 189 | Listener: SocketListener + 'static, 190 | { 191 | let listener = Listener::bind(&config.listen).await?; 192 | 193 | info!(name = ?config.name, proxy = ?config.listen, upstream = ?config.upstream, "Initialized proxy"); 194 | 195 | let state = Arc::new(ProxyState::new(initial_toxics)); 196 | 197 | let proxy_info = SharedProxyInfo { 198 | state, 199 | config: Arc::new(config), 200 | }; 201 | 202 | Ok((listener, proxy_info)) 203 | } 204 | 205 | /// Run the initialized proxy, accept clients, establish links 206 | #[instrument(level = "debug", skip(listener, receiver, stop, closer))] 207 | async fn run_proxy( 208 | listener: Listener, 209 | proxy_info: SharedProxyInfo, 210 | receiver: RequestReceiver, 211 | mut stop: Stop, 212 | closer: Closer, 213 | ) -> io::Result<()> 214 | where 215 | Listener: SocketListener + 'static, 216 | { 217 | let state = proxy_info.state; 218 | let config = proxy_info.config; 219 | 220 | tokio::spawn(listen_toxic_events( 221 | state.clone(), 222 | receiver, 223 | stop.clone(), 224 | config.clone(), 225 | )); 226 | 227 | while !stop.stop_received() { 228 | let maybe_connection = tokio::select! 
{ 229 | res = listener.accept() => { 230 | Ok::, io::Error>(Some(res?)) 231 | }, 232 | _ = stop.recv() => { 233 | Ok(None) 234 | }, 235 | }?; 236 | 237 | if let Some((client_stream, addr)) = maybe_connection { 238 | debug!(proxy = ?&config, addr = ?&addr, "Accepted client {}", addr); 239 | let upstream = match Listener::Stream::connect(&config.upstream).await { 240 | Ok(upstream) => upstream, 241 | Err(err) => { 242 | error!(err = ?err, proxy = ?&config.name, upstream = ?&config.upstream, listen = ?&config.listen, "Unable to open connection to upstream"); 243 | // This is not a fatal error, can retry next time another client connects 244 | continue; 245 | } 246 | }; 247 | 248 | let (client_read, client_write) = client_stream.into_split(); 249 | let (upstream_read, upstream_write) = upstream.into_split(); 250 | 251 | let client_read = 252 | FramedRead::with_capacity(client_read, BytesCodec::new(), READ_BUFFER_SIZE); 253 | let client_write = FramedWrite::new(client_write, BytesCodec::new()); 254 | let upstream_read = 255 | FramedRead::with_capacity(upstream_read, BytesCodec::new(), READ_BUFFER_SIZE); 256 | let upstream_write = FramedWrite::new(upstream_write, BytesCodec::new()); 257 | 258 | let toxics = state.lock().toxics.clone(); 259 | 260 | let streams = Streams { 261 | client_read, 262 | client_write, 263 | upstream_read, 264 | upstream_write, 265 | }; 266 | 267 | let res = create_links( 268 | state.clone(), 269 | addr, 270 | &config, 271 | &mut stop, 272 | toxics, 273 | streams, 274 | None, 275 | ); 276 | if let Err(err) = res { 277 | error!(err = ?err, proxy = ?&config.name, listen = ?&config.listen, "Unable to establish link for proxy"); 278 | continue; 279 | } 280 | } else { 281 | break; 282 | } 283 | } 284 | drop(listener); 285 | let _ = closer.close(); 286 | debug!(proxy = ?&config.name, listen = ?&config.listen, "Shutting down proxy"); 287 | Ok(()) 288 | } 289 | } 290 | 291 | #[instrument(level = "debug", skip(state, streams, stop))] 292 | fn create_links( 
293 | state: Arc, 294 | addr: SocketAddr, 295 | config: &ProxyConfig, 296 | stop: &mut Stop, 297 | toxics: Toxics, 298 | streams: Streams, 299 | previous_toxic_state_holder: Option>, 300 | ) -> io::Result<()> { 301 | let mut current_state = state.lock(); 302 | 303 | if current_state.clients.contains_key(&addr) { 304 | return Err(io::Error::new( 305 | io::ErrorKind::AlreadyExists, 306 | format!( 307 | "State error: there is already a client connected with this address: {}", 308 | addr 309 | ), 310 | )); 311 | } 312 | 313 | let (links_stop, links_stopper) = stop.fork(); 314 | 315 | let toxics_state_holder = 316 | previous_toxic_state_holder.or_else(|| ToxicStateHolder::for_toxics(&toxics)); 317 | 318 | let mut upstream_link = Link::new( 319 | addr, 320 | StreamDirection::Upstream, 321 | config.clone(), 322 | links_stop.clone(), 323 | ); 324 | let mut client_link = Link::new( 325 | addr, 326 | StreamDirection::Downstream, 327 | config.clone(), 328 | links_stop, 329 | ); 330 | 331 | let upstream_handle = upstream_link.establish( 332 | streams.client_read, 333 | streams.upstream_write, 334 | toxics.upstream, 335 | toxics_state_holder.clone(), 336 | ); 337 | let downstream_handle = client_link.establish( 338 | streams.upstream_read, 339 | streams.client_write, 340 | toxics.downstream, 341 | toxics_state_holder.clone(), 342 | ); 343 | 344 | let state = state.clone(); 345 | tokio::spawn(async move { 346 | // No need to listen for the stop signal here, we're ending as soon as one of the tasks have stopped. 347 | let _ = tokio::select! 
{ 348 | up = upstream_handle => { 349 | debug!("Upstream joined first"); 350 | up 351 | }, 352 | down = downstream_handle => { 353 | debug!("Downstream joined first"); 354 | down 355 | } 356 | }; 357 | links_stopper.stop(); 358 | let mut state = state.lock(); 359 | state.clients.remove(&addr); 360 | debug!("Removed client {}", addr); 361 | }); 362 | 363 | current_state.clients.insert( 364 | addr, 365 | Links { 366 | upstream: upstream_link, 367 | client: client_link, 368 | state_holder: toxics_state_holder, 369 | }, 370 | ); 371 | Ok(()) 372 | } 373 | 374 | #[doc(hidden)] 375 | pub async fn listen_toxic_events( 376 | state: Arc, 377 | mut receiver: RequestReceiver, 378 | mut stop: Stop, 379 | config: Arc, 380 | ) { 381 | while !stop.stop_received() { 382 | let maybe_payload: Option> = tokio::select! { 383 | res = receiver.recv() => { 384 | if let Ok(payload) = res { 385 | Some(payload) 386 | } else { 387 | None 388 | } 389 | }, 390 | _ = stop.recv() => None, 391 | }; 392 | if let Some(payload) = maybe_payload { 393 | process_toxic_event(state.clone(), config.clone(), stop.clone(), payload).await; 394 | } else { 395 | break; 396 | } 397 | } 398 | } 399 | 400 | async fn process_toxic_event( 401 | state: Arc, 402 | config: Arc, 403 | stop: Stop, 404 | (request, mut responder): Payload, 405 | ) { 406 | let new_toxics = { 407 | let mut current_state = state.lock(); 408 | if let Err(err) = update_toxics(request, &mut current_state.toxics) { 409 | let _ = responder.respond(Err(err.into())); 410 | return; 411 | } 412 | current_state.toxics.clone() 413 | }; 414 | 415 | let old_map = { 416 | let mut current_state = state.lock(); 417 | std::mem::take(&mut current_state.clients) 418 | }; 419 | 420 | let mut clients = stream::iter(old_map); 421 | while let Some((addr, links)) = clients.next().await { 422 | if let Err(err) = recreate_links( 423 | state.clone(), 424 | &config, 425 | stop.clone(), 426 | addr, 427 | links, 428 | new_toxics.clone(), 429 | ) 430 | .await 431 | { 432 
| error!(err = ?err, addr = ?addr, proxy = ?&config.name, "Failed to recreate links for client"); 433 | } 434 | } 435 | let _ = responder.respond(Ok(())); 436 | } 437 | 438 | async fn recreate_links( 439 | state: Arc, 440 | config: &ProxyConfig, 441 | stop: Stop, 442 | addr: SocketAddr, 443 | links: Links, 444 | new_toxics: Toxics, 445 | ) -> io::Result<()> { 446 | let (client_read, upstream_write) = links.client.disband().await?; 447 | let (upstream_read, client_write) = links.upstream.disband().await?; 448 | let streams = Streams { 449 | client_read, 450 | client_write, 451 | upstream_read, 452 | upstream_write, 453 | }; 454 | create_links( 455 | state.clone(), 456 | addr, 457 | config, 458 | &mut stop.clone(), 459 | new_toxics, 460 | streams, 461 | links.state_holder, 462 | ) 463 | } 464 | 465 | /// Update the toxics collection in place 466 | fn update_toxics(event: ToxicEvent, toxics: &mut Toxics) -> Result<(), NotFoundError> { 467 | update_toxic_list_in_place(&mut toxics.upstream, event.kind, StreamDirection::Upstream) 468 | .or_else(|kind| { 469 | update_toxic_list_in_place(&mut toxics.downstream, kind, StreamDirection::Downstream) 470 | }) 471 | .or(Err(NotFoundError)) 472 | } 473 | 474 | /// Errors return when ProxyConfig validation fails 475 | #[derive(Debug, Clone, Copy, Error, PartialEq)] 476 | pub enum ProxyValidateError { 477 | /// The name field is empty 478 | #[error("name missing")] 479 | MissingName, 480 | /// The upstream field is empty 481 | #[error("upstream missing")] 482 | MissingUpstream, 483 | /// The listen field is empty 484 | #[error("listen address missing")] 485 | MissingListen, 486 | } 487 | 488 | #[cfg(test)] 489 | mod serde_tests { 490 | use super::*; 491 | use serde_json::{from_str, to_string}; 492 | 493 | #[test] 494 | fn test_ser_and_de() { 495 | let config = ProxyConfig { 496 | name: "foo".to_owned(), 497 | listen: "127.0.0.1:5431".to_owned(), 498 | upstream: "127.0.0.1:5432".to_owned(), 499 | enabled: false, 500 | rand_seed: 
Some(3), 501 | }; 502 | let serialized = to_string(&config).unwrap(); 503 | let expected = "{\"name\":\"foo\",\"listen\":\"127.0.0.1:5431\",\"upstream\":\"127.0.0.1:5432\",\"enabled\":false}"; 504 | assert_eq!(expected, serialized); 505 | 506 | let expected = ProxyConfig { 507 | name: "foo".to_owned(), 508 | listen: "127.0.0.1:5431".to_owned(), 509 | upstream: "127.0.0.1:5432".to_owned(), 510 | enabled: false, 511 | rand_seed: None, 512 | }; 513 | 514 | let deserialized = from_str(&serialized).unwrap(); 515 | assert_eq!(expected, deserialized); 516 | } 517 | 518 | #[test] 519 | fn test_optional_enabled() { 520 | let expected = ProxyConfig { 521 | name: "foo".to_owned(), 522 | listen: "127.0.0.1:5431".to_owned(), 523 | upstream: "127.0.0.1:5432".to_owned(), 524 | enabled: true, 525 | rand_seed: None, 526 | }; 527 | let input = 528 | "{\"name\":\"foo\",\"listen\":\"127.0.0.1:5431\",\"upstream\":\"127.0.0.1:5432\"}"; 529 | let deserialized = from_str(input).unwrap(); 530 | assert_eq!(expected, deserialized); 531 | } 532 | } 533 | 534 | #[cfg(test)] 535 | mod config_tests { 536 | use super::*; 537 | 538 | #[test] 539 | fn validates_name() { 540 | let config = ProxyConfig { 541 | name: "".to_owned(), 542 | listen: "".to_owned(), 543 | upstream: "".to_owned(), 544 | enabled: true, 545 | rand_seed: None, 546 | }; 547 | assert_eq!(config.validate(), Err(ProxyValidateError::MissingName)) 548 | } 549 | 550 | #[test] 551 | fn validates_listen() { 552 | let config = ProxyConfig { 553 | name: "name".to_owned(), 554 | listen: "".to_owned(), 555 | upstream: "bogus_addr".to_owned(), 556 | enabled: true, 557 | rand_seed: None, 558 | }; 559 | assert_eq!(config.validate(), Err(ProxyValidateError::MissingListen)) 560 | } 561 | 562 | #[test] 563 | fn validates_upstream() { 564 | let config = ProxyConfig { 565 | name: "name".to_owned(), 566 | listen: "bogus_addr".to_owned(), 567 | upstream: "".to_owned(), 568 | enabled: true, 569 | rand_seed: None, 570 | }; 571 | 
assert_eq!(config.validate(), Err(ProxyValidateError::MissingUpstream)) 572 | } 573 | 574 | #[test] 575 | fn allows_invalid_addresses() { 576 | let config = ProxyConfig { 577 | name: "name".to_owned(), 578 | listen: "bogus_addr".to_owned(), 579 | upstream: "bogus_upstream".to_owned(), 580 | enabled: true, 581 | rand_seed: None, 582 | }; 583 | assert_eq!(config.validate(), Ok(())) 584 | } 585 | } 586 | -------------------------------------------------------------------------------- /core/src/signal.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | use tokio::sync::{broadcast, watch}; 3 | use tracing::instrument; 4 | 5 | /// The receiver for the stop signal, which can be used to indicate that 6 | /// a part, or all of the system is required to shut down. 7 | /// This stop handle can be cloned to pass the signal to multiple async tasks, 8 | /// and it can be forked to let child tasks have their own stop logic in addition 9 | /// to the parent system stop logic. 10 | #[derive(Debug)] 11 | pub struct Stop { 12 | stopped: bool, 13 | receiver: broadcast::Receiver<()>, 14 | sender: broadcast::Sender<()>, 15 | } 16 | 17 | impl Stop { 18 | /// Create a new Stop and Stopper 19 | pub fn new() -> (Stop, Stopper) { 20 | let (sender, receiver) = broadcast::channel::<()>(1); 21 | let stopper = Stopper::new(sender.clone()); 22 | let stop = Stop { 23 | stopped: false, 24 | receiver, 25 | sender, 26 | }; 27 | (stop, stopper) 28 | } 29 | 30 | /// Check if this particular instance of Stop has received a stop signal. 31 | /// Note: Only use this in conjunction with `recv`, because if this instance 32 | /// of Stop does not receive the signal, this will return false. 
33 | pub fn stop_received(&self) -> bool { 34 | self.stopped 35 | } 36 | 37 | /// Wait for the stop signal to be received 38 | pub async fn recv(&mut self) { 39 | if self.stopped { 40 | return; 41 | } 42 | 43 | let _ = self.receiver.recv().await; 44 | 45 | self.stopped = true; 46 | } 47 | 48 | /// Creates a sub-signal that has its own stopper but propagates the stop signal from the original 49 | pub fn fork(&self) -> (Stop, Stopper) { 50 | let (forked_stop, forked_stopper) = Stop::new(); 51 | let forked_sender = forked_stop.sender.clone(); 52 | let mut original_receiver = self.sender.subscribe(); 53 | tokio::spawn(async move { 54 | while original_receiver.recv().await.is_ok() { 55 | if forked_sender.send(()).is_err() { 56 | // Channel closed, we can no longer forward signal from original to fork 57 | break; 58 | } 59 | } 60 | drop(forked_sender); 61 | }); 62 | (forked_stop, forked_stopper) 63 | } 64 | 65 | /// Creates a new stopper for this Stop signal that can be used to stop this Stop and all 66 | /// its forked descendants 67 | pub fn get_stopper(&self) -> Stopper { 68 | Stopper::new(self.sender.clone()) 69 | } 70 | } 71 | 72 | impl Clone for Stop { 73 | fn clone(&self) -> Self { 74 | Self { 75 | stopped: self.stopped, 76 | receiver: self.sender.subscribe(), 77 | sender: self.sender.clone(), 78 | } 79 | } 80 | } 81 | 82 | impl std::fmt::Display for Stop { 83 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 84 | if self.stop_received() { 85 | write!(f, "stopped") 86 | } else { 87 | write!(f, "NOT stopped") 88 | } 89 | } 90 | } 91 | 92 | /// A handle that can send a stop signal once to all subscribers 93 | #[derive(Debug, Clone)] 94 | pub struct Stopper { 95 | sender: broadcast::Sender<()>, 96 | } 97 | 98 | impl Stopper { 99 | pub(crate) fn new(sender: broadcast::Sender<()>) -> Self { 100 | Self { sender } 101 | } 102 | 103 | /// Sends the stop signal 104 | #[instrument(level = "trace", skip(self))] 105 | pub fn stop(self) { 106 | let _ = 
self.sender.send(()); 107 | } 108 | } 109 | /// A receiver for the close signal, which is used to indicate that a resource 110 | /// is ready to close 111 | #[derive(Debug, Clone)] 112 | pub struct Close { 113 | receiver: watch::Receiver>, 114 | } 115 | 116 | /// The sender for the close signal, to indicate that the owner of this closer 117 | /// is ready to close 118 | #[derive(Debug)] 119 | pub struct Closer { 120 | sender: watch::Sender>, 121 | } 122 | 123 | /// The listen channel closed before the close signal was received 124 | #[derive(Error, Copy, Clone, Debug)] 125 | #[error("Close channel closed")] 126 | pub struct CloseError; 127 | 128 | /// Could not snd the close signal, listener for close dropped 129 | #[derive(Error, Copy, Clone, Debug)] 130 | #[error("Could not close, already closed?")] 131 | pub struct CloserError; 132 | 133 | impl Close { 134 | /// Create a new Close and Closer 135 | pub fn new() -> (Close, Closer) { 136 | let (sender, receiver) = watch::channel(None); 137 | let close = Close { receiver }; 138 | let closer = Closer { sender }; 139 | (close, closer) 140 | } 141 | 142 | /// Wait for the close signal 143 | pub async fn recv(mut self) -> Result<(), CloseError> { 144 | self.receiver.changed().await.map_err(|_| CloseError) 145 | } 146 | } 147 | 148 | impl Closer { 149 | /// Send the close signal and consume this closer 150 | pub fn close(self) -> Result<(), CloseError> { 151 | self.sender.send(Some(())).map_err(|_| CloseError) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /core/src/socket.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | #[cfg(test)] 3 | use mockall::automock; 4 | use pin_project_lite::pin_project; 5 | use std::{io, net::SocketAddr}; 6 | use tokio::{ 7 | io::{AsyncRead, AsyncWrite}, 8 | net::{TcpListener as TokioTcpListener, TcpStream as TokioTcpStream}, 9 | }; 10 | 11 | #[cfg(not(test))] 12 | 
use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf}; 13 | 14 | /// The TcpListener interface we need to mock 15 | #[cfg_attr(test, automock(type Stream=TcpStream;))] 16 | #[async_trait] 17 | pub trait SocketListener: Sized + Send + Sync { 18 | /// The associated listener interface to be mocked too 19 | type Stream: SocketStream + 'static; 20 | 21 | /// Creates a new SocketListener, which will be bound to the specified address. 22 | async fn bind(addr: &str) -> io::Result 23 | where 24 | Self: Sized; 25 | 26 | /// Accepts a new incoming connection from this listener. 27 | async fn accept(&self) -> io::Result<(Self::Stream, SocketAddr)>; 28 | } 29 | 30 | /// The TcpStream interface we need to mock 31 | #[cfg_attr(test, automock)] 32 | #[async_trait] 33 | pub trait SocketStream: Sized + Send + Sync { 34 | /// Opens a TCP connection to a remote host. 35 | async fn connect(addr: &str) -> io::Result 36 | where 37 | Self: Sized + 'static; 38 | 39 | /// Splits the inner `TcpStream` into a read half and a write half, which 40 | /// can be used to read and write the stream concurrently. 
41 | fn into_split(self) -> (ReadStream, WriteStream); 42 | } 43 | 44 | /// A simple wrapper around Tokio TcpListener to make it mockable 45 | #[derive(Debug)] 46 | pub struct TcpListener { 47 | inner: TokioTcpListener, 48 | } 49 | 50 | /// A simple wrapper around Tokio TcpStream to make it mockable 51 | #[derive(Debug)] 52 | pub struct TcpStream { 53 | inner: TokioTcpStream, 54 | } 55 | 56 | #[async_trait] 57 | impl SocketListener for TcpListener { 58 | type Stream = TcpStream; 59 | 60 | async fn bind(addr: &str) -> io::Result 61 | where 62 | Self: Sized, 63 | { 64 | Ok(TcpListener { 65 | inner: TokioTcpListener::bind(addr).await?, 66 | }) 67 | } 68 | 69 | async fn accept(&self) -> io::Result<(Self::Stream, SocketAddr)> { 70 | let (stream, addr) = self.inner.accept().await?; 71 | let wrapper = TcpStream { inner: stream }; 72 | Ok((wrapper, addr)) 73 | } 74 | } 75 | #[async_trait] 76 | impl SocketStream for TcpStream { 77 | async fn connect(addr: &str) -> io::Result 78 | where 79 | Self: Sized, 80 | { 81 | let inner = TokioTcpStream::connect(addr).await?; 82 | Ok(TcpStream { inner }) 83 | } 84 | 85 | #[cfg(not(test))] 86 | fn into_split(self) -> (ReadStream, WriteStream) { 87 | let (read_half, write_half) = self.inner.into_split(); 88 | (ReadStream::new(read_half), WriteStream::new(write_half)) 89 | } 90 | 91 | #[cfg(test)] 92 | fn into_split(self) -> (ReadStream, WriteStream) { 93 | unimplemented!("must mock") 94 | } 95 | } 96 | 97 | #[cfg(not(test))] 98 | type ReadHalf = OwnedReadHalf; 99 | #[cfg(test)] 100 | type ReadHalf = tokio_test::io::Mock; 101 | 102 | #[cfg(not(test))] 103 | type WriteHalf = OwnedWriteHalf; 104 | #[cfg(test)] 105 | type WriteHalf = tokio_test::io::Mock; 106 | 107 | pin_project! { 108 | /// Wrapper for OwnedReadHalf for mocking 109 | #[derive(Debug)] 110 | pub struct ReadStream { 111 | #[pin] 112 | inner: ReadHalf, 113 | } 114 | } 115 | 116 | pin_project! 
{ 117 | /// Wrapper for OwnedWriteHalf for mocking 118 | #[derive(Debug)] 119 | pub struct WriteStream { 120 | #[pin] 121 | inner: WriteHalf, 122 | 123 | } 124 | } 125 | 126 | #[cfg_attr(test, automock)] 127 | impl ReadStream { 128 | pub(crate) fn new(inner: ReadHalf) -> ReadStream { 129 | ReadStream { inner } 130 | } 131 | } 132 | 133 | #[cfg_attr(test, automock)] 134 | impl WriteStream { 135 | pub(crate) fn new(inner: WriteHalf) -> WriteStream { 136 | WriteStream { inner } 137 | } 138 | } 139 | 140 | impl AsyncRead for ReadStream { 141 | fn poll_read( 142 | self: std::pin::Pin<&mut Self>, 143 | cx: &mut std::task::Context<'_>, 144 | buf: &mut tokio::io::ReadBuf<'_>, 145 | ) -> std::task::Poll> { 146 | self.project().inner.poll_read(cx, buf) 147 | } 148 | } 149 | 150 | impl AsyncWrite for WriteStream { 151 | fn poll_write( 152 | self: std::pin::Pin<&mut Self>, 153 | cx: &mut std::task::Context<'_>, 154 | buf: &[u8], 155 | ) -> std::task::Poll> { 156 | self.project().inner.poll_write(cx, buf) 157 | } 158 | 159 | fn poll_flush( 160 | self: std::pin::Pin<&mut Self>, 161 | cx: &mut std::task::Context<'_>, 162 | ) -> std::task::Poll> { 163 | self.project().inner.poll_flush(cx) 164 | } 165 | 166 | fn poll_shutdown( 167 | self: std::pin::Pin<&mut Self>, 168 | cx: &mut std::task::Context<'_>, 169 | ) -> std::task::Poll> { 170 | self.project().inner.poll_shutdown(cx) 171 | } 172 | } 173 | 174 | #[cfg(test)] 175 | mod tests { 176 | use tokio_test::assert_ok; 177 | 178 | use super::*; 179 | 180 | // Dummy test for coverage's sake 181 | #[tokio::test] 182 | async fn test_tcp_stream() { 183 | let (ready_tx, ready_rx) = tokio::sync::oneshot::channel::<()>(); 184 | tokio::spawn(async move { 185 | let listener = TcpListener { 186 | inner: TokioTcpListener::bind("127.0.0.1:9909").await.unwrap(), 187 | }; 188 | let _ = ready_tx.send(()); 189 | let _ = listener.accept().await.unwrap(); 190 | }); 191 | 192 | assert_ok!(ready_rx.await); 193 | let _stream = 
TcpStream::connect("127.0.0.1:9909").await.unwrap(); 194 | // let _ = stream.into_split(); 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /core/src/state.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | net::SocketAddr, 4 | sync::{Arc, Mutex, MutexGuard}, 5 | }; 6 | 7 | use tokio::sync::Mutex as AsyncMutex; 8 | 9 | use crate::{ 10 | proxy::{Links, ProxyConfig, Toxics}, 11 | toxic::{Toxic, ToxicKind}, 12 | }; 13 | 14 | /// The wrapper for the proxy state 15 | #[derive(Debug)] 16 | pub struct ProxyState { 17 | inner: Mutex, 18 | } 19 | 20 | /// The inner state of proxy, holding the list of connected clients and currently active toxics 21 | #[derive(Debug)] 22 | pub struct ProxyStateInner { 23 | /// Socket address -> (Upstream, Downstream) 24 | pub clients: HashMap, 25 | /// The collection of toxics active over upstream and downstream connections 26 | pub toxics: Toxics, 27 | } 28 | 29 | /// The proxy config and state to allow the API server read from it. 30 | /// The config is immutable, but the state is behind a mutex. 
31 | #[derive(Debug, Clone)] 32 | pub struct SharedProxyInfo { 33 | /// The immutable essential proxy config, like the proxy name, upstream and downstream addresses 34 | pub config: Arc, 35 | /// The current proxy state containing the current collection of toxics and the connected clients 36 | pub state: Arc, 37 | } 38 | 39 | impl ProxyState { 40 | /// Initialize an empty proxy state 41 | pub fn new(toxics: Toxics) -> Self { 42 | ProxyState { 43 | inner: Mutex::new(ProxyStateInner { 44 | clients: HashMap::new(), 45 | toxics, 46 | }), 47 | } 48 | } 49 | 50 | /// Get the inner state, or panic if the lock is poisoned 51 | pub fn lock(&self) -> MutexGuard { 52 | self.inner.lock().expect("ProxyState poisoned") 53 | } 54 | } 55 | 56 | impl SharedProxyInfo { 57 | /// Return a new clone of the inner proxy config with owned strings 58 | pub fn clone_config(&self) -> ProxyConfig { 59 | (*self.config).clone() 60 | } 61 | } 62 | 63 | #[allow(missing_copy_implementations)] 64 | /// The state for stateful toxics 65 | #[derive(Debug, PartialEq)] 66 | pub enum ToxicState { 67 | /// LimitData toxic keeps track of the bytes transmitted 68 | LimitData { 69 | /// Bytes transmitted since the opening of a client - upstream proxy connection, per client 70 | bytes_transmitted: usize, 71 | }, 72 | } 73 | 74 | impl ToxicState { 75 | /// Initialize a ToxicState for the ToxicKind, if the ToxicKind is stateful 76 | pub fn for_toxic_kind(kind: &ToxicKind) -> Option { 77 | match kind { 78 | ToxicKind::LimitData { .. 
} => Some(ToxicState::LimitData { 79 | bytes_transmitted: 0, 80 | }), 81 | _ => None, 82 | } 83 | } 84 | } 85 | 86 | #[derive(Debug)] 87 | pub(crate) struct ToxicStateHolder { 88 | inner: Mutex>>>, 89 | } 90 | 91 | impl ToxicStateHolder { 92 | pub(crate) fn for_toxics(toxics: &Toxics) -> Option> { 93 | let toxics_pair: [&[Toxic]; 2] = [&toxics.upstream, &toxics.downstream]; 94 | let stateful_toxics: Vec<&Toxic> = toxics_pair 95 | .iter() 96 | .flat_map(|direction_toxics| { 97 | direction_toxics 98 | .iter() 99 | .filter(|toxic| toxic.kind.is_stateful()) 100 | }) 101 | .collect(); 102 | 103 | if stateful_toxics.is_empty() { 104 | None 105 | } else { 106 | let mut state_map: HashMap>> = HashMap::new(); 107 | 108 | for toxic in stateful_toxics { 109 | if let Some(initial_toxic_state) = ToxicState::for_toxic_kind(&toxic.kind) { 110 | state_map.insert( 111 | toxic.name.to_owned(), 112 | Arc::new(AsyncMutex::new(initial_toxic_state)), 113 | ); 114 | } 115 | } 116 | Some(Arc::new(ToxicStateHolder { 117 | inner: Mutex::new(state_map), 118 | })) 119 | } 120 | } 121 | 122 | pub(crate) fn get_state_for_toxic( 123 | &self, 124 | toxic_name: &str, 125 | ) -> Option>> { 126 | let inner = self.inner.lock().expect("ToxicStateHolder lock poisoned"); 127 | inner 128 | .get(toxic_name) 129 | .map(Arc::clone) 130 | } 131 | } 132 | 133 | #[cfg(test)] 134 | mod tests { 135 | use super::*; 136 | use crate::toxic::StreamDirection; 137 | 138 | #[tokio::test] 139 | async fn initializes_toxic_state_for_limit_data() { 140 | let toxics = Toxics { 141 | upstream: Vec::new(), 142 | downstream: vec![Toxic { 143 | kind: ToxicKind::LimitData { bytes: 50000 }, 144 | name: "limiter".to_owned(), 145 | toxicity: 0.5, 146 | direction: StreamDirection::Downstream, 147 | }], 148 | }; 149 | let holder = ToxicStateHolder::for_toxics(&toxics); 150 | assert!(holder.is_some()); 151 | assert!( 152 | holder 153 | .clone() 154 | .unwrap() 155 | .get_state_for_toxic("wrong") 156 | .is_none() 157 | ); 158 | let 
state = holder.unwrap().get_state_for_toxic("limiter"); 159 | assert!(state.is_some()); 160 | let state = state.unwrap(); 161 | let data = state.lock().await; 162 | assert_eq!( 163 | *data, 164 | ToxicState::LimitData { 165 | bytes_transmitted: 0 166 | } 167 | ); 168 | } 169 | 170 | #[test] 171 | fn initializes_no_toxic_state_for_latency() { 172 | let toxics = Toxics { 173 | upstream: Vec::new(), 174 | downstream: vec![Toxic { 175 | kind: ToxicKind::Latency { 176 | latency: 40, 177 | jitter: 0, 178 | }, 179 | name: "lat".to_owned(), 180 | toxicity: 0.5, 181 | direction: StreamDirection::Downstream, 182 | }], 183 | }; 184 | let holder = ToxicStateHolder::for_toxics(&toxics); 185 | assert!(holder.is_none()); 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /core/src/stream.rs: -------------------------------------------------------------------------------- 1 | use crate::signal::Stop; 2 | use crate::socket::{ReadStream, WriteStream}; 3 | use bytes::{Bytes, BytesMut}; 4 | use futures::{Sink, SinkExt, Stream, StreamExt}; 5 | use std::io; 6 | use std::pin::Pin; 7 | 8 | use tokio_util::codec::{BytesCodec, FramedRead, FramedWrite}; 9 | 10 | pub(crate) type Read = FramedRead; 11 | pub(crate) type Write = FramedWrite; 12 | 13 | pub(crate) async fn forward( 14 | reader: &mut Read, 15 | writer: &mut Write, 16 | stop: &mut Stop, 17 | ) -> io::Result<()> { 18 | while !stop.stop_received() { 19 | let maybe_res: Option> = tokio::select! 
{ 20 | res = reader.next() => res, 21 | _ = stop.recv() => None 22 | }; 23 | if let Some(res) = maybe_res { 24 | match res { 25 | Ok(chunk) => { 26 | if let Err(_err) = writer.send(chunk.into()).await { 27 | // writer closed 28 | break; 29 | } 30 | } 31 | Err(err) => { 32 | // reader closed 33 | return Err(err); 34 | } 35 | } 36 | } else { 37 | // stop signal received 38 | break; 39 | } 40 | } 41 | Ok(()) 42 | } 43 | 44 | pub(crate) async fn forward_read( 45 | mut reader: Read, 46 | mut writer: Pin<&mut impl Sink>, 47 | stop: &mut Stop, 48 | ) -> io::Result { 49 | while !stop.stop_received() { 50 | let maybe_res: Option> = tokio::select! { 51 | res = reader.next() => res, 52 | _ = stop.recv() => None 53 | }; 54 | if let Some(res) = maybe_res { 55 | match res { 56 | Ok(chunk) => { 57 | if let Err(_err) = writer.send(chunk.into()).await { 58 | // writer channel closed 59 | break; 60 | } 61 | } 62 | Err(err) => { 63 | // reader i/o error 64 | return Err(err); 65 | } 66 | } 67 | } else { 68 | // stop signal received 69 | break; 70 | } 71 | } 72 | Ok(reader) 73 | } 74 | 75 | pub(crate) async fn forward_write( 76 | mut reader: Pin<&mut impl Stream>, 77 | mut writer: Write, 78 | stop: &mut Stop, 79 | ) -> io::Result { 80 | while !stop.stop_received() { 81 | let maybe_chunk = tokio::select! { 82 | res = reader.next() => res, 83 | _ = stop.recv() => None 84 | }; 85 | if let Some(chunk) = maybe_chunk { 86 | if writer.send(chunk).await.is_err() { 87 | // writer channel closed 88 | break; 89 | } 90 | } else { 91 | break; 92 | } 93 | } 94 | Ok(writer) 95 | } 96 | -------------------------------------------------------------------------------- /core/src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | // Define "integration tests" here so we have private access, and we don't have to recompile the library. 
2 | 3 | #[cfg(test)] 4 | mod proxy; 5 | #[cfg(test)] 6 | mod socket_mocks; 7 | -------------------------------------------------------------------------------- /core/src/tests/proxy.rs: -------------------------------------------------------------------------------- 1 | use crate::signal::{Close, Stop}; 2 | use crate::socket::{ReadStream, WriteStream}; 3 | use crate::tests::socket_mocks::*; 4 | use crate::toxic::{StreamDirection, Toxic, ToxicKind}; 5 | use crate::{ 6 | link::Link, 7 | proxy::{ProxyConfig, ProxyRunner, Runner, Toxics}, 8 | }; 9 | use lazy_static::lazy_static; 10 | use mockall::predicate; 11 | use std::{ 12 | io, 13 | net::SocketAddr, 14 | sync::{Arc, Mutex}, 15 | }; 16 | use tokio::sync::Mutex as AsyncMutex; 17 | use tokio_test::{assert_err, assert_ok, io as test_io}; 18 | use tokio_util::codec::{BytesCodec, FramedRead, FramedWrite}; 19 | 20 | lazy_static! { 21 | static ref MOCK_LOCK: AsyncMutex<()> = AsyncMutex::new(()); 22 | } 23 | 24 | #[tokio::test] 25 | async fn initialize_proxy_no_toxics_accept_fails() { 26 | let _lock = MOCK_LOCK.lock().await; 27 | let listen = "127.0.0.1:5431"; 28 | let config = ProxyConfig { 29 | name: "foo".to_owned(), 30 | listen: listen.to_owned(), 31 | upstream: "127.0.0.1:5432".to_owned(), 32 | enabled: true, 33 | rand_seed: None, 34 | }; 35 | let expected_config = config.clone(); 36 | let ctx = MockMemoryListener::bind_context(); 37 | ctx.expect().with(predicate::eq(listen)).returning(|_c| { 38 | let mut m = MockMemoryListener::default(); 39 | m.expect_accept() 40 | .returning(|| Err(io::Error::new(io::ErrorKind::Other, "oopsie"))); 41 | Ok(m) 42 | }); 43 | 44 | let toxics = Toxics::empty(); 45 | let proxy = ProxyRunner::initialize_proxy::(config, toxics).await; 46 | assert_ok!(&proxy); 47 | let (listener, info) = proxy.unwrap(); 48 | assert_eq!(expected_config, *info.config); 49 | 50 | let (_event_sender, event_receiver) = bmrng::channel(1); 51 | 52 | let (stop, _stopper) = Stop::new(); 53 | let (_close, closer) = 
Close::new(); 54 | 55 | let result = 56 | ProxyRunner::run_proxy::(listener, info, event_receiver, stop, closer) 57 | .await; 58 | assert_err!(&result); 59 | assert_eq!(result.unwrap_err().kind(), io::ErrorKind::Other,); 60 | } 61 | 62 | #[tokio::test] 63 | async fn run_proxy_no_toxics_forward() { 64 | let _lock = MOCK_LOCK.lock().await; 65 | let listen = "127.0.0.1:5431"; 66 | let upstream = "127.0.0.1:5432"; 67 | let config = ProxyConfig { 68 | name: "foo".to_owned(), 69 | listen: listen.to_owned(), 70 | upstream: upstream.to_owned(), 71 | enabled: true, 72 | rand_seed: None, 73 | }; 74 | let expected_config = config.clone(); 75 | let listener_ctx = MockMemoryListener::bind_context(); 76 | 77 | let listeners = Arc::new(Mutex::new(0)); 78 | 79 | listener_ctx 80 | .expect() 81 | .with(predicate::eq(listen)) 82 | .returning(move |c| { 83 | assert_eq!(listen, c); 84 | let listeners = listeners.clone(); 85 | 86 | let mut listener = MockMemoryListener::default(); 87 | listener.expect_accept().returning(move || { 88 | let mut val = listeners.lock().unwrap(); 89 | // only accept one connection 90 | if *val > 0 { 91 | return Err(io::Error::new(io::ErrorKind::ConnectionRefused, "done")); 92 | } 93 | *val += 1; 94 | let (client_read, mut client_handle_read) = 95 | test_io::Builder::new().build_with_handle(); 96 | let (client_write, mut client_handle_write) = 97 | test_io::Builder::new().build_with_handle(); 98 | 99 | client_handle_read.read(b"client writes"); 100 | client_handle_write.write(b"upstream writes"); 101 | 102 | let mut stream = MockMemoryStream::default(); 103 | stream.expect_into_split().return_once_st(|| { 104 | (ReadStream::new(client_read), WriteStream::new(client_write)) 105 | }); 106 | Ok((stream, SocketAddr::from(([127, 0, 0, 1], 29991)))) 107 | }); 108 | Ok(listener) 109 | }); 110 | 111 | let upstream_ctx = MockMemoryStream::connect_context(); 112 | let (upstream_read, mut upstream_handle_read) = test_io::Builder::new().build_with_handle(); 113 | let 
(upstream_write, mut upstream_handle_write) = test_io::Builder::new().build_with_handle(); 114 | 115 | upstream_handle_write.write(b"upstream writes"); 116 | upstream_handle_read.read(b"client writes"); 117 | // TODO: mock into_split with the mock stream instead of the real thing 118 | upstream_ctx 119 | .expect() 120 | .with(predicate::eq(upstream)) 121 | .return_once(move |c| { 122 | assert_eq!(upstream, c); 123 | let mut stream = MockMemoryStream::default(); 124 | 125 | stream.expect_into_split().return_once_st(|| { 126 | ( 127 | ReadStream::new(upstream_read), 128 | WriteStream::new(upstream_write), 129 | ) 130 | }); 131 | Ok(stream) 132 | }); 133 | 134 | let toxics = Toxics::empty(); 135 | let proxy = ProxyRunner::initialize_proxy::(config, toxics).await; 136 | assert_ok!(&proxy); 137 | let (listener, info) = proxy.unwrap(); 138 | assert_eq!(expected_config, *info.config); 139 | 140 | let (_event_sender, event_receiver) = bmrng::channel(1); 141 | 142 | let (stop, stopper) = Stop::new(); 143 | let (close, closer) = Close::new(); 144 | 145 | let handle = tokio::spawn(async move { 146 | let result = ProxyRunner::run_proxy(listener, info, event_receiver, stop, closer).await; 147 | assert_err!(result); 148 | }); 149 | assert_ok!(handle.await); 150 | stopper.stop(); 151 | let _ = close.recv().await; 152 | } 153 | 154 | #[tokio::test] 155 | async fn run_proxy_with_slicer() { 156 | let _lock = MOCK_LOCK.lock().await; 157 | let listen = "127.0.0.1:5431"; 158 | let upstream = "127.0.0.1:5432"; 159 | let config = ProxyConfig { 160 | name: "foo".to_owned(), 161 | listen: listen.to_owned(), 162 | upstream: upstream.to_owned(), 163 | enabled: true, 164 | rand_seed: None, 165 | }; 166 | let expected_config = config.clone(); 167 | let listener_ctx = MockMemoryListener::bind_context(); 168 | 169 | let listeners = Arc::new(Mutex::new(0)); 170 | 171 | listener_ctx 172 | .expect() 173 | .with(predicate::eq(listen)) 174 | .returning(move |c| { 175 | assert_eq!(listen, c); 176 | 
let listeners = listeners.clone(); 177 | 178 | let mut listener = MockMemoryListener::default(); 179 | listener.expect_accept().returning(move || { 180 | let mut val = listeners.lock().unwrap(); 181 | // only accept one connection 182 | if *val > 0 { 183 | return Err(io::Error::new(io::ErrorKind::ConnectionRefused, "done")); 184 | } 185 | *val += 1; 186 | let (client_read, mut client_handle_read) = 187 | test_io::Builder::new().build_with_handle(); 188 | let (client_write, mut client_handle_write) = 189 | test_io::Builder::new().build_with_handle(); 190 | 191 | client_handle_read.read(b"client writes"); 192 | client_handle_write.write(b"upstream writes"); 193 | 194 | let mut stream = MockMemoryStream::default(); 195 | stream.expect_into_split().return_once_st(|| { 196 | (ReadStream::new(client_read), WriteStream::new(client_write)) 197 | }); 198 | Ok((stream, SocketAddr::from(([127, 0, 0, 1], 29991)))) 199 | }); 200 | Ok(listener) 201 | }); 202 | 203 | let upstream_ctx = MockMemoryStream::connect_context(); 204 | let (upstream_read, mut upstream_handle_read) = test_io::Builder::new().build_with_handle(); 205 | let (upstream_write, mut upstream_handle_write) = test_io::Builder::new().build_with_handle(); 206 | 207 | upstream_handle_write.write(b"upstream writes"); 208 | upstream_handle_read.read(b"client writes"); 209 | upstream_ctx 210 | .expect() 211 | .with(predicate::eq(upstream)) 212 | .return_once(move |c| { 213 | assert_eq!(upstream, c); 214 | let mut stream = MockMemoryStream::default(); 215 | 216 | stream.expect_into_split().return_once_st(|| { 217 | ( 218 | ReadStream::new(upstream_read), 219 | WriteStream::new(upstream_write), 220 | ) 221 | }); 222 | Ok(stream) 223 | }); 224 | 225 | let toxics = Toxics { 226 | upstream: vec![Toxic { 227 | name: "chop chop".to_owned(), 228 | kind: ToxicKind::Slicer { 229 | average_size: 12, 230 | size_variation: 4, 231 | delay: 0, 232 | }, 233 | direction: StreamDirection::Upstream, 234 | toxicity: 1.0, 235 | }], 236 | 
/// Verifies that a `Link` can be established and then cleanly disbanded
/// after a stop signal, without hanging or returning an error.
#[tokio::test]
async fn test_link_disband() {
    // Mock client I/O pair; the handles are kept alive (unused) so the
    // mock streams stay open until the test ends.
    let (read, _handle_read) = test_io::Builder::new().build_with_handle();
    let (write, _handle_write) = test_io::Builder::new().build_with_handle();
    let read = ReadStream::new(read);
    let write = WriteStream::new(write);
    // Frame the raw byte streams the same way the proxy runner does.
    let read = FramedRead::with_capacity(read, BytesCodec::new(), 1024);
    let write = FramedWrite::new(write, BytesCodec::new());

    let (stop, stopper) = Stop::new();
    let listen = "127.0.0.1:5431";
    let upstream = "127.0.0.1:5432";
    let config = ProxyConfig {
        name: "foo".to_owned(),
        listen: listen.to_owned(),
        upstream: upstream.to_owned(),
        enabled: true,
        rand_seed: None,
    };

    let addr: SocketAddr = SocketAddr::from(([127, 0, 0, 1], 29991));
    let mut link = Link::new(addr, StreamDirection::Upstream, config, stop);
    // No toxics (empty Vec) and no shared toxic state (None).
    link.establish(read, write, Vec::new(), None);
    // Signal stop before disbanding; disband should then resolve promptly.
    stopper.stop();
    let res = link.disband().await;
    assert_ok!(res);
}
/// The direction of an I/O channel within a proxied connection.
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum StreamDirection {
    /// Represents an I/O channel from server to the client
    #[serde(rename = "downstream")]
    Downstream,
    /// Represents an I/O channel from the client to the server
    #[serde(rename = "upstream")]
    Upstream,
}
/// Something that can be attached to a link to modify the way the data is passed through
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Toxic {
    #[serde(flatten)]
    /// The kind which also contains kind-specific attributes
    pub kind: ToxicKind,
    /// The unique name for this toxic
    #[serde(default = "default_name")]
    pub name: String,
    /// The probability of this toxic being active
    #[serde(default = "default_toxicity")]
    pub toxicity: f32,
    #[serde(alias = "stream", default = "default_direction")]
    /// The direction this toxic is installed on
    pub direction: StreamDirection,
}
with the same name with this new toxic 95 | UpdateToxic(Toxic), 96 | /// Remove a toxic by name 97 | RemoveToxic(String), 98 | /// Reset. Remove all toxics 99 | RemoveAllToxics, 100 | } 101 | 102 | /// A proxy state update event passed to the proxy runner task 103 | #[doc(hidden)] 104 | #[derive(Debug, Clone, PartialEq)] 105 | pub struct ToxicEvent { 106 | pub proxy_name: String, 107 | pub kind: ToxicEventKind, 108 | } 109 | 110 | /// The result return after the toxic event is processed. May return Ok or ToxicUpdateError 111 | pub type ToxicEventResult = Result<(), ToxicUpdateError>; 112 | 113 | fn default_name() -> String { 114 | "".to_owned() 115 | } 116 | fn default_toxicity() -> f32 { 117 | 1.0 118 | } 119 | 120 | fn default_direction() -> StreamDirection { 121 | StreamDirection::Downstream 122 | } 123 | 124 | fn default_zero() -> u64 { 125 | 0 126 | } 127 | 128 | impl fmt::Display for StreamDirection { 129 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 130 | match self { 131 | StreamDirection::Downstream => write!(f, "downstream"), 132 | StreamDirection::Upstream => write!(f, "upstream"), 133 | } 134 | } 135 | } 136 | 137 | impl Toxic { 138 | /// Get the toxic name 139 | pub fn get_name(&self) -> &str { 140 | &self.name 141 | } 142 | 143 | /// Sets a name to this Toxic if the name is an empty string. 144 | /// The default name format is {type}_{direction} 145 | pub fn set_default_name(&mut self) { 146 | if self.name.is_empty() { 147 | self.name = format!("{}_{}", self.kind.get_name(), self.direction); 148 | } 149 | } 150 | } 151 | 152 | impl ToxicEvent { 153 | /// Create a new toxic event 154 | pub fn new(proxy_name: String, kind: ToxicEventKind) -> Self { 155 | ToxicEvent { proxy_name, kind } 156 | } 157 | } 158 | 159 | impl ToxicKind { 160 | pub(crate) fn has_close_logic(&self) -> bool { 161 | matches!( 162 | self, 163 | ToxicKind::SlowClose { .. } | ToxicKind::LimitData { .. 
} 164 | ) 165 | } 166 | 167 | pub(crate) fn is_stateful(&self) -> bool { 168 | matches!(self, ToxicKind::LimitData { .. }) 169 | } 170 | 171 | pub(crate) fn chunk_buffer_capacity(&self) -> usize { 172 | match self { 173 | ToxicKind::Latency { .. } => 1024, 174 | _ => 1, 175 | } 176 | } 177 | 178 | /// Returns the URL-safe name for the toxic kind 179 | pub fn get_name(&self) -> &'static str { 180 | match self { 181 | ToxicKind::Noop => "noop", 182 | ToxicKind::Latency { .. } => "latency", 183 | ToxicKind::Timeout { .. } => "timeout", 184 | ToxicKind::Bandwidth { .. } => "bandwidth", 185 | ToxicKind::SlowClose { .. } => "slow_close", 186 | ToxicKind::Slicer { .. } => "slicer", 187 | ToxicKind::LimitData { .. } => "limit_data", 188 | } 189 | } 190 | } 191 | 192 | pub(super) fn update_toxic_list_in_place( 193 | toxics: &mut Vec, 194 | event_kind: ToxicEventKind, 195 | direction: StreamDirection, 196 | ) -> Result<(), ToxicEventKind> { 197 | match event_kind { 198 | ToxicEventKind::AddToxic(toxic) => { 199 | if toxic.direction == direction { 200 | toxics.push(toxic); 201 | } else { 202 | return Err(ToxicEventKind::AddToxic(toxic)); 203 | } 204 | } 205 | ToxicEventKind::UpdateToxic(toxic) => { 206 | let old_toxic = if toxic.direction == direction { 207 | toxics 208 | .iter_mut() 209 | .find(|el| el.get_name() == toxic.get_name()) 210 | } else { 211 | None 212 | }; 213 | if let Some(old_toxic) = old_toxic { 214 | let _ = mem::replace(old_toxic, toxic); 215 | } else { 216 | return Err(ToxicEventKind::UpdateToxic(toxic)); 217 | } 218 | } 219 | ToxicEventKind::RemoveToxic(toxic_name) => { 220 | let index = toxics 221 | .iter() 222 | .position(|el| el.get_name() == toxic_name) 223 | .ok_or(ToxicEventKind::RemoveToxic(toxic_name))?; 224 | toxics.remove(index); 225 | } 226 | ToxicEventKind::RemoveAllToxics => { 227 | toxics.clear(); 228 | } 229 | } 230 | Ok(()) 231 | } 232 | 233 | impl fmt::Display for Toxic { 234 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 235 
| write!(f, "{}: {}", self.name, self.kind) 236 | } 237 | } 238 | 239 | impl fmt::Display for ToxicKind { 240 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 241 | match self { 242 | ToxicKind::Noop => { 243 | write!(f, "Noop") 244 | } 245 | ToxicKind::Latency { latency, jitter } => { 246 | write!(f, "Latency({}, {})", latency, jitter) 247 | } 248 | ToxicKind::Timeout { timeout } => { 249 | write!(f, "Timeout({})", timeout) 250 | } 251 | ToxicKind::Bandwidth { rate } => { 252 | write!(f, "Bandwidth({})", rate) 253 | } 254 | ToxicKind::SlowClose { delay } => { 255 | write!(f, "SlowClose({})", delay) 256 | } 257 | ToxicKind::Slicer { 258 | average_size, 259 | size_variation, 260 | delay, 261 | } => { 262 | write!(f, "Slicer({}, {}, {})", average_size, size_variation, delay) 263 | } 264 | ToxicKind::LimitData { bytes } => { 265 | write!(f, "LimitData({})", bytes) 266 | } 267 | } 268 | } 269 | } 270 | 271 | #[cfg(test)] 272 | mod tests { 273 | use super::*; 274 | use serde_json::{from_str, to_string, Error as SerdeError}; 275 | 276 | #[test] 277 | fn test_display_noop() { 278 | let toxic = Toxic { 279 | kind: ToxicKind::Noop, 280 | name: "boo".to_owned(), 281 | toxicity: 1.0, 282 | direction: StreamDirection::Upstream, 283 | }; 284 | let expected = "boo: Noop"; 285 | assert_eq!(expected, toxic.to_string()); 286 | } 287 | 288 | #[test] 289 | fn test_display_latency() { 290 | let toxic = Toxic { 291 | kind: ToxicKind::Latency { 292 | latency: 49, 293 | jitter: 5, 294 | }, 295 | name: "t2".to_owned(), 296 | toxicity: 1.0, 297 | direction: StreamDirection::Upstream, 298 | }; 299 | let expected = "t2: Latency(49, 5)"; 300 | assert_eq!(expected, toxic.to_string()); 301 | } 302 | 303 | #[test] 304 | fn test_display_timeout() { 305 | let toxic = Toxic { 306 | kind: ToxicKind::Timeout { timeout: 2000 }, 307 | name: "t3".to_owned(), 308 | toxicity: 1.0, 309 | direction: StreamDirection::Upstream, 310 | }; 311 | let expected = "t3: Timeout(2000)"; 312 | 
assert_eq!(expected, toxic.to_string()); 313 | } 314 | 315 | #[test] 316 | fn test_display_bandwidth() { 317 | let toxic = Toxic { 318 | kind: ToxicKind::Bandwidth { rate: 2345 }, 319 | name: "t4".to_owned(), 320 | toxicity: 1.0, 321 | direction: StreamDirection::Upstream, 322 | }; 323 | let expected = "t4: Bandwidth(2345)"; 324 | assert_eq!(expected, toxic.to_string()); 325 | } 326 | 327 | #[test] 328 | fn test_display_slicer() { 329 | let toxic = Toxic { 330 | kind: ToxicKind::Slicer { 331 | average_size: 128, 332 | size_variation: 64, 333 | delay: 100, 334 | }, 335 | name: "t5".to_owned(), 336 | toxicity: 1.0, 337 | direction: StreamDirection::Upstream, 338 | }; 339 | let expected = "t5: Slicer(128, 64, 100)"; 340 | assert_eq!(expected, toxic.to_string()); 341 | } 342 | 343 | #[test] 344 | fn test_display_slow_close() { 345 | let toxic = Toxic { 346 | kind: ToxicKind::SlowClose { delay: 1200 }, 347 | name: "t6".to_owned(), 348 | toxicity: 1.0, 349 | direction: StreamDirection::Upstream, 350 | }; 351 | let expected = "t6: SlowClose(1200)"; 352 | assert_eq!(expected, toxic.to_string()); 353 | } 354 | 355 | #[test] 356 | fn test_display_limit_data() { 357 | let toxic = Toxic { 358 | kind: ToxicKind::LimitData { bytes: 64500 }, 359 | name: "t7".to_owned(), 360 | toxicity: 1.0, 361 | direction: StreamDirection::Upstream, 362 | }; 363 | let expected = "t7: LimitData(64500)"; 364 | assert_eq!(expected, toxic.to_string()); 365 | } 366 | 367 | #[test] 368 | fn test_noop_serde() { 369 | let toxic = Toxic { 370 | kind: ToxicKind::Noop, 371 | name: "foo".to_owned(), 372 | toxicity: 0.67, 373 | direction: StreamDirection::Downstream, 374 | }; 375 | let serialized = to_string(&toxic).unwrap(); 376 | let expected = 377 | "{\"type\":\"noop\",\"name\":\"foo\",\"toxicity\":0.67,\"direction\":\"downstream\"}"; 378 | assert_eq!(expected, serialized); 379 | 380 | let deserialized = from_str(&serialized).unwrap(); 381 | assert_eq!(toxic, deserialized); 382 | } 383 | 384 | #[test] 385 
/// Deserializing a noop toxic without a `toxicity` field must fall back to
/// the default toxicity of 1.0.
///
/// Renamed from `test_noop_ser_without_toxicity`: this test exercises
/// deserialization, so it now matches the `_de_` naming used by the other
/// deserialization tests in this module.
#[test]
fn test_noop_de_without_toxicity() {
    let input = "{\"type\":\"noop\",\"name\":\"foo\",\"direction\":\"upstream\"}";
    let expected = Toxic {
        kind: ToxicKind::Noop,
        name: "foo".to_owned(),
        toxicity: 1.0,
        direction: StreamDirection::Upstream,
    };

    // `input` is already a `&str`; no need to borrow it again.
    let deserialized = from_str(input).unwrap();
    assert_eq!(expected, deserialized);
}
"{\"type\":\"latency\",\"attributes\":{\"latency\":21,\"jitter\":0},\"name\":\"lat\",\"toxicity\":1,\"direction\":\"downstream\"}"; 450 | let input_err = 451 | "{\"type\":\"latency\",\"attributes\":{\"latency\":-21,\"jitter\":0},\"name\":\"lat\",\"toxicity\":1,\"direction\":\"downstream\"}"; 452 | let deserialized_ok: Result = from_str(input_ok); 453 | let deserialized_err: Result = from_str(input_err); 454 | 455 | assert!(deserialized_ok.is_ok()); 456 | assert_eq!( 457 | "invalid value: integer `-21`, expected u64 at line 1 column 109", 458 | deserialized_err.unwrap_err().to_string() 459 | ); 460 | } 461 | 462 | #[test] 463 | fn test_noop_de_without_name() { 464 | let input = "{\"type\":\"noop\"}"; 465 | let expected = Toxic { 466 | kind: ToxicKind::Noop, 467 | name: "noop_downstream".to_owned(), 468 | toxicity: 1.0, 469 | direction: StreamDirection::Downstream, 470 | }; 471 | 472 | let mut deserialized: Toxic = from_str(input).unwrap(); 473 | assert_eq!("", &deserialized.name); 474 | deserialized.set_default_name(); 475 | assert_eq!(expected, deserialized); 476 | } 477 | 478 | #[test] 479 | fn test_latency_de_without_name() { 480 | let input = "{\"type\":\"latency\",\"attributes\":{\"latency\":4321,\"jitter\":5}}"; 481 | let expected = Toxic { 482 | kind: ToxicKind::Latency { 483 | latency: 4321, 484 | jitter: 5, 485 | }, 486 | name: "latency_downstream".to_owned(), 487 | toxicity: 1.0, 488 | direction: StreamDirection::Downstream, 489 | }; 490 | 491 | let mut deserialized: Toxic = from_str(input).unwrap(); 492 | assert_eq!("", &deserialized.name); 493 | deserialized.set_default_name(); 494 | assert_eq!(expected, deserialized); 495 | } 496 | 497 | #[test] 498 | fn test_timeout_de_without_name() { 499 | let input = "{\"type\":\"timeout\",\"attributes\":{\"timeout\":2000}}"; 500 | let expected = Toxic { 501 | kind: ToxicKind::Timeout { timeout: 2000 }, 502 | name: "timeout_downstream".to_owned(), 503 | toxicity: 1.0, 504 | direction: StreamDirection::Downstream, 
505 | }; 506 | 507 | let mut deserialized: Toxic = from_str(input).unwrap(); 508 | assert_eq!("", &deserialized.name); 509 | deserialized.set_default_name(); 510 | assert_eq!(expected, deserialized); 511 | } 512 | 513 | #[test] 514 | fn test_bandwidth_de_without_name() { 515 | let input = "{\"type\":\"bandwidth\",\"attributes\":{\"rate\":500}}"; 516 | let expected = Toxic { 517 | kind: ToxicKind::Bandwidth { rate: 500 }, 518 | name: "bandwidth_downstream".to_owned(), 519 | toxicity: 1.0, 520 | direction: StreamDirection::Downstream, 521 | }; 522 | 523 | let mut deserialized: Toxic = from_str(input).unwrap(); 524 | assert_eq!("", &deserialized.name); 525 | deserialized.set_default_name(); 526 | assert_eq!(expected, deserialized); 527 | } 528 | 529 | #[test] 530 | fn test_slow_close_de_without_name() { 531 | let input = "{\"type\":\"slow_close\",\"attributes\":{\"delay\":3000}}"; 532 | let expected = Toxic { 533 | kind: ToxicKind::SlowClose { delay: 3000 }, 534 | name: "slow_close_downstream".to_owned(), 535 | toxicity: 1.0, 536 | direction: StreamDirection::Downstream, 537 | }; 538 | 539 | let mut deserialized: Toxic = from_str(input).unwrap(); 540 | assert_eq!("", &deserialized.name); 541 | deserialized.set_default_name(); 542 | assert_eq!(expected, deserialized); 543 | } 544 | 545 | #[test] 546 | fn test_slicer_de_without_name() { 547 | let input = "{\"type\":\"slicer\",\"attributes\":{\"average_size\":100,\"size_variation\": 9,\"delay\": 50}}"; 548 | let expected = Toxic { 549 | kind: ToxicKind::Slicer { 550 | average_size: 100, 551 | size_variation: 9, 552 | delay: 50, 553 | }, 554 | name: "slicer_downstream".to_owned(), 555 | toxicity: 1.0, 556 | direction: StreamDirection::Downstream, 557 | }; 558 | 559 | let mut deserialized: Toxic = from_str(input).unwrap(); 560 | assert_eq!("", &deserialized.name); 561 | deserialized.set_default_name(); 562 | assert_eq!(expected, deserialized); 563 | } 564 | 565 | #[test] 566 | fn test_limit_data_de_without_name() { 567 | 
let input = "{\"type\":\"limit_data\",\"attributes\":{\"bytes\":1024}}"; 568 | let expected = Toxic { 569 | kind: ToxicKind::LimitData { bytes: 1024 }, 570 | name: "limit_data_downstream".to_owned(), 571 | toxicity: 1.0, 572 | direction: StreamDirection::Downstream, 573 | }; 574 | 575 | let mut deserialized: Toxic = from_str(input).unwrap(); 576 | assert_eq!("", &deserialized.name); 577 | deserialized.set_default_name(); 578 | assert_eq!(expected, deserialized); 579 | } 580 | } 581 | -------------------------------------------------------------------------------- /core/src/toxics/bandwidth.rs: -------------------------------------------------------------------------------- 1 | use super::run_noop; 2 | use bytes::Bytes; 3 | use futures::{Sink, Stream}; 4 | use futures::{SinkExt, StreamExt}; 5 | use std::convert::TryInto; 6 | use std::io; 7 | use tokio::pin; 8 | use tokio::time::sleep; 9 | use tokio::time::Duration; 10 | 11 | const INTERVAL: u64 = 100; 12 | const UNIT: usize = 100; 13 | 14 | pub async fn run_bandwidth( 15 | input: impl Stream, 16 | output: impl Sink, 17 | rate: u64, // in KB/s 18 | ) -> io::Result<()> { 19 | if rate == 0 { 20 | return run_noop(input, output).await; 21 | } 22 | pin!(input); 23 | pin!(output); 24 | 25 | while let Some(chunk) = input.next().await { 26 | let chunk_len: u64 = chunk 27 | .len() 28 | .try_into() 29 | .expect("Could not convert chunk size from usize to u64"); 30 | let mut to_sleep = Duration::from_nanos( 31 | (Duration::from_millis(chunk_len).as_nanos() / rate as u128) 32 | .try_into() 33 | .expect("chunk is too large"), 34 | ); 35 | 36 | let mut chunk = chunk; 37 | let rate: usize = rate 38 | .try_into() 39 | .expect("Could not convert bandwidth rate from u64 to usize"); 40 | 41 | // If the rate is low enough, split the packet up and send in 100 millisecond intervals 42 | while chunk.len() > rate * UNIT { 43 | sleep(Duration::from_millis(INTERVAL)).await; 44 | let to_send = chunk.split_to(UNIT); 45 | if 
output.send(to_send).await.is_err() { 46 | return Err(io::Error::new( 47 | io::ErrorKind::ConnectionReset, 48 | "Write channel closed", 49 | )); 50 | } 51 | to_sleep -= Duration::from_millis(INTERVAL); 52 | } 53 | // sleep's granularity is 1ms 54 | if to_sleep.as_millis() > 0 { 55 | sleep(to_sleep).await; 56 | } 57 | if !chunk.is_empty() && output.send(chunk).await.is_err() { 58 | return Err(io::Error::new( 59 | io::ErrorKind::ConnectionReset, 60 | "Write channel closed", 61 | )); 62 | } 63 | } 64 | 65 | Ok(()) 66 | } 67 | 68 | #[cfg(test)] 69 | mod tests { 70 | use super::*; 71 | use crate::toxics::test_utils::*; 72 | 73 | #[tokio::test] 74 | async fn passthrough_once() { 75 | passthrough_test(|stream, sink| async move { run_bandwidth(stream, sink, 128).await }) 76 | .await; 77 | } 78 | 79 | #[tokio::test] 80 | async fn unlimited_passthrough_once() { 81 | passthrough_test(|stream, sink| async move { run_bandwidth(stream, sink, 0).await }).await; 82 | } 83 | 84 | #[tokio::test] 85 | async fn drop_out_channel_first() { 86 | drop_out_channel_first_test(|stream, sink| async move { 87 | run_bandwidth(stream, sink, 128).await 88 | }) 89 | .await; 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /core/src/toxics/latency.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use futures::{stream, Sink, Stream, StreamExt}; 3 | use rand::distributions::Uniform; 4 | use rand::{rngs::StdRng, Rng, SeedableRng}; 5 | use std::io; 6 | use tokio::time::Duration; 7 | 8 | /// Run the latency toxic 9 | /// 10 | /// This implementation has a slightly different behavior from Shopify's toxiproxy 11 | /// when it comes to randomizing jitter. Toxiproxy uses the global random number 12 | /// generator from Go's rand package. 
There is no equivalent for this in Rust 13 | /// and we don't want to use another Mutex to share a thread-local random generator 14 | /// that's seeded once at startup, so we're seeding a new random generator for every 15 | /// latency toxic, with the same seed startup argument, if available. 16 | /// This would still allow determinism when you need it. 17 | pub async fn run_latency( 18 | input: impl Stream, 19 | output: impl Sink, 20 | latency: u64, 21 | jitter: u64, 22 | rand_seed: Option, 23 | ) -> io::Result<()> { 24 | if jitter == 0 { 25 | let _ = input 26 | .then(|chunk| async move { 27 | tokio::time::sleep(Duration::from_millis(latency)).await; 28 | chunk 29 | }) 30 | .map(Ok) 31 | .forward(output) 32 | .await; 33 | } else { 34 | let range = Uniform::from(0..(jitter * 2)); 35 | let rand_gen = if let Some(seed) = rand_seed { 36 | StdRng::seed_from_u64(seed) 37 | } else { 38 | StdRng::from_entropy() 39 | }; 40 | let jitter_stream = stream::iter(rand_gen.sample_iter(&range)); 41 | let _ = input 42 | .zip(jitter_stream) 43 | .then(|(chunk, add)| async move { 44 | let delay = latency + add - jitter; 45 | tokio::time::sleep(Duration::from_millis(delay)).await; 46 | chunk 47 | }) 48 | .map(Ok) 49 | .forward(output) 50 | .await; 51 | } 52 | 53 | Ok(()) 54 | } 55 | 56 | #[cfg(test)] 57 | mod tests { 58 | 59 | use super::*; 60 | use crate::toxics::test_utils::*; 61 | use futures::{SinkExt, StreamExt}; 62 | use tokio::time::{pause, resume, Instant}; 63 | use tokio_test::assert_ok; 64 | 65 | #[tokio::test] 66 | async fn no_jitter_passthrough_once() { 67 | passthrough_test(|stream, sink| async move { run_latency(stream, sink, 2, 0, None).await }) 68 | .await; 69 | } 70 | 71 | #[tokio::test] 72 | async fn passthrough_once() { 73 | passthrough_test(|stream, sink| async move { run_latency(stream, sink, 2, 2, None).await }) 74 | .await; 75 | } 76 | 77 | #[tokio::test] 78 | async fn random_seed_passthrough_once() { 79 | passthrough_test( 80 | |stream, sink| async move { 
run_latency(stream, sink, 5, 2, Some(42)).await }, 81 | ) 82 | .await; 83 | } 84 | 85 | #[tokio::test] 86 | async fn drop_out_channel_first_with_latency() { 87 | drop_out_channel_first_test(|stream, sink| async move { 88 | run_latency(stream, sink, 2, 1, None).await 89 | }) 90 | .await; 91 | } 92 | 93 | async fn test_latency(latency: u64, jitter: u64, seed: u64) { 94 | let (in_stream, mut in_sink) = create_stream_sink(); 95 | let (mut out_stream, out_sink) = create_stream_sink(); 96 | let data = gen_random_bytes(32); 97 | let expected = Some(data.clone()); 98 | let handle = tokio::spawn(async move { 99 | run_latency(in_stream, out_sink, latency, jitter, Some(seed)).await 100 | }); 101 | 102 | assert_ok!(in_sink.send(data).await); 103 | drop(in_sink); 104 | assert_ok!(handle.await.unwrap()); 105 | assert_eq!(expected, out_stream.next().await); 106 | } 107 | 108 | #[tokio::test] 109 | async fn test_latency_10() { 110 | let latency = 10u64; 111 | let beginning = Instant::now(); 112 | pause(); 113 | test_latency(latency, 0, 0).await; 114 | let duration = Instant::now().duration_since(beginning); 115 | assert!(duration.as_millis() > latency as u128); 116 | resume(); 117 | } 118 | 119 | #[tokio::test] 120 | async fn test_latency_with_jitter() { 121 | let latency = 1000u64; 122 | let jitter = 5u64; 123 | let beginning = Instant::now(); 124 | pause(); 125 | test_latency(latency, jitter, 1).await; 126 | let duration = Instant::now().duration_since(beginning); 127 | assert!(duration.as_millis() > latency as u128); 128 | resume(); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /core/src/toxics/limit_data.rs: -------------------------------------------------------------------------------- 1 | use crate::{signal::Stop, state::ToxicState}; 2 | use bytes::Bytes; 3 | use futures::{Sink, Stream}; 4 | use futures::{SinkExt, StreamExt}; 5 | use std::convert::TryInto; 6 | use std::{io, sync::Arc}; 7 | use tokio::pin; 8 | use 
/// Run the limit_data toxic (the previous doc comment incorrectly said
/// "slicer"): forwards data until a total of `bytes` bytes have been
/// transmitted for this proxy session, then stops the stream.
///
/// The running byte count is persisted in the shared `ToxicState`, so the
/// limit survives re-establishing the link. Panics if `state` is `None` —
/// the runner is expected to always provide state for stateful toxics.
pub(crate) async fn run_limit_data(
    input: impl Stream<Item = Bytes>,
    output: impl Sink<Bytes>,
    mut stop: Stop,
    bytes: u64,
    state: Option<Arc<AsyncMutex<ToxicState>>>,
) -> io::Result<()> {
    let state = state.expect("No toxic state provided to LimitData toxic.");
    pin!(input);
    pin!(output);
    // Hold the state lock for the whole run; the count is written back once
    // on exit below.
    let mut state = state.lock().await;
    let bytes: usize = bytes
        .try_into()
        .expect("Could not convert bytes limit from u64 to usize");

    let mut bytes_transmitted: usize = get_bytes_transmitted(&state);
    let mut result = io::Result::Ok(());

    while !stop.stop_received() {
        if bytes_transmitted < bytes {
            // Race the next chunk against the stop signal; a stop yields
            // `None`, which exits the loop just like end-of-stream.
            let maybe_chunk = tokio::select! {
                res = input.next() => res,
                _ = stop.recv() => None,
            };

            if let Some(mut chunk) = maybe_chunk {
                let remaining: usize = bytes - bytes_transmitted;
                if remaining > 0 {
                    // Trim the chunk so the total never exceeds the budget.
                    chunk.truncate(remaining);
                    let to_send = chunk.len();

                    if output.send(chunk).await.is_ok() {
                        bytes_transmitted += to_send;
                    } else {
                        result = Err(io::Error::new(
                            io::ErrorKind::ConnectionReset,
                            "limit data channel closed",
                        ))
                    }
                } else {
                    break;
                }
            } else {
                break;
            }
        } else {
            break;
        }
    }

    // Persist the updated count so a future run continues from here.
    write_bytes_transmitted(&mut state, bytes_transmitted);
    result
}
toxic: {:?}", state); 80 | } 81 | } 82 | 83 | #[cfg(test)] 84 | mod tests { 85 | use super::*; 86 | use crate::{toxic::ToxicKind, toxics::test_utils::*}; 87 | use futures::{SinkExt, StreamExt}; 88 | use tokio_test::assert_ok; 89 | 90 | #[tokio::test] 91 | #[should_panic(expected = "No toxic state provided to LimitData toxic.")] 92 | async fn panics_without_state() { 93 | let (in_stream, _) = create_stream_sink(); 94 | let (_, out_sink) = create_stream_sink(); 95 | let (stop, _) = Stop::new(); 96 | let _ = run_limit_data(in_stream, out_sink, stop, 0, None).await; 97 | } 98 | 99 | async fn test_limit_data( 100 | limit: u64, 101 | to_send: u64, 102 | prev_state: Option>>, 103 | expect_output: bool, 104 | ) { 105 | let (in_stream, mut in_sink) = create_stream_sink(); 106 | let (mut out_stream, out_sink) = create_stream_sink(); 107 | let (stop, stopper) = Stop::new(); 108 | let handle = tokio::spawn(run_limit_data(in_stream, out_sink, stop, limit, prev_state)); 109 | 110 | let data = gen_random_bytes(to_send.try_into().unwrap()); 111 | 112 | let mut expected = data.clone(); 113 | expected.truncate(limit as usize); 114 | 115 | assert_ok!(in_sink.send(data).await); 116 | if to_send == 0 || limit == 0 { 117 | assert_eq!(None, out_stream.next().await); 118 | } else if expect_output { 119 | let output = out_stream.next().await.unwrap(); 120 | assert_eq!(expected, output); 121 | } 122 | 123 | if to_send < limit { 124 | stopper.stop(); 125 | } 126 | let res = handle.await.unwrap(); 127 | assert_ok!(res); 128 | } 129 | 130 | fn make_state() -> Arc> { 131 | Arc::new(AsyncMutex::new( 132 | ToxicState::for_toxic_kind(&ToxicKind::LimitData { bytes: 0 }).unwrap(), 133 | )) 134 | } 135 | 136 | #[tokio::test] 137 | async fn limit_0() { 138 | test_limit_data(0, 32, Some(make_state()), false).await; 139 | } 140 | 141 | #[tokio::test] 142 | async fn send_below_limit() { 143 | test_limit_data(10000, 500, Some(make_state()), true).await; 144 | } 145 | 146 | #[tokio::test] 147 | async fn 
send_above_limit() { 148 | test_limit_data(42, 500, Some(make_state()), false).await; 149 | } 150 | 151 | #[tokio::test] 152 | async fn send_state_above_limit() { 153 | let state = Arc::new(AsyncMutex::new( 154 | ToxicState::for_toxic_kind(&ToxicKind::LimitData { bytes: 99999 }).unwrap(), 155 | )); 156 | test_limit_data(42, 500, Some(state), false).await; 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /core/src/toxics/mod.rs: -------------------------------------------------------------------------------- 1 | mod bandwidth; 2 | mod latency; 3 | mod limit_data; 4 | mod noop; 5 | mod slicer; 6 | mod slow_close; 7 | #[cfg(test)] 8 | mod test_utils; 9 | mod timeout; 10 | 11 | pub(crate) use bandwidth::*; 12 | pub(crate) use latency::*; 13 | pub(crate) use limit_data::*; 14 | pub(crate) use noop::*; 15 | pub(crate) use slicer::*; 16 | pub(crate) use slow_close::*; 17 | pub(crate) use timeout::*; 18 | -------------------------------------------------------------------------------- /core/src/toxics/noop.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use futures::StreamExt; 3 | use futures::{Sink, Stream}; 4 | use std::io; 5 | 6 | pub async fn run_noop( 7 | input: impl Stream, 8 | output: impl Sink, 9 | ) -> io::Result<()> { 10 | let _ = input.map(Ok).forward(output).await; 11 | Ok(()) 12 | } 13 | 14 | #[cfg(test)] 15 | mod tests { 16 | use super::*; 17 | use crate::toxics::test_utils::*; 18 | 19 | #[tokio::test] 20 | async fn passthrough_once() { 21 | passthrough_test(|stream, sink| async move { run_noop(stream, sink).await }).await; 22 | } 23 | 24 | #[tokio::test] 25 | async fn drop_out_channel_first() { 26 | drop_out_channel_first_test(|stream, sink| async move { run_noop(stream, sink).await }) 27 | .await; 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /core/src/toxics/slicer.rs: 
-------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use futures::{Sink, Stream}; 3 | use futures::{SinkExt, StreamExt}; 4 | use rand::{rngs::StdRng, Rng, SeedableRng}; 5 | use std::convert::TryInto; 6 | use std::io; 7 | use tokio::pin; 8 | use tokio::time::sleep; 9 | use tokio::time::Duration; 10 | 11 | /// Run the slicer toxic 12 | pub async fn run_slicer( 13 | input: impl Stream, 14 | output: impl Sink, 15 | average_size: u64, 16 | size_variation: u64, 17 | delay: u64, // microseconds 18 | rand_seed: Option, 19 | ) -> io::Result<()> { 20 | pin!(input); 21 | pin!(output); 22 | 23 | while let Some(chunk) = input.next().await { 24 | let slice_iter = SliceIter::new(chunk, average_size, size_variation, rand_seed); 25 | for slice in slice_iter { 26 | sleep(Duration::from_micros(delay)).await; 27 | if output.send(slice).await.is_err() { 28 | return Err(io::Error::new( 29 | io::ErrorKind::ConnectionReset, 30 | "Write channel closed", 31 | )); 32 | } 33 | } 34 | } 35 | Ok(()) 36 | } 37 | 38 | #[derive(Debug)] 39 | enum SliceIterKind { 40 | ConstantSized, 41 | VariableSized { 42 | size_variation: usize, 43 | // Boxed to keep the variant size variation small 44 | rand_gen: Box, 45 | }, 46 | } 47 | 48 | #[derive(Debug)] 49 | struct SliceIter { 50 | data: Option, 51 | average_size: usize, 52 | kind: SliceIterKind, 53 | } 54 | 55 | impl SliceIter { 56 | fn new( 57 | data: Bytes, 58 | average_size: u64, 59 | size_variation: u64, 60 | rand_seed: Option, 61 | ) -> SliceIter { 62 | let kind = if size_variation > 0 { 63 | let rand_gen = if let Some(seed) = rand_seed { 64 | StdRng::seed_from_u64(seed) 65 | } else { 66 | StdRng::from_entropy() 67 | }; 68 | SliceIterKind::VariableSized { 69 | size_variation: size_variation 70 | .try_into() 71 | .expect("Could not convert size_variation from u64 to usize"), 72 | rand_gen: Box::new(rand_gen), 73 | } 74 | } else { 75 | SliceIterKind::ConstantSized 76 | }; 77 | SliceIter { 78 | data: 
Some(data), 79 | average_size: average_size 80 | .try_into() 81 | .expect("Could not convert average_size from u64 to usize"), 82 | kind, 83 | } 84 | } 85 | 86 | fn slice_data(&mut self, position: usize) -> Option { 87 | if let Some(mut data) = self.data.take() { 88 | if data.len() > position { 89 | let slice = data.split_to(position); 90 | self.data = Some(data); 91 | Some(slice) 92 | } else if !data.is_empty() { 93 | Some(data) 94 | } else { 95 | None 96 | } 97 | } else { 98 | None 99 | } 100 | } 101 | } 102 | 103 | impl Iterator for SliceIter { 104 | type Item = Bytes; 105 | 106 | fn next(&mut self) -> Option { 107 | if self.data.is_some() { 108 | match &mut self.kind { 109 | SliceIterKind::ConstantSized => self.slice_data(self.average_size), 110 | SliceIterKind::VariableSized { 111 | size_variation, 112 | rand_gen, 113 | } => { 114 | let variation = *size_variation; 115 | let size = 116 | self.average_size + 2 * rand_gen.gen_range(1..=variation) - variation; 117 | self.slice_data(size) 118 | } 119 | } 120 | } else { 121 | None 122 | } 123 | } 124 | } 125 | 126 | #[cfg(test)] 127 | mod tests { 128 | use super::*; 129 | use crate::toxics::test_utils::*; 130 | 131 | #[tokio::test] 132 | async fn passthrough_once() { 133 | passthrough_test( 134 | |stream, sink| async move { run_slicer(stream, sink, 50, 0, 0, None).await }, 135 | ) 136 | .await; 137 | } 138 | 139 | #[tokio::test] 140 | async fn random_seed_passthrough_once() { 141 | passthrough_test(|stream, sink| async move { 142 | run_slicer(stream, sink, 50, 0, 0, Some(42)).await 143 | }) 144 | .await; 145 | } 146 | 147 | #[tokio::test] 148 | async fn random_seed_variation_passthrough_once() { 149 | passthrough_test(|stream, sink| async move { 150 | run_slicer(stream, sink, 50, 8, 0, Some(42)).await 151 | }) 152 | .await; 153 | } 154 | 155 | #[tokio::test] 156 | async fn variation_passthrough_once() { 157 | passthrough_test( 158 | |stream, sink| async move { run_slicer(stream, sink, 50, 8, 0, None).await }, 159 | 
) 160 | .await; 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /core/src/toxics/slow_close.rs: -------------------------------------------------------------------------------- 1 | use crate::signal::Stop; 2 | use bytes::Bytes; 3 | use futures::{Sink, SinkExt, Stream, StreamExt}; 4 | use std::io; 5 | use tokio::pin; 6 | use tokio::time::sleep; 7 | use tokio::time::Duration; 8 | 9 | /// The SlowClose prevents the proxy connection from closing until after a delay. 10 | pub(crate) async fn run_slow_close( 11 | input: impl Stream, 12 | output: impl Sink, 13 | mut stop: Stop, 14 | delay: u64, // in millis 15 | ) -> io::Result<()> { 16 | pin!(input); 17 | pin!(output); 18 | let mut res: io::Result<()> = Ok(()); 19 | while !stop.stop_received() { 20 | let maybe_chunk = tokio::select! { 21 | res = input.next() => res, 22 | _ = stop.recv() => None, 23 | }; 24 | if let Some(chunk) = maybe_chunk { 25 | if output.send(chunk).await.is_err() { 26 | res = Err(io::Error::new( 27 | io::ErrorKind::ConnectionReset, 28 | "Write channel closed", 29 | )); 30 | break; 31 | } 32 | } else { 33 | break; 34 | } 35 | } 36 | tracing::debug!("Slow close sleep for {}", delay); 37 | sleep(Duration::from_millis(delay)).await; 38 | tracing::debug!("Slow close closing {}", delay); 39 | res 40 | } 41 | 42 | #[cfg(test)] 43 | mod tests { 44 | use super::*; 45 | use crate::toxics::test_utils::*; 46 | use tokio_test::assert_ok; 47 | 48 | #[tokio::test] 49 | async fn passthrough_once() { 50 | let (stop, _) = Stop::new(); 51 | passthrough_test(|stream, sink| async move { run_slow_close(stream, sink, stop, 0).await }) 52 | .await; 53 | } 54 | 55 | #[tokio::test] 56 | async fn drop_out_channel_first_0_delay() { 57 | let (stop, stopper) = Stop::new(); 58 | 59 | let (in_stream, mut in_sink) = create_stream_sink(); 60 | let (mut out_stream, out_sink) = create_stream_sink(); 61 | let data = gen_random_bytes(32); 62 | let expected = Some(data.clone()); 63 | 
let handle = 64 | tokio::spawn(async move { run_slow_close(in_stream, out_sink, stop, 0).await }); 65 | 66 | assert_ok!(in_sink.send(data).await); 67 | assert_eq!(expected, out_stream.next().await); 68 | drop(out_stream); 69 | stopper.stop(); 70 | assert_ok!(handle.await.unwrap()); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /core/src/toxics/test_utils.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use futures::{ 3 | channel::mpsc::{channel, Receiver, Sender}, 4 | Future, 5 | }; 6 | use futures::{SinkExt, StreamExt}; 7 | use std::io; 8 | use tokio_test::assert_ok; 9 | 10 | pub(crate) fn create_stream_sink() -> (Receiver, Sender) { 11 | let (tx, rx) = channel(1); 12 | (rx, tx) 13 | } 14 | 15 | pub(crate) fn gen_random_bytes(length: usize) -> Bytes { 16 | let range = 0..length; 17 | range 18 | .map(|_| rand::random::()) 19 | .collect::>() 20 | .into() 21 | } 22 | 23 | pub(crate) async fn passthrough_test( 24 | make_handle: impl FnOnce(Receiver, Sender) -> F, 25 | ) where 26 | F: Future> + 'static + Send + Sync, 27 | { 28 | let (in_stream, mut in_sink) = create_stream_sink(); 29 | let (mut out_stream, out_sink) = create_stream_sink(); 30 | let data = gen_random_bytes(32); 31 | let expected = Some(data.clone()); 32 | let handle = tokio::spawn(make_handle(in_stream, out_sink)); 33 | 34 | assert_ok!(in_sink.send(data).await); 35 | drop(in_sink); 36 | assert_ok!(handle.await.unwrap()); 37 | assert_eq!(expected, out_stream.next().await); 38 | } 39 | 40 | pub(crate) async fn drop_out_channel_first_test( 41 | make_handle: impl FnOnce(Receiver, Sender) -> F, 42 | ) where 43 | F: Future> + 'static + Send + Sync, 44 | { 45 | let (in_stream, mut in_sink) = create_stream_sink(); 46 | let (out_stream, out_sink) = create_stream_sink(); 47 | let data = gen_random_bytes(32); 48 | let handle = tokio::spawn(make_handle(in_stream, out_sink)); 49 | 50 | 
assert_ok!(in_sink.send(data).await); 51 | drop(out_stream); 52 | let _ = handle.await; 53 | } 54 | -------------------------------------------------------------------------------- /core/src/toxics/timeout.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use futures::StreamExt; 3 | use futures::{Sink, Stream}; 4 | use std::io; 5 | use tokio::time::sleep; 6 | use tokio::time::Duration; 7 | use tokio::{io::AsyncWriteExt, pin}; 8 | 9 | /// The TimeoutToxic stops any data from flowing through, and will close the connection after a timeout. 10 | /// If the timeout is set to 0, then the connection will not be closed. 11 | pub async fn run_timeout( 12 | input: impl Stream, 13 | _output: impl Sink, 14 | timeout: u64, // in millis 15 | ) -> io::Result<()> { 16 | let mut drain = tokio::io::sink(); 17 | if timeout == 0 { 18 | pin!(input); 19 | // Drain the input until it's closed 20 | while let Some(chunk) = input.next().await { 21 | drain.write_all(&chunk).await?; 22 | } 23 | } else { 24 | input 25 | .take_until(sleep(Duration::from_millis(timeout))) 26 | .fold((), |_, _| async move {}) 27 | .await; 28 | } 29 | 30 | Err(io::Error::new( 31 | io::ErrorKind::TimedOut, 32 | format!("timeout after {}ms", timeout), 33 | )) 34 | } 35 | 36 | #[cfg(test)] 37 | mod tests { 38 | use super::*; 39 | use crate::toxics::test_utils::*; 40 | use futures::{SinkExt, StreamExt}; 41 | use tokio::time::{pause, resume}; 42 | use tokio_test::{assert_err, assert_ok}; 43 | 44 | async fn test_timeout(timeout: u64) { 45 | let (in_stream, mut in_sink) = create_stream_sink(); 46 | let (mut out_stream, out_sink) = create_stream_sink(); 47 | let handle = tokio::spawn(run_timeout(in_stream, out_sink, timeout)); 48 | 49 | assert_ok!(in_sink.send(gen_random_bytes(32)).await); 50 | assert_ok!(in_sink.send(gen_random_bytes(32)).await); 51 | drop(in_sink); 52 | let res = handle.await.unwrap(); 53 | assert_err!(res); 54 | assert_eq!(None, 
out_stream.next().await);
    }

    #[tokio::test]
    async fn dumps_data() {
        let timeout = 1u64;
        test_timeout(timeout).await;
    }

    #[tokio::test]
    async fn resolves_when_time_paused() {
        let timeout = 5000u64;
        pause();
        test_timeout(timeout).await;
        resume();
    }

    #[tokio::test]
    async fn timeout_0_resolves_when_time_paused() {
        let timeout = 0u64;
        pause();
        test_timeout(timeout).await;
        resume();
    }
}
-------------------------------------------------------------------------------- /scripts/flamegraph.sh: --------------------------------------------------------------------------------
#!/usr/bin/env sh

cargo flamegraph --bin noxious-server --dev -o target/flamegraph.svg

####
# SETUP
# cargo install flamegraph
# echo 0 | sudo tee /proc/sys/kernel/kptr_restrict
# echo 0 | sudo tee /proc/sys/kernel/perf_event_paranoid
-------------------------------------------------------------------------------- /scripts/install-semantic-release.sh: --------------------------------------------------------------------------------
#!/usr/bin/env sh

set -e

npm install -g semantic-release @semantic-release/exec
-------------------------------------------------------------------------------- /scripts/publish-image.sh: --------------------------------------------------------------------------------
#!/usr/bin/env bash

set -e

# Usage: publish-image.sh <version>
if [[ $# -eq 0 ]] ; then
    # A missing argument is an error: report on stderr and fail the caller.
    # (The original printed the message and exited 0, so CI would silently
    # "succeed" without publishing anything.)
    echo 'No version provided' >&2
    exit 1
fi

echo "Building and publishing version $1"

docker build -t "noxious:$1" .
docker image tag "noxious:$1" "oguzbilgener/noxious:$1"
docker image tag "noxious:$1" "oguzbilgener/noxious:latest"
docker push "oguzbilgener/noxious:$1"
# `docker push` only pushes the named tag; `latest` was tagged above but
# never published, so push it explicitly.
docker push "oguzbilgener/noxious:latest"
-------------------------------------------------------------------------------- /scripts/run-coverage.sh: --------------------------------------------------------------------------------
#!/usr/bin/env sh

cargo tarpaulin --ignore-tests --out Html
-------------------------------------------------------------------------------- /server/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "noxious-server"
version = "1.0.4"
authors = ["Oguz Bilgener "]
edition = "2018"
publish = false

[dependencies]
noxious = { path = "../core" }
tokio = { version = "1", features = ["macros", "sync", "net", "rt-multi-thread", "signal"] }
futures = { version = "0.3.12" }
bytes = "1.0.1"
serde_json = "^1.0.62"
serde = { version = "^1.0.123", features =
["derive"] } 15 | warp = { version = "0.3.0", features = [], default-features = false } 16 | async-trait = "0.1.47" 17 | tracing = { version = "0.1.25", features = ["attributes", "log", "log-always"] } 18 | tracing-log = { version = "0.1.2", features = ["env_logger", "log-tracer"] } 19 | tracing-opentelemetry = "0.11.0" 20 | log = "0.4.14" 21 | pretty_env_logger = "0.4.0" 22 | bmrng = "0.4.0" 23 | thiserror = "1.0.24" 24 | clap = { version = "3.0", features = ["derive"] } 25 | opentelemetry = "0.12.0" 26 | 27 | [dev-dependencies] 28 | tokio-test = "0.4.0" 29 | lazy_static = "1.4.0" 30 | mockall = "0.9.1" 31 | -------------------------------------------------------------------------------- /server/src/api/handlers.rs: -------------------------------------------------------------------------------- 1 | use crate::{error::StoreError, store::Store, util}; 2 | use noxious::{ 3 | proxy::{ProxyConfig, ProxyRunner}, 4 | socket::TcpListener, 5 | toxic::Toxic, 6 | }; 7 | use responses::*; 8 | use serde_json::Value as JsonValue; 9 | use std::convert::Infallible; 10 | use std::future::Future; 11 | use tracing::instrument; 12 | use warp::http::StatusCode; 13 | use warp::Reply; 14 | 15 | /// Remove all toxics from all proxies 16 | #[instrument(level = "info")] 17 | pub async fn reset_state(store: Store) -> Result { 18 | store.reset_state().await; 19 | Ok(StatusCode::NO_CONTENT) 20 | } 21 | 22 | /// Re-populate the toxics from the initial config, return a map of proxies with toxics 23 | #[instrument(level = "info", skip(store))] 24 | pub async fn populate(configs: Vec, store: Store) -> Result { 25 | wrap_store_result_with_status( 26 | async move { store.populate::(configs).await }, 27 | StatusCode::CREATED, 28 | ) 29 | .await 30 | } 31 | 32 | /// Get a key-value map of all proxies and their toxics in the system 33 | #[instrument(level = "info", skip(store))] 34 | pub async fn get_proxies(store: Store) -> Result { 35 | let result = store.get_proxies().await.and_then(|pairs| { 36 | 
let maybe_map: Result, serde_json::Error> = pairs 37 | .into_iter() 38 | .try_fold(serde_json::Map::new(), |mut acc, pair| { 39 | let key = pair.proxy.name.clone(); 40 | let value = serde_json::to_value(pair)?; 41 | acc.insert(key, value); 42 | Ok(acc) 43 | }); 44 | maybe_map.map_err(|_| StoreError::Other) 45 | }); 46 | use_store_result(result, StatusCode::OK) 47 | } 48 | 49 | /// Create a proxy, return it if successful 50 | #[instrument(level = "info", skip(store))] 51 | pub async fn create_proxy(proxy: ProxyConfig, store: Store) -> Result { 52 | wrap_store_result_with_status( 53 | async move { store.create_proxy::(proxy).await }, 54 | StatusCode::CREATED, 55 | ) 56 | .await 57 | } 58 | 59 | #[instrument(level = "info", skip(store))] 60 | pub async fn get_proxy(name: String, store: Store) -> Result { 61 | wrap_store_result(async move { store.get_proxy(&name).await }).await 62 | } 63 | 64 | #[instrument(level = "info", skip(store))] 65 | pub async fn update_proxy( 66 | name: String, 67 | new_proxy: ProxyConfig, 68 | store: Store, 69 | ) -> Result { 70 | wrap_store_result(async move { 71 | store 72 | .update_proxy::(&name, new_proxy) 73 | .await 74 | }) 75 | .await 76 | } 77 | 78 | #[instrument(level = "info", skip(store))] 79 | pub async fn remove_proxy(name: String, store: Store) -> Result { 80 | wrap_store_result_no_content(async move { store.remove_proxy(&name).await }).await 81 | } 82 | 83 | #[instrument(level = "info", skip(store))] 84 | pub async fn get_toxics(proxy_name: String, store: Store) -> Result { 85 | wrap_store_result(async move { store.get_toxics(&proxy_name).await }).await 86 | } 87 | 88 | #[instrument(level = "info", skip(store))] 89 | pub async fn create_toxic( 90 | proxy_name: String, 91 | toxic: Toxic, 92 | store: Store, 93 | ) -> Result { 94 | wrap_store_result(async move { store.create_toxic(proxy_name, toxic).await }).await 95 | } 96 | 97 | #[instrument(level = "info", skip(store))] 98 | pub async fn get_toxic( 99 | proxy_name: String, 100 
| toxic_name: String, 101 | store: Store, 102 | ) -> Result { 103 | wrap_store_result(async move { store.get_toxic(&proxy_name, &toxic_name).await }).await 104 | } 105 | 106 | #[instrument(level = "info", skip(store))] 107 | pub async fn update_toxic( 108 | proxy_name: String, 109 | toxic_name: String, 110 | new_toxic: Toxic, 111 | store: Store, 112 | ) -> Result { 113 | wrap_store_result(async move { store.update_toxic(proxy_name, toxic_name, new_toxic).await }) 114 | .await 115 | } 116 | 117 | #[instrument(level = "info", skip(store))] 118 | pub async fn remove_toxic( 119 | proxy_name: String, 120 | toxic_name: String, 121 | store: Store, 122 | ) -> Result { 123 | wrap_store_result_no_content(async move { store.remove_toxic(proxy_name, toxic_name).await }) 124 | .await 125 | } 126 | 127 | #[instrument(level = "info")] 128 | pub async fn get_version() -> Result { 129 | Ok(warp::reply::with_status( 130 | util::get_version(), 131 | StatusCode::OK, 132 | )) 133 | } 134 | 135 | mod responses { 136 | use serde::Serialize; 137 | use warp::reply::{json as json_reply, with_status}; 138 | 139 | use crate::error::{ApiErrorResponse, StoreError}; 140 | 141 | use super::*; 142 | 143 | pub async fn wrap_store_result( 144 | f: impl Future>, 145 | ) -> Result { 146 | use_store_result(f.await, StatusCode::OK) 147 | } 148 | 149 | pub async fn wrap_store_result_with_status( 150 | f: impl Future>, 151 | status_code: StatusCode, 152 | ) -> Result { 153 | use_store_result(f.await, status_code) 154 | } 155 | 156 | pub async fn wrap_store_result_no_content( 157 | f: impl Future>, 158 | ) -> Result { 159 | use_store_result_no_content(f.await) 160 | } 161 | 162 | pub fn use_store_result( 163 | result: Result, 164 | status_code: StatusCode, 165 | ) -> Result { 166 | match result { 167 | Ok(data) => Ok(with_status(json_reply(&data), status_code).into_response()), 168 | Err(err) => { 169 | let data: ApiErrorResponse = err.into(); 170 | Ok(data.into()) 171 | } 172 | } 173 | } 174 | 175 | pub 
fn use_store_result_no_content( 176 | result: Result<(), StoreError>, 177 | ) -> Result { 178 | match result { 179 | Ok(_) => Ok(StatusCode::NO_CONTENT.into_response()), 180 | Err(err) => { 181 | let data: ApiErrorResponse = err.into(); 182 | Ok(data.into()) 183 | } 184 | } 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /server/src/api/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::store::Store; 2 | use crate::util; 3 | use noxious::signal::Stop; 4 | use std::{convert::Infallible, net::SocketAddr}; 5 | use tracing::{debug, info}; 6 | use warp::{Filter, Reply}; 7 | 8 | // rest api 9 | mod filters; 10 | mod handlers; 11 | 12 | fn make_filters(store: Store) -> impl Filter + Clone { 13 | use filters::*; 14 | 15 | disallow_browsers() 16 | .or(reset(store.clone()) 17 | .or(populate(store.clone())) 18 | .or(version())) 19 | .or(get_toxic(store.clone()) 20 | .or(update_toxic(store.clone())) 21 | .or(create_toxic(store.clone()))) 22 | .or(remove_toxic(store.clone()).or(get_toxics(store.clone()))) 23 | .or(update_proxy(store.clone()).or(get_proxy(store.clone()))) 24 | .or(create_proxy(store.clone()) 25 | .or(remove_proxy(store.clone())) 26 | .or(get_proxies(store))) 27 | .recover(handle_errors) 28 | } 29 | 30 | /// Serve the API server 31 | /// Panics if the the provided SocketAddr is invalid or unavailable. 32 | pub async fn serve(addr: SocketAddr, store: Store, mut stop: Stop) { 33 | let version = util::get_version(); 34 | info!( 35 | addr = ?addr, 36 | version = ?version, 37 | "API HTTP server starting" 38 | ); 39 | 40 | let api = make_filters(store); 41 | let routes = api.with(warp::log("noxious")); 42 | tokio::select! 
{ 43 | _ = warp::serve(routes).run(addr) => {}, 44 | _ = stop.recv() => {}, 45 | }; 46 | debug!("API HTTP server shutting down"); 47 | } 48 | -------------------------------------------------------------------------------- /server/src/args.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use std::net::{IpAddr, Ipv4Addr}; 3 | 4 | /// A Rust port of Toxiproxy server 5 | #[derive(Parser, Debug)] 6 | pub struct Args { 7 | /// The host to listen on for the API server 8 | #[clap(short, long, default_value = "127.0.0.1")] 9 | pub host: String, 10 | /// The port to listen on for the API server 11 | #[clap(short, long, default_value = "8474")] 12 | pub port: String, 13 | /// json file containing proxies to create on startup 14 | #[clap(short, long)] 15 | pub config: Option, 16 | /// Seed for randomizing toxics with 17 | #[clap(long)] 18 | pub seed: Option, 19 | } 20 | 21 | impl Args { 22 | pub fn get_ip_addr(&self) -> IpAddr { 23 | if self.host == "localhost" { 24 | IpAddr::V4(Ipv4Addr::LOCALHOST) 25 | } else { 26 | self.host.parse().expect("Invalid host address") 27 | } 28 | } 29 | pub fn get_port_number(&self) -> u16 { 30 | self.port.parse().expect("Invalid port number") 31 | } 32 | } 33 | 34 | #[cfg(test)] 35 | mod tests { 36 | use std::net::Ipv4Addr; 37 | 38 | use super::*; 39 | 40 | #[test] 41 | fn parses_ip_addr() { 42 | let input = Args { 43 | host: "127.0.0.1".to_owned(), 44 | port: "5555".to_owned(), 45 | config: None, 46 | seed: None, 47 | }; 48 | let addr = input.get_ip_addr(); 49 | let expected = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); 50 | assert_eq!(expected, addr); 51 | } 52 | 53 | #[test] 54 | fn parses_localhost() { 55 | let input = Args { 56 | host: "localhost".to_owned(), 57 | port: "5555".to_owned(), 58 | config: None, 59 | seed: None, 60 | }; 61 | let addr = input.get_ip_addr(); 62 | let expected = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); 63 | assert_eq!(expected, addr); 64 | } 65 | 66 | 
#[test] 67 | fn parses_port_num() { 68 | let input = Args { 69 | host: "127.0.0.1".to_owned(), 70 | port: "5555".to_owned(), 71 | config: None, 72 | seed: None, 73 | }; 74 | let port = input.get_port_number(); 75 | let expected = 5555u16; 76 | assert_eq!(expected, port); 77 | } 78 | 79 | #[test] 80 | #[should_panic] 81 | fn panics_on_invalid_ip() { 82 | let input = Args { 83 | host: "127.0.0.1.2".to_owned(), 84 | port: "5555".to_owned(), 85 | config: None, 86 | seed: None, 87 | }; 88 | let _addr = input.get_ip_addr(); 89 | } 90 | 91 | #[test] 92 | #[should_panic] 93 | fn panics_on_invalid_port() { 94 | let input = Args { 95 | host: "127.0.0.1".to_owned(), 96 | port: "555511111".to_owned(), 97 | config: None, 98 | seed: None, 99 | }; 100 | let _port = input.get_port_number(); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /server/src/error.rs: -------------------------------------------------------------------------------- 1 | use noxious::{error::ToxicUpdateError, proxy::ProxyValidateError}; 2 | use serde::Serialize; 3 | use std::io; 4 | use thiserror::Error; 5 | use warp::{ 6 | http::StatusCode, 7 | reply::{json as json_reply, with_status, Response}, 8 | Reply, 9 | }; 10 | 11 | #[derive(Debug, Clone, PartialEq)] 12 | pub enum ResourceKind { 13 | Toxic, 14 | Proxy, 15 | } 16 | 17 | #[derive(Debug, Clone, Error, PartialEq)] 18 | pub enum StoreError { 19 | #[error("Missing required field")] 20 | InvalidProxyConfig(ProxyValidateError), 21 | #[error("An item with this name already exists")] 22 | AlreadyExists, 23 | #[error("{0} not found")] 24 | NotFound(ResourceKind), 25 | #[error("I/O error: {0:?}")] 26 | IoError(io::ErrorKind), 27 | #[error("Proxy closed")] 28 | ProxyClosed, 29 | #[error("Internal server error")] 30 | Other, 31 | } 32 | 33 | #[derive(Debug, Clone, PartialEq, Serialize)] 34 | pub struct ApiErrorResponse { 35 | #[serde(rename = "error")] 36 | message: String, 37 | #[serde(rename = "status")] 38 | 
status_code: u16, 39 | #[serde(skip)] 40 | pub code: StatusCode, 41 | } 42 | 43 | impl From for StatusCode { 44 | fn from(err: StoreError) -> Self { 45 | match err { 46 | StoreError::InvalidProxyConfig(..) => StatusCode::BAD_REQUEST, 47 | StoreError::AlreadyExists => StatusCode::CONFLICT, 48 | StoreError::NotFound(..) => StatusCode::NOT_FOUND, 49 | StoreError::ProxyClosed | StoreError::IoError(..) | StoreError::Other => { 50 | StatusCode::INTERNAL_SERVER_ERROR 51 | } 52 | } 53 | } 54 | } 55 | 56 | impl From for StoreError { 57 | fn from(err: io::Error) -> Self { 58 | StoreError::IoError(err.kind()) 59 | } 60 | } 61 | 62 | impl From for StoreError { 63 | fn from(err: ProxyValidateError) -> Self { 64 | StoreError::InvalidProxyConfig(err) 65 | } 66 | } 67 | 68 | impl From for StoreError { 69 | fn from(err: ToxicUpdateError) -> Self { 70 | match err { 71 | ToxicUpdateError::NotFound => StoreError::NotFound(ResourceKind::Toxic), 72 | ToxicUpdateError::Other => StoreError::Other, 73 | } 74 | } 75 | } 76 | 77 | impl std::fmt::Display for ResourceKind { 78 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 79 | match self { 80 | ResourceKind::Toxic => write!(f, "toxic"), 81 | ResourceKind::Proxy => write!(f, "proxy"), 82 | } 83 | } 84 | } 85 | 86 | impl From for ApiErrorResponse { 87 | fn from(err: StoreError) -> Self { 88 | let message = err.to_string(); 89 | let code: StatusCode = err.into(); 90 | ApiErrorResponse { 91 | message, 92 | status_code: code.as_u16(), 93 | code, 94 | } 95 | } 96 | } 97 | 98 | impl ApiErrorResponse { 99 | pub fn new(message: &str, code: StatusCode) -> Self { 100 | ApiErrorResponse { 101 | message: message.to_owned(), 102 | status_code: code.as_u16(), 103 | code, 104 | } 105 | } 106 | } 107 | 108 | impl From for Response { 109 | fn from(resp: ApiErrorResponse) -> Self { 110 | with_status(json_reply(&resp), resp.code).into_response() 111 | } 112 | } 113 | 114 | #[cfg(test)] 115 | mod tests { 116 | use super::*; 117 | 118 | 
#[test] 119 | fn proxy_validate_error_into_store_error() { 120 | let err: StoreError = ProxyValidateError::MissingName.into(); 121 | assert_eq!( 122 | StoreError::InvalidProxyConfig(ProxyValidateError::MissingName), 123 | err 124 | ); 125 | } 126 | 127 | #[test] 128 | fn toxic_update_error_into_store_error() { 129 | let err: StoreError = ToxicUpdateError::NotFound.into(); 130 | assert_eq!(StoreError::NotFound(ResourceKind::Toxic), err); 131 | 132 | let err: StoreError = ToxicUpdateError::Other.into(); 133 | assert_eq!(StoreError::Other, err); 134 | } 135 | 136 | #[test] 137 | fn io_error_into_store_error() { 138 | let err: StoreError = io::Error::new(io::ErrorKind::AddrInUse, ":(").into(); 139 | assert_eq!(StoreError::IoError(io::ErrorKind::AddrInUse), err); 140 | } 141 | 142 | #[test] 143 | fn other_server_error_into_status_code() { 144 | let code: StatusCode = StoreError::Other.into(); 145 | assert_eq!(StatusCode::INTERNAL_SERVER_ERROR, code); 146 | 147 | let code: StatusCode = StoreError::ProxyClosed.into(); 148 | assert_eq!(StatusCode::INTERNAL_SERVER_ERROR, code); 149 | 150 | let err: StoreError = io::Error::new(io::ErrorKind::AddrInUse, ":(").into(); 151 | let code: StatusCode = err.into(); 152 | assert_eq!(StatusCode::INTERNAL_SERVER_ERROR, code); 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /server/src/file.rs: -------------------------------------------------------------------------------- 1 | use noxious::{ 2 | proxy::{ProxyConfig, Runner}, 3 | socket::SocketListener, 4 | }; 5 | use std::io; 6 | use tokio::{fs::File, io::AsyncReadExt}; 7 | use tracing::{error, info}; 8 | 9 | use crate::store::Store; 10 | 11 | pub async fn get_proxy_configs(file_path: &str) -> io::Result> { 12 | let mut file = File::open(file_path).await?; 13 | // The Toxiproxy config file is one big array and it looks like serde_json's 14 | // streaming API doesn't support that. 
So we just read the whole file into the 15 | // memory and parse it at once. This should be fine since the config file is not 16 | // expected to be large. 17 | // Related issue: https://github.com/serde-rs/json/issues/404 18 | 19 | let mut buffer = Vec::new(); 20 | 21 | // read the whole file 22 | file.read_to_end(&mut buffer).await?; 23 | 24 | let proxy_configs: Vec = serde_json::from_slice(&buffer) 25 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; 26 | Ok(proxy_configs) 27 | } 28 | 29 | pub fn populate_initial_proxy_configs(file_path: &str, store: Store) 30 | where 31 | L: SocketListener + 'static, 32 | R: Runner + 'static, 33 | { 34 | let file_path = file_path.to_owned(); 35 | tokio::spawn(async move { 36 | match get_proxy_configs(&file_path).await { 37 | Ok(configs) => { 38 | let length = configs.len(); 39 | if let Err(err) = store.populate::(configs).await { 40 | error!(err = ?err, "Failed to populate store from proxy configs"); 41 | } else { 42 | let config: &str = &file_path; 43 | info!(config, proxies = length, "Populated proxies from file"); 44 | } 45 | } 46 | Err(err) => { 47 | error!(config = ?&file_path, err = ?err, "Error reading config file"); 48 | } 49 | } 50 | }); 51 | } 52 | -------------------------------------------------------------------------------- /server/src/main.rs: -------------------------------------------------------------------------------- 1 | use crate::args::Args; 2 | use clap::Parser; 3 | use noxious::{proxy::ProxyRunner, signal::Stop, socket::TcpListener}; 4 | use std::net::SocketAddr; 5 | use tokio::signal; 6 | use tracing::{debug, info}; 7 | 8 | use crate::{file::populate_initial_proxy_configs, store::Store}; 9 | 10 | mod api; 11 | mod args; 12 | mod error; 13 | mod file; 14 | mod store; 15 | mod util; 16 | 17 | #[tokio::main] 18 | async fn main() { 19 | util::init_tracing(); 20 | 21 | let args: Args = Args::parse(); 22 | 23 | let (stop, stopper) = Stop::new(); 24 | 25 | let store = 
Store::new(stop.clone(), args.seed); 26 | 27 | if let Some(config_file_path) = &args.config { 28 | populate_initial_proxy_configs::(config_file_path, store.clone()); 29 | } else { 30 | debug!("No config file path provided"); 31 | } 32 | 33 | tokio::spawn(async move { 34 | let _ = signal::ctrl_c().await; 35 | info!("Shutting down"); 36 | stopper.stop(); 37 | }); 38 | 39 | api::serve( 40 | SocketAddr::new(args.get_ip_addr(), args.get_port_number()), 41 | store, 42 | stop, 43 | ) 44 | .await; 45 | } 46 | -------------------------------------------------------------------------------- /server/src/util.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | pub fn init_tracing() { 4 | if env::var_os("RUST_LOG").is_none() { 5 | env::set_var("RUST_LOG", "noxious=info"); 6 | } 7 | pretty_env_logger::init(); 8 | // tracing_log::env_logger::init(); 9 | // LogTracer::init().expect("Failed to set a global logger"); 10 | } 11 | 12 | pub fn get_version() -> &'static str { 13 | env!("CARGO_PKG_VERSION") 14 | } 15 | --------------------------------------------------------------------------------