├── .github ├── dependabot.yml └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── Dockerfile ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── examples └── example.rs ├── manifest.toml └── src ├── background.rs ├── client.rs ├── errors.rs ├── events.rs ├── lib.rs ├── network_conf.rs ├── params.rs ├── requests.rs └── responses.rs /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "cargo" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | - package-ecosystem: "github-actions" 8 | directory: "/" 9 | schedule: 10 | interval: "weekly" 11 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # Based on https://github.com/actions-rs/meta/blob/master/recipes/quickstart.md 2 | 3 | on: [push, pull_request] 4 | 5 | name: CI 6 | 7 | jobs: 8 | check: 9 | name: Check 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout sources 13 | uses: actions/checkout@v3 14 | 15 | - name: Install stable toolchain 16 | uses: dtolnay/rust-toolchain@stable 17 | 18 | - name: Run cargo check 19 | run: cargo check 20 | shell: bash 21 | 22 | test: 23 | name: Test Suite 24 | runs-on: ubuntu-latest 25 | steps: 26 | - name: Checkout sources 27 | uses: actions/checkout@v3 28 | 29 | - name: Install stable toolchain 30 | uses: dtolnay/rust-toolchain@stable 31 | 32 | - name: Run cargo test 33 | run: cargo test 34 | shell: bash 35 | 36 | lints: 37 | name: Lints 38 | runs-on: ubuntu-latest 39 | steps: 40 | - name: Checkout sources 41 | uses: actions/checkout@v3 42 | 43 | - name: Install stable toolchain 44 | uses: dtolnay/rust-toolchain@stable 45 | with: 46 | components: rustfmt, clippy 47 | 48 | - name: Run cargo fmt 49 | run: cargo fmt --all -- --check 50 | shell: bash 51 | 52 | - name: Run cargo clippy 53 | run: cargo clippy --all -- -D warnings 54 | shell: bash 55 | 56 | testground: 57 | name: Testground runs 58 | runs-on: ubuntu-latest 59 | defaults: 60 | run: 61 | shell: bash 62 | steps: 63 | - name: Checkout sources 64 | uses: actions/checkout@v3 65 | with: 66 | path: sdk-rust 67 | 68 | - name: Checkout testground 69 | uses: actions/checkout@v3 70 | with: 71 | path: testground 72 | repository: testground/testground 73 | 74 | - name: Setup Go 75 | uses: actions/setup-go@v4 76 | with: 77 | go-version: "1.16.x" 78 | 79 | - name: Install testground 80 | run: make install 81 | working-directory: testground 82 | 83 | - name: Run testground daemon 84 | run: testground daemon > daemon.out 2> daemon.err & 85 | working-directory: testground 86 | 87 | - name: Import testground plans 88 | run: testground plan import --from sdk-rust 89 | 90 | - name: Check testground daemon health 91 | run: 92 | echo "Waiting for Testground to launch on 8042..."; 93 | while ! 
nc -z localhost 8042; do 94 | sleep 1; 95 | done; 96 | echo "Testground launched"; 97 | testground healthcheck --runner local:docker --fix; 98 | shell: bash 99 | 100 | - name: Run testground plan (case=example) 101 | run: | 102 | testground run single \ 103 | --plan=sdk-rust \ 104 | --testcase=example \ 105 | --builder=docker:generic \ 106 | --runner=local:docker \ 107 | --instances=1 \ 108 | --wait \ 109 | --collect \ 110 | --collect-file ./result_example.tgz 111 | 112 | - name: Run testground plan (case=publish-subscribe) 113 | run: | 114 | testground run single \ 115 | --plan=sdk-rust \ 116 | --testcase=publish-subscribe \ 117 | --builder=docker:generic \ 118 | --runner=local:docker \ 119 | --instances=2 \ 120 | --wait \ 121 | --collect \ 122 | --collect-file ./result_publish_subscribe.tgz 123 | 124 | - uses: actions/upload-artifact@v3 125 | if: ${{ always() }} 126 | with: 127 | name: testground-output 128 | path: | 129 | testground/daemon.* 130 | result*.tgz 131 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## [0.5.0] - unreleased 8 | ### Added 9 | - Write events to `run.out`. See [PR 45]. 10 | 11 | ### Change 12 | - Change `RunParameters::test_start_time` from `String` to `DateTime` for ease of use. See [PR 41]. 13 | 14 | [PR 41]: https://github.com/testground/sdk-rust/pull/41 15 | [PR 45]: https://github.com/testground/sdk-rust/pull/45 16 | 17 | ## [0.4.0] 18 | ### Added 19 | - Add `global_seq` and `group_seq` to `Client`, sequence numbers assigned to test instance by the sync service. See [PR 29] 20 | 21 | ### Change 22 | - Move `RunParameters` to a field on `Client`. See [PR 29]. 23 | 24 | - Don't wait for network when no sidecar. See [PR 27]. 25 | 26 | - Make `RunParameters::test_group_instance_count` a `u64` to be consistent with 27 | `RunParameters::test_instance_count`. See [PR 26]. 28 | 29 | - Replace `Client::new` with `Client::new_and_init`, waiting for the network to 30 | initialize, claiming global and group sequence numbers, as well as waiting for 31 | other instances to do the same. Also makes `Client::wait_network_initialized` 32 | private, as it is included in `Client::new_and_init` now. See [PR 25]. 33 | 34 | - Use a JSON Payload instead of string in `publish` and `subscribe`. See [PR 34]. 35 | 36 | - Make footgun `Client::subscribe` explicit, requiring user to provide a 37 | capacity. See [PR 36]. 38 | 39 | [PR 26]: https://github.com/testground/sdk-rust/pull/26 40 | [PR 25]: https://github.com/testground/sdk-rust/pull/25 41 | [PR 27]: https://github.com/testground/sdk-rust/pull/27 42 | [PR 29]: https://github.com/testground/sdk-rust/pull/29 43 | [PR 34]: https://github.com/testground/sdk-rust/pull/34 44 | [PR 36]: https://github.com/testground/sdk-rust/pull/36 45 | 46 | ## [0.3.0] 47 | ### Added 48 | 49 | - Add `RunParameters::data_network_ip` for ease of finding the IP within the data network assigned to the instance. See [PR 22]. 
50 | 51 | ### Change 52 | - Change `RunParameters::test_instance_params` from `String` to `HashMap` which contains key-value pairs from parsing the parameter string. See [PR 19]. 53 | 54 | [PR 19]: https://github.com/testground/sdk-rust/pull/19 55 | [PR 22]: https://github.com/testground/sdk-rust/pull/22 56 | 57 | ## [0.2.0] 58 | ### Added 59 | - Add PubSub, Network Shaping & Metrics. See [PR 6]. 60 | 61 | ### Change 62 | - Take ownership of `Client` when signaling success / failure. See [PR 7]. 63 | 64 | ### Fixed 65 | - Make events payload compatible with the go-sdk. See [PR 14] 66 | 67 | [PR 6]: https://github.com/testground/sdk-rust/pull/6 68 | [PR 7]: https://github.com/testground/sdk-rust/pull/7 69 | [PR 14]: https://github.com/testground/sdk-rust/pull/14 70 | 71 | ## [0.1.1] 72 | ### Added 73 | - Add `Client::publish_success` to signal instance success to daemon and sync service. See [PR 5]. 74 | 75 | [PR 5]: https://github.com/testground/sdk-rust/pull/5 76 | 77 | ## [0.1.0] - 2022-01-24 78 | ### Added 79 | - Add initial scaffolding with basic synchronization client. See [PR 1]. 80 | 81 | [PR 1]: https://github.com/testground/sdk-rust/pull/1 82 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Max Inden "] 3 | description = "The Rust SDK for developing Testground test plans." 4 | documentation = "https://docs.rs/testground" 5 | edition = "2021" 6 | license = "Apache-2.0 OR MIT" 7 | name = "testground" 8 | repository = "https://github.com/testground/sdk-rust/" 9 | version = "0.5.0" 10 | 11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 12 | 13 | [dependencies] 14 | chrono = { version = "0.4", default-features = false, features = ["std"] } 15 | clap = { version = "3", default-features = false, features = ["std", "derive", "env"] } 16 | futures = { version = "0.3", default-features = false, features = [] } 17 | if-addrs = "0.7.0" 18 | influxdb = { version = "0.5", default-features = false, features = ["reqwest", "serde", "serde_json", "derive"] } 19 | ipnetwork = { version = "0.20.0", default-features = false, features = ["serde"] } 20 | log = "0.4" 21 | soketto = { version = "0.7", default-features = false, features = [] } 22 | serde = { version = "1", default-features = false, features = ["derive"] } 23 | serde_json = { version = "1", default-features = false, features = ["std"] } 24 | serde_repr = "0.1.7" 25 | serde_with = { version = "2", default-features = false, features = ["macros"] } 26 | thiserror = { version = "1", default-features = false, features = [] } 27 | tokio = { version = "1", default-features = false, features = ["sync", "rt-multi-thread", "macros", "net"] } 28 | tokio-stream = { version = "0.1", default-features = false, features = [] } 29 | tokio-util = { version = "0.7", default-features = false, features = ["compat"] } 30 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.62-bullseye as builder 2 | WORKDIR /usr/src/sdk-rust 3 | 4 | # Cache dependencies between test runs, 5 | # See https://blog.mgattozzi.dev/caching-rust-docker-builds/ 6 | # And https://github.com/rust-lang/cargo/issues/2644 7 | 8 | RUN mkdir -p ./plan/src/ 9 | RUN echo "fn main() { println!(\"If you see this message, you may want to clean up the 
target directory or the Docker build cache.\") }" > ./plan/src/main.rs 10 | COPY ./plan/Cargo.* ./plan/ 11 | RUN cd ./plan/ && cargo build 12 | 13 | COPY . . 14 | 15 | # This is in order to make sure `main.rs`s mtime timestamp is updated to avoid the dummy `main` 16 | # remaining in the release binary. 17 | # https://github.com/rust-lang/cargo/issues/9598 18 | RUN touch ./plan/src/main.rs 19 | 20 | RUN cd ./plan/ && cargo build --example example 21 | 22 | FROM debian:bullseye-slim 23 | COPY --from=builder /usr/src/sdk-rust/plan/target/debug/examples/example /usr/local/bin/example 24 | EXPOSE 6060 25 | ENTRYPOINT [ "example"] -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rust Testground SDK 2 | 3 | ![Testground logo](https://raw.githubusercontent.com/testground/pm/master/logo/TG_Banner_GitHub.jpg) 4 | 5 | [![CI](https://github.com/testground/sdk-rust/actions/workflows/ci.yml/badge.svg)](https://github.com/testground/sdk-rust/actions/workflows/ci.yml) 6 | [![Crate](https://img.shields.io/crates/v/testground.svg)](https://crates.io/crates/testground) 7 | [![API](https://docs.rs/testground/badge.svg)](https://docs.rs/testground) 8 | 9 | This repository contains the Rust SDK for developing [Testground](https://github.com/testground/testground) test plans. 10 | 11 | ## License 12 | 13 | Licensed under either of 14 | 15 | * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 16 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 17 | 18 | at your option. 19 | 20 | #### Contribution 21 | 22 | Unless you explicitly state otherwise, any contribution intentionally submitted 23 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 24 | dual licensed as above, without any additional terms or conditions. 
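## Usage

A test plan is a regular binary that talks to the Testground daemon and sync service through `testground::client::Client`. The sketch below is a trimmed-down version of [`examples/example.rs`](examples/example.rs); it assumes the `greeting` string parameter declared in `manifest.toml` and a `Cargo.toml` that depends on `testground` and `tokio`.

```rust
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect to the sync service, wait for the network to be initialized and
    // claim global/group sequence numbers.
    let client = testground::client::Client::new_and_init().await?;

    // Test parameters are exposed as a key-value map.
    let greeting = client
        .run_parameters()
        .test_instance_params
        .get("greeting")
        .unwrap()
        .clone();

    client.record_message(format!("{}, sdk-rust!", greeting));

    // Signal success to the daemon and sync service.
    client.record_success().await?;

    Ok(())
}
```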
25 | -------------------------------------------------------------------------------- /examples/example.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | 3 | use tokio_stream::StreamExt; 4 | 5 | #[tokio::main] 6 | async fn main() -> Result<(), Box> { 7 | let client = testground::client::Client::new_and_init().await?; 8 | 9 | match client.run_parameters().test_case.as_str() { 10 | "example" => example(client).await, 11 | "publish-subscribe" => publish_subscribe(client).await, 12 | _ => panic!("Unknown test case: {}", client.run_parameters().test_case), 13 | } 14 | } 15 | 16 | async fn example(client: testground::client::Client) -> Result<(), Box> { 17 | client.record_message(format!( 18 | "{}, sdk-rust!", 19 | client 20 | .run_parameters() 21 | .test_instance_params 22 | .get("greeting") 23 | .unwrap() 24 | )); 25 | 26 | client.record_success().await?; 27 | 28 | Ok(()) 29 | } 30 | 31 | async fn publish_subscribe( 32 | client: testground::client::Client, 33 | ) -> Result<(), Box> { 34 | client.record_message("running the publish_subscribe test"); 35 | 36 | match client.global_seq() { 37 | 1 => { 38 | client.record_message("I am instance 1: acting as the leader"); 39 | 40 | let json = serde_json::json!({"foo": "bar"}); 41 | client.publish("demonstration", Cow::Owned(json)).await?; 42 | client.record_success().await?; 43 | } 44 | _ => { 45 | client.record_message(format!( 46 | "I am instance {}: acting as a follower", 47 | client.global_seq() 48 | )); 49 | 50 | let payload = client 51 | .subscribe("demonstration", u16::MAX.into()) 52 | .await 53 | .take(1) 54 | .map(|x| x.unwrap()) 55 | .next() 56 | .await 57 | .unwrap(); 58 | 59 | client.record_message(format!("I received the payload: {}", payload)); 60 | 61 | if payload["foo"].as_str() == Some("bar") { 62 | client.record_success().await?; 63 | } else { 64 | client 65 | .record_failure(format!("invalid payload: {}", payload)) 66 | .await?; 67 | } 68 | } 69 | } 70 | Ok(()) 71 | } 72 | -------------------------------------------------------------------------------- /manifest.toml: -------------------------------------------------------------------------------- 1 | name = "sdk-rust" 2 | 3 | [defaults] 4 | builder = "docker:generic" 5 | runner = "local:docker" 6 | 7 | [builders."docker:generic"] 8 | enabled = true 9 | 10 | [runners."local:docker"] 11 | enabled = true 12 | 13 | [[testcases]] 14 | name = "example" 15 | instances = { min = 1, max = 1, default = 1 } 16 | 17 | [testcases.params] 18 | greeting = { type = "string", desc = "greeting", default = "Hello" } 19 | 20 | [[testcases]] 21 | name = "publish-subscribe" 22 | instances = { min = 2, max = 10, default = 2 } -------------------------------------------------------------------------------- /src/background.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use futures::stream::StreamExt; 4 | use influxdb::{Client, WriteQuery}; 5 | use soketto::handshake::ServerResponse; 6 | use tokio::sync::{mpsc, oneshot}; 7 | use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; 8 | 9 | use crate::events::LogLine; 10 | use crate::{ 11 | errors::Error, 12 | events::{Event, EventType}, 13 | network_conf::NetworkConfiguration, 14 | params::RunParameters, 15 | requests::{PayloadType, Request, RequestType}, 16 | responses::{RawResponse, Response, ResponseType}, 17 | }; 18 | 19 | const WEBSOCKET_RECEIVER: &str = "Websocket Receiver"; 20 | 21 | #[derive(Debug)] 22 | 
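/// A command sent from the `Client` to the `BackgroundTask`. Each variant carries a channel (a oneshot sender, or an mpsc sender for `Subscribe`) on which the result is delivered back to the caller.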
pub enum Command { 23 | Publish { 24 | topic: String, 25 | message: serde_json::Value, 26 | sender: oneshot::Sender>, 27 | }, 28 | Subscribe { 29 | topic: String, 30 | stream: mpsc::Sender>, 31 | }, 32 | 33 | SignalEntry { 34 | state: String, 35 | sender: oneshot::Sender>, 36 | }, 37 | 38 | Barrier { 39 | state: String, 40 | target: u64, 41 | sender: oneshot::Sender>, 42 | }, 43 | 44 | WaitNetworkInitializedStart { 45 | sender: oneshot::Sender>, 46 | }, 47 | 48 | WaitNetworkInitializedBarrier { 49 | sender: oneshot::Sender>, 50 | }, 51 | 52 | WaitNetworkInitializedEnd { 53 | sender: oneshot::Sender>, 54 | }, 55 | 56 | NetworkShaping { 57 | config: NetworkConfiguration, 58 | sender: oneshot::Sender>, 59 | }, 60 | 61 | SignalSuccess { 62 | sender: oneshot::Sender>, 63 | }, 64 | 65 | SignalFailure { 66 | error: String, 67 | sender: oneshot::Sender>, 68 | }, 69 | 70 | SignalCrash { 71 | error: String, 72 | stacktrace: String, 73 | sender: oneshot::Sender>, 74 | }, 75 | 76 | Metric { 77 | write_query: WriteQuery, 78 | sender: oneshot::Sender>, 79 | }, 80 | } 81 | 82 | #[derive(Debug)] 83 | enum PendingRequest { 84 | PublishOrSignal { 85 | sender: oneshot::Sender>, 86 | }, 87 | Barrier { 88 | sender: oneshot::Sender>, 89 | }, 90 | Subscribe { 91 | stream: mpsc::Sender>, 92 | }, 93 | } 94 | 95 | pub struct BackgroundTask { 96 | websocket_tx: soketto::Sender>, 97 | websocket_rx: futures::stream::BoxStream<'static, Result, soketto::connection::Error>>, 98 | 99 | influxdb: Client, 100 | 101 | next_id: u64, 102 | 103 | params: RunParameters, 104 | 105 | client_rx: mpsc::Receiver, 106 | 107 | pending_req: HashMap, 108 | } 109 | 110 | impl BackgroundTask { 111 | pub async fn new( 112 | client_rx: mpsc::Receiver, 113 | params: RunParameters, 114 | ) -> Result> { 115 | let (websocket_tx, websocket_rx) = { 116 | let socket = tokio::net::TcpStream::connect(("testground-sync-service", 5050)).await?; 117 | 118 | let mut client = soketto::handshake::Client::new(socket.compat(), "...", "/"); 119 | match client.handshake().await? { 120 | ServerResponse::Redirect { 121 | status_code, 122 | location, 123 | } => { 124 | return Err(std::io::Error::new( 125 | std::io::ErrorKind::Other, 126 | format!( 127 | "Remote redirected to {}. Status code {}", 128 | location, status_code 129 | ), 130 | ) 131 | .into()) 132 | } 133 | ServerResponse::Rejected { status_code } => { 134 | return Err(std::io::Error::new( 135 | std::io::ErrorKind::ConnectionRefused, 136 | format!("Remote refused connection. 
Status code {}", status_code), 137 | ) 138 | .into()) 139 | } 140 | _ => {} 141 | }; 142 | let (tx, rx) = client.into_builder().finish(); 143 | 144 | let socket_packets = futures::stream::unfold(rx, move |mut rx| async { 145 | let mut buf = Vec::new(); 146 | let ret = match rx.receive_data(&mut buf).await { 147 | Ok(_) => Ok(buf), 148 | Err(err) => Err(err), 149 | }; 150 | Some((ret, rx)) 151 | }); 152 | 153 | (tx, socket_packets.boxed()) 154 | }; 155 | 156 | let influxdb = Client::new(params.influxdb_url.clone(), "testground"); 157 | 158 | Ok(Self { 159 | websocket_tx, 160 | websocket_rx, 161 | 162 | influxdb, 163 | next_id: 0, 164 | params, 165 | client_rx, 166 | pending_req: Default::default(), 167 | }) 168 | } 169 | 170 | fn contextualize_state(&self, state: &str) -> String { 171 | format!( 172 | "run:{}:plan:{}:case:{}:states:{}", 173 | self.params.test_run, self.params.test_plan, self.params.test_case, state 174 | ) 175 | } 176 | 177 | fn contextualize_topic(&self, topic: &str) -> String { 178 | format!( 179 | "run:{}:plan:{}:case:{}:topics:{}", 180 | self.params.test_run, self.params.test_plan, self.params.test_case, topic 181 | ) 182 | } 183 | 184 | fn contextualize_event(&self) -> String { 185 | format!( 186 | "run:{}:plan:{}:case:{}:run_events", 187 | self.params.test_run, self.params.test_plan, self.params.test_case 188 | ) 189 | } 190 | 191 | fn next_id(&mut self) -> u64 { 192 | let next_id = self.next_id; 193 | self.next_id += 1; 194 | next_id 195 | } 196 | 197 | pub async fn run(mut self) { 198 | loop { 199 | tokio::select! { 200 | res = self.websocket_rx.next() => match res { 201 | Some(res) => match res { 202 | Ok(res) => self.response(serde_json::from_slice::(&res).expect("Response Deserialization").into()).await, 203 | Err(e) => { 204 | eprintln!("Web socket Error: {}", e); 205 | return; 206 | } 207 | }, 208 | None => { 209 | eprintln!("Web socket receiver dropped"); 210 | return; 211 | }, 212 | }, 213 | cmd = self.client_rx.recv() => match cmd { 214 | Some(cmd) => self.command(cmd).await, 215 | None => { 216 | log::debug!("Client command sender dropped. 
Background task shutting down."); 217 | return; 218 | }, 219 | }, 220 | } 221 | } 222 | } 223 | 224 | async fn command(&mut self, cmd: Command) { 225 | let id = self.next_id(); 226 | 227 | match cmd { 228 | Command::Publish { 229 | topic, 230 | message, 231 | sender, 232 | } => { 233 | let topic = self.contextualize_topic(&topic); 234 | 235 | self.publish(id, topic, PayloadType::Json(message), sender) 236 | .await 237 | } 238 | Command::Subscribe { topic, stream } => { 239 | let topic = self.contextualize_topic(&topic); 240 | 241 | self.subscribe(id, topic, stream).await 242 | } 243 | Command::SignalEntry { state, sender } => { 244 | let state = self.contextualize_state(&state); 245 | 246 | self.signal(id, state, sender).await 247 | } 248 | Command::Barrier { 249 | state, 250 | mut target, 251 | sender, 252 | } => { 253 | let state = self.contextualize_state(&state); 254 | 255 | if target == 0 { 256 | target = self.params.test_instance_count; 257 | } 258 | 259 | self.barrier(id, state, target, sender).await 260 | } 261 | Command::WaitNetworkInitializedStart { sender } => { 262 | let event = Event { 263 | event: EventType::StageStart { 264 | name: "network-initialized".to_owned(), 265 | group: self.params.test_group_id.clone(), 266 | }, 267 | }; 268 | 269 | let topic = self.contextualize_event(); 270 | 271 | self.publish(id, topic, PayloadType::Event(event.event), sender) 272 | .await 273 | } 274 | Command::WaitNetworkInitializedBarrier { sender } => { 275 | if !self.params.test_sidecar { 276 | log::debug!( 277 | "Running in environment without network side car. \ 278 | Skipping wait for network." 279 | ); 280 | return; 281 | } 282 | 283 | let state = self.contextualize_state("network-initialized"); 284 | let target = self.params.test_instance_count; 285 | 286 | self.barrier(id, state, target, sender).await; 287 | } 288 | Command::WaitNetworkInitializedEnd { sender } => { 289 | let event = Event { 290 | event: EventType::StageEnd { 291 | name: "network-initialized".to_owned(), 292 | group: self.params.test_group_id.clone(), 293 | }, 294 | }; 295 | 296 | let topic = self.contextualize_event(); 297 | 298 | self.publish(id, topic, PayloadType::Event(event.event), sender) 299 | .await 300 | } 301 | Command::NetworkShaping { config, sender } => { 302 | if !self.params.test_sidecar { 303 | let _ = sender.send(Err(Error::SideCar)); 304 | return; 305 | } 306 | 307 | let topic = format!("network:{}", self.params.hostname); 308 | 309 | let topic = self.contextualize_topic(&topic); 310 | 311 | self.publish(id, topic, PayloadType::Config(config), sender) 312 | .await 313 | } 314 | Command::SignalSuccess { sender } => { 315 | let event = EventType::Success { 316 | group: self.params.test_group_id.clone(), 317 | }; 318 | 319 | let topic = self.contextualize_event(); 320 | 321 | self.publish(id, topic, PayloadType::Event(event), sender) 322 | .await 323 | } 324 | Command::SignalFailure { error, sender } => { 325 | let event = EventType::Failure { 326 | group: self.params.test_group_id.clone(), 327 | error, 328 | }; 329 | 330 | let topic = self.contextualize_event(); 331 | 332 | self.publish(id, topic, PayloadType::Event(event), sender) 333 | .await 334 | } 335 | Command::SignalCrash { 336 | error, 337 | stacktrace, 338 | sender, 339 | } => { 340 | let event = EventType::Crash { 341 | groups: self.params.test_group_id.clone(), 342 | error, 343 | stacktrace, 344 | }; 345 | 346 | let topic = self.contextualize_event(); 347 | 348 | self.publish(id, topic, PayloadType::Event(event), sender) 349 | .await 350 | 
} 351 | Command::Metric { 352 | write_query, 353 | sender, 354 | } => { 355 | //TODO add global tag to the query before processing 356 | 357 | match self.influxdb.query(write_query).await { 358 | Ok(_) => { 359 | let _ = sender.send(Ok(())); 360 | } 361 | Err(e) => { 362 | let _ = sender.send(Err(e.into())); 363 | } 364 | } 365 | } 366 | } 367 | } 368 | 369 | async fn publish( 370 | &mut self, 371 | id: u64, 372 | topic: String, 373 | payload: PayloadType, 374 | sender: oneshot::Sender>, 375 | ) { 376 | if let PayloadType::Event(ref event) = payload { 377 | // The Testground daemon determines the success or failure of a test 378 | // instance by parsing stdout for runtime events. 379 | println!("{}", serde_json::to_string(&LogLine::new(event)).unwrap()); 380 | } 381 | 382 | let request = Request { 383 | id: id.to_string(), 384 | is_cancel: false, 385 | request: RequestType::Publish { topic, payload }, 386 | }; 387 | 388 | self.send(request).await.expect(WEBSOCKET_RECEIVER); 389 | 390 | self.pending_req 391 | .insert(id, PendingRequest::PublishOrSignal { sender }); 392 | } 393 | 394 | async fn subscribe( 395 | &mut self, 396 | id: u64, 397 | topic: String, 398 | stream: mpsc::Sender>, 399 | ) { 400 | let request = Request { 401 | id: id.to_string(), 402 | is_cancel: false, 403 | request: RequestType::Subscribe { topic }, 404 | }; 405 | 406 | self.send(request).await.expect(WEBSOCKET_RECEIVER); 407 | 408 | self.pending_req 409 | .insert(id, PendingRequest::Subscribe { stream }); 410 | } 411 | 412 | async fn signal( 413 | &mut self, 414 | id: u64, 415 | state: String, 416 | sender: oneshot::Sender>, 417 | ) { 418 | let request = Request { 419 | id: id.to_string(), 420 | is_cancel: false, 421 | request: RequestType::SignalEntry { state }, 422 | }; 423 | 424 | self.send(request).await.expect(WEBSOCKET_RECEIVER); 425 | 426 | self.pending_req 427 | .insert(id, PendingRequest::PublishOrSignal { sender }); 428 | } 429 | 430 | async fn barrier( 431 | &mut self, 432 | id: u64, 433 | state: String, 434 | target: u64, 435 | sender: oneshot::Sender>, 436 | ) { 437 | let request = Request { 438 | id: id.to_string(), 439 | is_cancel: false, 440 | request: RequestType::Barrier { state, target }, 441 | }; 442 | 443 | self.send(request).await.expect(WEBSOCKET_RECEIVER); 444 | 445 | self.pending_req 446 | .insert(id, PendingRequest::Barrier { sender }); 447 | } 448 | 449 | async fn response(&mut self, res: Response) { 450 | let Response { id, response } = res; 451 | 452 | let idx = id.parse().unwrap(); 453 | 454 | let pending_req = match self.pending_req.remove(&idx) { 455 | Some(req) => req, 456 | None => return, 457 | }; 458 | 459 | match (pending_req, response) { 460 | (PendingRequest::Barrier { sender }, ResponseType::Error(error)) => { 461 | let _ = sender.send(Err(Error::SyncService(error))); 462 | } 463 | (PendingRequest::PublishOrSignal { sender }, ResponseType::Error(error)) => { 464 | let _ = sender.send(Err(Error::SyncService(error))); 465 | } 466 | (PendingRequest::Subscribe { stream }, ResponseType::Error(error)) => { 467 | let _ = stream.send(Err(Error::SyncService(error))).await; 468 | } 469 | (PendingRequest::Subscribe { stream }, ResponseType::Subscribe(msg)) => { 470 | if stream.send(Ok(msg)).await.is_ok() { 471 | self.pending_req 472 | .insert(idx, PendingRequest::Subscribe { stream }); 473 | } 474 | } 475 | (PendingRequest::PublishOrSignal { sender }, ResponseType::SignalEntry { seq }) => { 476 | let _ = sender.send(Ok(seq)); 477 | } 478 | (PendingRequest::PublishOrSignal { sender }, 
ResponseType::Publish { seq }) => { 479 | let _ = sender.send(Ok(seq)); 480 | } 481 | (PendingRequest::Barrier { sender }, ResponseType::Barrier) => { 482 | let _ = sender.send(Ok(())); 483 | } 484 | (req, res) => { 485 | panic!("No match Request: {:?} Response: {:?}", req, res); 486 | } 487 | } 488 | } 489 | 490 | async fn send(&mut self, req: Request) -> Result<(), ()> { 491 | let mut json = serde_json::to_vec(&req).expect("Request Serialization"); 492 | 493 | self.websocket_tx.send_binary_mut(&mut json).await.unwrap(); 494 | 495 | self.websocket_tx.flush().await.unwrap(); 496 | 497 | Ok(()) 498 | } 499 | } 500 | -------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::fs::File; 3 | use std::io::Write; 4 | use std::path::PathBuf; 5 | 6 | use crate::{ 7 | background::{BackgroundTask, Command}, 8 | errors::Error, 9 | events::{Event, EventType}, 10 | network_conf::NetworkConfiguration, 11 | RunParameters, 12 | }; 13 | 14 | use clap::Parser; 15 | 16 | use influxdb::WriteQuery; 17 | 18 | use crate::events::LogLine; 19 | use tokio::sync::{ 20 | mpsc::{self, channel, Sender}, 21 | oneshot, 22 | }; 23 | use tokio_stream::{wrappers::ReceiverStream, Stream}; 24 | 25 | const BACKGROUND_RECEIVER: &str = "Background Receiver"; 26 | const BACKGROUND_SENDER: &str = "Background Sender"; 27 | 28 | /// Basic synchronization client enabling one to send signals, await barriers and subscribe or publish to a topic. 29 | #[derive(Clone)] 30 | pub struct Client { 31 | cmd_tx: Sender, 32 | /// The runtime parameters for this test. 33 | run_parameters: RunParameters, 34 | /// A global sequence number assigned to this test instance by the sync service. 35 | global_seq: u64, 36 | /// A group-scoped sequence number assigned to this test instance by the sync service. 37 | group_seq: u64, 38 | /// A path to `run.out`. 39 | run_out: Option, 40 | } 41 | 42 | impl Client { 43 | pub async fn new_and_init() -> Result> { 44 | let run_parameters: RunParameters = RunParameters::try_parse()?; 45 | 46 | let (cmd_tx, cmd_rx) = channel(1); 47 | 48 | let background = BackgroundTask::new(cmd_rx, run_parameters.clone()).await?; 49 | 50 | let run_out = run_parameters 51 | .test_outputs_path 52 | .to_str() 53 | .map(|path_str| { 54 | if path_str.is_empty() { 55 | None 56 | } else { 57 | let mut path = PathBuf::from(path_str); 58 | path.push("run.out"); 59 | Some(path) 60 | } 61 | }) 62 | .unwrap_or(None); 63 | 64 | // `global_seq` and `group_seq` are initialized by 0 at this point since no way to signal to the sync service. 65 | let mut client = Self { 66 | cmd_tx, 67 | run_parameters, 68 | global_seq: 0, 69 | group_seq: 0, 70 | run_out, 71 | }; 72 | 73 | tokio::spawn(background.run()); 74 | 75 | client.wait_network_initialized().await?; 76 | 77 | let global_seq_num = client 78 | // Note that the sdk-go only signals, but not waits. 79 | .signal_and_wait( 80 | "initialized_global", 81 | client.run_parameters.test_instance_count, 82 | ) 83 | .await?; 84 | 85 | let group_seq_num = client 86 | // Note that the sdk-go only signals, but not waits. 
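// The state name embeds the group id, so only instances in the same group count towards this barrier.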
87 | .signal_and_wait( 88 | format!("initialized_group_{}", client.run_parameters.test_group_id), 89 | client.run_parameters.test_group_instance_count, 90 | ) 91 | .await?; 92 | 93 | client.record_message(format!( 94 | "claimed sequence numbers; global={}, group({})={}", 95 | global_seq_num, client.run_parameters.test_group_id, group_seq_num 96 | )); 97 | 98 | client.global_seq = global_seq_num; 99 | client.group_seq = group_seq_num; 100 | 101 | Ok(client) 102 | } 103 | 104 | /// ```publish``` publishes an item on the supplied topic. 105 | /// 106 | /// Once the item has been published successfully, 107 | /// returning the sequence number of the new item in the ordered topic, 108 | /// or an error if one occurred, starting with 1 (for the first item). 109 | pub async fn publish( 110 | &self, 111 | topic: impl Into>, 112 | message: impl Into>, 113 | ) -> Result { 114 | let (sender, receiver) = oneshot::channel(); 115 | 116 | let cmd = Command::Publish { 117 | topic: topic.into().into_owned(), 118 | message: message.into().into_owned(), 119 | sender, 120 | }; 121 | 122 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 123 | 124 | receiver.await.expect(BACKGROUND_SENDER) 125 | } 126 | 127 | /// ```subscribe``` subscribes to a topic, consuming ordered, elements from 128 | /// index 0. 129 | /// 130 | /// Note that once the capacity of the returned [`Stream`] is reached, the 131 | /// background task blocks and thus all work related to the [`Client`] will 132 | /// pause until elements from the [`Stream`] are consumed and thus capacity 133 | /// is freed. Callers of [`Client::subscribe`] should either set a high 134 | /// capacity, continuously read from the returned [`Stream`] or drop it. 135 | /// 136 | /// ```no_run 137 | /// # use testground::client::Client; 138 | /// # let client: Client = todo!(); 139 | /// client.subscribe("my_topic", u16::MAX.into()); 140 | /// ``` 141 | pub async fn subscribe( 142 | &self, 143 | topic: impl Into>, 144 | capacity: usize, 145 | ) -> impl Stream> { 146 | let (stream, out) = mpsc::channel(capacity); 147 | 148 | let cmd = Command::Subscribe { 149 | topic: topic.into().into_owned(), 150 | stream, 151 | }; 152 | 153 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 154 | 155 | ReceiverStream::new(out) 156 | } 157 | 158 | /// ```signal_and_wait``` composes SignalEntry and Barrier, 159 | /// signalling entry on the supplied state, 160 | /// and then awaiting until the required value has been reached. 161 | pub async fn signal_and_wait( 162 | &self, 163 | state: impl Into>, 164 | target: u64, 165 | ) -> Result { 166 | let state = state.into().into_owned(); 167 | 168 | let res = self.signal(state.clone()).await?; 169 | 170 | self.barrier(state, target).await?; 171 | 172 | Ok(res) 173 | } 174 | 175 | /// ```signal``` increments the state counter by one, 176 | /// returning the value of the new value of the counter, 177 | /// or an error if the operation fails. 178 | pub async fn signal(&self, state: impl Into>) -> Result { 179 | let (sender, receiver) = oneshot::channel(); 180 | 181 | let state = state.into().into_owned(); 182 | let cmd = Command::SignalEntry { state, sender }; 183 | 184 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 185 | 186 | receiver.await.expect(BACKGROUND_SENDER) 187 | } 188 | 189 | /// ```barrier``` sets a barrier on the supplied ```state``` that fires when it reaches its target value (or higher). 
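/// A `target` of 0 is interpreted by the background task as the total number of test instances.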
190 | pub async fn barrier( 191 | &self, 192 | state: impl Into>, 193 | target: u64, 194 | ) -> Result<(), Error> { 195 | let (sender, receiver) = oneshot::channel(); 196 | 197 | let state = state.into().into_owned(); 198 | let cmd = Command::Barrier { 199 | state, 200 | target, 201 | sender, 202 | }; 203 | 204 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 205 | 206 | receiver.await.expect(BACKGROUND_SENDER) 207 | } 208 | 209 | /// ```wait_network_initialized``` waits for the sidecar to initialize the network, 210 | /// if the sidecar is enabled. 211 | async fn wait_network_initialized(&self) -> Result<(), Error> { 212 | // Event 213 | let (sender, receiver) = oneshot::channel(); 214 | 215 | let cmd = Command::WaitNetworkInitializedStart { sender }; 216 | 217 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 218 | 219 | receiver.await.expect(BACKGROUND_SENDER)?; 220 | 221 | // Barrier 222 | let (sender, receiver) = oneshot::channel(); 223 | 224 | let cmd = Command::WaitNetworkInitializedBarrier { sender }; 225 | 226 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 227 | 228 | receiver.await.expect(BACKGROUND_SENDER)?; 229 | 230 | // Event 231 | let (sender, receiver) = oneshot::channel(); 232 | 233 | let cmd = Command::WaitNetworkInitializedEnd { sender }; 234 | 235 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 236 | 237 | receiver.await.expect(BACKGROUND_SENDER)?; 238 | 239 | Ok(()) 240 | } 241 | 242 | /// ```configure_network``` asks the sidecar to configure the network. 243 | pub async fn configure_network(&self, config: NetworkConfiguration) -> Result<(), Error> { 244 | // Publish 245 | let (sender, receiver) = oneshot::channel(); 246 | 247 | let state = config.callback_state.clone(); 248 | let target = if let Some(callback_target) = config.callback_target { 249 | callback_target 250 | } else { 251 | 0 252 | }; 253 | 254 | let cmd = Command::NetworkShaping { sender, config }; 255 | 256 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 257 | 258 | receiver.await.expect(BACKGROUND_SENDER)?; 259 | 260 | self.barrier(state, target).await?; 261 | 262 | Ok(()) 263 | } 264 | 265 | pub fn record_message(&self, message: impl Into>) { 266 | let message = message.into().into_owned(); 267 | 268 | let event = Event { 269 | event: EventType::Message { message }, 270 | }; 271 | 272 | //TODO implement logger similar to go-sdk 273 | 274 | let json_event = serde_json::to_string(&event).expect("Event Serialization"); 275 | 276 | println!("{}", json_event); 277 | 278 | self.write(&event.event); 279 | } 280 | 281 | pub async fn record_success(self) -> Result<(), Error> { 282 | let (sender, receiver) = oneshot::channel(); 283 | 284 | let cmd = Command::SignalSuccess { sender }; 285 | 286 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 287 | 288 | receiver.await.expect(BACKGROUND_SENDER)?; 289 | 290 | self.write(&EventType::Success { 291 | group: self.run_parameters.test_group_id.clone(), 292 | }); 293 | 294 | Ok(()) 295 | } 296 | 297 | pub async fn record_failure(self, error: impl Into>) -> Result<(), Error> { 298 | let error = error.into().into_owned(); 299 | 300 | let (sender, receiver) = oneshot::channel(); 301 | 302 | let cmd = Command::SignalFailure { 303 | error: error.clone(), 304 | sender, 305 | }; 306 | 307 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 308 | 309 | receiver.await.expect(BACKGROUND_SENDER)?; 310 | 311 | self.write(&EventType::Failure { 312 | group: self.run_parameters.test_group_id.clone(), 313 | error, 314 
| }); 315 | 316 | Ok(()) 317 | } 318 | 319 | pub async fn record_crash( 320 | self, 321 | error: impl Into>, 322 | stacktrace: impl Into>, 323 | ) -> Result<(), Error> { 324 | let error = error.into().into_owned(); 325 | let stacktrace = stacktrace.into().into_owned(); 326 | 327 | let (sender, receiver) = oneshot::channel(); 328 | 329 | let cmd = Command::SignalCrash { 330 | error: error.clone(), 331 | stacktrace: stacktrace.clone(), 332 | sender, 333 | }; 334 | 335 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 336 | 337 | receiver.await.expect(BACKGROUND_SENDER)?; 338 | 339 | self.write(&EventType::Crash { 340 | groups: self.run_parameters.test_group_id.clone(), 341 | error, 342 | stacktrace, 343 | }); 344 | 345 | Ok(()) 346 | } 347 | 348 | pub async fn record_metric(&self, write_query: WriteQuery) -> Result<(), Error> { 349 | let (sender, receiver) = oneshot::channel(); 350 | 351 | let cmd = Command::Metric { 352 | write_query, 353 | sender, 354 | }; 355 | 356 | self.cmd_tx.send(cmd).await.expect(BACKGROUND_RECEIVER); 357 | 358 | receiver.await.expect(BACKGROUND_SENDER)?; 359 | 360 | Ok(()) 361 | } 362 | 363 | /// Returns runtime parameters for this test. 364 | pub fn run_parameters(&self) -> RunParameters { 365 | self.run_parameters.clone() 366 | } 367 | 368 | /// Returns a global sequence number assigned to this test instance. 369 | pub fn global_seq(&self) -> u64 { 370 | self.global_seq 371 | } 372 | 373 | /// Returns a group-scoped sequence number assigned to this test instance. 374 | pub fn group_seq(&self) -> u64 { 375 | self.group_seq 376 | } 377 | 378 | /// Writes an event to `run.out`. 379 | fn write(&self, event_type: &EventType) { 380 | if let Some(path) = self.run_out.as_ref() { 381 | let mut file = match File::options().create(true).append(true).open(path) { 382 | Ok(file) => file, 383 | Err(e) => { 384 | eprintln!("Failed to open `run.out`: {}", e); 385 | return; 386 | } 387 | }; 388 | 389 | if let Err(e) = writeln!( 390 | file, 391 | "{}", 392 | serde_json::to_string(&LogLine::new(event_type)).expect("Event Serialization") 393 | ) { 394 | eprintln!("Failed to write a log to `run.out`: {}", e); 395 | } 396 | } 397 | } 398 | } 399 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | #[derive(Error, Debug)] 4 | pub enum Error { 5 | #[error("Soketto: {0}")] 6 | Soketto(#[from] soketto::connection::Error), 7 | #[error("Serde: {0}")] 8 | Serde(#[from] serde_json::error::Error), 9 | #[error("UTF-8: {0}")] 10 | FromUtf8(#[from] std::string::FromUtf8Error), 11 | #[error("Sync-Service: {0}")] 12 | SyncService(String), 13 | #[error("The SideCar is not running")] 14 | SideCar, 15 | #[error("InfluxDB: {0}")] 16 | InfluxDB(#[from] influxdb::Error), 17 | } 18 | -------------------------------------------------------------------------------- /src/events.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use serde::Serialize; 4 | use std::time::{SystemTime, UNIX_EPOCH}; 5 | 6 | #[derive(Serialize, Debug)] 7 | pub struct Event { 8 | pub event: EventType, 9 | } 10 | 11 | #[derive(Debug, Serialize)] 12 | pub struct LogLine<'a> { 13 | pub ts: u128, 14 | pub event: &'a EventType, 15 | } 16 | 17 | impl LogLine<'_> { 18 | pub fn new(event: &EventType) -> LogLine { 19 | LogLine { 20 | ts: SystemTime::now() 21 | .duration_since(UNIX_EPOCH) 22 | .unwrap() 23 | 
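// Timestamp in nanoseconds since the Unix epoch.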
.as_nanos(), 24 | event, 25 | } 26 | } 27 | } 28 | 29 | #[derive(Serialize, Debug)] 30 | pub enum EventType { 31 | #[serde(rename = "start_event")] 32 | Start { runenv: String }, 33 | #[serde(rename = "message_event")] 34 | Message { message: String }, 35 | #[serde(rename = "success_event")] 36 | Success { group: String }, 37 | #[serde(rename = "failure_event")] 38 | Failure { group: String, error: String }, 39 | #[serde(rename = "crash_event")] 40 | Crash { 41 | groups: String, 42 | error: String, 43 | stacktrace: String, 44 | }, 45 | #[serde(rename = "stage_start_event")] 46 | StageStart { name: String, group: String }, 47 | #[serde(rename = "stage_end_event")] 48 | StageEnd { name: String, group: String }, 49 | } 50 | 51 | #[cfg(test)] 52 | mod tests { 53 | use super::*; 54 | 55 | #[test] 56 | fn serde_test() { 57 | //let raw_response = r#"{"key": "run:c7uji38e5te2b9t464v0:plan:streaming_test:case:quickstart:run_events", "error": "failed to decode as type *runtime.Event: \"{\\\"stage_end_event\\\":{\\\"name\\\":\\\"network-initialized\\\",\\\"group\\\":\\\"single\\\"}}\"", "id": "0"}"#; 58 | 59 | let event = Event { 60 | event: EventType::StageStart { 61 | name: "network-initialized".to_owned(), 62 | group: "single".to_owned(), 63 | }, 64 | }; 65 | 66 | let json = serde_json::to_string(&event).unwrap(); 67 | 68 | println!("{:?}", json); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | mod background; 2 | pub mod client; 3 | pub mod errors; 4 | mod events; 5 | pub mod network_conf; 6 | mod params; 7 | mod requests; 8 | mod responses; 9 | 10 | pub use params::RunParameters; 11 | 12 | // Re-export public dependencies. 13 | pub use influxdb::{Timestamp, WriteQuery}; 14 | -------------------------------------------------------------------------------- /src/network_conf.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; 4 | use serde::Serialize; 5 | 6 | use serde_repr::{Deserialize_repr, Serialize_repr}; 7 | 8 | #[derive(Serialize_repr, Deserialize_repr, PartialEq, Eq, Debug)] 9 | #[repr(u8)] 10 | pub enum FilterAction { 11 | Accept = 0, 12 | Reject = 1, 13 | Drop = 2, 14 | } 15 | 16 | #[derive(Serialize, Debug)] 17 | /// LinkShape defines how traffic should be shaped. 18 | pub struct LinkShape { 19 | /// Latency is the egress latency. 20 | pub latency: u64, 21 | 22 | /// Jitter is the egress jitter. 23 | pub jitter: u64, 24 | 25 | /// Bandwidth is egress bits per second. 26 | pub bandwidth: u64, 27 | 28 | /// Drop all inbound traffic. 29 | /// TODO: Not implemented 30 | pub filter: FilterAction, 31 | 32 | /// Loss is the egress packet loss (%) 33 | pub loss: f32, 34 | 35 | /// Corrupt is the egress packet corruption probability (%) 36 | pub corrupt: f32, 37 | 38 | /// Corrupt is the egress packet corruption correlation (%) 39 | pub corrupt_corr: f32, 40 | 41 | /// Reorder is the probability that an egress packet will be reordered (%) 42 | /// 43 | /// Reordered packets will skip the latency delay and be sent 44 | /// immediately. You must specify a non-zero Latency for this option to 45 | /// make sense. 
46 |     pub reorder: f32,
47 | 
48 |     /// ReorderCorr is the egress packet reordering correlation (%)
49 |     pub reorder_corr: f32,
50 | 
51 |     /// Duplicate is the percentage of packets that are duplicated (%)
52 |     pub duplicate: f32,
53 | 
54 |     /// DuplicateCorr is the correlation between egress packet duplication (%)
55 |     pub duplicate_corr: f32,
56 | }
57 | 
58 | #[derive(Serialize, Debug)]
59 | /// LinkRule applies a LinkShape to a subnet.
60 | pub struct LinkRule {
61 |     #[serde(flatten)]
62 |     pub link_shape: LinkShape,
63 | 
64 |     pub subnet: IpNetwork,
65 | }
66 | 
67 | pub const DEFAULT_DATA_NETWORK: &str = "default";
68 | 
69 | #[derive(Serialize, Debug)]
70 | pub enum RoutingPolicyType {
71 |     #[serde(rename = "allow_all")]
72 |     AllowAll,
73 |     #[serde(rename = "deny_all")]
74 |     DenyAll,
75 | }
76 | 
77 | /// NetworkConfiguration specifies how a node's network should be configured.
78 | #[derive(Serialize, Debug)]
79 | pub struct NetworkConfiguration {
80 |     /// Network is the name of the network to configure.
81 |     pub network: String,
82 | 
83 |     /// IPv4 and IPv6 set the IP addresses of this network device. If
84 |     /// unspecified, the sidecar will leave them alone.
85 |     ///
86 |     /// Your test-case will be assigned a B block in the range
87 |     /// 16.0.0.1-32.0.0.0. X.Y.0.1 will always be reserved for the gateway
88 |     /// and shouldn't be used by the test.
89 |     #[serde(rename = "IPv4")]
90 |     pub ipv4: Option<Ipv4Network>,
91 | 
92 |     /// TODO: IPv6 is currently not supported.
93 |     #[serde(rename = "IPv6")]
94 |     pub ipv6: Option<Ipv6Network>,
95 | 
96 |     /// Enable enables this network device.
97 |     pub enable: bool,
98 | 
99 |     /// Default is the default link shaping rule.
100 |     pub default: LinkShape,
101 | 
102 |     /// Rules defines how traffic should be shaped to different subnets.
103 |     ///
104 |     /// TODO: This is not implemented.
105 |     pub rules: Option<Vec<LinkRule>>,
106 | 
107 |     /// CallbackState will be signalled when the link changes are applied.
108 |     ///
109 |     /// Nodes can use the same state to wait for _all_ or a subset of nodes to
110 |     /// enter the desired network state. See CallbackTarget.
111 |     pub callback_state: String,
112 | 
113 |     /// CallbackTarget is the amount of instances that will have needed to signal
114 |     /// on the Callback state to consider the configuration operation a success.
115 |     #[serde(skip_serializing_if = "Option::is_none")]
116 |     pub callback_target: Option<u64>,
117 | 
118 |     /// RoutingPolicy defines the data routing policy of a certain node. This affects
119 |     /// external networks other than the network 'Default', e.g., external Internet
120 |     /// access.
121 | pub routing_policy: RoutingPolicyType, 122 | } 123 | 124 | #[cfg(test)] 125 | mod tests { 126 | use std::net::Ipv4Addr; 127 | 128 | use super::*; 129 | 130 | #[test] 131 | fn serde_test() { 132 | let output = r#"{"network":"default","IPv4":"16.0.1.1/24","IPv6":null,"enable":true,"default":{"latency":10000000,"jitter":0,"bandwidth":1048576,"filter":0,"loss":0.0,"corrupt":0.0,"corrupt_corr":0.0,"reorder":0.0,"reorder_corr":0.0,"duplicate":0.0,"duplicate_corr":0.0},"rules":null,"callback_state":"latency-reduced","routing_policy":"deny_all"}"#; 133 | 134 | let network_conf = NetworkConfiguration { 135 | network: DEFAULT_DATA_NETWORK.to_owned(), 136 | ipv4: Some(Ipv4Network::new(Ipv4Addr::new(16, 0, 1, 1), 24).unwrap()), 137 | ipv6: None, 138 | enable: true, 139 | default: LinkShape { 140 | latency: 10000000, 141 | jitter: 0, 142 | bandwidth: 1048576, 143 | filter: FilterAction::Accept, 144 | loss: 0.0, 145 | corrupt: 0.0, 146 | corrupt_corr: 0.0, 147 | reorder: 0.0, 148 | reorder_corr: 0.0, 149 | duplicate: 0.0, 150 | duplicate_corr: 0.0, 151 | }, 152 | rules: None, 153 | callback_state: "latency-reduced".to_owned(), 154 | callback_target: None, 155 | routing_policy: RoutingPolicyType::DenyAll, 156 | }; 157 | 158 | let input = serde_json::to_string(&network_conf).unwrap(); 159 | 160 | println!("{}", input); 161 | 162 | assert_eq!(input, output) 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /src/params.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use std::collections::HashMap; 3 | use std::net::{IpAddr, Ipv4Addr}; 4 | 5 | use chrono::{DateTime, FixedOffset}; 6 | use std::path::PathBuf; 7 | 8 | use ipnetwork::IpNetwork; 9 | 10 | #[derive(Parser, Debug, Clone)] 11 | /// RunParameters encapsulates the runtime parameters for this test. 
12 | pub struct RunParameters {
13 |     #[clap(env)]
14 |     pub test_plan: String, // TEST_PLAN: streaming_test
15 |     #[clap(env)]
16 |     pub test_case: String, // TEST_CASE: quickstart
17 |     #[clap(env)]
18 |     pub test_run: String, // TEST_RUN: c7fjstge5te621cen4i0
19 | 
20 |     #[clap(env)]
21 |     pub test_repo: String, //TEST_REPO:
22 |     #[clap(env)]
23 |     pub test_branch: String, // TEST_BRANCH:
24 |     #[clap(env)]
25 |     pub test_tag: String, // TEST_TAG:
26 | 
27 |     #[clap(env)]
28 |     pub test_outputs_path: PathBuf, // TEST_OUTPUTS_PATH: /outputs
29 |     #[clap(env)]
30 |     pub test_temp_path: String, // TEST_TEMP_PATH: /temp
31 | 
32 |     #[clap(env)]
33 |     pub test_instance_count: u64, // TEST_INSTANCE_COUNT: 1
34 |     #[clap(env)]
35 |     pub test_instance_role: String, // TEST_INSTANCE_ROLE:
36 |     #[clap(env, parse(try_from_str = parse_key_val))]
37 |     pub test_instance_params: HashMap<String, String>, // TEST_INSTANCE_PARAMS: feature=false|neutral_nodes=10|num=2|word=never
38 | 
39 |     #[clap(long, env)]
40 |     pub test_sidecar: bool, // TEST_SIDECAR: true
41 | 
42 |     #[clap(env)]
43 |     pub test_subnet: IpNetwork, // TEST_SUBNET: 16.0.0.0/16
44 |     #[clap(env)]
45 |     pub test_start_time: DateTime<FixedOffset>, // TEST_START_TIME: 2022-01-12T15:48:07-05:00
46 | 
47 |     #[clap(env)]
48 |     pub test_capture_profiles: String, // TEST_CAPTURE_PROFILES:
49 | 
50 |     #[clap(env)]
51 |     pub test_group_instance_count: u64, // TEST_GROUP_INSTANCE_COUNT: 1
52 |     #[clap(env)]
53 |     pub test_group_id: String, // TEST_GROUP_ID: single
54 | 
55 |     #[clap(long, env)]
56 |     pub test_disable_metrics: bool, // TEST_DISABLE_METRICS: false
57 | 
58 |     #[clap(env)]
59 |     pub hostname: String, // HOSTNAME: e6f4cc8fc147
60 |     #[clap(env)]
61 |     pub influxdb_url: String, // INFLUXDB_URL: http://testground-influxdb:8086
62 |     #[clap(env)]
63 |     pub redis_host: String, // REDIS_HOST: testground-redis
64 |     // HOME: /
65 | }
66 | 
67 | impl RunParameters {
68 |     /// Examines the local network interfaces, and tries to find our assigned IP
69 |     /// within the data network.
70 |     ///
71 |     /// If running in a sidecar-less environment, the loopback address is
72 |     /// returned.
73 |     pub fn data_network_ip(&self) -> std::io::Result<Option<IpAddr>> {
74 |         if !self.test_sidecar {
75 |             // This must be a local:exec runner and we currently don't support
76 |             // traffic shaping on it for now, just return the loopback address.
77 |             return Ok(Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))));
78 |         }
79 | 
80 |         Ok(if_addrs::get_if_addrs()?
81 | .into_iter() 82 | .map(|i| i.addr.ip()) 83 | .find(|ip| self.test_subnet.contains(*ip))) 84 | } 85 | } 86 | 87 | fn parse_key_val(s: &str) -> Result, String> { 88 | let mut hashmap = HashMap::new(); 89 | 90 | for kv in s.split('|').filter(|&s| !s.is_empty()) { 91 | let pos = kv 92 | .find('=') 93 | .ok_or_else(|| format!("Invalid KEY=VALUE: no '=' found in {}", kv))?; 94 | hashmap.insert(String::from(&kv[..pos]), String::from(&kv[pos + 1..])); 95 | } 96 | 97 | Ok(hashmap) 98 | } 99 | 100 | #[test] 101 | fn test_parse_key_val() { 102 | let result = parse_key_val("feature=false|neutral_nodes=10|num=2|word=never").unwrap(); 103 | assert_eq!(4, result.len()); 104 | assert_eq!("false", result.get("feature").unwrap()); 105 | assert_eq!("10", result.get("neutral_nodes").unwrap()); 106 | assert_eq!("2", result.get("num").unwrap()); 107 | assert_eq!("never", result.get("word").unwrap()); 108 | 109 | let result = parse_key_val("feature=false").unwrap(); 110 | assert_eq!(1, result.len()); 111 | assert_eq!("false", result.get("feature").unwrap()); 112 | 113 | let result = parse_key_val("word=ne=ver").unwrap(); 114 | assert_eq!(1, result.len()); 115 | assert_eq!("ne=ver", result.get("word").unwrap()); 116 | 117 | let result = parse_key_val("").unwrap(); 118 | assert!(result.is_empty()); 119 | 120 | let result = parse_key_val("feature=false|neutral_nodes"); 121 | assert!(result.is_err()); 122 | } 123 | -------------------------------------------------------------------------------- /src/requests.rs: -------------------------------------------------------------------------------- 1 | use serde::Serialize; 2 | 3 | use crate::{events::EventType, network_conf::NetworkConfiguration}; 4 | 5 | #[derive(Serialize, Debug)] 6 | pub struct Request { 7 | pub id: String, 8 | 9 | pub is_cancel: bool, 10 | 11 | #[serde(flatten)] 12 | pub request: RequestType, 13 | } 14 | 15 | #[derive(Serialize, Debug)] 16 | #[serde(untagged)] 17 | pub enum PayloadType { 18 | Event(EventType), 19 | 20 | Json(serde_json::Value), 21 | 22 | Config(NetworkConfiguration), 23 | } 24 | 25 | #[derive(Serialize, Debug)] 26 | pub enum RequestType { 27 | #[serde(rename = "signal_entry")] 28 | SignalEntry { state: String }, 29 | #[serde(rename = "barrier")] 30 | Barrier { state: String, target: u64 }, 31 | #[serde(rename = "publish")] 32 | Publish { topic: String, payload: PayloadType }, 33 | #[serde(rename = "subscribe")] 34 | Subscribe { topic: String }, 35 | } 36 | 37 | #[cfg(test)] 38 | mod tests { 39 | 40 | use std::net::Ipv4Addr; 41 | 42 | use ipnetwork::Ipv4Network; 43 | 44 | use crate::{events::EventType, network_conf::*}; 45 | 46 | use super::*; 47 | 48 | #[test] 49 | fn serde_test() { 50 | let _network_conf = NetworkConfiguration { 51 | network: DEFAULT_DATA_NETWORK.to_owned(), 52 | ipv4: Some(Ipv4Network::new(Ipv4Addr::new(16, 0, 1, 1), 24).unwrap()), 53 | ipv6: None, 54 | enable: true, 55 | default: LinkShape { 56 | latency: 10000000, 57 | jitter: 0, 58 | bandwidth: 1048576, 59 | filter: FilterAction::Accept, 60 | loss: 0.0, 61 | corrupt: 0.0, 62 | corrupt_corr: 0.0, 63 | reorder: 0.0, 64 | reorder_corr: 0.0, 65 | duplicate: 0.0, 66 | duplicate_corr: 0.0, 67 | }, 68 | rules: None, 69 | callback_state: "latency-reduced".to_owned(), 70 | callback_target: None, 71 | routing_policy: RoutingPolicyType::DenyAll, 72 | }; 73 | 74 | let event = EventType::StageStart { 75 | name: "network-initialized".to_owned(), 76 | group: "single".to_owned(), 77 | }; 78 | 79 | let _msg = "123QM 192.168.1.1/25".to_owned(); 80 | 81 | let req = Request { 
82 |             id: "0".to_owned(),
83 |             is_cancel: false,
84 |             request: RequestType::Publish {
85 |                 topic: "run:abcd1234:plan:live_streming:case:quickstart:topics:network:hostname"
86 |                     .to_owned(),
87 |                 payload: PayloadType::Event(event),
88 |             },
89 |         };
90 | 
91 |         let json_req = serde_json::to_string_pretty(&req).unwrap();
92 | 
93 |         println!("{}", json_req);
94 |     }
95 | }
96 | 
--------------------------------------------------------------------------------
/src/responses.rs:
--------------------------------------------------------------------------------
1 | use serde::Deserialize;
2 | use serde_with::{serde_as, NoneAsEmptyString};
3 | 
4 | #[derive(Deserialize, Debug)]
5 | pub struct SignalEntry {
6 |     pub seq: u64,
7 | }
8 | 
9 | #[derive(Deserialize, Debug)]
10 | pub struct Publish {
11 |     pub seq: u64,
12 | }
13 | 
14 | #[serde_as]
15 | #[derive(Deserialize, Debug)]
16 | pub struct RawResponse {
17 |     pub id: String,
18 | 
19 |     #[serde_as(as = "NoneAsEmptyString")]
20 |     pub error: Option<String>,
21 | 
22 |     #[serde_as(as = "NoneAsEmptyString")]
23 |     pub subscribe: Option<String>,
24 | 
25 |     pub signal_entry: Option<SignalEntry>,
26 | 
27 |     pub publish: Option<Publish>,
28 | }
29 | 
30 | #[derive(Debug, PartialEq, Eq)]
31 | pub enum ResponseType {
32 |     SignalEntry { seq: u64 },
33 |     Publish { seq: u64 },
34 |     Subscribe(serde_json::Value),
35 |     Error(String),
36 |     Barrier,
37 | }
38 | 
39 | #[derive(Debug, PartialEq, Eq)]
40 | pub struct Response {
41 |     pub id: String,
42 |     pub response: ResponseType,
43 | }
44 | 
45 | impl From<RawResponse> for Response {
46 |     fn from(raw_response: RawResponse) -> Self {
47 |         let RawResponse {
48 |             id,
49 |             error,
50 |             subscribe,
51 |             signal_entry,
52 |             publish,
53 |         } = raw_response;
54 | 
55 |         let response = match (error, subscribe, signal_entry, publish) {
56 |             (None, None, None, None) => ResponseType::Barrier,
57 |             (Some(error), None, None, None) => {
58 |                 // Hack to remove extra escape characters
59 |                 let error = serde_json::from_str(&error).expect("JSON Deserialization");
60 |                 ResponseType::Error(error)
61 |             }
62 |             (None, Some(msg), None, None) => {
63 |                 // The Subscribe payload is a json encoded string, so we need to deserialize it.
64 | let payload = serde_json::from_str(&msg).expect("JSON Deserialization"); 65 | ResponseType::Subscribe(payload) 66 | } 67 | (None, None, Some(signal), None) => ResponseType::SignalEntry { seq: signal.seq }, 68 | (None, None, None, Some(publish)) => ResponseType::Publish { seq: publish.seq }, 69 | (error, subscribe, signal_entry, publish) => { 70 | panic!( 71 | "Incompatible Raw Response {:?}", 72 | RawResponse { 73 | id, 74 | error, 75 | subscribe, 76 | signal_entry, 77 | publish, 78 | } 79 | ); 80 | } 81 | }; 82 | 83 | Self { id, response } 84 | } 85 | } 86 | 87 | #[cfg(test)] 88 | mod tests { 89 | use super::*; 90 | 91 | #[test] 92 | fn serde_test() { 93 | let raw_response = 94 | "{\"id\":\"0\",\"error\":\"\",\"subscribe\":\"\",\"publish\":{\"seq\":1},\"signal_entry\":null}"; 95 | 96 | let response: RawResponse = serde_json::from_str(raw_response).unwrap(); 97 | 98 | let response: Response = response.into(); 99 | 100 | assert_eq!( 101 | Response { 102 | id: "0".to_owned(), 103 | response: ResponseType::Publish { seq: 1 } 104 | }, 105 | response 106 | ); 107 | } 108 | #[test] 109 | fn serde_test_complex_subscribe() { 110 | let raw_response = "{\"id\":\"1\",\"error\":\"\",\"subscribe\":\"{\\\"Addrs\\\":[\\\"/ip4/16.3.0.3/tcp/45369\\\"],\\\"ID\\\":\\\"QmbSLMEMackm7vHiUGMB2EFAPbzeJNpeB9yTpzYKoojDWc\\\"}\"}"; 111 | 112 | let response: RawResponse = serde_json::from_str(raw_response).unwrap(); 113 | 114 | let response: Response = response.into(); 115 | 116 | assert_eq!( 117 | Response { 118 | id: "1".to_owned(), 119 | response: ResponseType::Subscribe(serde_json::json!({ 120 | "Addrs": ["/ip4/16.3.0.3/tcp/45369"], 121 | "ID": "QmbSLMEMackm7vHiUGMB2EFAPbzeJNpeB9yTpzYKoojDWc" 122 | })) 123 | }, 124 | response 125 | ); 126 | } 127 | } 128 | --------------------------------------------------------------------------------
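The modules above expose a small user-facing surface: `RunParameters` is parsed from the `TEST_*` environment variables the runner injects, `data_network_ip()` locates this instance's address on the data network, and metrics are built from the `influxdb` types re-exported in `src/lib.rs`. The snippet below is a minimal, illustrative sketch and is not part of the source tree; it assumes the crate is consumed under its published name `testground` and uses `clap::Parser::parse()` directly on `RunParameters` purely for demonstration.

use clap::Parser;
use testground::{RunParameters, Timestamp, WriteQuery};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `RunParameters` derives `clap::Parser` with `#[clap(env)]`, so it can be
    // populated from the environment the Testground runner sets up.
    let params = RunParameters::parse();

    println!(
        "group {}: {} instances in group, {} total, sidecar: {}",
        params.test_group_id,
        params.test_group_instance_count,
        params.test_instance_count,
        params.test_sidecar
    );

    // Find this instance's address on the data network; params.rs falls back
    // to the loopback address when no sidecar is present.
    if let Some(ip) = params.data_network_ip()? {
        println!("data network IP: {}", ip);
    }

    // Metrics are expressed with the influxdb types re-exported in src/lib.rs
    // (a real plan would use the current time instead of 0 nanoseconds).
    let _query = WriteQuery::new(Timestamp::Nanoseconds(0), "example")
        .add_field("instances", params.test_instance_count);

    Ok(())
}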