├── .config ├── lingua.dic └── spellcheck.toml ├── .dockerignore ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.yaml │ └── feature_report.yaml └── workflows │ ├── ci.yml │ ├── ci_integration.yml │ ├── documentation.yml │ ├── fileserver.yml │ └── spellcheck.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── adrs └── 001-node-rust-foreign-function-interface │ ├── 001-node-to-rust-foreign-function-interface.md │ ├── boilerplate-app-prototype.md │ ├── napi-prototype.md │ └── wasm-prototype.md ├── crates ├── cli │ ├── Cargo.toml │ └── src │ │ └── main.rs ├── configuration │ ├── .gitignore │ ├── Cargo.toml │ ├── src │ │ ├── global_settings.rs │ │ ├── hrmp_channel.rs │ │ ├── lib.rs │ │ ├── network.rs │ │ ├── parachain.rs │ │ ├── relaychain.rs │ │ ├── shared.rs │ │ ├── shared │ │ │ ├── errors.rs │ │ │ ├── helpers.rs │ │ │ ├── macros.rs │ │ │ ├── node.rs │ │ │ ├── resources.rs │ │ │ └── types.rs │ │ └── utils.rs │ └── testing │ │ └── snapshots │ │ ├── 0000-small-network.toml │ │ ├── 0001-big-network.toml │ │ ├── 0002-overridden-defaults.toml │ │ ├── 0003-small-network_w_parachain.toml │ │ ├── 0004-small-network-without-settings.toml │ │ └── 0005-small-networl-with-wasm-override.toml ├── examples │ ├── Cargo.toml │ └── examples │ │ ├── 0001-simple.toml │ │ ├── add_para.rs │ │ ├── common │ │ └── lib.rs │ │ ├── db_snapshot.rs │ │ ├── para_upgrade.rs │ │ ├── pjs.rs │ │ ├── pjs_transfer.js │ │ ├── register_para.rs │ │ ├── simple_network_example.rs │ │ ├── small_network_config.rs │ │ ├── small_network_with_base_dir.rs │ │ ├── small_network_with_default.rs │ │ ├── small_network_with_para.rs │ │ └── two_paras_same_id.rs ├── file-server │ ├── Cargo.toml │ ├── Dockerfile │ └── src │ │ └── main.rs ├── orchestrator │ ├── .gitignore │ ├── Cargo.toml │ ├── src │ │ ├── errors.rs │ │ ├── generators.rs │ │ ├── generators │ │ │ ├── bootnode_addr.rs │ │ │ ├── chain_spec.rs │ │ │ ├── command.rs │ │ │ ├── errors.rs │ │ │ ├── identity.rs │ │ │ ├── 
key.rs │ │ │ ├── keystore.rs │ │ │ ├── para_artifact.rs │ │ │ └── port.rs │ │ ├── lib.rs │ │ ├── network.rs │ │ ├── network │ │ │ ├── chain_upgrade.rs │ │ │ ├── node.rs │ │ │ ├── parachain.rs │ │ │ └── relaychain.rs │ │ ├── network_helper.rs │ │ ├── network_helper │ │ │ ├── metrics.rs │ │ │ └── verifier.rs │ │ ├── network_spec.rs │ │ ├── network_spec │ │ │ ├── node.rs │ │ │ ├── parachain.rs │ │ │ └── relaychain.rs │ │ ├── pjs_helper.rs │ │ ├── shared.rs │ │ ├── shared │ │ │ ├── constants.rs │ │ │ ├── macros.rs │ │ │ └── types.rs │ │ ├── spawner.rs │ │ ├── tx_helper.rs │ │ └── tx_helper │ │ │ ├── balance.rs │ │ │ ├── client.rs │ │ │ ├── register_para.rs │ │ │ ├── runtime_upgrade.rs │ │ │ └── validator_actions.rs │ └── testing │ │ └── rococo-local-plain.json ├── prom-metrics-parser │ ├── Cargo.toml │ ├── src │ │ ├── grammar.pest │ │ └── lib.rs │ └── testing │ │ └── metrics.txt ├── provider │ ├── .gitignore │ ├── Cargo.toml │ └── src │ │ ├── docker.rs │ │ ├── docker │ │ ├── client.rs │ │ ├── namespace.rs │ │ ├── node.rs │ │ └── provider.rs │ │ ├── kubernetes.rs │ │ ├── kubernetes │ │ ├── client.rs │ │ ├── namespace.rs │ │ ├── node.rs │ │ ├── pod_spec_builder.rs │ │ ├── provider.rs │ │ └── static-configs │ │ │ ├── baseline-resources.yaml │ │ │ └── namespace-network-policy.yaml │ │ ├── lib.rs │ │ ├── native.rs │ │ ├── native │ │ ├── namespace.rs │ │ ├── node.rs │ │ └── provider.rs │ │ ├── shared.rs │ │ └── shared │ │ ├── constants.rs │ │ ├── helpers.rs │ │ ├── scripts │ │ ├── helper-binaries-downloader.sh │ │ └── zombie-wrapper.sh │ │ └── types.rs ├── sdk │ ├── Cargo.toml │ ├── src │ │ ├── environment.rs │ │ └── lib.rs │ └── tests │ │ ├── smoke-native.rs │ │ ├── smoke.rs │ │ └── two-paras-same-id.rs ├── support │ ├── .gitignore │ ├── Cargo.toml │ └── src │ │ ├── constants.rs │ │ ├── fs.rs │ │ ├── fs │ │ ├── in_memory.rs │ │ └── local.rs │ │ ├── lib.rs │ │ ├── net.rs │ │ └── replacer.rs └── test-runner │ ├── .gitignore │ ├── Cargo.toml │ └── src │ └── lib.rs └── 
rustfmt.toml /.config/lingua.dic: -------------------------------------------------------------------------------- 1 | 90 2 | 3 | = 4 | CLI 5 | Deserialization 6 | Deserialized 7 | IFF 8 | IPv4 9 | JSON 10 | NetworkNode 11 | Ok 12 | P2P 13 | PjsResult 14 | PoS 15 | RPC 16 | RUN_IN_CI 17 | SDK 18 | WASM 19 | arg 20 | args 21 | chain_spec_command 22 | cmd 23 | declaratively 24 | deserialize 25 | deserialized 26 | dir 27 | env 28 | fs 29 | invulnerables 30 | ip 31 | js 32 | k8s 33 | msg 34 | multiaddress 35 | natively 36 | ns 37 | p2p 38 | parachaing 39 | pjs_rs 40 | polkadot 41 | polkadot_ 42 | rococo_local_testnet 43 | rpc 44 | serde_json 45 | tgz 46 | tmp 47 | u128 48 | u64 49 | validator 50 | ws 51 | -------------------------------------------------------------------------------- /.config/spellcheck.toml: -------------------------------------------------------------------------------- 1 | [hunspell] 2 | lang = "en_US" 3 | search_dirs = ["."] 4 | extra_dictionaries = ["lingua.dic"] 5 | skip_os_lookups = true 6 | use_builtin = true 7 | 8 | [hunspell.quirks] 9 | # `Type`'s 10 | # 5x 11 | transform_regex = ["^'([^\\s])'$", "^[0-9]+(?:\\.[0-9]*)?x$", "^'s$", "^\\+$", "[><+-]"] 12 | allow_concatenation = true 13 | allow_dashes = true 14 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target 2 | Dockerfile 3 | .dockerignore 4 | .git 5 | .gitignore -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | @pepoviola 2 | @l0r1s -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yaml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: File a bug report 3 | labels: ["triage-needed"] 4 
| body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this bug report! 9 | **NOTE** A number of issues reported against Zombienet are often found to already be fixed in more current versions of the project. 10 | Before reporting an issue, please verify the version you are running with `zombienet version` and compare it to the latest release. 11 | If they differ, please update your version of Zombienet to the latest possible and retry your command before creating an issue. 12 | 13 | 14 | - type: textarea 15 | id: description 16 | attributes: 17 | label: Issue Description 18 | description: Please explain your issue 19 | value: "Describe your issue" 20 | validations: 21 | required: true 22 | 23 | - type: textarea 24 | id: reproducer 25 | attributes: 26 | label: Steps to reproduce the issue 27 | description: Please explain the steps to reproduce the issue, including configuration files needed. 28 | value: "Steps to reproduce the issue\n1.\n2.\n3.\n" 29 | validations: 30 | required: true 31 | 32 | - type: textarea 33 | id: received_results 34 | attributes: 35 | label: Describe the results you received 36 | description: Please explain the results you are noticing, including stacktrace and error logs. 37 | value: "Describe the results you received" 38 | validations: 39 | required: true 40 | 41 | - type: textarea 42 | id: expected_results 43 | attributes: 44 | label: Describe the results you expected 45 | description: Please explain the results you are expecting 46 | value: "Describe the results you expected" 47 | validations: 48 | required: true 49 | 50 | - type: input 51 | id: zombienet_version 52 | attributes: 53 | label: Zombienet version 54 | description: Which zombienet version are you using ? 55 | validations: 56 | required: true 57 | 58 | - type: dropdown 59 | id: provider 60 | attributes: 61 | label: Provider 62 | description: Which provider are you using ? 
63 | options: 64 | - Native 65 | - Kubernetes 66 | - Podman 67 | validations: 68 | required: true 69 | 70 | - type: textarea 71 | id: provider_version 72 | attributes: 73 | label: Provider version 74 | description: Which provider version / binaries versions are you using ? 75 | value: | 76 | ## For binaries 77 | polkadot 0.9.40-a2b62fb872b 78 | polkadot-parachain 0.9.380-fe24f39507f 79 | 80 | ## For Kubernetes/Podman 81 | podman version 4.4.1 82 | 83 | OR 84 | 85 | kubectl version v0.26.3 86 | cluster version 1.25.2 87 | render: yaml 88 | validations: 89 | required: true 90 | 91 | - type: dropdown 92 | id: upstream_latest 93 | attributes: 94 | label: Upstream Latest Release 95 | description: Have you tried running the [latest upstream release](https://github.com/paritytech/zombienet/releases/latest) 96 | options: 97 | - 'Yes' 98 | - 'No' 99 | validations: 100 | required: true 101 | 102 | - type: textarea 103 | id: additional_environment 104 | attributes: 105 | label: Additional environment details 106 | description: Please describe any additional environment details like (Cloud, Local, OS, Provider versions...) 
107 | value: "Additional environment details" 108 | 109 | - type: textarea 110 | id: additional_info 111 | attributes: 112 | label: Additional information 113 | description: Please explain the additional information you deem important 114 | value: "Additional information like issue happens only occasionally or issue happens with a particular architecture or on a particular setting" 115 | validations: 116 | required: false 117 | 118 | - type: textarea 119 | id: screenshots 120 | attributes: 121 | label: Screenshots 122 | description: Provide us with screenshots if needed to have a better understanding of the issue 123 | validations: 124 | required: false -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_report.yaml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: File a feature request 3 | labels: ["triage-needed"] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this feature report! 9 | Please make sure to describe your feature and the problem it would solve. 10 | 11 | 12 | - type: textarea 13 | id: description 14 | attributes: 15 | label: Is your feature request related to a problem? Please describe. 16 | description: A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 17 | value: "Describe the feature" 18 | validations: 19 | required: true 20 | 21 | - type: textarea 22 | id: solution 23 | attributes: 24 | label: Describe the solution you'd like 25 | description: A clear and concise description of what you want to happen.. 26 | validations: 27 | required: true 28 | 29 | - type: textarea 30 | id: alt_solution 31 | attributes: 32 | label: Describe alternatives you've considered 33 | description: A clear and concise description of any alternative solutions or features you've considered. 
34 | validations: 35 | required: false 36 | 37 | - type: textarea 38 | id: additional_context 39 | attributes: 40 | label: Additional context 41 | description: Add any other context or screenshots about the feature request here. 42 | validations: 43 | required: false -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Cargo Build & Test 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | RUSTFLAGS: "-Dwarnings" 12 | 13 | jobs: 14 | build: 15 | name: Zombienet SDK - latest 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | toolchain: 20 | - stable 21 | # TODO 24-02-08: Disable nightly due to tkaitchuck/aHash#200. 22 | #- nightly 23 | steps: 24 | # https://github.com/jlumbroso/free-disk-space 25 | - name: Free Disk Space (Ubuntu) 26 | uses: jlumbroso/free-disk-space@main 27 | with: 28 | tool-cache: false 29 | 30 | - uses: actions/checkout@v3 31 | 32 | - name: Init nightly install for fmt 33 | run: rustup update nightly && rustup default nightly && rustup component add rustfmt 34 | 35 | - name: Check format 36 | run: cargo +nightly fmt --check --all 37 | 38 | - name: Init install 39 | run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} && rustup component add clippy 40 | 41 | - name: Fetch cache 42 | uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 43 | with: 44 | shared-key: "zombie-cache" 45 | 46 | - name: Clippy 47 | # disable needless_lifetimes until we align the version with polkadot-sdk 48 | run: cargo clippy --all-targets --all-features -- -A clippy::needless_lifetimes 49 | 50 | - name: Build 51 | run: cargo build 52 | 53 | - name: Tests 54 | run: cargo test --workspace -- --skip ci_k8s 55 | 56 | coverage: 57 | name: Zombienet SDK - coverage 58 | needs: build 59 | 
runs-on: ubuntu-20.04 60 | if: github.event_name == 'pull_request' 61 | 62 | permissions: 63 | issues: write 64 | pull-requests: write 65 | 66 | steps: 67 | - uses: actions/checkout@v3 68 | 69 | # https://github.com/jlumbroso/free-disk-space 70 | - name: Free Disk Space (Ubuntu) 71 | uses: jlumbroso/free-disk-space@main 72 | with: 73 | tool-cache: false 74 | 75 | - name: Fetch cache 76 | uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 77 | with: 78 | shared-key: "zombie-cache" 79 | 80 | - name: Install latest nextest release 81 | uses: taiki-e/install-action@nextest 82 | 83 | - name: Install cargo-llvm-cov 84 | uses: taiki-e/install-action@cargo-llvm-cov 85 | 86 | - name: Collect coverage data 87 | run: cargo llvm-cov nextest --workspace --exclude zombienet-sdk --test-threads 1 --lcov --output-path lcov.info 88 | 89 | - name: Report code coverage 90 | uses: Nef10/lcov-reporter-action@v0.4.0 91 | with: 92 | lcov-file: lcov.info 93 | pr-number: ${{ github.event.pull_request.number }} 94 | -------------------------------------------------------------------------------- /.github/workflows/ci_integration.yml: -------------------------------------------------------------------------------- 1 | name: Integration test 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 11 | cancel-in-progress: true 12 | 13 | env: 14 | RUN_IN_CONTAINER: 1 15 | FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 16 | GHA_CLUSTER_SERVER_ADDR: "https://kubernetes.default:443" 17 | CARGO_TERM_COLOR: always 18 | RUSTFLAGS: "-Dwarnings" 19 | BASE_IMAGE: docker.io/paritytech/ci-unified:bullseye-1.84.1-2025-01-28-v202502131220 20 | RUN_IN_CI: "1" 21 | RUST_LOG: "zombienet_orchestrator=debug,zombienet_provider=debug" 22 | CARGO_TARGET_DIR: /tmp/target 23 | 24 | jobs: 25 | build-tests: 26 | runs-on: parity-large 27 | timeout-minutes: 60 28 | 
container: 29 | image: docker.io/paritytech/ci-unified:bullseye-1.84.1-2025-01-28-v202502131220 30 | steps: 31 | - name: Checkout 32 | uses: actions/checkout@v4 33 | 34 | - uses: Swatinem/rust-cache@v2 35 | with: 36 | cache-on-failure: true 37 | 38 | - name: Build tests 39 | run: | 40 | cargo build --tests --keep-going --locked 41 | mkdir -p artifacts 42 | cd artifacts 43 | find /tmp/target/debug/deps/ -maxdepth 1 -name "smoke-*" ! -name "*.d" -exec mv {} $(pwd)/smoke \; 44 | find /tmp/target/debug/deps/ -maxdepth 1 -name "smoke_native-*" ! -name "*.d" -exec mv {} $(pwd)/smoke_native \; 45 | cd .. 46 | tar cvfz artifacts.tar.gz artifacts 47 | 48 | - name: Upload artifacts 49 | uses: actions/upload-artifact@v4 50 | with: 51 | name: zombienet-tests-${{ github.sha }} 52 | path: artifacts.tar.gz 53 | 54 | k8s-integration-test-smoke: 55 | runs-on: parity-zombienet 56 | needs: build-tests 57 | timeout-minutes: 60 58 | container: 59 | image: docker.io/paritytech/ci-unified:bullseye-1.84.1-2025-01-28-v202502131220 60 | steps: 61 | - name: Download artifacts 62 | uses: actions/download-artifact@v4 63 | with: 64 | name: zombienet-tests-${{ github.sha }} 65 | path: . 66 | 67 | - name: script 68 | run: | 69 | export ZOMBIE_K8S_CI_NAMESPACE=$(cat /data/namespace) 70 | export ZOMBIE_PROVIDER="k8s" 71 | mv artifacts.tar.gz /tmp 72 | cd /tmp 73 | ls -la 74 | tar xvfz artifacts.tar.gz 75 | ./artifacts/smoke --nocapture 76 | # for running local 77 | # cargo test --test smoke -- --nocapture 78 | 79 | - name: upload logs 80 | uses: actions/upload-artifact@v4 81 | with: 82 | name: zombienet-logs-${{ github.job }}-${{ github.sha }} 83 | path: | 84 | /tmp/zombie*/logs/* 85 | # 86 | # 87 | native-integration-test-smoke: 88 | runs-on: parity-default 89 | needs: build-tests 90 | timeout-minutes: 60 91 | container: 92 | image: docker.io/paritytech/ci-unified:bullseye-1.84.1-2025-01-28-v202502131220 93 | steps: 94 | - name: Download artifacts 95 | uses: 
actions/download-artifact@v4 96 | with: 97 | name: zombienet-tests-${{ github.sha }} 98 | path: . 99 | 100 | - name: Download bins 101 | shell: bash 102 | run: | 103 | for bin in polkadot polkadot-execute-worker polkadot-prepare-worker; do 104 | echo "downloading $bin"; 105 | curl -L -o /tmp/$bin https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-stable2503-1/$bin; 106 | chmod 755 /tmp/$bin; 107 | done 108 | ls -ltr /tmp 109 | export PATH=/tmp:$PATH 110 | echo $PATH 111 | 112 | - name: script 113 | run: | 114 | export PATH=/tmp:$PATH 115 | echo $PATH 116 | mv artifacts.tar.gz /tmp 117 | cd /tmp 118 | ls -la 119 | tar xvfz artifacts.tar.gz 120 | export ZOMBIE_PROVIDER="native" 121 | ./artifacts/smoke_native --nocapture 122 | # cargo test --test smoke-native -- --nocapture 123 | 124 | - name: upload logs 125 | uses: actions/upload-artifact@v4 126 | with: 127 | name: zombienet-logs-${{ github.job }}-${{ github.sha }} 128 | path: | 129 | /tmp/zombie*/logs/* 130 | -------------------------------------------------------------------------------- /.github/workflows/documentation.yml: -------------------------------------------------------------------------------- 1 | name: Cargo Create Docs 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | RUSTFLAGS: "-Dwarnings" 12 | 13 | jobs: 14 | build-rust-doc: 15 | name: Zombienet SDK - Rust Docs 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | toolchain: 20 | # TODO 24-02-08: Disable nightly due to tkaitchuck/aHash#200. 
21 | #- nightly 22 | - stable 23 | steps: 24 | - uses: actions/checkout@v3 25 | 26 | - name: Init nightly install for fmt 27 | run: rustup update nightly && rustup default nightly && rustup component add rustfmt 28 | 29 | - name: Check format 30 | run: cargo +nightly fmt --check --all 31 | 32 | - name: Init install 33 | run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} && rustup component add clippy 34 | 35 | - name: Create docs 36 | run: | 37 | cargo doc --no-deps 38 | echo "" > target/doc/index.html 39 | 40 | 41 | 42 | - name: Move docs 43 | run: | 44 | mkdir -p ./doc 45 | mv ./target/doc/* ./doc 46 | git config user.email "github-action@users.noreply.github.com" 47 | git config user.name "GitHub Action" 48 | git config user.password "${{ secrets.GH_PAGES_TOKEN }}" 49 | git checkout --orphan gh-pages 50 | mkdir to_delete 51 | shopt -s extglob 52 | mv !(to_delete) ./to_delete 53 | mv ./to_delete/doc/* . 54 | rm -rf ./to_delete 55 | git add --all 56 | git commit -m "Documentation" 57 | shell: bash # Necessary for `shopt` to work 58 | - run: git push -f origin gh-pages:gh-pages 59 | if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} 60 | -------------------------------------------------------------------------------- /.github/workflows/fileserver.yml: -------------------------------------------------------------------------------- 1 | name: File server build & image publish 2 | run-name: Deploy file server ${{ github.ref }} 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | paths: 9 | - "Cargo.toml" 10 | - "crates/file-server/**" 11 | workflow_dispatch: {} 12 | 13 | env: 14 | PROJECT_ID: "parity-zombienet" 15 | GCR_REGISTRY: "europe-west3-docker.pkg.dev" 16 | GCR_REPOSITORY: "zombienet-public-images" 17 | 18 | jobs: 19 | build_and_push: 20 | runs-on: ubuntu-latest 21 | 22 | steps: 23 | - name: Checkout code 24 | uses: actions/checkout@v4 25 | 26 | - name: Setup gcloud CLI 27 | uses: 
google-github-actions/setup-gcloud@v2.0.1 28 | with: 29 | service_account_key: ${{ secrets.GCP_SA_KEY }} 30 | project_id: ${{ env.PROJECT_ID }} 31 | export_default_credentials: true 32 | 33 | - name: Login to GCP 34 | uses: google-github-actions/auth@v2.0.1 35 | with: 36 | credentials_json: ${{ secrets.GCP_SA_KEY }} 37 | 38 | - name: Artifact registry authentication 39 | run: | 40 | gcloud auth configure-docker ${{ env.GCR_REGISTRY }} 41 | 42 | - name: Build, tag, and push image to GCP Artifact registry 43 | id: build-image 44 | env: 45 | IMAGE: "${{ env.GCR_REGISTRY }}/${{ env.PROJECT_ID }}/${{ env.GCR_REPOSITORY }}/zombienet-file-server" 46 | 47 | run: | 48 | docker build -t $IMAGE:${{ github.sha }} -f ./crates/file-server/Dockerfile . 49 | docker tag $IMAGE:${{ github.sha }} $IMAGE:latest 50 | docker push --all-tags $IMAGE 51 | echo "image=$IMAGE:${{ github.sha }}" >> $GITHUB_OUTPUT 52 | echo "image=$IMAGE:latest" >> $GITHUB_OUTPUT -------------------------------------------------------------------------------- /.github/workflows/spellcheck.yml: -------------------------------------------------------------------------------- 1 | name: Spellcheck 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | spellcheck: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v3 18 | 19 | - name: Install Rust 20 | uses: actions-rs/toolchain@v1 21 | with: 22 | toolchain: stable 23 | override: true 24 | 25 | - name: Install cargo-spellcheck 26 | run: | 27 | sudo apt-get install libclang-dev 28 | export LIBCLANG_PATH=/usr/lib/llvm-18/lib/ 29 | cargo install cargo-spellcheck 30 | 31 | - name: Run cargo-spellcheck 32 | run: cargo spellcheck 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files 
and executables 3 | debug/ 4 | target/ 5 | 6 | # These are backup files generated by rustfmt 7 | **/*.rs.bk 8 | 9 | # MSVC Windows builds of rustc generate these, which store debugging information 10 | *.pdb 11 | 12 | node_modules 13 | dist 14 | log.md 15 | .env 16 | bins 17 | .DS_Store 18 | **/target/ 19 | *.swp 20 | .vscode 21 | 22 | # nix 23 | result 24 | 25 | # docs 26 | docs 27 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = [ 4 | "crates/sdk", 5 | "crates/examples", 6 | "crates/support", 7 | "crates/configuration", 8 | "crates/orchestrator", 9 | "crates/provider", 10 | #"crates/test-runner", 11 | "crates/prom-metrics-parser", 12 | "crates/file-server", 13 | "crates/cli", 14 | ] 15 | 16 | [workspace.package] 17 | authors = ["Parity Technologies "] 18 | edition = "2021" 19 | version = "0.3.5" 20 | rust-version = "1.70.0" 21 | license = "Apache-2.0 OR GPL-3.0" 22 | repository = "https://github.com/paritytech/zombienet-sdk" 23 | 24 | [workspace.dependencies] 25 | thiserror = "1.0" 26 | anyhow = "1.0" 27 | async-trait = "0.1" 28 | futures = "0.3" 29 | serde = { version = "1.0", features = ["derive"] } 30 | serde_json = "1.0" 31 | serde_yaml = "0.9" 32 | toml = "0.8.19" 33 | tokio = "1.28" 34 | tokio-util = "0.7" 35 | reqwest = "0.12.9" 36 | regex = "1.8" 37 | lazy_static = "1.4" 38 | multiaddr = "0.18" 39 | url = "2.3" 40 | uuid = "1.4" 41 | nix = "0.29.0" 42 | pest = "2.7" 43 | pest_derive = "2.7" 44 | rand = "0.8" 45 | sha2 = { version = "0.10.2", default-features = false } 46 | hex = "0.4" 47 | sp-core = "35.0.0" 48 | libp2p = "0.54.1" 49 | subxt = "0.38" 50 | subxt-signer = { version = "0.38", features = ["subxt"] } 51 | tracing = "0.1.35" 52 | kube = "0.87.1" 53 | k8s-openapi = "0.20.0" 54 | tar = "0.4" 55 | axum = "0.7" 56 | axum-extra = "0.9" 57 | tower = "0.4" 58 | tower-http = 
"0.5" 59 | tracing-subscriber = "0.3" 60 | glob-match = "0.2.1" 61 | libsecp256k1 = { version = "0.7.1", default-features = false } 62 | pjs-rs = "0.1.4" 63 | flate2 = "1.0" 64 | 65 | # Zombienet workspace crates: 66 | support = { package = "zombienet-support", version = "0.3.5", path = "crates/support" } 67 | configuration = { package = "zombienet-configuration", version = "0.3.5", path = "crates/configuration" } 68 | orchestrator = { package = "zombienet-orchestrator", version = "0.3.5", path = "crates/orchestrator" } 69 | provider = { package = "zombienet-provider", version = "0.3.5", path = "crates/provider" } 70 | prom-metrics-parser = { package = "zombienet-prom-metrics-parser", version = "0.3.5", path = "crates/prom-metrics-parser" } 71 | zombienet-sdk = { version = "0.3.5", path = "crates/sdk" } 72 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🚧⚠️ [WIP] ZombieNet SDK ⚠️🚧 2 | 3 | 4 | [Rust Docs](https://paritytech.github.io/zombienet-sdk) 5 | 6 | # The Vision 7 | 8 | This issue will track the progress of the new ZombieNet SDK. 9 | 10 | We want to create a new SDK for `ZombieNet` that allow users to build more complex use cases and interact with the network in a more flexible and programatic way. 11 | The SDK will provide a set of `building blocks` that users can combine in order to spawn and interact (test/query/etc) with the network providing a *fluent* api to craft different topologies and assertions to the running network. The new `SDK` will support the same range of `providers` and configurations that can be created in the current version (v1). 12 | 13 | We also want to continue supporting the `CLI` interface *but* should be updated to use the `SDK` under the hood. 14 | 15 | # The Plan 16 | 17 | We plan to divide the work phases to. 
ensure we cover all the requirement and inside each phase in small tasks, covering one of the building blocks and the interaction between them. 18 | 19 | ## Prototype building blocks 20 | 21 | Prototype each building block with a clear interface and how to interact with it 22 | - [Building block Network #2](https://github.com/paritytech/zombienet-sdk/issues/2) 23 | - [Building block Node #3](https://github.com/paritytech/zombienet-sdk/issues/3) 24 | - [Building block NodeGroup #4](https://github.com/paritytech/zombienet-sdk/issues/4) 25 | - [Building block Parachain #5](https://github.com/paritytech/zombienet-sdk/issues/5) 26 | - [Building block Collator #6](https://github.com/paritytech/zombienet-sdk/issues/6) 27 | - [Building block CollatorGroup #7](https://github.com/paritytech/zombienet-sdk/issues/7) 28 | - [Building block Assertion #8](https://github.com/paritytech/zombienet-sdk/issues/8) 29 | 30 | ## Integrate, test interactions and document 31 | 32 | We want to integrate the interactions for all building blocks and document the way that they work together. 33 | 34 | - [Spawning Integration #9](https://github.com/paritytech/zombienet-sdk/issues/9) 35 | - [Assertion Integration #10](https://github.com/paritytech/zombienet-sdk/issues/10) 36 | - [Documentation #11](https://github.com/paritytech/zombienet-sdk/issues/11) 37 | 38 | ## Refactor `CLI` and ensure backwards compatibility 39 | 40 | Refactor the `CLI` module to use the new `SDK` under the hood. 
41 | 42 | - [Refactor CLI #12](https://github.com/paritytech/zombienet-sdk/issues/12) 43 | - [Ensure that spawning from toml works #13](https://github.com/paritytech/zombienet-sdk/issues/13) 44 | - [Ensure that test-runner from DSL works #14](https://github.com/paritytech/zombienet-sdk/issues/14) 45 | 46 | ## ROADMAP 47 | 48 | ## Infra 49 | - Chaos testing, add examples and explore possibilities in `native` and `podman` provider 50 | - Add `docker` provider 51 | - Add `nomad` provider 52 | - Create [helm chart](https://helm.sh/docs/topics/charts/) to allow others to use zombienet in k8s 53 | - Auth system to not use k8s users 54 | - Create GitHub Action and publish in NPM marketplace (Completed) 55 | - Rename `@paritytech/zombienet` npm package to `zombienet`. Keep all zombienet modules under `@zombienet/*` org (Completed) 56 | 57 | ## Internal teams 58 | - Add more teams (wip) 59 | 60 | ## Registry 61 | - Create decorators registry and allow override by paras (wip) 62 | - Explore how to get info from paras. 63 | 64 | ## Functional tasks 65 | - Add subxt integration, allow to compile/run on the fly 66 | - Move parser to pest (wip) 67 | - Detach phases and use JSON to communicate instead of `paths` 68 | - Add relative values assertions (for metrics/scripts) 69 | - Allow to define nodes that are not started in the launching phase and can be started by the test-runner 70 | - Allow to define `race` assertions 71 | - Rust integration -> Create multiple libs (crates) 72 | - Explore backchannel use case 73 | - Add support to run tests against a running network (wip) 74 | - Add more CLI subcommands 75 | - Add js/subxt snippets ready to use in assertions (e.g transfers) 76 | - Add XCM support in built-in assertions 77 | - Add `ink! 
smart contract` support 78 | - Add support to start from a live network (fork-off) [check subalfred] 79 | - Create "default configuration" - (if `zombieconfig.json` exists in same dir with zombienet then the config applied in it will override the default configuration of zombienet. E.G if user wants to have as default `native` instead of `k8s` he can add to 80 | 81 | ## UI 82 | - Create UI to create `.zndls` and `network` files. 83 | - Improve VSCode extension (grammar/snippets/syntax highlighting/file validations) ([repo](https://github.com/paritytech/zombienet-vscode-extension)) 84 | - Create UI app (desktop) to run zombienet without the need of terminal. 85 | -------------------------------------------------------------------------------- /adrs/001-node-rust-foreign-function-interface/001-node-to-rust-foreign-function-interface.md: -------------------------------------------------------------------------------- 1 | # Mechanism to call Rust code from Javascript/Typescript 2 | 3 | ### Status: proposed | rejected | **accepted** | deprecated 4 | 5 | ### Deciders: [@pepoviola](https://github.com/pepoviola) [@wirednkod](https://github.com/wirednkod) [@l0r1s](https://github.com/l0r1s) 6 | 7 | ### Creation date: 18/05/2023 8 | 9 | ### Update date: - 10 | 11 | --- 12 | 13 | ## Context and Problem Statement 14 | 15 | The `zombienet-sdk` will be developed in Rust. Our objective is make it easily integrable into existing Typescript/Javascript project. To achieve this goal, we need to find a way to call the Rust code from a Javascript/Typescript program. 16 | 17 | Many mechanisms exists for this purpose like Wasm or N(ode)-API but some may or may not fit our use case, for example, executing async code. 18 | 19 | --- 20 | 21 | ## Decision drivers 22 | 23 | - We can use the standard library (for filesystem or networking in providers). 
24 | 25 | - We can execute asynchronous code: our goal is not to make the program fully sequential as many operations (e.g: bootstrapping the relaychain nodes) can be done concurrently. 26 | 27 | - Easy to package and deploy 28 | 29 | --- 30 | 31 | ## Considered Options 32 | 33 | - #### WASM 34 | 35 | - [wasm-pack](https://github.com/neon-bindings/neon) 36 | 37 | - #### Native node modules (Node-API / V8 / libuv) 38 | - [napi-rs](https://github.com/napi-rs/napi-rs) 39 | 40 | --- 41 | 42 | ## Prototyping 43 | 44 | To demonstrate and learn which options fit the best for our use case, we will create a small test program which will have the following functionalities: 45 | 46 | - Has a function taking an arbitratry object and a callback as parameters in the Typescript code, calling the callback with the function result on Rust side. 47 | - Has a function taking an arbitrary object as parameter and a returning a promise in Typescript, signaling an asynchronous operation on Rust side. 48 | - Make an HTTP request asynchronously in the Rust code, using a dependency using the standard library. 49 | 50 | The prototype assume versions of `rustc` and `cargo` to be `1.69.0`, use of `stable` channel and `Linux` on `amd64` architecture. 51 | 52 | 53 | - ### [Boilerplate app to execute prototype](boilerplate-app-prototype.md) 54 | 55 | - ### [Wasm-pack prototype](wasm-prototype.md) 56 | 57 | - ### [Napi-rs prototype](napi-prototype.md) 58 | 59 | --- 60 | 61 | ## Pros and cons of each options 62 | 63 | - ### Napi-rs 64 | - Pros 👍 65 | - Support many types correctly including typed callback, typed array, class and all JS primitives types (Null, Undefined, Numbers, String, BigInt, ...) 66 | 67 | - Support top level async function because it detects if it needs to be run inside an async runtime (tokio by default) 68 | 69 | - Standard library can be used without limitations, including threading, networking, etc... 
70 | 71 | - Extremely well documented with examples 72 | 73 | - Provide full Github action pipeline template to compile on all architecture easily 74 | 75 | - Support complex use cases 76 | 77 | - Used by many big names (Prisma, Parcel, Tailwind, Next.js, Bitwarden) 78 | 79 | - Cons 👎 80 | - Node-API is not simple for complex use case 81 | 82 | - Bound to NodeJS, if we want to expose the same logic to others languages (Go, C++, Python, ...) we need to wrap the Rust code inside a dynamic library and adapt to others languages primitives by creating a small adapter over the library 83 | 84 | - Not universally compiled 85 | 86 | 87 | - ### Wasm-pack 88 | - Pros 👍 89 | - Rich ecosystem and developing fast 90 | 91 | - Used in many places across web, backend (Docker supports WASM) 92 | 93 | - Easy to use and distribute 94 | 95 | - Universally compiled and used across languages (if they support WASM execution) 96 | 97 | - Good for simple use case where you do pure function (taking input, returning output, without side effects like writing to filesystem or making networking calls) 98 | 99 | - Cons 👎 100 | - Limited in the use of the standard library, can't access networking/filesystem primitives without having to use WASI which is inconsistent across languages/runtimes 101 | 102 | - Only support 32 bits 103 | 104 | - No support for concurrent programming (async/threads), even if we can returns Promise from WASM exposed functions but could see the light in few months (maybe?) 
105 | 106 | - wasm-bindgen types are too generic, for example, we return a JsValue but we would like to be more specific for the type 107 | 108 | ## Decision outcome 109 | 110 | - ### **Napi-rs** for crates dependant on async, filesystem or networking: *support*, *orchestrator*, *test-runner*, *providers* from [schema](https://github.com/paritytech/zombienet-sdk/issues/22) 111 | 112 | - ### **Wasm-pack** for the rest of the crates: *configuration* from [schema](https://github.com/paritytech/zombienet-sdk/issues/22) -------------------------------------------------------------------------------- /adrs/001-node-rust-foreign-function-interface/boilerplate-app-prototype.md: -------------------------------------------------------------------------------- 1 | ## [Back](001-node-to-rust-foreign-function-interface.md) 2 | 3 | ## Boilerplate app to execute prototypes 4 | 5 | 1. Create the new node app : 6 | 7 | ```bash 8 | $ mkdir -p ffi-prototype/app && cd ffi-prototype/app && npm init -y 9 | ``` 10 | 11 | 2. Install required packages : 12 | 13 | ```bash 14 | [ffi-prototype/app]$ npm i -D @tsconfig/recommended ts-node typescript 15 | ``` 16 | 17 | 3. Add a new script : 18 | 19 | ```json 20 | { 21 | "scripts": { 22 | "build+exec": "tsc && node ./index.js" 23 | } 24 | } 25 | ``` 26 | 27 | 4. Add tsconfig.json 28 | ```json 29 | { 30 | "extends": "@tsconfig/recommended/tsconfig.json" 31 | } 32 | ``` -------------------------------------------------------------------------------- /adrs/001-node-rust-foreign-function-interface/napi-prototype.md: -------------------------------------------------------------------------------- 1 | ## [Back](001-node-to-rust-foreign-function-interface.md) 2 | 3 | ## Napi-rs prototype 4 | ___ 5 | 6 | 1. Install the napi CLI 7 | 8 | ```bash 9 | [ffi-prototype]$ npm install -g @napi-rs/cli 10 | ``` 11 | 12 | 2. Create a new napi project 13 | 14 | ```bash 15 | [ffi-prototype]$ napi new napi-prototype 16 | ``` 17 | 18 | 3. 
Install cargo dependencies 19 | 20 | ```bash 21 | [ffi-prototype/napi-prototype]$ cargo add tokio --features full 22 | [ffi-prototype/napi-prototype]$ cargo add reqwest --features blocking 23 | [ffi-prototype/napi-prototype]$ cargo add napi --no-default-features --features napi4,async 24 | ``` 25 | 26 | 4. Copy the following code to `napi-prototype/src/lib.rs` 27 | 28 | ```rust 29 | #![deny(clippy::all)] 30 | 31 | use std::thread; 32 | 33 | use napi::{ 34 | bindgen_prelude::*, 35 | threadsafe_function::{ 36 | ErrorStrategy, ThreadSafeCallContext, ThreadsafeFunction, ThreadsafeFunctionCallMode, 37 | }, 38 | }; 39 | use reqwest; 40 | 41 | #[macro_use] 42 | extern crate napi_derive; 43 | 44 | // native async with tokio is supported without annotating a main function 45 | #[napi] 46 | pub async fn fetch_promise() -> Result { 47 | let body = reqwest::get("https://paritytech.github.io/zombienet/") 48 | .await 49 | .map_err(|_| napi::Error::from_reason("Error while fetching page"))? 50 | .text() 51 | .await 52 | .map_err(|_| napi::Error::from_reason("Error while extracting body"))?; 53 | 54 | Ok(body) 55 | } 56 | 57 | #[napi] 58 | pub fn fetch_callback(callback: JsFunction) -> Result<()> { 59 | // createa thread safe callback from the JsFunction 60 | let thread_safe_callback: ThreadsafeFunction = callback 61 | .create_threadsafe_function(0, |ctx: ThreadSafeCallContext| { 62 | ctx.env.create_string(&ctx.value).map(|s| vec![s]) 63 | })?; 64 | 65 | // spawn a thread to execute our logic 66 | thread::spawn(move || { 67 | let response = reqwest::blocking::get("https://paritytech.github.io/zombienet/"); 68 | 69 | if response.is_err() { 70 | let response = response 71 | .map(|_| "".into()) 72 | .map_err(|_| napi::Error::from_reason("Error while fetching page")); 73 | 74 | // error are returned by calling the callback with an empty response and the error mapped 75 | return thread_safe_callback.call(response, ThreadsafeFunctionCallMode::Blocking); 76 | } 77 | 78 | let body = 
response.unwrap().text(); 79 | 80 | if body.is_err() { 81 | let body = body 82 | .map(|_| "".into()) 83 | .map_err(|_| napi::Error::from_reason("Error while extracting body")); 84 | 85 | return thread_safe_callback.call(body, ThreadsafeFunctionCallMode::Blocking); 86 | } 87 | 88 | // result is returned as a string 89 | thread_safe_callback.call(Ok(body.unwrap()), ThreadsafeFunctionCallMode::Blocking) 90 | }); 91 | 92 | Ok(()) 93 | } 94 | ``` 95 | 96 | 5. Build the project : 97 | ```bash 98 | [ffi-prototype/napi-prototype]$ npm run build 99 | ``` 100 | 101 | 6. Copy artifacts : 102 | ```bash 103 | [ffi-prorotype/napi-prototype]$ mv napi-prototype.linux-x64-gnu.node index.d.ts index.js npm/linux-x64-gnu 104 | ``` 105 | 106 | 7. Install package in ```ffi-prototype/app``` : 107 | ```bash 108 | [ffi-prototype/app]$ npm i ../napi-prototype/npm/linux-x64-gnu/ 109 | ``` 110 | 111 | 8. Copy the following code to the ```ffi-prototype/app/index.ts``` file : 112 | 113 | ```ts 114 | import { fetchCallback, fetchPromise } from "napi-prototype-linux-x64-gnu"; 115 | 116 | (async () => { 117 | fetchCallback((_err: any, result: string) => { 118 | console.log(`HTTP request through FFI with callback: ${result.length}`); 119 | }); 120 | 121 | console.log( 122 | `HTTP request through FFI with promise ${(await fetchPromise()).length}` 123 | ); 124 | })(); 125 | ``` 126 | 127 | 9. Build and execute the app : 128 | 129 | ```bash 130 | [ffi-prototype/app]$ npm run build+exec 131 | ``` 132 | 133 | Expected output: 134 | ```tty 135 | > app@1.0.0 build+exec 136 | > tsc && node ./index.js 137 | 138 | HTTP request through FFI with promise 12057 139 | HTTP request through FFI with callback: 12057 140 | ``` 141 | 142 | That's it ! 
-------------------------------------------------------------------------------- /adrs/001-node-rust-foreign-function-interface/wasm-prototype.md: -------------------------------------------------------------------------------- 1 | ## [Back](001-node-to-rust-foreign-function-interface.md) 2 | 3 | ## Wasm-pack prototype 4 | ___ 5 | 6 | 1. Install the wasm-pack CLI 7 | 8 | ```bash 9 | curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh 10 | ``` 11 | 12 | 2. Create a new wasm-pack project 13 | 14 | ```bash 15 | [ffi-prototype]$ wasm-pack new wasm-prototype 16 | ``` 17 | 18 | 3. Install cargo dependencies 19 | ```bash 20 | [ffi-prototype/wasm-prototype]$ cargo add tokio --features full 21 | [ffi-prototype/wasm-prototype]$ cargo add reqwest --features blocking 22 | [ffi-prototype/wasm-prototype]$ cargo add wasm-bindgen-futures 23 | cargo add js-sys 24 | ``` 25 | 26 | 4. Copy the following code to `wasm-prototype/src/lib.rs` 27 | ```rust 28 | mod utils; 29 | 30 | use wasm_bindgen::prelude::*; 31 | 32 | // When the `wee_alloc` feature is enabled, use `wee_alloc` as the global 33 | // allocator. 34 | #[cfg(feature = "wee_alloc")] 35 | #[global_allocator] 36 | static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; 37 | 38 | #[wasm_bindgen] 39 | pub async fn fetch_promise() -> Result { 40 | let body = reqwest::get("https://paritytech.github.io/zombienet/") 41 | .await 42 | .map_err(|_| JsError::new("Error while fetching page"))? 
43 | .text() 44 | .await 45 | .map_err(|_| JsError::new("Error while extracting body"))?; 46 | 47 | Ok(body) 48 | } 49 | 50 | #[wasm_bindgen] 51 | pub fn fetch_callback(callback: &js_sys::Function) -> Result { 52 | let this = JsValue::null(); 53 | 54 | let response = reqwest::blocking::get("https://paritytech.github.io/zombienet/"); 55 | 56 | if response.is_err() { 57 | return callback.call2( 58 | &this, 59 | &JsError::new("Error while fetching page").into(), 60 | &JsValue::null(), 61 | ); 62 | } 63 | 64 | let body = response.unwrap().text(); 65 | 66 | if body.is_err() { 67 | return callback.call2( 68 | &this, 69 | &JsError::new("Error while extracting body").into(), 70 | &JsValue::null(), 71 | ); 72 | } 73 | 74 | Ok(body.unwrap().into()) 75 | } 76 | ``` 77 | 78 | 5. Build the project : 79 | ```bash 80 | [ffi-prototype/wasm-prototype]$ wasm-pack build -t nodejs 81 | ``` 82 | 83 | Errors are shown, this is expected because WASM doesn't support networking primitives, 84 | as you can see, we removed the thread call from the fetch_callback function because ```JsValue``` 85 | is using *const u8 under the hood and it's not ```Send``` so can't be passed safely across threads: 86 | 87 | ```bash 88 | [INFO]: 🎯 Checking for the Wasm target... 89 | [INFO]: 🌀 Compiling to Wasm... 
90 | Compiling mio v0.8.6 91 | Compiling parking_lot v0.12.1 92 | Compiling serde_json v1.0.96 93 | Compiling url v2.3.1 94 | error[E0432]: unresolved import `crate::sys::IoSourceState` 95 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/io_source.rs:12:5 96 | | 97 | 12 | use crate::sys::IoSourceState; 98 | | ^^^^^^^^^^^^^^^^^^^^^^^^^ no `IoSourceState` in `sys` 99 | 100 | error[E0432]: unresolved import `crate::sys::tcp` 101 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/net/tcp/listener.rs:15:17 102 | | 103 | 15 | use crate::sys::tcp::{bind, listen, new_for_addr}; 104 | | ^^^ could not find `tcp` in `sys` 105 | 106 | error[E0432]: unresolved import `crate::sys::tcp` 107 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/net/tcp/stream.rs:13:17 108 | | 109 | 13 | use crate::sys::tcp::{connect, new_for_addr}; 110 | | ^^^ could not find `tcp` in `sys` 111 | 112 | error[E0433]: failed to resolve: could not find `Selector` in `sys` 113 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/poll.rs:301:18 114 | | 115 | 301 | sys::Selector::new().map(|selector| Poll { 116 | | ^^^^^^^^ could not find `Selector` in `sys` 117 | 118 | error[E0433]: failed to resolve: could not find `event` in `sys` 119 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:24:14 120 | | 121 | 24 | sys::event::token(&self.inner) 122 | | ^^^^^ could not find `event` in `sys` 123 | 124 | error[E0433]: failed to resolve: could not find `event` in `sys` 125 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:38:14 126 | | 127 | 38 | sys::event::is_readable(&self.inner) 128 | | ^^^^^ could not find `event` in `sys` 129 | 130 | error[E0433]: failed to resolve: could not find `event` in `sys` 131 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:43:14 132 | | 133 
| 43 | sys::event::is_writable(&self.inner) 134 | | ^^^^^ could not find `event` in `sys` 135 | 136 | error[E0433]: failed to resolve: could not find `event` in `sys` 137 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:68:14 138 | | 139 | 68 | sys::event::is_error(&self.inner) 140 | | ^^^^^ could not find `event` in `sys` 141 | 142 | error[E0433]: failed to resolve: could not find `event` in `sys` 143 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:99:14 144 | | 145 | 99 | sys::event::is_read_closed(&self.inner) 146 | | ^^^^^ could not find `event` in `sys` 147 | 148 | error[E0433]: failed to resolve: could not find `event` in `sys` 149 | --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:129:14 150 | | 151 | 129 | sys::event::is_write_closed(&self.inner) 152 | | ^^^^^ could not find `event` in `sys` 153 | ``` -------------------------------------------------------------------------------- /crates/cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zombie-cli" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | rust-version.workspace = true 7 | publish = true 8 | license.workspace = true 9 | repository.workspace = true 10 | description = "Zombienet cli, entrypoint for using zombienet" 11 | keywords = ["zombienet", "sdk", "cli"] 12 | 13 | [[bin]] 14 | name = "zombie-cli" 15 | path = "src/main.rs" 16 | doc = false 17 | 18 | 19 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 20 | 21 | [dependencies] 22 | tracing-subscriber = "0.3.18" 23 | tokio = { workspace = true } 24 | zombienet-sdk = { workspace = true } 25 | clap = { version = "4.4.18", features = ["derive"] } 26 | -------------------------------------------------------------------------------- /crates/cli/src/main.rs: 
-------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use clap::{Parser, Subcommand}; 4 | use zombienet_sdk::{environment::Provider, NetworkConfig}; 5 | 6 | #[derive(Parser, Debug)] 7 | #[command(author, version, about, long_about = None)] 8 | struct Args { 9 | #[command(subcommand)] 10 | cmd: Commands, 11 | } 12 | 13 | #[derive(Subcommand, Debug, Clone)] 14 | enum Commands { 15 | Spawn { 16 | config: String, 17 | #[arg(short, long, value_parser = clap::builder::PossibleValuesParser::new(["docker", "k8s", "native"]), default_value="docker")] 18 | provider: String, 19 | }, 20 | } 21 | 22 | #[tokio::main] 23 | async fn main() { 24 | tracing_subscriber::fmt::init(); 25 | 26 | let args = Args::parse(); 27 | 28 | let (config, provider) = match args.cmd { 29 | Commands::Spawn { config, provider } => (config, provider), 30 | }; 31 | 32 | let config = NetworkConfig::load_from_toml(&config).unwrap(); 33 | let provider: Provider = provider.into(); 34 | let spawn_fn = provider.get_spawn_fn(); 35 | let _n = spawn_fn(config).await.unwrap(); 36 | 37 | println!("looping..."); 38 | 39 | loop { 40 | tokio::time::sleep(Duration::from_secs(60)).await; 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /crates/configuration/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /crates/configuration/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zombienet-configuration" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | rust-version.workspace = true 7 | publish = true 8 | license.workspace = true 9 | repository.workspace = true 10 | description = "Zombienet sdk config builder, allow to build a network configuration" 
11 | keywords = ["zombienet", "configuration", "sdk"] 12 | 13 | [dependencies] 14 | regex = { workspace = true } 15 | lazy_static = { workspace = true } 16 | multiaddr = { workspace = true } 17 | url = { workspace = true, features = ["serde"] } 18 | thiserror = { workspace = true } 19 | anyhow = { workspace = true } 20 | serde = { workspace = true, features = ["derive"] } 21 | toml = { workspace = true } 22 | serde_json = { workspace = true } 23 | reqwest = { workspace = true } 24 | tokio = { workspace = true, features = ["fs"] } 25 | tracing = { workspace = true } 26 | 27 | # zombienet deps 28 | support = { workspace = true } 29 | 30 | -------------------------------------------------------------------------------- /crates/configuration/src/hrmp_channel.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::shared::{macros::states, types::ParaId}; 6 | 7 | /// HRMP channel configuration, with fine-grained configuration options. 8 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 9 | pub struct HrmpChannelConfig { 10 | sender: ParaId, 11 | recipient: ParaId, 12 | max_capacity: u32, 13 | max_message_size: u32, 14 | } 15 | 16 | impl HrmpChannelConfig { 17 | /// The sending parachain ID. 18 | pub fn sender(&self) -> ParaId { 19 | self.sender 20 | } 21 | 22 | /// The receiving parachain ID. 23 | pub fn recipient(&self) -> ParaId { 24 | self.recipient 25 | } 26 | 27 | /// The maximum capacity of messages in the channel. 28 | pub fn max_capacity(&self) -> u32 { 29 | self.max_capacity 30 | } 31 | 32 | /// The maximum size of a message in the channel. 33 | pub fn max_message_size(&self) -> u32 { 34 | self.max_message_size 35 | } 36 | } 37 | 38 | states! { 39 | Initial, 40 | WithSender, 41 | WithRecipient 42 | } 43 | 44 | /// HRMP channel configuration builder, used to build an [`HrmpChannelConfig`] declaratively with fields validation. 
45 | pub struct HrmpChannelConfigBuilder { 46 | config: HrmpChannelConfig, 47 | _state: PhantomData, 48 | } 49 | 50 | impl Default for HrmpChannelConfigBuilder { 51 | fn default() -> Self { 52 | Self { 53 | config: HrmpChannelConfig { 54 | sender: 0, 55 | recipient: 0, 56 | max_capacity: 8, 57 | max_message_size: 512, 58 | }, 59 | _state: PhantomData, 60 | } 61 | } 62 | } 63 | 64 | impl HrmpChannelConfigBuilder { 65 | fn transition(&self, config: HrmpChannelConfig) -> HrmpChannelConfigBuilder { 66 | HrmpChannelConfigBuilder { 67 | config, 68 | _state: PhantomData, 69 | } 70 | } 71 | } 72 | 73 | impl HrmpChannelConfigBuilder { 74 | pub fn new() -> Self { 75 | Self::default() 76 | } 77 | 78 | /// Set the sending parachain ID. 79 | pub fn with_sender(self, sender: ParaId) -> HrmpChannelConfigBuilder { 80 | self.transition(HrmpChannelConfig { 81 | sender, 82 | ..self.config 83 | }) 84 | } 85 | } 86 | 87 | impl HrmpChannelConfigBuilder { 88 | /// Set the receiving parachain ID. 89 | pub fn with_recipient(self, recipient: ParaId) -> HrmpChannelConfigBuilder { 90 | self.transition(HrmpChannelConfig { 91 | recipient, 92 | ..self.config 93 | }) 94 | } 95 | } 96 | 97 | impl HrmpChannelConfigBuilder { 98 | /// Set the max capacity of messages in the channel. 99 | pub fn with_max_capacity(self, max_capacity: u32) -> Self { 100 | self.transition(HrmpChannelConfig { 101 | max_capacity, 102 | ..self.config 103 | }) 104 | } 105 | 106 | /// Set the maximum size of a message in the channel. 
107 | pub fn with_max_message_size(self, max_message_size: u32) -> Self { 108 | self.transition(HrmpChannelConfig { 109 | max_message_size, 110 | ..self.config 111 | }) 112 | } 113 | 114 | pub fn build(self) -> HrmpChannelConfig { 115 | self.config 116 | } 117 | } 118 | 119 | #[cfg(test)] 120 | mod tests { 121 | use super::*; 122 | 123 | #[test] 124 | fn hrmp_channel_config_builder_should_build_a_new_hrmp_channel_config_correctly() { 125 | let hrmp_channel_config = HrmpChannelConfigBuilder::new() 126 | .with_sender(1000) 127 | .with_recipient(2000) 128 | .with_max_capacity(50) 129 | .with_max_message_size(100) 130 | .build(); 131 | 132 | assert_eq!(hrmp_channel_config.sender(), 1000); 133 | assert_eq!(hrmp_channel_config.recipient(), 2000); 134 | assert_eq!(hrmp_channel_config.max_capacity(), 50); 135 | assert_eq!(hrmp_channel_config.max_message_size(), 100); 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /crates/configuration/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! This crate is used to create type safe configuration for Zombienet SDK using nested builders. 2 | //! 3 | //! 4 | //! The main entry point of this crate is the [`NetworkConfigBuilder`] which is used to build a full network configuration 5 | //! but all inner builders are also exposed to allow more granular control over the configuration. 6 | //! 7 | //! **Note**: Not all options can be checked at compile time and some will be checked at runtime when spawning a 8 | //! network (e.g.: supported args for a specific node version). 9 | //! 10 | //! # Example 11 | //! ``` 12 | //! use zombienet_configuration::NetworkConfigBuilder; 13 | //! 14 | //! let simple_configuration = NetworkConfigBuilder::new() 15 | //! .with_relaychain(|relaychain| { 16 | //! relaychain 17 | //! .with_chain("polkadot") 18 | //! .with_random_nominators_count(10) 19 | //! .with_default_resources(|resources| { 20 | //! 
resources 21 | //! .with_limit_cpu("1000m") 22 | //! .with_request_memory("1Gi") 23 | //! .with_request_cpu(100_000) 24 | //! }) 25 | //! .with_node(|node| { 26 | //! node.with_name("node") 27 | //! .with_command("command") 28 | //! .validator(true) 29 | //! }) 30 | //! }) 31 | //! .with_parachain(|parachain| { 32 | //! parachain 33 | //! .with_id(1000) 34 | //! .with_chain("myparachain1") 35 | //! .with_initial_balance(100_000) 36 | //! .with_default_image("myimage:version") 37 | //! .with_collator(|collator| { 38 | //! collator 39 | //! .with_name("collator1") 40 | //! .with_command("command1") 41 | //! .validator(true) 42 | //! }) 43 | //! }) 44 | //! .with_parachain(|parachain| { 45 | //! parachain 46 | //! .with_id(2000) 47 | //! .with_chain("myparachain2") 48 | //! .with_initial_balance(50_0000) 49 | //! .with_collator(|collator| { 50 | //! collator 51 | //! .with_name("collator2") 52 | //! .with_command("command2") 53 | //! .validator(true) 54 | //! }) 55 | //! }) 56 | //! .with_hrmp_channel(|hrmp_channel1| { 57 | //! hrmp_channel1 58 | //! .with_sender(1) 59 | //! .with_recipient(2) 60 | //! .with_max_capacity(200) 61 | //! .with_max_message_size(500) 62 | //! }) 63 | //! .with_hrmp_channel(|hrmp_channel2| { 64 | //! hrmp_channel2 65 | //! .with_sender(2) 66 | //! .with_recipient(1) 67 | //! .with_max_capacity(100) 68 | //! .with_max_message_size(250) 69 | //! }) 70 | //! .with_global_settings(|global_settings| { 71 | //! global_settings 72 | //! .with_network_spawn_timeout(1200) 73 | //! .with_node_spawn_timeout(240) 74 | //! }) 75 | //! .build(); 76 | //! 77 | //! assert!(simple_configuration.is_ok()) 78 | //! 
``` 79 | 80 | #![allow(clippy::expect_fun_call)] 81 | mod global_settings; 82 | mod hrmp_channel; 83 | mod network; 84 | mod parachain; 85 | mod relaychain; 86 | pub mod shared; 87 | mod utils; 88 | 89 | pub use global_settings::{GlobalSettings, GlobalSettingsBuilder}; 90 | pub use hrmp_channel::{HrmpChannelConfig, HrmpChannelConfigBuilder}; 91 | pub use network::{NetworkConfig, NetworkConfigBuilder, WithRelaychain}; 92 | pub use parachain::{ 93 | states as para_states, ParachainConfig, ParachainConfigBuilder, RegistrationStrategy, 94 | }; 95 | pub use relaychain::{RelaychainConfig, RelaychainConfigBuilder}; 96 | // re-export shared 97 | pub use shared::{node::NodeConfig, types}; 98 | -------------------------------------------------------------------------------- /crates/configuration/src/shared.rs: -------------------------------------------------------------------------------- 1 | pub mod errors; 2 | pub mod helpers; 3 | pub mod macros; 4 | pub mod node; 5 | pub mod resources; 6 | pub mod types; 7 | -------------------------------------------------------------------------------- /crates/configuration/src/shared/errors.rs: -------------------------------------------------------------------------------- 1 | use super::types::{ParaId, Port}; 2 | 3 | /// An error at the configuration level. 4 | #[derive(thiserror::Error, Debug)] 5 | pub enum ConfigError { 6 | #[error("relaychain.{0}")] 7 | Relaychain(anyhow::Error), 8 | 9 | #[error("parachain[{0}].{1}")] 10 | Parachain(ParaId, anyhow::Error), 11 | 12 | #[error("global_settings.{0}")] 13 | GlobalSettings(anyhow::Error), 14 | 15 | #[error("nodes['{0}'].{1}")] 16 | Node(String, anyhow::Error), 17 | 18 | #[error("collators['{0}'].{1}")] 19 | Collator(String, anyhow::Error), 20 | } 21 | 22 | /// An error at the field level. 
23 | #[derive(thiserror::Error, Debug)] 24 | pub enum FieldError { 25 | #[error("name: {0}")] 26 | Name(anyhow::Error), 27 | 28 | #[error("chain: {0}")] 29 | Chain(anyhow::Error), 30 | 31 | #[error("image: {0}")] 32 | Image(anyhow::Error), 33 | 34 | #[error("default_image: {0}")] 35 | DefaultImage(anyhow::Error), 36 | 37 | #[error("command: {0}")] 38 | Command(anyhow::Error), 39 | 40 | #[error("default_command: {0}")] 41 | DefaultCommand(anyhow::Error), 42 | 43 | #[error("bootnodes_addresses[{0}]: '{1}' {2}")] 44 | BootnodesAddress(usize, String, anyhow::Error), 45 | 46 | #[error("genesis_wasm_generator: {0}")] 47 | GenesisWasmGenerator(anyhow::Error), 48 | 49 | #[error("genesis_state_generator: {0}")] 50 | GenesisStateGenerator(anyhow::Error), 51 | 52 | #[error("local_ip: {0}")] 53 | LocalIp(anyhow::Error), 54 | 55 | #[error("default_resources.{0}")] 56 | DefaultResources(anyhow::Error), 57 | 58 | #[error("resources.{0}")] 59 | Resources(anyhow::Error), 60 | 61 | #[error("request_memory: {0}")] 62 | RequestMemory(anyhow::Error), 63 | 64 | #[error("request_cpu: {0}")] 65 | RequestCpu(anyhow::Error), 66 | 67 | #[error("limit_memory: {0}")] 68 | LimitMemory(anyhow::Error), 69 | 70 | #[error("limit_cpu: {0}")] 71 | LimitCpu(anyhow::Error), 72 | 73 | #[error("ws_port: {0}")] 74 | WsPort(anyhow::Error), 75 | 76 | #[error("rpc_port: {0}")] 77 | RpcPort(anyhow::Error), 78 | 79 | #[error("prometheus_port: {0}")] 80 | PrometheusPort(anyhow::Error), 81 | 82 | #[error("p2p_port: {0}")] 83 | P2pPort(anyhow::Error), 84 | 85 | #[error("registration_strategy: {0}")] 86 | RegistrationStrategy(anyhow::Error), 87 | } 88 | 89 | /// A conversion error for shared types across fields. 
90 | #[derive(thiserror::Error, Debug, Clone)] 91 | pub enum ConversionError { 92 | #[error("'{0}' shouldn't contains whitespace")] 93 | ContainsWhitespaces(String), 94 | 95 | #[error("'{}' doesn't match regex '{}'", .value, .regex)] 96 | DoesntMatchRegex { value: String, regex: String }, 97 | 98 | #[error("can't be empty")] 99 | CantBeEmpty, 100 | 101 | #[error("deserialize error")] 102 | DeserializeError(String), 103 | } 104 | 105 | /// A validation error for shared types across fields. 106 | #[derive(thiserror::Error, Debug, Clone)] 107 | pub enum ValidationError { 108 | #[error("'{0}' is already used across config")] 109 | PortAlreadyUsed(Port), 110 | 111 | #[error("'{0}' is already used across config")] 112 | NodeNameAlreadyUsed(String), 113 | 114 | #[error("can't be empty")] 115 | CantBeEmpty(), 116 | } 117 | -------------------------------------------------------------------------------- /crates/configuration/src/shared/helpers.rs: -------------------------------------------------------------------------------- 1 | use std::{cell::RefCell, rc::Rc}; 2 | 3 | use support::constants::{BORROWABLE, THIS_IS_A_BUG}; 4 | 5 | use super::{ 6 | errors::ValidationError, 7 | types::{ParaId, Port, ValidationContext}, 8 | }; 9 | 10 | pub fn merge_errors(errors: Vec, new_error: anyhow::Error) -> Vec { 11 | let mut errors = errors; 12 | errors.push(new_error); 13 | 14 | errors 15 | } 16 | 17 | pub fn merge_errors_vecs( 18 | errors: Vec, 19 | new_errors: Vec, 20 | ) -> Vec { 21 | let mut errors = errors; 22 | 23 | for new_error in new_errors.into_iter() { 24 | errors.push(new_error); 25 | } 26 | 27 | errors 28 | } 29 | 30 | pub fn ensure_node_name_unique( 31 | node_name: impl Into, 32 | validation_context: Rc>, 33 | ) -> Result<(), anyhow::Error> { 34 | let mut context = validation_context 35 | .try_borrow_mut() 36 | .expect(&format!("{}, {}", BORROWABLE, THIS_IS_A_BUG)); 37 | 38 | let node_name = node_name.into(); 39 | if !context.used_nodes_names.contains(&node_name) { 40 | 
context.used_nodes_names.push(node_name); 41 | return Ok(()); 42 | } 43 | 44 | Err(ValidationError::NodeNameAlreadyUsed(node_name).into()) 45 | } 46 | 47 | pub fn ensure_value_is_not_empty(value: &str) -> Result<(), anyhow::Error> { 48 | if value.is_empty() { 49 | Err(ValidationError::CantBeEmpty().into()) 50 | } else { 51 | Ok(()) 52 | } 53 | } 54 | 55 | pub fn ensure_port_unique( 56 | port: Port, 57 | validation_context: Rc>, 58 | ) -> Result<(), anyhow::Error> { 59 | let mut context = validation_context 60 | .try_borrow_mut() 61 | .expect(&format!("{}, {}", BORROWABLE, THIS_IS_A_BUG)); 62 | 63 | if !context.used_ports.contains(&port) { 64 | context.used_ports.push(port); 65 | return Ok(()); 66 | } 67 | 68 | Err(ValidationError::PortAlreadyUsed(port).into()) 69 | } 70 | 71 | pub fn generate_unique_para_id( 72 | para_id: ParaId, 73 | validation_context: Rc>, 74 | ) -> String { 75 | let mut context = validation_context 76 | .try_borrow_mut() 77 | .expect(&format!("{}, {}", BORROWABLE, THIS_IS_A_BUG)); 78 | 79 | if let Some(suffix) = context.used_para_ids.get_mut(¶_id) { 80 | *suffix += 1; 81 | format!("{para_id}-{suffix}") 82 | } else { 83 | // insert 0, since will be used next time. 84 | context.used_para_ids.insert(para_id, 0); 85 | para_id.to_string() 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /crates/configuration/src/shared/macros.rs: -------------------------------------------------------------------------------- 1 | // Helper to define states of a type. 2 | // We use an enum with no variants because it can't be constructed by definition. 3 | macro_rules! 
states { 4 | ($($ident:ident),*) => { 5 | $( 6 | pub enum $ident {} 7 | )* 8 | }; 9 | } 10 | 11 | pub(crate) use states; 12 | -------------------------------------------------------------------------------- /crates/configuration/src/utils.rs: -------------------------------------------------------------------------------- 1 | use crate::types::{Command, Duration}; 2 | 3 | pub(crate) fn is_true(value: &bool) -> bool { 4 | *value 5 | } 6 | 7 | pub(crate) fn is_false(value: &bool) -> bool { 8 | !(*value) 9 | } 10 | 11 | pub(crate) fn default_as_true() -> bool { 12 | true 13 | } 14 | 15 | pub(crate) fn default_as_false() -> bool { 16 | false 17 | } 18 | 19 | pub(crate) fn default_initial_balance() -> crate::types::U128 { 20 | 2_000_000_000_000.into() 21 | } 22 | 23 | /// Default timeout for spawning a node (10mins) 24 | pub(crate) fn default_node_spawn_timeout() -> Duration { 25 | 600 26 | } 27 | 28 | /// Default timeout for spawning the whole network (1hr) 29 | pub(crate) fn default_timeout() -> Duration { 30 | 3600 31 | } 32 | 33 | pub(crate) fn default_command_polkadot() -> Option { 34 | TryInto::::try_into("polkadot").ok() 35 | } 36 | -------------------------------------------------------------------------------- /crates/configuration/testing/snapshots/0000-small-network.toml: -------------------------------------------------------------------------------- 1 | [settings] 2 | timeout = 3600 3 | node_spawn_timeout = 600 4 | 5 | [relaychain] 6 | chain = "rococo-local" 7 | default_command = "polkadot" 8 | default_image = "docker.io/parity/polkadot:latest" 9 | default_args = ["-lparachain=debug"] 10 | 11 | [[relaychain.nodes]] 12 | name = "alice" 13 | validator = true 14 | invulnerable = true 15 | bootnode = false 16 | balance = 2000000000000 17 | 18 | [[relaychain.nodes]] 19 | name = "bob" 20 | args = ["--database=paritydb-experimental"] 21 | validator = true 22 | invulnerable = false 23 | bootnode = true 24 | balance = 2000000000000 25 | 
-------------------------------------------------------------------------------- /crates/configuration/testing/snapshots/0001-big-network.toml: -------------------------------------------------------------------------------- 1 | [settings] 2 | timeout = 3600 3 | node_spawn_timeout = 600 4 | 5 | [relaychain] 6 | chain = "polkadot" 7 | default_command = "polkadot" 8 | default_image = "docker.io/parity/polkadot:latest" 9 | 10 | [relaychain.default_resources.requests] 11 | memory = "500M" 12 | cpu = "100000" 13 | 14 | [relaychain.default_resources.limits] 15 | memory = "4000M" 16 | cpu = "10Gi" 17 | 18 | [[relaychain.nodes]] 19 | name = "alice" 20 | validator = true 21 | invulnerable = true 22 | bootnode = true 23 | balance = 1000000000 24 | 25 | [[relaychain.nodes]] 26 | name = "bob" 27 | validator = true 28 | invulnerable = true 29 | bootnode = true 30 | balance = 2000000000000 31 | 32 | [[parachains]] 33 | id = 1000 34 | chain = "myparachain" 35 | register_para = true 36 | onboard_as_parachain = false 37 | balance = 2000000000000 38 | default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" 39 | chain_spec_path = "/path/to/my/chain/spec.json" 40 | cumulus_based = true 41 | evm_based = false 42 | 43 | [[parachains.collators]] 44 | name = "john" 45 | validator = true 46 | invulnerable = true 47 | bootnode = true 48 | balance = 5000000000 49 | 50 | [[parachains.collators]] 51 | name = "charles" 52 | validator = false 53 | invulnerable = true 54 | bootnode = true 55 | balance = 0 56 | 57 | [[parachains.collators]] 58 | name = "frank" 59 | validator = true 60 | invulnerable = false 61 | bootnode = true 62 | balance = 1000000000 63 | 64 | [[parachains]] 65 | id = 2000 66 | chain = "myotherparachain" 67 | add_to_genesis = true 68 | balance = 2000000000000 69 | chain_spec_path = "/path/to/my/other/chain/spec.json" 70 | cumulus_based = true 71 | evm_based = false 72 | 73 | [[parachains.collators]] 74 | name = "mike" 75 | validator = true 76 | invulnerable = true 
77 | bootnode = true 78 | balance = 5000000000 79 | 80 | [[parachains.collators]] 81 | name = "georges" 82 | validator = false 83 | invulnerable = true 84 | bootnode = true 85 | balance = 0 86 | 87 | [[parachains.collators]] 88 | name = "victor" 89 | validator = true 90 | invulnerable = false 91 | bootnode = true 92 | balance = 1000000000 93 | 94 | [[hrmp_channels]] 95 | sender = 1000 96 | recipient = 2000 97 | max_capacity = 150 98 | max_message_size = 5000 99 | 100 | [[hrmp_channels]] 101 | sender = 2000 102 | recipient = 1000 103 | max_capacity = 200 104 | max_message_size = 8000 105 | -------------------------------------------------------------------------------- /crates/configuration/testing/snapshots/0002-overridden-defaults.toml: -------------------------------------------------------------------------------- 1 | [settings] 2 | timeout = 3600 3 | node_spawn_timeout = 600 4 | 5 | [relaychain] 6 | chain = "polkadot" 7 | default_command = "polkadot" 8 | default_image = "docker.io/parity/polkadot:latest" 9 | default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" 10 | default_args = [ 11 | "-name=value", 12 | "--flag", 13 | ] 14 | 15 | [relaychain.default_resources.requests] 16 | memory = "500M" 17 | cpu = "100000" 18 | 19 | [relaychain.default_resources.limits] 20 | memory = "4000M" 21 | cpu = "10Gi" 22 | 23 | [[relaychain.nodes]] 24 | name = "alice" 25 | validator = true 26 | invulnerable = true 27 | bootnode = true 28 | balance = 1000000000 29 | 30 | [[relaychain.nodes]] 31 | name = "bob" 32 | image = "mycustomimage:latest" 33 | command = "my-custom-command" 34 | args = ["-myothername=value"] 35 | validator = true 36 | invulnerable = true 37 | bootnode = true 38 | balance = 2000000000000 39 | db_snapshot = "https://storage.com/path/to/other/db_snapshot.tgz" 40 | 41 | [relaychain.nodes.resources.requests] 42 | memory = "250Mi" 43 | cpu = "1000" 44 | 45 | [relaychain.nodes.resources.limits] 46 | memory = "2Gi" 47 | cpu = "5Gi" 48 | 49 | 
[[parachains]] 50 | id = 1000 51 | chain = "myparachain" 52 | add_to_genesis = true 53 | balance = 2000000000000 54 | default_command = "my-default-command" 55 | default_image = "mydefaultimage:latest" 56 | default_db_snapshot = "https://storage.com/path/to/other_snapshot.tgz" 57 | chain_spec_path = "/path/to/my/chain/spec.json" 58 | cumulus_based = true 59 | evm_based = false 60 | 61 | [[parachains.collators]] 62 | name = "john" 63 | image = "anotherimage:latest" 64 | command = "my-non-default-command" 65 | validator = true 66 | invulnerable = true 67 | bootnode = true 68 | balance = 5000000000 69 | 70 | [[parachains.collators]] 71 | name = "charles" 72 | validator = false 73 | invulnerable = true 74 | bootnode = true 75 | balance = 0 76 | -------------------------------------------------------------------------------- /crates/configuration/testing/snapshots/0003-small-network_w_parachain.toml: -------------------------------------------------------------------------------- 1 | [settings] 2 | timeout = 3600 3 | node_spawn_timeout = 600 4 | 5 | [relaychain] 6 | chain = "rococo-local" 7 | default_command = "polkadot" 8 | default_image = "docker.io/parity/polkadot:latest" 9 | default_args = ["-lparachain=debug"] 10 | 11 | [[relaychain.nodes]] 12 | name = "alice" 13 | validator = true 14 | invulnerable = true 15 | bootnode = false 16 | balance = 2000000000000 17 | 18 | [[relaychain.nodes]] 19 | name = "bob" 20 | args = ["--database=paritydb-experimental"] 21 | validator = true 22 | invulnerable = false 23 | bootnode = true 24 | balance = 2000000000000 25 | 26 | [[parachains]] 27 | id = 1000 28 | chain = "myparachain" 29 | onboard_as_parachain = false 30 | balance = 2000000000000 31 | default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" 32 | chain_spec_path = "/path/to/my/chain/spec.json" 33 | cumulus_based = true 34 | 35 | [parachains.collator] 36 | name = "john" 37 | validator = true 38 | invulnerable = true 39 | bootnode = true 40 | balance = 
5000000000 41 | 42 | [[parachains]] 43 | id = 1000 44 | chain = "myparachain" 45 | onboard_as_parachain = false 46 | balance = 2000000000000 47 | default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" 48 | chain_spec_path = "/path/to/my/chain/spec.json" 49 | cumulus_based = true 50 | evm_based = true 51 | 52 | [[parachains.collators]] 53 | name = "john" 54 | validator = true 55 | invulnerable = true 56 | bootnode = true 57 | balance = 5000000000 58 | -------------------------------------------------------------------------------- /crates/configuration/testing/snapshots/0004-small-network-without-settings.toml: -------------------------------------------------------------------------------- 1 | [relaychain] 2 | chain = "rococo-local" 3 | default_command = "polkadot" 4 | 5 | [[relaychain.nodes]] 6 | name = "alice" 7 | 8 | [[relaychain.nodes]] 9 | name = "bob" -------------------------------------------------------------------------------- /crates/configuration/testing/snapshots/0005-small-networl-with-wasm-override.toml: -------------------------------------------------------------------------------- 1 | [relaychain] 2 | chain = "rococo-local" 3 | default_command = "polkadot" 4 | wasm_override = "/some/path/runtime.wasm" 5 | 6 | [[relaychain.nodes]] 7 | name = "alice" 8 | 9 | [[relaychain.nodes]] 10 | name = "bob" 11 | 12 | [[parachains]] 13 | id = 1000 14 | wasm_override = "https://some.com/runtime.wasm" 15 | 16 | [parachains.collator] 17 | name = "john" 18 | -------------------------------------------------------------------------------- /crates/examples/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "examples" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | zombienet-sdk = {workspace = true, features = ["pjs"]} 10 | tokio = { workspace = true } 11 | futures 
= { workspace = true } 12 | tracing-subscriber = { workspace = true } 13 | serde_json = { workspace = true } 14 | anyhow = { workspace = true } 15 | -------------------------------------------------------------------------------- /crates/examples/examples/0001-simple.toml: -------------------------------------------------------------------------------- 1 | [settings] 2 | timeout = 1000 3 | 4 | [relaychain.runtime_genesis_patch.configuration.config] 5 | max_validators_per_core = 1 6 | needed_approvals = 2 7 | group_rotation_frequency = 10 8 | 9 | [relaychain] 10 | default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" 11 | chain = "rococo-local" 12 | command = "polkadot" 13 | 14 | [[relaychain.nodes]] 15 | name = "alice" 16 | args = [ "--alice", "-lruntime=debug,parachain=trace" ] 17 | 18 | [[relaychain.nodes]] 19 | name = "bob" 20 | args = [ "--bob", "-lruntime=debug,parachain=trace" ] 21 | 22 | [[parachains]] 23 | id = 100 24 | addToGenesis = false 25 | 26 | [[parachains.collators]] 27 | name = "collator01" 28 | image = "{{COL_IMAGE}}" 29 | command = "adder-collator" 30 | args = [ "-lruntime=debug,parachain=trace" ] 31 | 32 | [types.Header] 33 | number = "u64" 34 | parent_hash = "Hash" 35 | post_state = "Hash" 36 | -------------------------------------------------------------------------------- /crates/examples/examples/add_para.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use anyhow::anyhow; 4 | use futures::stream::StreamExt; 5 | use zombienet_sdk::{subxt, NetworkConfigBuilder, NetworkConfigExt}; 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<(), anyhow::Error> { 9 | tracing_subscriber::fmt::init(); 10 | let mut network = NetworkConfigBuilder::new() 11 | .with_relaychain(|r| { 12 | r.with_chain("rococo-local") 13 | .with_default_command("polkadot") 14 | .with_node(|node| node.with_name("alice")) 15 | .with_node(|node| node.with_name("bob")) 16 | }) 17 | .with_parachain(|p| { 18 | 
p.with_id(2000) 19 | .cumulus_based(true) 20 | .with_collator(|n| 21 | n.with_name("collator") 22 | // TODO: check how we can clean 23 | .with_command("polkadot-parachain") 24 | // .with_command("test-parachain") 25 | // .with_image("docker.io/paritypr/test-parachain:c90f9713b5bc73a9620b2e72b226b4d11e018190") 26 | ) 27 | }) 28 | .build() 29 | .unwrap() 30 | .spawn_native() 31 | .await?; 32 | 33 | println!("🚀🚀🚀🚀 network deployed"); 34 | 35 | let alice = network.get_node("alice")?; 36 | tokio::time::sleep(Duration::from_secs(10)).await; 37 | println!("{:#?}", alice); 38 | let client = alice.wait_client::().await?; 39 | 40 | // wait 3 blocks 41 | let mut blocks = client.blocks().subscribe_finalized().await?.take(3); 42 | 43 | while let Some(block) = blocks.next().await { 44 | println!("Block #{}", block?.header().number); 45 | } 46 | 47 | println!("⚙️ adding parachain to the running network"); 48 | 49 | let para_config = network 50 | .para_config_builder() 51 | .with_id(100) 52 | //.with_registration_strategy(zombienet_sdk::RegistrationStrategy::Manual) 53 | .with_default_command("polkadot-parachain") 54 | .with_collator(|c| c.with_name("col-100-1")) 55 | .build() 56 | .map_err(|_e| anyhow!("Building config"))?; 57 | 58 | network 59 | .add_parachain(¶_config, None, Some("new_para_100".to_string())) 60 | .await?; 61 | 62 | // For now let just loop.... 
63 | #[allow(clippy::empty_loop)] 64 | loop {} 65 | 66 | #[allow(clippy::unreachable)] 67 | #[allow(unreachable_code)] 68 | Ok(()) 69 | } 70 | -------------------------------------------------------------------------------- /crates/examples/examples/common/lib.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use zombienet_sdk::{NetworkConfig, NetworkConfigBuilder}; 4 | 5 | #[allow(dead_code)] 6 | pub fn small_network_config( 7 | custom_base_dir: Option<&Path>, 8 | ) -> Result> { 9 | // let config = 10 | let builder = NetworkConfigBuilder::new() 11 | .with_relaychain(|r| { 12 | r.with_chain("rococo-local") 13 | .with_default_command("polkadot") 14 | .with_default_image("docker.io/parity/polkadot:latest") 15 | .with_node(|node| node.with_name("alice")) 16 | .with_node(|node| node.with_name("bob")) 17 | }) 18 | .with_parachain(|p| { 19 | p.with_id(2000).cumulus_based(true).with_collator(|n| { 20 | n.with_name("collator") 21 | .with_command("polkadot-parachain") 22 | .with_image("docker.io/parity/polkadot-parachain:latest") 23 | }) 24 | }); 25 | 26 | if let Some(base_dir) = custom_base_dir { 27 | builder 28 | .with_global_settings(|g| g.with_base_dir(base_dir)) 29 | .build() 30 | } else { 31 | builder.build() 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /crates/examples/examples/db_snapshot.rs: -------------------------------------------------------------------------------- 1 | use zombienet_sdk::{NetworkConfigBuilder, NetworkConfigExt}; 2 | 3 | #[tokio::main] 4 | async fn main() -> Result<(), Box> { 5 | tracing_subscriber::fmt::init(); 6 | let mut _network = NetworkConfigBuilder::new() 7 | .with_relaychain(|r| { 8 | r.with_chain("rococo-local") 9 | .with_default_command("substrate-node") 10 | .with_default_image("docker.io/paritypr/substrate:3428-e5be9c93") 11 | 
.with_default_db_snapshot("https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-9677807d738b951e9f6c82e5fd15518eb0ae0419.tgz") 12 | .with_chain_spec_path("/Users/pepo/parity/polkadot-sdk/substrate/zombienet/0001-basic-warp-sync/chain-spec.json") 13 | .with_node(|node| node.with_name("alice")) 14 | .with_node(|node| node.with_name("bob")) 15 | .with_node(|node| node.with_name("charlie")) 16 | }) 17 | .build() 18 | .unwrap() 19 | // .spawn_native() 20 | .spawn_k8s() 21 | .await?; 22 | 23 | println!("🚀🚀🚀🚀 network deployed"); 24 | 25 | // For now let just loop.... 26 | #[allow(clippy::empty_loop)] 27 | loop {} 28 | } 29 | -------------------------------------------------------------------------------- /crates/examples/examples/para_upgrade.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) Parity Technologies (UK) Ltd. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use std::env; 5 | 6 | use anyhow::anyhow; 7 | use zombienet_sdk::{ 8 | subxt, 9 | tx_helper::{ChainUpgrade, RuntimeUpgradeOptions}, 10 | NetworkConfigBuilder, 11 | }; 12 | 13 | const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; 14 | 15 | #[tokio::main] 16 | async fn main() -> Result<(), anyhow::Error> { 17 | tracing_subscriber::fmt::init(); 18 | 19 | // allow to pass the upgrade path through first cli argument 20 | let args: Vec<_> = env::args().collect(); 21 | 22 | let images = zombienet_sdk::environment::get_images_from_env(); 23 | let config = NetworkConfigBuilder::new() 24 | .with_relaychain(|r| { 25 | r.with_chain("rococo-local") 26 | .with_default_command("polkadot") 27 | .with_default_image(images.polkadot.as_str()) 28 | .with_node(|node| node.with_name("alice")) 29 | .with_node(|node| node.with_name("bob")) 30 | }) 31 | .with_parachain(|p| { 32 | p.with_id(100) 33 | .with_default_command("test-parachain") 34 | .with_default_image(images.cumulus.as_str()) 35 | .with_collator(|c| 
c.with_name("collator")) 36 | }) 37 | .build() 38 | .map_err(|e| { 39 | let errs = e 40 | .into_iter() 41 | .map(|e| e.to_string()) 42 | .collect::>() 43 | .join(" "); 44 | anyhow!("config errs: {errs}") 45 | })?; 46 | 47 | let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); 48 | let network = spawn_fn(config).await?; 49 | 50 | // wait 2 blocks 51 | let alice = network.get_node("alice")?; 52 | assert!(alice 53 | .wait_metric(BEST_BLOCK_METRIC, |b| b > 2_f64) 54 | .await 55 | .is_ok()); 56 | 57 | // get current runtime spec 58 | let client = network 59 | .get_node("collator")? 60 | .wait_client::() 61 | .await?; 62 | let current_runtime = client.backend().current_runtime_version().await?; 63 | println!( 64 | "current_runtime spec version: {:?}", 65 | current_runtime.spec_version 66 | ); 67 | 68 | // get current best 69 | let best_block = alice.reports(BEST_BLOCK_METRIC).await?; 70 | 71 | // upgrade runtime 72 | let wasm = if args.len() > 1 { 73 | args[1].clone() 74 | } else if env::var("ZOMBIE_WASM_INCREMENTED_PATH").is_ok() { 75 | env::var("ZOMBIE_WASM_INCREMENTED_PATH").unwrap() 76 | } else { 77 | panic!("You need to provide the PATH to the wasm file to use to upgrade, through first argument or 'ZOMBIE_WASM_INCREMENTED_PATH' env var"); 78 | }; 79 | 80 | // wait 2 more blocks 81 | alice 82 | .wait_metric(BEST_BLOCK_METRIC, |x| x > best_block + 50_f64) 83 | .await?; 84 | 85 | println!("Perfoming upgrade from path {wasm}"); 86 | 87 | network 88 | .parachain(100) 89 | .expect("Invalid parachain Id") 90 | .runtime_upgrade(RuntimeUpgradeOptions::new(wasm.as_str().into())) 91 | .await?; 92 | 93 | // wait 2 more blocks 94 | alice 95 | .wait_metric(BEST_BLOCK_METRIC, |x| x > best_block + 2_f64) 96 | .await?; 97 | 98 | let incremented_runtime = client.backend().current_runtime_version().await?; 99 | println!( 100 | "incremented_runtime spec version: {}", 101 | incremented_runtime.spec_version 102 | ); 103 | 104 | assert_eq!( 105 | incremented_runtime.spec_version, 
106 | current_runtime.spec_version + 1000, 107 | "version should be incremented" 108 | ); 109 | 110 | Ok(()) 111 | } 112 | -------------------------------------------------------------------------------- /crates/examples/examples/pjs.rs: -------------------------------------------------------------------------------- 1 | use futures::stream::StreamExt; 2 | use serde_json::json; 3 | use zombienet_sdk::{subxt, NetworkConfigBuilder, NetworkConfigExt}; 4 | 5 | #[tokio::main] 6 | async fn main() -> Result<(), Box> { 7 | tracing_subscriber::fmt::init(); 8 | let network = NetworkConfigBuilder::new() 9 | .with_relaychain(|r| { 10 | r.with_chain("rococo-local") 11 | .with_default_command("polkadot") 12 | .with_node(|node| node.with_name("alice")) 13 | .with_node(|node| node.with_name("bob")) 14 | }) 15 | .with_parachain(|p| { 16 | p.with_id(100) 17 | .cumulus_based(true) 18 | .with_collator(|n| n.with_name("collator").with_command("polkadot-parachain")) 19 | }) 20 | .build() 21 | .unwrap() 22 | .spawn_native() 23 | .await?; 24 | 25 | println!("🚀🚀🚀🚀 network deployed"); 26 | 27 | let alice = network.get_node("alice")?; 28 | let client = alice.wait_client::().await?; 29 | 30 | // wait 2 blocks 31 | let mut blocks = client.blocks().subscribe_finalized().await?.take(2); 32 | 33 | while let Some(block) = blocks.next().await { 34 | println!("Block #{}", block?.header().number); 35 | } 36 | 37 | // run pjs with code 38 | let query_paras = r#" 39 | const parachains: number[] = (await api.query.paras.parachains()) || []; 40 | return parachains.toJSON() 41 | "#; 42 | 43 | let paras = alice.pjs(query_paras, vec![], None).await??; 44 | 45 | println!("parachains registered: {:?}", paras); 46 | 47 | let manifest_dir = env!("CARGO_MANIFEST_DIR"); 48 | // run pjs with file 49 | let _ = alice 50 | .pjs_file( 51 | format!("{}/{}", manifest_dir, "examples/pjs_transfer.js"), 52 | vec![json!("//Alice")], 53 | None, 54 | ) 55 | .await?; 56 | 57 | Ok(()) 58 | } 59 | 
-------------------------------------------------------------------------------- /crates/examples/examples/pjs_transfer.js: -------------------------------------------------------------------------------- 1 | const seed = arguments[0]; 2 | 3 | await utilCrypto.cryptoWaitReady(); 4 | const k = new keyring.Keyring({ type: "sr25519" }); 5 | const signer = k.addFromUri(seed); 6 | 7 | // Make a transfer from Alice to Bob and listen to system events. 8 | // You need to be connected to a development chain for this example to work. 9 | const ALICE = '5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY'; 10 | const BOB = '5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty'; 11 | 12 | // Get a random number between 1 and 100000 13 | const randomAmount = Math.floor((Math.random() * 100000) + 1); 14 | 15 | // Create a extrinsic, transferring randomAmount units to Bob. 16 | const transferAllowDeath = api.tx.balances.transferAllowDeath(BOB, randomAmount); 17 | 18 | return new Promise(async (resolve, _reject) => { 19 | // Sign and Send the transaction 20 | const unsub = await transferAllowDeath.signAndSend(signer, ({ events = [], status }) => { 21 | if (status.isInBlock) { 22 | console.log('Successful transfer of ' + randomAmount + ' with hash ' + status.asInBlock.toHex()); 23 | return resolve(); 24 | } else { 25 | console.log('Status of transfer: ' + status.type); 26 | } 27 | 28 | events.forEach(({ phase, event: { data, method, section } }) => { 29 | console.log(phase.toString() + ' : ' + section + '.' 
+ method + ' ' + data.toString()); 30 | }); 31 | }); 32 | }); -------------------------------------------------------------------------------- /crates/examples/examples/register_para.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use futures::stream::StreamExt; 4 | use zombienet_sdk::{subxt, NetworkConfigBuilder, NetworkConfigExt, RegistrationStrategy}; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), anyhow::Error> { 8 | tracing_subscriber::fmt::init(); 9 | 10 | // let images = zombienet_sdk::environment::get_images_from_env(); 11 | let mut network = NetworkConfigBuilder::new() 12 | .with_relaychain(|r| { 13 | r.with_chain("rococo-local") 14 | .with_default_command("polkadot") 15 | .with_node(|node| node.with_name("alice")) 16 | .with_node(|node| node.with_name("bob")) 17 | }) 18 | .with_parachain(|p| { 19 | p.with_id(2000) 20 | .with_registration_strategy(RegistrationStrategy::Manual) 21 | .cumulus_based(true) 22 | .with_collator(|n| n.with_name("collator").with_command("test-parachain")) 23 | }) 24 | .build() 25 | .unwrap() 26 | .spawn_native() 27 | .await?; 28 | 29 | println!("🚀🚀🚀🚀 network deployed"); 30 | 31 | let alice = network.get_node("alice")?; 32 | tokio::time::sleep(Duration::from_secs(10)).await; 33 | println!("{:#?}", alice); 34 | let client = alice.wait_client::().await?; 35 | 36 | // wait 3 blocks 37 | let mut blocks = client.blocks().subscribe_finalized().await?.take(3); 38 | 39 | println!("⏲ waiting for 3 finalized relay chain blocks"); 40 | while let Some(block) = blocks.next().await { 41 | println!("Block #{}", block?.header().number); 42 | } 43 | 44 | println!("⚙️ registering parachain in the running network"); 45 | 46 | network.register_parachain(2000).await?; 47 | 48 | let collator = network.get_node("collator")?; 49 | tokio::time::sleep(Duration::from_secs(10)).await; 50 | println!("{:#?}", collator); 51 | 52 | let client = collator.wait_client::().await?; 53 
| 54 | // wait 3 blocks 55 | let mut blocks = client.blocks().subscribe_finalized().await?.take(3); 56 | 57 | println!("⏲ waiting for 3 finalized parachain blocks"); 58 | while let Some(block) = blocks.next().await { 59 | println!("Block #{}", block?.header().number); 60 | } 61 | 62 | Ok(()) 63 | } 64 | -------------------------------------------------------------------------------- /crates/examples/examples/simple_network_example.rs: -------------------------------------------------------------------------------- 1 | use futures::stream::StreamExt; 2 | use zombienet_sdk::{subxt, NetworkConfig, NetworkConfigExt}; 3 | 4 | #[tokio::main] 5 | async fn main() -> Result<(), Box> { 6 | tracing_subscriber::fmt::init(); 7 | let network = NetworkConfig::load_from_toml("./crates/examples/examples/0001-simple.toml") 8 | .expect("errored?") 9 | .spawn_native() 10 | .await?; 11 | 12 | println!("🚀🚀🚀🚀 network deployed"); 13 | 14 | let client = network 15 | .get_node("collator01")? 16 | .wait_client::() 17 | .await?; 18 | let mut blocks = client.blocks().subscribe_finalized().await?.take(3); 19 | 20 | while let Some(block) = blocks.next().await { 21 | println!("Block #{}", block?.header().number); 22 | } 23 | 24 | Ok(()) 25 | } 26 | -------------------------------------------------------------------------------- /crates/examples/examples/small_network_config.rs: -------------------------------------------------------------------------------- 1 | use zombienet_sdk::NetworkConfigBuilder; 2 | 3 | fn main() { 4 | let config = NetworkConfigBuilder::new() 5 | .with_relaychain(|r| { 6 | r.with_chain("rococo-local") 7 | .with_node(|node| node.with_name("alice").with_command("polkadot")) 8 | }) 9 | .build(); 10 | 11 | println!("{:?}", config.unwrap()); 12 | } 13 | -------------------------------------------------------------------------------- /crates/examples/examples/small_network_with_base_dir.rs: -------------------------------------------------------------------------------- 1 | use 
std::path::Path; 2 | 3 | use zombienet_sdk::NetworkConfigExt; 4 | 5 | #[path = "./common/lib.rs"] 6 | mod common; 7 | 8 | #[tokio::main] 9 | async fn main() { 10 | tracing_subscriber::fmt::init(); 11 | let config = common::small_network_config(Some(Path::new("/tmp/zombie-1"))).unwrap(); 12 | let _network = config.spawn_docker().await.unwrap(); 13 | 14 | // For now let just loop.... 15 | #[allow(clippy::empty_loop)] 16 | loop {} 17 | } 18 | -------------------------------------------------------------------------------- /crates/examples/examples/small_network_with_default.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use zombienet_sdk::{NetworkConfigBuilder, NetworkConfigExt}; 4 | 5 | #[tokio::main] 6 | async fn main() -> Result<(), Box> { 7 | tracing_subscriber::fmt::init(); 8 | let network = NetworkConfigBuilder::new() 9 | .with_relaychain(|r| { 10 | r.with_chain("rococo-local") 11 | .with_default_command("polkadot") 12 | .with_default_image("docker.io/parity/polkadot:v1.4.0") 13 | .with_node(|node| node.with_name("alice")) 14 | .with_node(|node| node.with_name("bob")) 15 | }) 16 | .with_parachain(|p| { 17 | p.with_id(2000) 18 | .cumulus_based(true) 19 | .with_collator(|n| 20 | n.with_name("collator") 21 | // TODO: check how we can clean 22 | .with_command("polkadot-parachain") 23 | // .with_command("test-parachain") 24 | .with_image("docker.io/paritypr/test-parachain:c90f9713b5bc73a9620b2e72b226b4d11e018190") 25 | ) 26 | }) 27 | .build() 28 | .unwrap() 29 | .spawn_native() 30 | // .spawn_k8s() 31 | .await?; 32 | 33 | println!("🚀🚀🚀🚀 network deployed"); 34 | // give some time to node's bootstraping 35 | tokio::time::sleep(Duration::from_secs(12)).await; 36 | 37 | // // Add a new node to the running network. 
38 | // let opts = AddNodeOptions { 39 | // rpc_port: Some(9444), 40 | // is_validator: true, 41 | // ..Default::default() 42 | // }; 43 | 44 | // network.add_node("new1", opts).await?; 45 | 46 | // Example of some operations that you can do 47 | // with `nodes` (e.g pause, resume, restart) 48 | 49 | // Get a ref to the node 50 | let node = network.get_node("alice")?; 51 | 52 | let is_10 = node.assert("block_height{status=\"best\"}", 10).await?; 53 | println!("is_10: {is_10}"); 54 | 55 | // let role = node.reports("node_roles").await?; 56 | // println!("Role is {role}"); 57 | 58 | // pause the node 59 | // node.pause().await?; 60 | // println!("node new1 paused!"); 61 | 62 | // node.resume().await?; 63 | // println!("node new1 resumed!"); 64 | 65 | // let col_opts = AddCollatorOptions { 66 | // command: Some("polkadot-parachain".try_into()?), 67 | // ..Default::default() 68 | // }; 69 | // network.add_collator("new-col-1", col_opts, 100).await?; 70 | // println!("new collator deployed!"); 71 | 72 | // For now let just loop.... 
73 | #[allow(clippy::empty_loop)] 74 | loop {} 75 | 76 | // Ok(()) 77 | } 78 | -------------------------------------------------------------------------------- /crates/examples/examples/small_network_with_para.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use zombienet_sdk::{AddCollatorOptions, AddNodeOptions, NetworkConfigBuilder, NetworkConfigExt}; 4 | 5 | #[tokio::main] 6 | async fn main() -> Result<(), Box> { 7 | tracing_subscriber::fmt::init(); 8 | let mut network = NetworkConfigBuilder::new() 9 | .with_relaychain(|r| { 10 | r.with_chain("rococo-local") 11 | .with_default_command("polkadot") 12 | .with_node(|node| node.with_name("alice")) 13 | .with_node(|node| node.with_name("bob")) 14 | }) 15 | .with_parachain(|p| { 16 | p.with_id(100) 17 | .cumulus_based(true) 18 | //.with_registration_strategy(RegistrationStrategy::UsingExtrinsic) 19 | .with_collator(|n| n.with_name("collator").with_command("polkadot-parachain")) 20 | }) 21 | .build() 22 | .unwrap() 23 | .spawn_native() 24 | .await?; 25 | 26 | println!("🚀🚀🚀🚀 network deployed"); 27 | // add a new node 28 | let opts = AddNodeOptions { 29 | rpc_port: Some(9444), 30 | is_validator: true, 31 | ..Default::default() 32 | }; 33 | 34 | // TODO: add check to ensure if unique 35 | network.add_node("new1", opts).await?; 36 | 37 | // Example of some operations that you can do 38 | // with `nodes` (e.g pause, resume, restart) 39 | 40 | tokio::time::sleep(Duration::from_secs(12)).await; 41 | 42 | // Get a ref to the node 43 | let node = network.get_node("alice")?; 44 | 45 | let is_10 = node.assert("block_height{status=\"best\"}", 10).await?; 46 | println!("is_10: {is_10}"); 47 | 48 | let role = node.reports("node_roles").await?; 49 | println!("Role is {role}"); 50 | 51 | // pause the node 52 | // node.pause().await?; 53 | // println!("node new1 paused!"); 54 | 55 | // tokio::time::sleep(Duration::from_secs(2)).await; 56 | 57 | // 
node.resume().await?; 58 | // println!("node new1 resumed!"); 59 | 60 | let col_opts = AddCollatorOptions { 61 | command: Some("polkadot-parachain".try_into()?), 62 | ..Default::default() 63 | }; 64 | network.add_collator("new-col-1", col_opts, 100).await?; 65 | println!("new collator deployed!"); 66 | 67 | // For now let just loop.... 68 | #[allow(clippy::empty_loop)] 69 | loop {} 70 | 71 | // Ok(()) 72 | } 73 | -------------------------------------------------------------------------------- /crates/examples/examples/two_paras_same_id.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use zombienet_sdk::{NetworkConfigBuilder, NetworkConfigExt}; 4 | 5 | #[tokio::main] 6 | async fn main() -> Result<(), Box> { 7 | tracing_subscriber::fmt::init(); 8 | let _network = NetworkConfigBuilder::new() 9 | .with_relaychain(|r| { 10 | r.with_chain("rococo-local") 11 | .with_default_command("polkadot") 12 | .with_node(|node| node.with_name("alice")) 13 | .with_node(|node| node.with_name("bob")) 14 | }) 15 | .with_parachain(|p| { 16 | p.with_id(2000) 17 | .with_collator(|n| n.with_name("collator").with_command("polkadot-parachain")) 18 | }) 19 | .with_parachain(|p| { 20 | p.with_id(2000) 21 | .with_registration_strategy(zombienet_sdk::RegistrationStrategy::Manual) 22 | .with_collator(|n| n.with_name("collator1").with_command("polkadot-parachain")) 23 | }) 24 | .build() 25 | .unwrap() 26 | .spawn_native() 27 | .await?; 28 | 29 | println!("🚀🚀🚀🚀 network deployed"); 30 | 31 | // For now let just loop.... 
32 | #[allow(clippy::empty_loop)] 33 | loop { 34 | tokio::time::sleep(Duration::from_secs(60)).await; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /crates/file-server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zombienet-file-server" 3 | authors.workspace = true 4 | edition.workspace = true 5 | version.workspace = true 6 | rust-version.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 11 | 12 | [dependencies] 13 | axum = { workspace = true, features = ["multipart"] } 14 | axum-extra = { workspace = true } 15 | tokio = { workspace = true, features = ["full"] } 16 | tokio-util = { workspace = true, features = ["io"] } 17 | tower = { workspace = true, features = ["util"] } 18 | tower-http = { workspace = true, features = ["fs", "trace"] } 19 | futures = { workspace = true } 20 | tracing = { workspace = true } 21 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 22 | -------------------------------------------------------------------------------- /crates/file-server/Dockerfile: -------------------------------------------------------------------------------- 1 | # build stage 2 | FROM rust:1.75-alpine as builder 3 | 4 | WORKDIR /tmp 5 | 6 | COPY . . 
7 | 8 | RUN apk add musl-dev 9 | 10 | RUN cargo build --release -p zombienet-file-server 11 | 12 | # run stage 13 | FROM alpine:latest 14 | 15 | ENV LISTENING_ADDRESS 0.0.0.0:80 16 | ENV UPLOADS_DIRECTORY /uploads 17 | 18 | COPY --from=builder /tmp/target/release/zombienet-file-server /usr/local/bin/file-server 19 | 20 | CMD ["file-server"] -------------------------------------------------------------------------------- /crates/file-server/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::expect_fun_call)] 2 | use std::io; 3 | 4 | use axum::{ 5 | extract::{Path, Request, State}, 6 | http::StatusCode, 7 | routing::{get, post}, 8 | Router, 9 | }; 10 | use futures::TryStreamExt; 11 | use tokio::{fs::File, io::BufWriter, net::TcpListener}; 12 | use tokio_util::io::StreamReader; 13 | use tower_http::services::ServeDir; 14 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; 15 | 16 | #[derive(Clone)] 17 | struct AppState { 18 | uploads_directory: String, 19 | } 20 | 21 | #[tokio::main] 22 | async fn main() { 23 | let address = 24 | std::env::var("LISTENING_ADDRESS").expect("LISTENING_ADDRESS env variable isn't defined"); 25 | let uploads_directory = 26 | std::env::var("UPLOADS_DIRECTORY").expect("UPLOADS_DIRECTORY env variable isn't defined"); 27 | 28 | tracing_subscriber::registry() 29 | .with(tracing_subscriber::fmt::layer()) 30 | .init(); 31 | 32 | tokio::fs::create_dir_all(&uploads_directory) 33 | .await 34 | .expect(&format!("failed to create '{uploads_directory}' directory")); 35 | 36 | let app = Router::new() 37 | .route("/", get(|| async { "Ok" })) 38 | .route( 39 | "/*file_path", 40 | post(upload).get_service(ServeDir::new(&uploads_directory)), 41 | ) 42 | .with_state(AppState { uploads_directory }); 43 | 44 | let listener = TcpListener::bind(&address) 45 | .await 46 | .expect(&format!("failed to listen on {address}")); 47 | tracing::info!("file server started on {}", 
listener.local_addr().unwrap()); 48 | axum::serve(listener, app).await.unwrap() 49 | } 50 | 51 | async fn upload( 52 | Path(file_path): Path, 53 | State(state): State, 54 | request: Request, 55 | ) -> Result<(), (StatusCode, String)> { 56 | if !path_is_valid(&file_path) { 57 | return Err((StatusCode::BAD_REQUEST, "Invalid path".to_owned())); 58 | } 59 | 60 | async { 61 | let path = std::path::Path::new(&state.uploads_directory).join(file_path); 62 | 63 | if let Some(parent_dir) = path.parent() { 64 | tokio::fs::create_dir_all(parent_dir).await?; 65 | } 66 | 67 | let stream = request.into_body().into_data_stream(); 68 | let body_with_io_error = stream.map_err(|err| io::Error::new(io::ErrorKind::Other, err)); 69 | let body_reader = StreamReader::new(body_with_io_error); 70 | futures::pin_mut!(body_reader); 71 | 72 | let mut file = BufWriter::new(File::create(&path).await?); 73 | tokio::io::copy(&mut body_reader, &mut file).await?; 74 | 75 | tracing::info!("created file '{}'", path.to_string_lossy()); 76 | 77 | Ok::<_, io::Error>(()) 78 | } 79 | .await 80 | .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())) 81 | } 82 | 83 | fn path_is_valid(path: &str) -> bool { 84 | let path = std::path::Path::new(path); 85 | let mut components = path.components().peekable(); 86 | 87 | components.all(|component| matches!(component, std::path::Component::Normal(_))) 88 | } 89 | -------------------------------------------------------------------------------- /crates/orchestrator/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /crates/orchestrator/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zombienet-orchestrator" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | rust-version.workspace = true 7 | 
description = "Zombienet Orchestrator, drive network spawn through providers"
2 | 3 | use provider::ProviderError; 4 | use support::fs::FileSystemError; 5 | 6 | use crate::generators; 7 | 8 | #[derive(Debug, thiserror::Error)] 9 | pub enum OrchestratorError { 10 | // TODO: improve invalid config reporting 11 | #[error("Invalid network configuration: {0}")] 12 | InvalidConfig(String), 13 | #[error("Invalid network config to use provider {0}: {1}")] 14 | InvalidConfigForProvider(String, String), 15 | #[error("Invalid configuration for node: {0}, field: {1}")] 16 | InvalidNodeConfig(String, String), 17 | #[error("Invariant not fulfilled {0}")] 18 | InvariantError(&'static str), 19 | #[error("Global network spawn timeout: {0} secs")] 20 | GlobalTimeOut(u32), 21 | #[error("Generator error: {0}")] 22 | GeneratorError(#[from] generators::errors::GeneratorError), 23 | #[error("Provider error")] 24 | ProviderError(#[from] ProviderError), 25 | #[error("FileSystem error")] 26 | FileSystemError(#[from] FileSystemError), 27 | #[error("Serialization error")] 28 | SerializationError(#[from] serde_json::Error), 29 | #[error(transparent)] 30 | SpawnerError(#[from] anyhow::Error), 31 | } 32 | -------------------------------------------------------------------------------- /crates/orchestrator/src/generators.rs: -------------------------------------------------------------------------------- 1 | pub mod chain_spec; 2 | pub mod errors; 3 | pub mod key; 4 | pub mod para_artifact; 5 | 6 | mod bootnode_addr; 7 | mod command; 8 | mod identity; 9 | mod keystore; 10 | mod port; 11 | 12 | pub use bootnode_addr::generate as generate_node_bootnode_addr; 13 | pub use command::{ 14 | generate_for_cumulus_node as generate_node_command_cumulus, 15 | generate_for_node as generate_node_command, GenCmdOptions, 16 | }; 17 | pub use identity::generate as generate_node_identity; 18 | pub use key::generate as generate_node_keys; 19 | pub use keystore::generate as generate_node_keystore; 20 | pub use port::generate as generate_node_port; 21 | 
-------------------------------------------------------------------------------- /crates/orchestrator/src/generators/bootnode_addr.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, net::IpAddr}; 2 | 3 | use super::errors::GeneratorError; 4 | 5 | pub fn generate + Display>( 6 | peer_id: &str, 7 | ip: &IpAddr, 8 | port: u16, 9 | args: &[T], 10 | p2p_cert: &Option, 11 | ) -> Result { 12 | let addr = if let Some(index) = args.iter().position(|arg| arg.as_ref().eq("--listen-addr")) { 13 | let listen_value = args 14 | .as_ref() 15 | .get(index + 1) 16 | .ok_or(GeneratorError::BootnodeAddrGeneration( 17 | "can not generate bootnode address from args".into(), 18 | ))? 19 | .to_string(); 20 | 21 | let ip_str = ip.to_string(); 22 | let port_str = port.to_string(); 23 | let mut parts = listen_value.split('/').collect::>(); 24 | parts[2] = &ip_str; 25 | parts[4] = port_str.as_str(); 26 | parts.join("/") 27 | } else { 28 | format!("/ip4/{ip}/tcp/{port}/ws") 29 | }; 30 | 31 | let mut addr_with_peer = format!("{addr}/p2p/{peer_id}"); 32 | if let Some(p2p_cert) = p2p_cert { 33 | addr_with_peer.push_str("/certhash/"); 34 | addr_with_peer.push_str(p2p_cert) 35 | } 36 | Ok(addr_with_peer) 37 | } 38 | 39 | #[cfg(test)] 40 | mod tests { 41 | 42 | use provider::constants::LOCALHOST; 43 | 44 | use super::*; 45 | #[test] 46 | fn generate_for_alice_without_args() { 47 | let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed 48 | let args: Vec<&str> = vec![]; 49 | let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, &args, &None).unwrap(); 50 | assert_eq!( 51 | &bootnode_addr, 52 | "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" 53 | ); 54 | } 55 | 56 | #[test] 57 | fn generate_for_alice_with_listen_addr() { 58 | // Should override the ip/port 59 | let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed 60 | let 
args: Vec = [ 61 | "--some", 62 | "other", 63 | "--listen-addr", 64 | "/ip4/192.168.100.1/tcp/30333/ws", 65 | ] 66 | .iter() 67 | .map(|x| x.to_string()) 68 | .collect(); 69 | let bootnode_addr = 70 | generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None).unwrap(); 71 | assert_eq!( 72 | &bootnode_addr, 73 | "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" 74 | ); 75 | } 76 | 77 | #[test] 78 | fn generate_for_alice_with_listen_addr_without_value_must_fail() { 79 | // Should override the ip/port 80 | let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed 81 | let args: Vec = ["--some", "other", "--listen-addr"] 82 | .iter() 83 | .map(|x| x.to_string()) 84 | .collect(); 85 | let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None); 86 | 87 | assert!(bootnode_addr.is_err()); 88 | assert!(matches!( 89 | bootnode_addr, 90 | Err(GeneratorError::BootnodeAddrGeneration(_)) 91 | )); 92 | } 93 | 94 | #[test] 95 | fn generate_for_alice_withcert() { 96 | let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed 97 | let args: Vec<&str> = vec![]; 98 | let bootnode_addr = generate( 99 | peer_id, 100 | &LOCALHOST, 101 | 5678, 102 | &args, 103 | &Some(String::from("data")), 104 | ) 105 | .unwrap(); 106 | assert_eq!( 107 | &bootnode_addr, 108 | "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm/certhash/data" 109 | ); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /crates/orchestrator/src/generators/errors.rs: -------------------------------------------------------------------------------- 1 | use provider::ProviderError; 2 | use support::fs::FileSystemError; 3 | 4 | #[derive(Debug, thiserror::Error)] 5 | pub enum GeneratorError { 6 | #[error("Generating key {0} with input {1}")] 7 | KeyGeneration(String, String), 8 | #[error("Generating port {0}, err 
{1}")] 9 | PortGeneration(u16, String), 10 | #[error("Chain-spec build error: {0}")] 11 | ChainSpecGeneration(String), 12 | #[error("Provider error: {0}")] 13 | ProviderError(#[from] ProviderError), 14 | #[error("FileSystem error")] 15 | FileSystemError(#[from] FileSystemError), 16 | #[error("Generating identity, err {0}")] 17 | IdentityGeneration(String), 18 | #[error("Generating bootnode address, err {0}")] 19 | BootnodeAddrGeneration(String), 20 | #[error("Error overriding wasm on raw chain-spec, err {0}")] 21 | OverridingWasm(String), 22 | } 23 | -------------------------------------------------------------------------------- /crates/orchestrator/src/generators/identity.rs: -------------------------------------------------------------------------------- 1 | use hex::FromHex; 2 | use libp2p::identity::{ed25519, Keypair}; 3 | use sha2::digest::Digest; 4 | 5 | use super::errors::GeneratorError; 6 | 7 | // Generate p2p identity for node 8 | // return `node-key` and `peerId` 9 | pub fn generate(node_name: &str) -> Result<(String, String), GeneratorError> { 10 | let key = hex::encode(sha2::Sha256::digest(node_name)); 11 | 12 | let bytes = <[u8; 32]>::from_hex(key.clone()).map_err(|_| { 13 | GeneratorError::IdentityGeneration("can not transform hex to [u8;32]".into()) 14 | })?; 15 | let sk = ed25519::SecretKey::try_from_bytes(bytes) 16 | .map_err(|_| GeneratorError::IdentityGeneration("can not create sk from bytes".into()))?; 17 | let local_identity: Keypair = ed25519::Keypair::from(sk).into(); 18 | let local_public = local_identity.public(); 19 | let local_peer_id = local_public.to_peer_id(); 20 | 21 | Ok((key, local_peer_id.to_base58())) 22 | } 23 | 24 | #[cfg(test)] 25 | mod tests { 26 | 27 | use super::*; 28 | #[test] 29 | fn generate_for_alice() { 30 | let s = "alice"; 31 | let (key, peer_id) = generate(s).unwrap(); 32 | assert_eq!( 33 | &key, 34 | "2bd806c97f0e00af1a1fc3328fa763a9269723c8db8fac4f93af71db186d6e90" 35 | ); 36 | assert_eq!( 37 | &peer_id, 38 | 
// The key types generated for every node account.
const KEYS: [&str; 5] = ["sr", "sr_stash", "ed", "ec", "eth"];

/// Derive a keypair of type `T` from a secret-string `seed` (e.g. "//Alice").
pub fn generate_pair<T: Pair>(seed: &str) -> Result<T::Pair, SecretStringError> {
    let pair = T::Pair::from_string(seed, None)?;
    Ok(pair)
}

/// Generate the accounts map (ss58 address + hex public key) for `seed`,
/// one entry per key type in `KEYS`.
///
/// Fails with `KeyGeneration(key_type, seed)` when the seed is not a valid
/// secret string for the given scheme.
pub fn generate(seed: &str) -> Result<Accounts, GeneratorError> {
    let mut accounts: Accounts = Default::default();
    for k in KEYS {
        let (address, public_key) = match k {
            // sr25519 account.
            "sr" => {
                let pair = generate_pair::<sr25519::Pair>(seed)
                    .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
                (pair.public().to_string(), hex::encode(pair.public()))
            },
            // sr25519 stash account, derived with the hard `//stash` path.
            "sr_stash" => {
                let pair = generate_pair::<sr25519::Pair>(&format!("{}//stash", seed))
                    .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
                (pair.public().to_string(), hex::encode(pair.public()))
            },
            // ed25519 account.
            "ed" => {
                let pair = generate_pair::<ed25519::Pair>(seed)
                    .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
                (pair.public().to_string(), hex::encode(pair.public()))
            },
            // ecdsa (secp256k1) account.
            "ec" => {
                let pair = generate_pair::<ecdsa::Pair>(seed)
                    .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
                (pair.public().to_string(), hex::encode(pair.public()))
            },
            // Ethereum-style H160 address: keccak-256 of the uncompressed
            // secp256k1 public key, keeping the low 20 bytes.
            "eth" => {
                let pair = generate_pair::<ecdsa::Pair>(seed)
                    .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;

                let decompressed = libsecp256k1::PublicKey::parse_compressed(&pair.public().0)
                    .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?
                    .serialize();
                let mut m = [0u8; 64];
                // `serialize()` yields 65 bytes (0x04 tag + 64-byte key); drop the tag.
                m.copy_from_slice(&decompressed[1..65]);
                let account = H160::from(H256::from(keccak_256(&m)));

                // NOTE: for "eth" both fields hold the hex-encoded H160 address
                // (see the `generate_work` test below).
                (hex::encode(account), hex::encode(account))
            },
            _ => unreachable!(),
        };
        accounts.insert(k.into(), NodeAccount::new(address, public_key));
    }
    Ok(accounts)
}
// Session-key prefixes for which a keystore file is generated per node.
const PREFIXES: [&str; 11] = [
    "aura", "babe", "imon", "gran", "audi", "asgn", "para", "beef", // Beffy
    "nmbs", // Nimbus
    "rand", // Randomness (Moonbeam)
    "rate", // Equilibrium rate module
];

/// Generate the local keystore files for a node.
///
/// For each prefix in `PREFIXES` a file named `hex(prefix) + public_key` is
/// written under `node_files_path`, containing the node seed quoted (the
/// on-disk keystore format used by substrate-based nodes).
///
/// * `acc` - the node's accounts (provides the public keys and the seed).
/// * `asset_hub_polkadot` - when true, the "aura" key uses the ed25519
///   account instead of sr25519.
///
/// Returns the list of generated filenames (relative to `node_files_path`).
pub async fn generate<'a, T>(
    acc: &NodeAccounts,
    node_files_path: impl AsRef<Path>,
    scoped_fs: &ScopedFilesystem<'a, T>,
    asset_hub_polkadot: bool,
) -> Result<Vec<PathBuf>, GeneratorError>
where
    T: FileSystem,
{
    // Create local keystore
    scoped_fs.create_dir_all(node_files_path.as_ref()).await?;
    let mut filenames = vec![];

    let f = PREFIXES.map(|k| {
        // let filename = encode(k);

        let filename = match k {
            // NOTE: this guarded arm must stay BEFORE the generic list below,
            // which also matches "aura" — on asset-hub-polkadot the aura key
            // is the ed25519 one.
            "aura" if asset_hub_polkadot => {
                let pk = acc
                    .accounts
                    .get("ed")
                    .expect(&format!("Key 'ed' should be set for node {THIS_IS_A_BUG}"))
                    .public_key
                    .as_str();
                format!("{}{}", encode(k), pk)
            },
            // sr25519-backed keys.
            "babe" | "imon" | "audi" | "asgn" | "para" | "nmbs" | "rand" | "aura" => {
                let pk = acc
                    .accounts
                    .get("sr")
                    .expect(&format!("Key 'sr' should be set for node {THIS_IS_A_BUG}"))
                    .public_key
                    .as_str();
                format!("{}{}", encode(k), pk)
            },
            // ed25519-backed keys.
            "gran" | "rate" => {
                let pk = acc
                    .accounts
                    .get("ed")
                    .expect(&format!("Key 'ed' should be set for node {THIS_IS_A_BUG}"))
                    .public_key
                    .as_str();
                format!("{}{}", encode(k), pk)
            },
            // ecdsa-backed key (BEEFY).
            "beef" => {
                let pk = acc
                    .accounts
                    .get("ec")
                    .expect(&format!("Key 'ec' should be set for node {THIS_IS_A_BUG}"))
                    .public_key
                    .as_str();
                format!("{}{}", encode(k), pk)
            },
            _ => unreachable!(),
        };
        let file_path = PathBuf::from(format!(
            "{}/{}",
            node_files_path.as_ref().to_string_lossy(),
            filename
        ));
        filenames.push(PathBuf::from(filename));
        let content = format!("\"{}\"", acc.seed);
        scoped_fs.write(file_path, content)
    });

    // TODO: implement logic for filter keys
    // node.keystoreKeyTypes?.forEach((key_type: string) => {
    //   if (DEFAULT_KEYSTORE_KEY_TYPES.includes(key_type))
    //     keystore_key_types[key_type] = default_keystore_key_types[key_type];
    // });

    futures::future::try_join_all(f).await?;
    Ok(filenames)
}
/// Type of the parachain genesis artifact.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum ParaArtifactType {
    Wasm,
    State,
}

/// How the artifact is obtained: copied from a local path, or produced by
/// running a command (optionally with user-supplied extra args).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum ParaArtifactBuildOption {
    Path(String),
    Command(String),
    CommandWithCustomArgs(CommandWithCustomArgs),
}

/// Parachain artifact (could be either the genesis state or genesis wasm)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParaArtifact {
    artifact_type: ParaArtifactType,
    build_option: ParaArtifactBuildOption,
    // Filled in once the artifact has been copied/generated (see `build`).
    artifact_path: Option<PathBuf>,
    // image to use for building the para artifact
    image: Option<String>,
}

impl ParaArtifact {
    pub(crate) fn new(
        artifact_type: ParaArtifactType,
        build_option: ParaArtifactBuildOption,
    ) -> Self {
        Self {
            artifact_type,
            build_option,
            artifact_path: None,
            image: None,
        }
    }

    /// Builder-style setter for the image used to generate the artifact.
    pub(crate) fn image(mut self, image: Option<String>) -> Self {
        self.image = image;
        self
    }

    /// Path of the built artifact (`None` until `build` succeeds).
    pub(crate) fn artifact_path(&self) -> Option<&PathBuf> {
        self.artifact_path.as_ref()
    }

    /// Build (or copy) the artifact into `artifact_path`.
    ///
    /// * `chain_spec_path` - chain-spec passed to the generation command (if any).
    /// * `ns` - provider namespace used to run the generation command.
    /// * `scoped_fs` - filesystem scoped to the namespace base dir.
    pub(crate) async fn build<'a, T>(
        &mut self,
        chain_spec_path: Option<impl AsRef<Path>>,
        artifact_path: impl AsRef<Path>,
        ns: &DynNamespace,
        scoped_fs: &ScopedFilesystem<'a, T>,
    ) -> Result<(), GeneratorError>
    where
        T: FileSystem,
    {
        let (cmd, custom_args) = match &self.build_option {
            ParaArtifactBuildOption::Path(path) => {
                // Artifact already exists locally: just copy it and finish.
                let t = TransferedFile::new(PathBuf::from(path), artifact_path.as_ref().into());
                scoped_fs.copy_files(vec![&t]).await?;
                self.artifact_path = Some(artifact_path.as_ref().into());
                return Ok(()); // work done!
            },
            ParaArtifactBuildOption::Command(cmd) => (cmd, &vec![]),
            ParaArtifactBuildOption::CommandWithCustomArgs(cmd_with_custom_args) => {
                (
                    &cmd_with_custom_args.cmd().as_str().to_string(),
                    cmd_with_custom_args.args(),
                )
                // (cmd.cmd_as_str().to_string(), cmd.1)
            },
        };

        // Subcommand depends on which artifact we are exporting.
        let generate_subcmd = match self.artifact_type {
            ParaArtifactType::Wasm => "export-genesis-wasm",
            ParaArtifactType::State => "export-genesis-state",
        };

        // TODO: replace uuid with para_id-random
        let temp_name = format!("temp-{}-{}", generate_subcmd, Uuid::new_v4());
        let mut args: Vec<String> = vec![generate_subcmd.into()];

        let files_to_inject = if let Some(chain_spec_path) = chain_spec_path {
            // TODO: we should get the full path from the scoped filesystem
            let chain_spec_path_local = format!(
                "{}/{}",
                ns.base_dir().to_string_lossy(),
                chain_spec_path.as_ref().to_string_lossy()
            );
            // Remote path to be injected
            let chain_spec_path_in_pod = format!(
                "{}/{}",
                NODE_CONFIG_DIR,
                chain_spec_path.as_ref().to_string_lossy()
            );
            // Path in the context of the node, this can be different in the context of the providers (e.g native)
            let chain_spec_path_in_args = if ns.capabilities().prefix_with_full_path {
                // In native
                format!(
                    "{}/{}{}",
                    ns.base_dir().to_string_lossy(),
                    &temp_name,
                    &chain_spec_path_in_pod
                )
            } else {
                chain_spec_path_in_pod.clone()
            };

            args.push("--chain".into());
            args.push(chain_spec_path_in_args);

            // Forward any user-provided custom args to the generation command.
            for custom_arg in custom_args {
                match custom_arg {
                    configuration::types::Arg::Flag(flag) => {
                        args.push(flag.into());
                    },
                    configuration::types::Arg::Option(flag, flag_value) => {
                        args.push(flag.into());
                        args.push(flag_value.into());
                    },
                    configuration::types::Arg::Array(flag, values) => {
                        args.push(flag.into());
                        values.iter().for_each(|v| args.push(v.into()));
                    },
                }
            }

            vec![TransferedFile::new(
                chain_spec_path_local,
                chain_spec_path_in_pod,
            )]
        } else {
            vec![]
        };

        let artifact_path_ref = artifact_path.as_ref();
        let generate_command = GenerateFileCommand::new(cmd.as_str(), artifact_path_ref).args(args);
        let options = GenerateFilesOptions::with_files(
            vec![generate_command],
            self.image.clone(),
            &files_to_inject,
        )
        .temp_name(temp_name);
        ns.generate_files(options).await?;
        self.artifact_path = Some(artifact_path_ref.into());

        Ok(())
    }
}
No 10 | enum PortTypes { 11 | Rpc, 12 | P2P, 13 | Prometheus, 14 | } 15 | 16 | pub fn generate(port: Option) -> Result { 17 | let port = port.unwrap_or(0); 18 | let listener = TcpListener::bind(format!("0.0.0.0:{port}")) 19 | .map_err(|_e| GeneratorError::PortGeneration(port, "Can't bind".into()))?; 20 | let port = listener 21 | .local_addr() 22 | .expect(&format!( 23 | "We should always get the local_addr from the listener {THIS_IS_A_BUG}" 24 | )) 25 | .port(); 26 | Ok(ParkedPort::new(port, listener)) 27 | } 28 | 29 | #[cfg(test)] 30 | mod tests { 31 | use super::*; 32 | #[test] 33 | fn generate_random() { 34 | let port = generate(None).unwrap(); 35 | let listener = port.1.write().unwrap(); 36 | 37 | assert!(listener.is_some()); 38 | } 39 | 40 | #[test] 41 | fn generate_fixed_port() { 42 | let port = generate(Some(33056)).unwrap(); 43 | let listener = port.1.write().unwrap(); 44 | 45 | assert!(listener.is_some()); 46 | assert_eq!(port.0, 33056); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /crates/orchestrator/src/network/chain_upgrade.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use anyhow::anyhow; 4 | use async_trait::async_trait; 5 | use subxt_signer::{sr25519::Keypair, SecretUri}; 6 | 7 | use super::node::NetworkNode; 8 | use crate::{shared::types::RuntimeUpgradeOptions, tx_helper}; 9 | 10 | #[async_trait] 11 | pub trait ChainUpgrade { 12 | /// Perform a runtime upgrade (with sudo) 13 | /// 14 | /// This call 'System.set_code_without_checks' wrapped in 15 | /// 'Sudo.sudo_unchecked_weight' 16 | async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error>; 17 | 18 | /// Perform a runtime upgrade (with sudo), inner call with the node pass as arg. 
19 | /// 20 | /// This call 'System.set_code_without_checks' wrapped in 21 | /// 'Sudo.sudo_unchecked_weight' 22 | async fn perform_runtime_upgrade( 23 | &self, 24 | node: &NetworkNode, 25 | options: RuntimeUpgradeOptions, 26 | ) -> Result<(), anyhow::Error> { 27 | let sudo = if let Some(possible_seed) = options.seed { 28 | Keypair::from_secret_key(possible_seed) 29 | .map_err(|_| anyhow!("seed should return a Keypair"))? 30 | } else { 31 | let uri = SecretUri::from_str("//Alice")?; 32 | Keypair::from_uri(&uri).map_err(|_| anyhow!("'//Alice' should return a Keypair"))? 33 | }; 34 | 35 | let wasm_data = options.wasm.get_asset().await?; 36 | 37 | tx_helper::runtime_upgrade::upgrade(node, &wasm_data, &sudo).await?; 38 | 39 | Ok(()) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /crates/orchestrator/src/network/relaychain.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use anyhow::anyhow; 4 | use async_trait::async_trait; 5 | use serde::Serialize; 6 | 7 | use super::node::NetworkNode; 8 | use crate::{network::chain_upgrade::ChainUpgrade, shared::types::RuntimeUpgradeOptions}; 9 | 10 | #[derive(Debug, Serialize)] 11 | pub struct Relaychain { 12 | pub(crate) chain: String, 13 | pub(crate) chain_id: String, 14 | pub(crate) chain_spec_path: PathBuf, 15 | pub(crate) nodes: Vec, 16 | } 17 | 18 | #[async_trait] 19 | impl ChainUpgrade for Relaychain { 20 | async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error> { 21 | // check if the node is valid first 22 | let node = if let Some(node_name) = &options.node_name { 23 | if let Some(node) = self 24 | .nodes() 25 | .into_iter() 26 | .find(|node| node.name() == node_name) 27 | { 28 | node 29 | } else { 30 | return Err(anyhow!( 31 | "Node: {} is not part of the set of nodes", 32 | node_name 33 | )); 34 | } 35 | } else { 36 | // take the first node 37 | if let Some(node) = 
self.nodes().first() { 38 | node 39 | } else { 40 | return Err(anyhow!("chain doesn't have any node!")); 41 | } 42 | }; 43 | 44 | self.perform_runtime_upgrade(node, options).await 45 | } 46 | } 47 | 48 | impl Relaychain { 49 | pub(crate) fn new(chain: String, chain_id: String, chain_spec_path: PathBuf) -> Self { 50 | Self { 51 | chain, 52 | chain_id, 53 | chain_spec_path, 54 | nodes: Default::default(), 55 | } 56 | } 57 | 58 | // Public API 59 | pub fn nodes(&self) -> Vec<&NetworkNode> { 60 | self.nodes.iter().collect() 61 | } 62 | 63 | /// Get chain name 64 | pub fn chain(&self) -> &str { 65 | &self.chain 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /crates/orchestrator/src/network_helper.rs: -------------------------------------------------------------------------------- 1 | pub mod metrics; 2 | pub mod verifier; 3 | -------------------------------------------------------------------------------- /crates/orchestrator/src/network_helper/metrics.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use async_trait::async_trait; 4 | use reqwest::Url; 5 | 6 | #[async_trait] 7 | pub trait MetricsHelper { 8 | async fn metric(&self, metric_name: &str) -> Result; 9 | async fn metric_with_url( 10 | metric: impl AsRef + Send, 11 | endpoint: impl Into + Send, 12 | ) -> Result; 13 | } 14 | 15 | pub struct Metrics { 16 | endpoint: Url, 17 | } 18 | 19 | impl Metrics { 20 | fn new(endpoint: impl Into) -> Self { 21 | Self { 22 | endpoint: endpoint.into(), 23 | } 24 | } 25 | 26 | async fn fetch_metrics( 27 | endpoint: impl AsRef, 28 | ) -> Result, anyhow::Error> { 29 | let response = reqwest::get(endpoint.as_ref()).await?; 30 | Ok(prom_metrics_parser::parse(&response.text().await?)?) 
31 | } 32 | 33 | fn get_metric( 34 | metrics_map: HashMap, 35 | metric_name: &str, 36 | ) -> Result { 37 | let treat_not_found_as_zero = true; 38 | if let Some(val) = metrics_map.get(metric_name) { 39 | Ok(*val) 40 | } else if treat_not_found_as_zero { 41 | Ok(0_f64) 42 | } else { 43 | Err(anyhow::anyhow!("MetricNotFound: {metric_name}")) 44 | } 45 | } 46 | } 47 | 48 | #[async_trait] 49 | impl MetricsHelper for Metrics { 50 | async fn metric(&self, metric_name: &str) -> Result { 51 | let metrics_map = Metrics::fetch_metrics(self.endpoint.as_str()).await?; 52 | Metrics::get_metric(metrics_map, metric_name) 53 | } 54 | 55 | async fn metric_with_url( 56 | metric_name: impl AsRef + Send, 57 | endpoint: impl Into + Send, 58 | ) -> Result { 59 | let metrics_map = Metrics::fetch_metrics(endpoint.into()).await?; 60 | Metrics::get_metric(metrics_map, metric_name.as_ref()) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /crates/orchestrator/src/network_helper/verifier.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use tokio::time::timeout; 4 | use tracing::trace; 5 | 6 | use crate::network::node::NetworkNode; 7 | 8 | pub(crate) async fn verify_nodes(nodes: &[&NetworkNode]) -> Result<(), anyhow::Error> { 9 | timeout(Duration::from_secs(90), check_nodes(nodes)) 10 | .await 11 | .map_err(|_| anyhow::anyhow!("one or more nodes are not ready!")) 12 | } 13 | 14 | // TODO: we should inject in someway the logic to make the request 15 | // in order to allow us to `mock` and easily test this. 16 | // maybe moved to the provider with a NodeStatus, and some helpers like wait_running, wait_ready, etc... ? 
to be discussed 17 | async fn check_nodes(nodes: &[&NetworkNode]) { 18 | loop { 19 | let tasks: Vec<_> = nodes 20 | .iter() 21 | .map(|node| { 22 | trace!("🔎 checking node: {} ", node.name); 23 | reqwest::get(node.prometheus_uri.clone()) 24 | }) 25 | .collect(); 26 | 27 | let all_ready = futures::future::try_join_all(tasks).await; 28 | if all_ready.is_ok() { 29 | return; 30 | } 31 | 32 | tokio::time::sleep(Duration::from_millis(1000)).await; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /crates/orchestrator/src/network_spec/relaychain.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use configuration::{ 4 | shared::{ 5 | resources::Resources, 6 | types::{Arg, AssetLocation, Chain, Command, Image}, 7 | }, 8 | RelaychainConfig, 9 | }; 10 | use serde::{Deserialize, Serialize}; 11 | use support::replacer::apply_replacements; 12 | 13 | use super::node::NodeSpec; 14 | use crate::{ 15 | errors::OrchestratorError, 16 | generators::chain_spec::{ChainSpec, Context}, 17 | shared::{constants::DEFAULT_CHAIN_SPEC_TPL_COMMAND, types::ChainDefaultContext}, 18 | }; 19 | 20 | /// A relaychain configuration spec 21 | #[derive(Debug, Clone, Serialize, Deserialize)] 22 | pub struct RelaychainSpec { 23 | /// Chain to use (e.g. rococo-local). 24 | pub(crate) chain: Chain, 25 | 26 | /// Default command to run the node. Can be overridden on each node. 27 | pub(crate) default_command: Option, 28 | 29 | /// Default image to use (only podman/k8s). Can be overridden on each node. 30 | pub(crate) default_image: Option, 31 | 32 | /// Default resources. Can be overridden on each node. 33 | pub(crate) default_resources: Option, 34 | 35 | /// Default database snapshot. Can be overridden on each node. 36 | pub(crate) default_db_snapshot: Option, 37 | 38 | /// Default arguments to use in nodes. Can be overridden on each node. 
39 | pub(crate) default_args: Vec, 40 | 41 | // chain_spec_path: Option, 42 | pub(crate) chain_spec: ChainSpec, 43 | 44 | /// Set the count of nominators to generator (used with PoS networks). 45 | pub(crate) random_nominators_count: u32, 46 | 47 | /// Set the max nominators value (used with PoS networks). 48 | pub(crate) max_nominations: u8, 49 | 50 | /// Genesis overrides as JSON value. 51 | pub(crate) runtime_genesis_patch: Option, 52 | 53 | /// Wasm override path/url to use. 54 | pub(crate) wasm_override: Option, 55 | 56 | /// Nodes to run. 57 | pub(crate) nodes: Vec, 58 | } 59 | 60 | impl RelaychainSpec { 61 | pub fn from_config(config: &RelaychainConfig) -> Result { 62 | // Relaychain main command to use, in order: 63 | // set as `default_command` or 64 | // use the command of the first node. 65 | // If non of those is set, return an error. 66 | let main_cmd = config 67 | .default_command() 68 | .or(config.nodes().first().and_then(|node| node.command())) 69 | .ok_or(OrchestratorError::InvalidConfig( 70 | "Relaychain, either default_command or first node with a command needs to be set." 
71 | .to_string(), 72 | ))?; 73 | 74 | // TODO: internally we use image as String 75 | let main_image = config 76 | .default_image() 77 | .or(config.nodes().first().and_then(|node| node.image())) 78 | .map(|image| image.as_str().to_string()); 79 | 80 | let replacements = HashMap::from([ 81 | ("disableBootnodes", "--disable-default-bootnode"), 82 | ("mainCommand", main_cmd.as_str()), 83 | ]); 84 | let tmpl = if let Some(tmpl) = config.chain_spec_command() { 85 | apply_replacements(tmpl, &replacements) 86 | } else { 87 | apply_replacements(DEFAULT_CHAIN_SPEC_TPL_COMMAND, &replacements) 88 | }; 89 | 90 | let chain_spec = ChainSpec::new(config.chain().as_str(), Context::Relay) 91 | .set_chain_name(config.chain().as_str()) 92 | .command(tmpl.as_str(), config.chain_spec_command_is_local()) 93 | .image(main_image.clone()); 94 | 95 | // Add asset location if present 96 | let chain_spec = if let Some(chain_spec_path) = config.chain_spec_path() { 97 | chain_spec.asset_location(chain_spec_path.clone()) 98 | } else { 99 | chain_spec 100 | }; 101 | 102 | // build the `node_specs` 103 | let chain_context = ChainDefaultContext { 104 | default_command: config.default_command(), 105 | default_image: config.default_image(), 106 | default_resources: config.default_resources(), 107 | default_db_snapshot: config.default_db_snapshot(), 108 | default_args: config.default_args(), 109 | }; 110 | 111 | let (nodes, mut errs) = config 112 | .nodes() 113 | .iter() 114 | .map(|node_config| NodeSpec::from_config(node_config, &chain_context)) 115 | .fold((vec![], vec![]), |(mut nodes, mut errs), result| { 116 | match result { 117 | Ok(node) => nodes.push(node), 118 | Err(err) => errs.push(err), 119 | } 120 | (nodes, errs) 121 | }); 122 | 123 | if !errs.is_empty() { 124 | // TODO: merge errs, maybe return something like Result> 125 | return Err(errs.swap_remove(0)); 126 | } 127 | 128 | Ok(RelaychainSpec { 129 | chain: config.chain().clone(), 130 | default_command: 
config.default_command().cloned(), 131 | default_image: config.default_image().cloned(), 132 | default_resources: config.default_resources().cloned(), 133 | default_db_snapshot: config.default_db_snapshot().cloned(), 134 | wasm_override: config.wasm_override().cloned(), 135 | default_args: config.default_args().into_iter().cloned().collect(), 136 | chain_spec, 137 | random_nominators_count: config.random_nominators_count().unwrap_or(0), 138 | max_nominations: config.max_nominations().unwrap_or(24), 139 | runtime_genesis_patch: config.runtime_genesis_patch().cloned(), 140 | nodes, 141 | }) 142 | } 143 | 144 | pub fn chain_spec(&self) -> &ChainSpec { 145 | &self.chain_spec 146 | } 147 | 148 | pub fn chain_spec_mut(&mut self) -> &mut ChainSpec { 149 | &mut self.chain_spec 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /crates/orchestrator/src/pjs_helper.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | pub use pjs_rs::ReturnValue; 3 | use serde_json::json; 4 | use tracing::trace; 5 | 6 | pub fn pjs_build_template( 7 | ws_uri: &str, 8 | content: &str, 9 | args: Vec, 10 | user_types: Option, 11 | ) -> String { 12 | let types = if let Some(user_types) = user_types { 13 | if let Some(types) = user_types.pointer("/types") { 14 | // if the user_types includes the `types` key use the inner value 15 | types.clone() 16 | } else { 17 | user_types.clone() 18 | } 19 | } else { 20 | // No custom types, just an emtpy json 21 | json!({}) 22 | }; 23 | 24 | let tmpl = format!( 25 | r#" 26 | const {{ util, utilCrypto, keyring, types }} = pjs; 27 | ( async () => {{ 28 | const api = await pjs.api.ApiPromise.create({{ 29 | provider: new pjs.api.WsProvider('{}'), 30 | types: {} 31 | }}); 32 | const _run = async (api, hashing, keyring, types, util, arguments) => {{ 33 | {} 34 | }}; 35 | return await _run(api, utilCrypto, keyring, types, util, {}); 36 | }})() 37 | "#, 38 | 
ws_uri, 39 | types, 40 | content, 41 | json!(args), 42 | ); 43 | trace!(tmpl = tmpl, "code to execute"); 44 | tmpl 45 | } 46 | 47 | // Since pjs-rs run a custom javascript runtime (using deno_core) we need to 48 | // execute in an isolated thread. 49 | pub fn pjs_exec(code: String) -> Result { 50 | let rt = tokio::runtime::Builder::new_current_thread() 51 | .enable_all() 52 | .build()?; 53 | 54 | std::thread::spawn(move || { 55 | rt.block_on(async move { 56 | let value = pjs_rs::run_ts_code(code, None).await; 57 | trace!("ts_code return: {:?}", value); 58 | value 59 | }) 60 | }) 61 | .join() 62 | .map_err(|_| anyhow!("[pjs] Thread panicked"))? 63 | } 64 | 65 | /// pjs-rs success [Result] type 66 | /// 67 | /// Represent the possible states returned from a successfully call to pjs-rs 68 | /// 69 | /// Ok(value) -> Deserialized return value into a [serde_json::Value] 70 | /// Err(msg) -> Execution of the script finish Ok, but the returned value 71 | /// can't be deserialize into a [serde_json::Value] 72 | pub type PjsResult = Result; 73 | -------------------------------------------------------------------------------- /crates/orchestrator/src/shared.rs: -------------------------------------------------------------------------------- 1 | pub mod constants; 2 | pub mod macros; 3 | pub mod types; 4 | -------------------------------------------------------------------------------- /crates/orchestrator/src/shared/constants.rs: -------------------------------------------------------------------------------- 1 | /// Prometheus exporter default port 2 | pub const PROMETHEUS_PORT: u16 = 9615; 3 | /// Prometheus exporter default port in collator full-node 4 | pub const FULL_NODE_PROMETHEUS_PORT: u16 = 9616; 5 | /// JSON-RPC server (ws) 6 | pub const RPC_PORT: u16 = 9944; 7 | // JSON-RPC server (http, used by old versions) 8 | pub const RPC_HTTP_PORT: u16 = 9933; 9 | // P2P default port 10 | pub const P2P_PORT: u16 = 30333; 11 | // default command template to build chain-spec 
12 | pub const DEFAULT_CHAIN_SPEC_TPL_COMMAND: &str = 13 | "{{mainCommand}} build-spec --chain {{chainName}} {{disableBootnodes}}"; 14 | -------------------------------------------------------------------------------- /crates/orchestrator/src/shared/macros.rs: -------------------------------------------------------------------------------- 1 | macro_rules! create_add_options { 2 | ($struct:ident {$( $field:ident:$type:ty ),*}) =>{ 3 | #[derive(Default, Debug, Clone)] 4 | pub struct $struct { 5 | /// Image to run the node 6 | pub image: Option, 7 | /// Command to run the node 8 | pub command: Option, 9 | /// Subcommand for the node 10 | pub subcommand: Option, 11 | /// Arguments to pass to the node 12 | pub args: Vec, 13 | /// Env vars to set 14 | pub env: Vec, 15 | /// Make the node a validator 16 | /// 17 | /// This implies `--validator` or `--collator` 18 | pub is_validator: bool, 19 | /// RPC port to use, if None a random one will be set 20 | pub rpc_port: Option, 21 | /// Prometheus port to use, if None a random one will be set 22 | pub prometheus_port: Option, 23 | /// P2P port to use, if None a random one will be set 24 | pub p2p_port: Option, 25 | $( 26 | pub $field: $type, 27 | )* 28 | } 29 | }; 30 | } 31 | 32 | pub(crate) use create_add_options; 33 | -------------------------------------------------------------------------------- /crates/orchestrator/src/shared/types.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | net::TcpListener, 4 | path::PathBuf, 5 | sync::{Arc, RwLock}, 6 | }; 7 | 8 | use configuration::shared::{ 9 | resources::Resources, 10 | types::{Arg, AssetLocation, Command, Image, Port}, 11 | }; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | pub type Accounts = HashMap; 15 | 16 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 17 | pub struct NodeAccount { 18 | pub address: String, 19 | pub public_key: String, 20 | } 21 | 22 | impl NodeAccount { 23 
| pub fn new(addr: impl Into, pk: impl Into) -> Self { 24 | Self { 25 | address: addr.into(), 26 | public_key: pk.into(), 27 | } 28 | } 29 | } 30 | 31 | #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] 32 | pub struct NodeAccounts { 33 | pub seed: String, 34 | pub accounts: Accounts, 35 | } 36 | 37 | #[derive(Clone, Default, Debug, Serialize, Deserialize)] 38 | pub struct ParkedPort( 39 | pub(crate) Port, 40 | #[serde(skip)] pub(crate) Arc>>, 41 | ); 42 | 43 | impl ParkedPort { 44 | pub(crate) fn new(port: u16, listener: TcpListener) -> ParkedPort { 45 | let listener = Arc::new(RwLock::new(Some(listener))); 46 | ParkedPort(port, listener) 47 | } 48 | 49 | pub(crate) fn drop_listener(&self) { 50 | // drop the listener will allow the running node to start listenen connections 51 | let mut l = self.1.write().unwrap(); 52 | *l = None; 53 | } 54 | } 55 | 56 | #[derive(Debug, Clone, Default)] 57 | pub struct ChainDefaultContext<'a> { 58 | pub default_command: Option<&'a Command>, 59 | pub default_image: Option<&'a Image>, 60 | pub default_resources: Option<&'a Resources>, 61 | pub default_db_snapshot: Option<&'a AssetLocation>, 62 | pub default_args: Vec<&'a Arg>, 63 | } 64 | 65 | #[derive(Debug, Clone)] 66 | pub struct RegisterParachainOptions { 67 | pub id: u32, 68 | pub wasm_path: PathBuf, 69 | pub state_path: PathBuf, 70 | pub node_ws_url: String, 71 | pub onboard_as_para: bool, 72 | pub seed: Option<[u8; 32]>, 73 | pub finalization: bool, 74 | } 75 | 76 | pub struct RuntimeUpgradeOptions { 77 | /// Location of the wasm file (could be either a local file or an url) 78 | pub wasm: AssetLocation, 79 | /// Name of the node to use as rpc endpoint 80 | pub node_name: Option, 81 | /// Seed to use to sign and submit (default to //Alice) 82 | pub seed: Option<[u8; 32]>, 83 | } 84 | 85 | impl RuntimeUpgradeOptions { 86 | pub fn new(wasm: AssetLocation) -> Self { 87 | Self { 88 | wasm, 89 | node_name: None, 90 | seed: None, 91 | } 92 | } 93 | } 94 | 
#[derive(Debug, Clone)] 95 | pub struct ParachainGenesisArgs { 96 | pub genesis_head: String, 97 | pub validation_code: String, 98 | pub parachain: bool, 99 | } 100 | -------------------------------------------------------------------------------- /crates/orchestrator/src/tx_helper.rs: -------------------------------------------------------------------------------- 1 | // pub mod register_para; 2 | // pub mod validator_actions; 3 | pub mod client; 4 | pub mod runtime_upgrade; 5 | -------------------------------------------------------------------------------- /crates/orchestrator/src/tx_helper/balance.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paritytech/zombienet-sdk/1005c0c91153a3db98b96544db2012096024dfd7/crates/orchestrator/src/tx_helper/balance.rs -------------------------------------------------------------------------------- /crates/orchestrator/src/tx_helper/client.rs: -------------------------------------------------------------------------------- 1 | use subxt::{backend::rpc::RpcClient, OnlineClient}; 2 | 3 | #[async_trait::async_trait] 4 | pub trait ClientFromUrl: Sized { 5 | async fn from_secure_url(url: &str) -> Result; 6 | async fn from_insecure_url(url: &str) -> Result; 7 | } 8 | 9 | #[async_trait::async_trait] 10 | impl ClientFromUrl for OnlineClient { 11 | async fn from_secure_url(url: &str) -> Result { 12 | Self::from_url(url).await 13 | } 14 | 15 | async fn from_insecure_url(url: &str) -> Result { 16 | Self::from_insecure_url(url).await 17 | } 18 | } 19 | 20 | #[async_trait::async_trait] 21 | impl ClientFromUrl for RpcClient { 22 | async fn from_secure_url(url: &str) -> Result { 23 | Self::from_url(url).await 24 | } 25 | 26 | async fn from_insecure_url(url: &str) -> Result { 27 | Self::from_insecure_url(url).await 28 | } 29 | } 30 | 31 | pub async fn get_client_from_url(url: &str) -> Result { 32 | if subxt::utils::url_is_secure(url)? 
{ 33 | T::from_secure_url(url).await 34 | } else { 35 | T::from_insecure_url(url).await 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /crates/orchestrator/src/tx_helper/register_para.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use configuration::shared::constants::THIS_IS_A_BUG; 4 | use subxt::{dynamic::Value, OnlineClient, SubstrateConfig}; 5 | use subxt_signer::{sr25519::Keypair, SecretUri}; 6 | use support::fs::FileSystem; 7 | 8 | use crate::{shared::types::RegisterParachainOptions, ScopedFilesystem}; 9 | use tracing::{debug, info, trace}; 10 | 11 | pub async fn register( 12 | options: RegisterParachainOptions, 13 | scoped_fs: &ScopedFilesystem<'_, impl FileSystem>, 14 | ) -> Result<(), anyhow::Error> { 15 | debug!("Registering parachain: {:?}", options); 16 | // get the seed 17 | let sudo: Keypair; 18 | if let Some(possible_seed) = options.seed { 19 | sudo = Keypair::from_seed(possible_seed) 20 | .expect(&format!("seed should return a Keypair {THIS_IS_A_BUG}")); 21 | } else { 22 | let uri = SecretUri::from_str("//Alice")?; 23 | sudo = Keypair::from_uri(&uri)?; 24 | } 25 | 26 | let genesis_state = scoped_fs 27 | .read_to_string(options.state_path) 28 | .await 29 | .expect(&format!( 30 | "State Path should be ok by this point {THIS_IS_A_BUG}" 31 | )); 32 | let wasm_data = scoped_fs 33 | .read_to_string(options.wasm_path) 34 | .await 35 | .expect(&format!( 36 | "Wasm Path should be ok by this point {THIS_IS_A_BUG}" 37 | )); 38 | 39 | let api: OnlineClient = get_client_from_url(&options.node_ws_url).await?; 40 | 41 | let schedule_para = subxt::dynamic::tx( 42 | "ParasSudoWrapper", 43 | "sudo_schedule_para_initialize", 44 | vec![ 45 | Value::primitive(options.id.into()), 46 | Value::named_composite([ 47 | ( 48 | "genesis_head", 49 | Value::from_bytes(hex::decode(&genesis_state[2..])?), 50 | ), 51 | ( 52 | "validation_code", 53 | 
Value::from_bytes(hex::decode(&wasm_data[2..])?), 54 | ), 55 | ("para_kind", Value::bool(options.onboard_as_para)), 56 | ]), 57 | ], 58 | ); 59 | 60 | let sudo_call = subxt::dynamic::tx("Sudo", "sudo", vec![schedule_para.into_value()]); 61 | 62 | // TODO: uncomment below and fix the sign and submit (and follow afterwards until 63 | // finalized block) to register the parachain 64 | let result = api 65 | .tx() 66 | .sign_and_submit_then_watch_default(&sudo_call, &sudo) 67 | .await?; 68 | 69 | let result = result.wait_for_in_block().await?; 70 | debug!("In block: {:#?}", result.block_hash()); 71 | Ok(()) 72 | } 73 | -------------------------------------------------------------------------------- /crates/orchestrator/src/tx_helper/runtime_upgrade.rs: -------------------------------------------------------------------------------- 1 | use subxt::{dynamic::Value, tx::TxStatus, OnlineClient, SubstrateConfig}; 2 | use subxt_signer::sr25519::Keypair; 3 | use tracing::{debug, info}; 4 | 5 | use crate::network::node::NetworkNode; 6 | 7 | pub async fn upgrade( 8 | node: &NetworkNode, 9 | wasm_data: &[u8], 10 | sudo: &Keypair, 11 | ) -> Result<(), anyhow::Error> { 12 | debug!( 13 | "Upgrading runtime, using node: {} with endpoting {}", 14 | node.name, node.ws_uri 15 | ); 16 | let api: OnlineClient = node.wait_client().await?; 17 | 18 | let upgrade = subxt::dynamic::tx( 19 | "System", 20 | "set_code_without_checks", 21 | vec![Value::from_bytes(wasm_data)], 22 | ); 23 | 24 | let sudo_call = subxt::dynamic::tx( 25 | "Sudo", 26 | "sudo_unchecked_weight", 27 | vec![ 28 | upgrade.into_value(), 29 | Value::named_composite([ 30 | ("ref_time", Value::primitive(1.into())), 31 | ("proof_size", Value::primitive(1.into())), 32 | ]), 33 | ], 34 | ); 35 | 36 | let mut tx = api 37 | .tx() 38 | .sign_and_submit_then_watch_default(&sudo_call, sudo) 39 | .await?; 40 | 41 | // Below we use the low level API to replicate the `wait_for_in_block` behaviour 42 | // which was removed in subxt 0.33.0. 
See https://github.com/paritytech/subxt/pull/1237. 43 | while let Some(status) = tx.next().await { 44 | let status = status?; 45 | match &status { 46 | TxStatus::InBestBlock(tx_in_block) | TxStatus::InFinalizedBlock(tx_in_block) => { 47 | let _result = tx_in_block.wait_for_success().await?; 48 | let block_status = if status.as_finalized().is_some() { 49 | "Finalized" 50 | } else { 51 | "Best" 52 | }; 53 | info!( 54 | "[{}] In block: {:#?}", 55 | block_status, 56 | tx_in_block.block_hash() 57 | ); 58 | }, 59 | TxStatus::Error { message } 60 | | TxStatus::Invalid { message } 61 | | TxStatus::Dropped { message } => { 62 | return Err(anyhow::format_err!("Error submitting tx: {message}")); 63 | }, 64 | _ => continue, 65 | } 66 | } 67 | 68 | Ok(()) 69 | } 70 | -------------------------------------------------------------------------------- /crates/orchestrator/src/tx_helper/validator_actions.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use subxt::{dynamic::Value, OnlineClient, SubstrateConfig}; 4 | use subxt_signer::{sr25519::Keypair, SecretUri}; 5 | use tracing::{debug, info, trace}; 6 | 7 | pub async fn register(validator_ids: Vec, node_ws_url: &str) -> Result<(), anyhow::Error> { 8 | debug!("Registering validators: {:?}", validator_ids); 9 | // get the seed 10 | // let sudo: Keypair; 11 | // if let Some(possible_seed) = options.seed { 12 | // sudo = Keypair::from_seed(possible_seed).expect("seed should return a Keypair."); 13 | // } else { 14 | let uri = SecretUri::from_str("//Alice")?; 15 | let sudo = Keypair::from_uri(&uri)?; 16 | // } 17 | 18 | let api: OnlineClient = get_client_from_url(&options.node_ws_url).await?; 19 | 20 | let register_call = subxt::dynamic::tx( 21 | "ValidatorManager", 22 | "register_validators", 23 | vec![Value::unnamed_composite(vec![Value::from_bytes( 24 | validator_ids.first().unwrap().as_bytes(), 25 | )])], 26 | ); 27 | 28 | let sudo_call = 
subxt::dynamic::tx("Sudo", "sudo", vec![register_call.into_value()]); 29 | 30 | // TODO: uncomment below and fix the sign and submit (and follow afterwards until 31 | // finalized block) to register the parachain 32 | let result = api 33 | .tx() 34 | .sign_and_submit_then_watch_default(&sudo_call, &sudo) 35 | .await?; 36 | 37 | debug!("result: {:#?}", result); 38 | let result = result.wait_for_in_block().await?; 39 | debug!("In block: {:#?}", result.block_hash()); 40 | Ok(()) 41 | } 42 | -------------------------------------------------------------------------------- /crates/prom-metrics-parser/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zombienet-prom-metrics-parser" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | rust-version.workspace = true 7 | publish = true 8 | license.workspace = true 9 | repository.workspace = true 10 | description = "Prometheus metric parser, parse metrics provided by internal prometheus server" 11 | keywords = ["zombienet", "prometheus"] 12 | 13 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 14 | 15 | [dependencies] 16 | pest = { workspace = true } 17 | pest_derive = { workspace = true } 18 | thiserror = { workspace = true } 19 | -------------------------------------------------------------------------------- /crates/prom-metrics-parser/src/grammar.pest: -------------------------------------------------------------------------------- 1 | // Grammar taken from https://github.com/mitghi/promerge/ with 2 | // some small modifications. 3 | alpha = _{'a'..'z' | 'A'..'Z'} 4 | alphanum = _{'a'..'z' | 'A'..'Z' | '0'..'9'} 5 | number = @{ 6 | "-"? 7 | ~ ("0" | ASCII_NONZERO_DIGIT ~ ASCII_DIGIT*) 8 | ~ ("." ~ ASCII_DIGIT*)? 9 | ~ (^"e" ~ ("+" | "-")? ~ ASCII_DIGIT+)? 
10 | } 11 | string = ${"\"" ~ inner ~ "\""} 12 | inner = @{char*} 13 | char = { 14 | !("\"" | "\\") ~ ANY 15 | | "\\" ~ ("\"" | "\\" | "/" | "b" | "f" | "n" | "r" | "t") 16 | | "\\" ~ ("u" ~ ASCII_HEX_DIGIT{4}) 17 | } 18 | whitespace_or_newline = _{(" "| "\n")*} 19 | hash = _{"#"} 20 | posInf = {"+Inf"} 21 | negInf = {"-Inf"} 22 | NaN = {"NaN"} 23 | lbrace = _{"{"} 24 | rbrace = _{"}"} 25 | typelit = _{"TYPE"} 26 | helplit = _{"HELP"} 27 | comma = _{","} 28 | countertype = {"counter"} 29 | gaugetype = {"gauge"} 30 | histogramtype = {"histogram"} 31 | summarytype = {"summary"} 32 | untyped = {"untyped"} 33 | ident = {alphanum+} 34 | key = @{ident ~ ("_" ~ ident)*} 35 | label = {key ~ "=" ~ string} 36 | labels = {label ~ (comma ~ label)*} 37 | helpkey = {key} 38 | helpval = {inner} 39 | typekey = {key} 40 | typeval = {countertype | gaugetype | histogramtype | summarytype | untyped} 41 | commentval = @{((ASCII_DIGIT| ASCII_NONZERO_DIGIT | ASCII_BIN_DIGIT | ASCII_OCT_DIGIT | ASCII_HEX_DIGIT | ASCII_ALPHA_LOWER | ASCII_ALPHA_UPPER | ASCII_ALPHA | ASCII_ALPHANUMERIC | !"\n" ~ ANY ))*} 42 | helpexpr = {hash ~ whitespace_or_newline ~ helplit ~ whitespace_or_newline ~ helpkey ~ whitespace_or_newline ~ commentval} 43 | typexpr = {hash ~ whitespace_or_newline ~ typelit ~ whitespace_or_newline ~ typekey ~ whitespace_or_newline ~ typeval } 44 | genericomment = {hash ~ whitespace_or_newline ~ commentval} 45 | promstmt = {key ~ (lbrace ~ (labels)* ~ rbrace){0,1} ~ whitespace_or_newline ~ ((posInf | negInf | NaN | number) ~ whitespace_or_newline ){1,2}} 46 | block = {((helpexpr | typexpr | genericomment)~ NEWLINE?)+ ~ (promstmt ~ NEWLINE?)+} 47 | statement = {SOI ~ block+ ~ EOI} 48 | -------------------------------------------------------------------------------- /crates/provider/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | 
-------------------------------------------------------------------------------- /crates/provider/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zombienet-provider" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | rust-version.workspace = true 7 | publish = true 8 | license.workspace = true 9 | repository.workspace = true 10 | description = "Zombienet provider, implement the logic to run the nodes in the native provider" 11 | keywords = ["zombienet", "provider", "native"] 12 | 13 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 14 | 15 | [dependencies] 16 | async-trait = { workspace = true } 17 | futures = { workspace = true } 18 | serde = { workspace = true, features = ["derive"] } 19 | serde_json = { workspace = true } 20 | serde_yaml = { workspace = true } 21 | tokio = { workspace = true, features = [ 22 | "process", 23 | "macros", 24 | "fs", 25 | "time", 26 | "rt", 27 | ] } 28 | tokio-util = { workspace = true, features = ["compat"] } 29 | thiserror = { workspace = true } 30 | anyhow = { workspace = true } 31 | uuid = { workspace = true, features = ["v4"] } 32 | nix = { workspace = true, features = ["signal"] } 33 | kube = { workspace = true, features = ["ws", "runtime"] } 34 | k8s-openapi = { workspace = true, features = ["v1_27"] } 35 | tar = { workspace = true } 36 | sha2 = { workspace = true } 37 | hex = { workspace = true } 38 | tracing = { workspace = true } 39 | reqwest = { workspace = true } 40 | regex = { workspace = true } 41 | url = { workspace = true } 42 | flate2 = { workspace = true } 43 | 44 | # Zomebienet deps 45 | support = { workspace = true } 46 | configuration = { workspace = true } 47 | -------------------------------------------------------------------------------- /crates/provider/src/docker.rs: -------------------------------------------------------------------------------- 1 
| mod client; 2 | mod namespace; 3 | mod node; 4 | mod provider; 5 | 6 | pub use provider::DockerProvider; 7 | -------------------------------------------------------------------------------- /crates/provider/src/docker/provider.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | path::{Path, PathBuf}, 4 | sync::{Arc, Weak}, 5 | }; 6 | 7 | use async_trait::async_trait; 8 | use support::fs::FileSystem; 9 | use tokio::sync::RwLock; 10 | 11 | use super::{client::DockerClient, namespace::DockerNamespace}; 12 | use crate::{ 13 | types::ProviderCapabilities, DynNamespace, Provider, ProviderError, ProviderNamespace, 14 | }; 15 | 16 | const PROVIDER_NAME: &str = "docker"; 17 | 18 | pub struct DockerProvider 19 | where 20 | FS: FileSystem + Send + Sync + Clone, 21 | { 22 | weak: Weak>, 23 | capabilities: ProviderCapabilities, 24 | tmp_dir: PathBuf, 25 | docker_client: DockerClient, 26 | filesystem: FS, 27 | pub(super) namespaces: RwLock>>>, 28 | } 29 | 30 | impl DockerProvider 31 | where 32 | FS: FileSystem + Send + Sync + Clone + 'static, 33 | { 34 | pub async fn new(filesystem: FS) -> Arc { 35 | let docker_client = DockerClient::new().await.unwrap(); 36 | 37 | let provider = Arc::new_cyclic(|weak| DockerProvider { 38 | weak: weak.clone(), 39 | capabilities: ProviderCapabilities { 40 | requires_image: true, 41 | has_resources: false, 42 | prefix_with_full_path: false, 43 | use_default_ports_in_cmd: true, 44 | }, 45 | tmp_dir: std::env::temp_dir(), 46 | docker_client, 47 | filesystem, 48 | namespaces: RwLock::new(HashMap::new()), 49 | }); 50 | 51 | let cloned_provider = provider.clone(); 52 | tokio::spawn(async move { 53 | tokio::signal::ctrl_c().await.unwrap(); 54 | for (_, ns) in cloned_provider.namespaces().await { 55 | if ns.is_detached().await { 56 | // best effort 57 | let _ = ns.destroy().await; 58 | } 59 | } 60 | 61 | // exit the process (130, SIGINT) 62 | std::process::exit(130) 63 | }); 
64 | 65 | provider 66 | } 67 | 68 | pub fn tmp_dir(mut self, tmp_dir: impl Into) -> Self { 69 | self.tmp_dir = tmp_dir.into(); 70 | self 71 | } 72 | } 73 | 74 | #[async_trait] 75 | impl Provider for DockerProvider 76 | where 77 | FS: FileSystem + Send + Sync + Clone + 'static, 78 | { 79 | fn name(&self) -> &str { 80 | PROVIDER_NAME 81 | } 82 | 83 | fn capabilities(&self) -> &ProviderCapabilities { 84 | &self.capabilities 85 | } 86 | 87 | async fn namespaces(&self) -> HashMap { 88 | self.namespaces 89 | .read() 90 | .await 91 | .iter() 92 | .map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace)) 93 | .collect() 94 | } 95 | 96 | async fn create_namespace(&self) -> Result { 97 | let namespace = DockerNamespace::new( 98 | &self.weak, 99 | &self.tmp_dir, 100 | &self.capabilities, 101 | &self.docker_client, 102 | &self.filesystem, 103 | None, 104 | ) 105 | .await?; 106 | 107 | self.namespaces 108 | .write() 109 | .await 110 | .insert(namespace.name().to_string(), namespace.clone()); 111 | 112 | Ok(namespace) 113 | } 114 | 115 | async fn create_namespace_with_base_dir( 116 | &self, 117 | base_dir: &Path, 118 | ) -> Result { 119 | let namespace = DockerNamespace::new( 120 | &self.weak, 121 | &self.tmp_dir, 122 | &self.capabilities, 123 | &self.docker_client, 124 | &self.filesystem, 125 | Some(base_dir), 126 | ) 127 | .await?; 128 | 129 | self.namespaces 130 | .write() 131 | .await 132 | .insert(namespace.name().to_string(), namespace.clone()); 133 | 134 | Ok(namespace) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /crates/provider/src/kubernetes.rs: -------------------------------------------------------------------------------- 1 | mod client; 2 | mod namespace; 3 | mod node; 4 | mod pod_spec_builder; 5 | mod provider; 6 | 7 | pub use provider::KubernetesProvider; 8 | -------------------------------------------------------------------------------- 
/crates/provider/src/kubernetes/pod_spec_builder.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use configuration::shared::resources::{ResourceQuantity, Resources}; 4 | use k8s_openapi::{ 5 | api::core::v1::{ 6 | ConfigMapVolumeSource, Container, EnvVar, PodSpec, ResourceRequirements, Volume, 7 | VolumeMount, 8 | }, 9 | apimachinery::pkg::api::resource::Quantity, 10 | }; 11 | 12 | pub(super) struct PodSpecBuilder; 13 | 14 | impl PodSpecBuilder { 15 | pub(super) fn build( 16 | name: &str, 17 | image: &str, 18 | resources: Option<&Resources>, 19 | program: &str, 20 | args: &[String], 21 | env: &[(String, String)], 22 | ) -> PodSpec { 23 | PodSpec { 24 | hostname: Some(name.to_string()), 25 | init_containers: Some(vec![Self::build_helper_binaries_setup_container()]), 26 | containers: vec![Self::build_main_container( 27 | name, image, resources, program, args, env, 28 | )], 29 | volumes: Some(Self::build_volumes()), 30 | ..Default::default() 31 | } 32 | } 33 | 34 | fn build_main_container( 35 | name: &str, 36 | image: &str, 37 | resources: Option<&Resources>, 38 | program: &str, 39 | args: &[String], 40 | env: &[(String, String)], 41 | ) -> Container { 42 | Container { 43 | name: name.to_string(), 44 | image: Some(image.to_string()), 45 | image_pull_policy: Some("Always".to_string()), 46 | command: Some( 47 | [ 48 | vec!["/zombie-wrapper.sh".to_string(), program.to_string()], 49 | args.to_vec(), 50 | ] 51 | .concat(), 52 | ), 53 | env: Some( 54 | env.iter() 55 | .map(|(name, value)| EnvVar { 56 | name: name.clone(), 57 | value: Some(value.clone()), 58 | value_from: None, 59 | }) 60 | .collect(), 61 | ), 62 | volume_mounts: Some(Self::build_volume_mounts(vec![VolumeMount { 63 | name: "zombie-wrapper-volume".to_string(), 64 | mount_path: "/zombie-wrapper.sh".to_string(), 65 | sub_path: Some("zombie-wrapper.sh".to_string()), 66 | ..Default::default() 67 | }])), 68 | resources: 
Self::build_resources_requirements(resources), 69 | ..Default::default() 70 | } 71 | } 72 | 73 | fn build_helper_binaries_setup_container() -> Container { 74 | Container { 75 | name: "helper-binaries-setup".to_string(), 76 | image: Some("europe-west3-docker.pkg.dev/parity-zombienet/zombienet-public-images/alpine:latest".to_string()), 77 | image_pull_policy: Some("IfNotPresent".to_string()), 78 | volume_mounts: Some(Self::build_volume_mounts(vec![VolumeMount { 79 | name: "helper-binaries-downloader-volume".to_string(), 80 | mount_path: "/helper-binaries-downloader.sh".to_string(), 81 | sub_path: Some("helper-binaries-downloader.sh".to_string()), 82 | ..Default::default() 83 | }])), 84 | command: Some(vec![ 85 | "ash".to_string(), 86 | "/helper-binaries-downloader.sh".to_string(), 87 | ]), 88 | ..Default::default() 89 | } 90 | } 91 | 92 | fn build_volumes() -> Vec { 93 | vec![ 94 | Volume { 95 | name: "cfg".to_string(), 96 | ..Default::default() 97 | }, 98 | Volume { 99 | name: "data".to_string(), 100 | ..Default::default() 101 | }, 102 | Volume { 103 | name: "relay-data".to_string(), 104 | ..Default::default() 105 | }, 106 | Volume { 107 | name: "zombie-wrapper-volume".to_string(), 108 | config_map: Some(ConfigMapVolumeSource { 109 | name: Some("zombie-wrapper".to_string()), 110 | default_mode: Some(0o755), 111 | ..Default::default() 112 | }), 113 | ..Default::default() 114 | }, 115 | Volume { 116 | name: "helper-binaries-downloader-volume".to_string(), 117 | config_map: Some(ConfigMapVolumeSource { 118 | name: Some("helper-binaries-downloader".to_string()), 119 | default_mode: Some(0o755), 120 | ..Default::default() 121 | }), 122 | ..Default::default() 123 | }, 124 | ] 125 | } 126 | 127 | fn build_volume_mounts(non_default_mounts: Vec) -> Vec { 128 | [ 129 | vec![ 130 | VolumeMount { 131 | name: "cfg".to_string(), 132 | mount_path: "/cfg".to_string(), 133 | read_only: Some(false), 134 | ..Default::default() 135 | }, 136 | VolumeMount { 137 | name: 
"data".to_string(), 138 | mount_path: "/data".to_string(), 139 | read_only: Some(false), 140 | ..Default::default() 141 | }, 142 | VolumeMount { 143 | name: "relay-data".to_string(), 144 | mount_path: "/relay-data".to_string(), 145 | read_only: Some(false), 146 | ..Default::default() 147 | }, 148 | ], 149 | non_default_mounts, 150 | ] 151 | .concat() 152 | } 153 | 154 | fn build_resources_requirements(resources: Option<&Resources>) -> Option { 155 | resources.map(|resources| ResourceRequirements { 156 | limits: Self::build_resources_requirements_quantities( 157 | resources.limit_cpu(), 158 | resources.limit_memory(), 159 | ), 160 | requests: Self::build_resources_requirements_quantities( 161 | resources.request_cpu(), 162 | resources.request_memory(), 163 | ), 164 | ..Default::default() 165 | }) 166 | } 167 | 168 | fn build_resources_requirements_quantities( 169 | cpu: Option<&ResourceQuantity>, 170 | memory: Option<&ResourceQuantity>, 171 | ) -> Option> { 172 | let mut quantities = BTreeMap::new(); 173 | 174 | if let Some(cpu) = cpu { 175 | quantities.insert("cpu".to_string(), Quantity(cpu.as_str().to_string())); 176 | } 177 | 178 | if let Some(memory) = memory { 179 | quantities.insert("memory".to_string(), Quantity(memory.as_str().to_string())); 180 | } 181 | 182 | if !quantities.is_empty() { 183 | Some(quantities) 184 | } else { 185 | None 186 | } 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /crates/provider/src/kubernetes/provider.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | path::{Path, PathBuf}, 4 | sync::{Arc, Weak}, 5 | }; 6 | 7 | use async_trait::async_trait; 8 | use support::fs::FileSystem; 9 | use tokio::sync::RwLock; 10 | 11 | use super::{client::KubernetesClient, namespace::KubernetesNamespace}; 12 | use crate::{ 13 | types::ProviderCapabilities, DynNamespace, Provider, ProviderError, ProviderNamespace, 14 | }; 
15 | 16 | const PROVIDER_NAME: &str = "k8s"; 17 | 18 | pub struct KubernetesProvider 19 | where 20 | FS: FileSystem + Send + Sync + Clone, 21 | { 22 | weak: Weak>, 23 | capabilities: ProviderCapabilities, 24 | tmp_dir: PathBuf, 25 | k8s_client: KubernetesClient, 26 | filesystem: FS, 27 | pub(super) namespaces: RwLock>>>, 28 | } 29 | 30 | impl KubernetesProvider 31 | where 32 | FS: FileSystem + Send + Sync + Clone, 33 | { 34 | pub async fn new(filesystem: FS) -> Arc { 35 | let k8s_client = KubernetesClient::new().await.unwrap(); 36 | 37 | Arc::new_cyclic(|weak| KubernetesProvider { 38 | weak: weak.clone(), 39 | capabilities: ProviderCapabilities { 40 | requires_image: true, 41 | has_resources: true, 42 | prefix_with_full_path: false, 43 | use_default_ports_in_cmd: true, 44 | }, 45 | tmp_dir: std::env::temp_dir(), 46 | k8s_client, 47 | filesystem, 48 | namespaces: RwLock::new(HashMap::new()), 49 | }) 50 | } 51 | 52 | pub fn tmp_dir(mut self, tmp_dir: impl Into) -> Self { 53 | self.tmp_dir = tmp_dir.into(); 54 | self 55 | } 56 | } 57 | 58 | #[async_trait] 59 | impl Provider for KubernetesProvider 60 | where 61 | FS: FileSystem + Send + Sync + Clone + 'static, 62 | { 63 | fn name(&self) -> &str { 64 | PROVIDER_NAME 65 | } 66 | 67 | fn capabilities(&self) -> &ProviderCapabilities { 68 | &self.capabilities 69 | } 70 | 71 | async fn namespaces(&self) -> HashMap { 72 | self.namespaces 73 | .read() 74 | .await 75 | .iter() 76 | .map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace)) 77 | .collect() 78 | } 79 | 80 | async fn create_namespace(&self) -> Result { 81 | let namespace = KubernetesNamespace::new( 82 | &self.weak, 83 | &self.tmp_dir, 84 | &self.capabilities, 85 | &self.k8s_client, 86 | &self.filesystem, 87 | None, 88 | ) 89 | .await?; 90 | 91 | self.namespaces 92 | .write() 93 | .await 94 | .insert(namespace.name().to_string(), namespace.clone()); 95 | 96 | Ok(namespace) 97 | } 98 | 99 | async fn create_namespace_with_base_dir( 100 | &self, 101 | 
base_dir: &Path, 102 | ) -> Result { 103 | let namespace = KubernetesNamespace::new( 104 | &self.weak, 105 | &self.tmp_dir, 106 | &self.capabilities, 107 | &self.k8s_client, 108 | &self.filesystem, 109 | Some(base_dir), 110 | ) 111 | .await?; 112 | 113 | self.namespaces 114 | .write() 115 | .await 116 | .insert(namespace.name().to_string(), namespace.clone()); 117 | 118 | Ok(namespace) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /crates/provider/src/kubernetes/static-configs/baseline-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: LimitRange 3 | metadata: 4 | name: mem-limit-range 5 | spec: 6 | limits: 7 | - defaultRequest: 8 | memory: 1G 9 | cpu: 0.5 10 | type: Container 11 | -------------------------------------------------------------------------------- /crates/provider/src/kubernetes/static-configs/namespace-network-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: internal-access 5 | spec: 6 | podSelector: {} 7 | ingress: 8 | - from: 9 | - namespaceSelector: 10 | matchExpressions: 11 | - key: kubernetes.io/metadata.name 12 | operator: In 13 | values: 14 | - {{namespace}} 15 | - gitlab 16 | - arc-runners 17 | - loki 18 | - tempo 19 | - monitoring 20 | - parachain-exporter 21 | - default 22 | policyTypes: 23 | - Ingress 24 | -------------------------------------------------------------------------------- /crates/provider/src/native.rs: -------------------------------------------------------------------------------- 1 | mod namespace; 2 | mod node; 3 | mod provider; 4 | 5 | pub use provider::NativeProvider; 6 | -------------------------------------------------------------------------------- /crates/provider/src/native/provider.rs: 
-------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | path::{Path, PathBuf}, 4 | sync::{Arc, Weak}, 5 | }; 6 | 7 | use async_trait::async_trait; 8 | use support::fs::FileSystem; 9 | use tokio::sync::RwLock; 10 | 11 | use super::namespace::NativeNamespace; 12 | use crate::{ 13 | types::ProviderCapabilities, DynNamespace, Provider, ProviderError, ProviderNamespace, 14 | }; 15 | 16 | const PROVIDER_NAME: &str = "native"; 17 | 18 | pub struct NativeProvider 19 | where 20 | FS: FileSystem + Send + Sync + Clone, 21 | { 22 | weak: Weak>, 23 | capabilities: ProviderCapabilities, 24 | tmp_dir: PathBuf, 25 | filesystem: FS, 26 | pub(super) namespaces: RwLock>>>, 27 | } 28 | 29 | impl NativeProvider 30 | where 31 | FS: FileSystem + Send + Sync + Clone, 32 | { 33 | pub fn new(filesystem: FS) -> Arc { 34 | Arc::new_cyclic(|weak| NativeProvider { 35 | weak: weak.clone(), 36 | capabilities: ProviderCapabilities { 37 | has_resources: false, 38 | requires_image: false, 39 | prefix_with_full_path: true, 40 | use_default_ports_in_cmd: false, 41 | }, 42 | // NOTE: temp_dir in linux return `/tmp` but on mac something like 43 | // `/var/folders/rz/1cyx7hfj31qgb98d8_cg7jwh0000gn/T/`, having 44 | // one `trailing slash` and the other no can cause issues if 45 | // you try to build a fullpath by concatenate. Use Pathbuf to prevent the issue. 
46 | tmp_dir: std::env::temp_dir(), 47 | filesystem, 48 | namespaces: RwLock::new(HashMap::new()), 49 | }) 50 | } 51 | 52 | pub fn tmp_dir(mut self, tmp_dir: impl Into) -> Self { 53 | self.tmp_dir = tmp_dir.into(); 54 | self 55 | } 56 | } 57 | 58 | #[async_trait] 59 | impl Provider for NativeProvider 60 | where 61 | FS: FileSystem + Send + Sync + Clone + 'static, 62 | { 63 | fn name(&self) -> &str { 64 | PROVIDER_NAME 65 | } 66 | 67 | fn capabilities(&self) -> &ProviderCapabilities { 68 | &self.capabilities 69 | } 70 | 71 | async fn namespaces(&self) -> HashMap { 72 | self.namespaces 73 | .read() 74 | .await 75 | .iter() 76 | .map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace)) 77 | .collect() 78 | } 79 | 80 | async fn create_namespace(&self) -> Result { 81 | let namespace = NativeNamespace::new( 82 | &self.weak, 83 | &self.tmp_dir, 84 | &self.capabilities, 85 | &self.filesystem, 86 | None, 87 | ) 88 | .await?; 89 | 90 | self.namespaces 91 | .write() 92 | .await 93 | .insert(namespace.name().to_string(), namespace.clone()); 94 | 95 | Ok(namespace) 96 | } 97 | 98 | async fn create_namespace_with_base_dir( 99 | &self, 100 | base_dir: &Path, 101 | ) -> Result { 102 | let namespace = NativeNamespace::new( 103 | &self.weak, 104 | &self.tmp_dir, 105 | &self.capabilities, 106 | &self.filesystem, 107 | Some(base_dir), 108 | ) 109 | .await?; 110 | 111 | self.namespaces 112 | .write() 113 | .await 114 | .insert(namespace.name().to_string(), namespace.clone()); 115 | 116 | Ok(namespace) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /crates/provider/src/shared.rs: -------------------------------------------------------------------------------- 1 | pub mod constants; 2 | pub mod helpers; 3 | pub mod types; 4 | -------------------------------------------------------------------------------- /crates/provider/src/shared/constants.rs: 
-------------------------------------------------------------------------------- 1 | use std::net::{IpAddr, Ipv4Addr}; 2 | 3 | /// Namespace prefix 4 | pub const NAMESPACE_PREFIX: &str = "zombie-"; 5 | /// Directory for node configuration 6 | pub const NODE_CONFIG_DIR: &str = "/cfg"; 7 | /// Directory for node data dir 8 | pub const NODE_DATA_DIR: &str = "/data"; 9 | /// Directory for node relay data dir 10 | pub const NODE_RELAY_DATA_DIR: &str = "/relay-data"; 11 | /// Directory for node scripts 12 | pub const NODE_SCRIPTS_DIR: &str = "/scripts"; 13 | /// Localhost ip 14 | pub const LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); 15 | /// The port substrate listens for p2p connections on 16 | pub const P2P_PORT: u16 = 30333; 17 | /// The remote port Prometheus can be accessed with 18 | pub const PROMETHEUS_PORT: u16 = 9615; 19 | /// The remote port websocket to access the RPC 20 | pub const RPC_WS_PORT: u16 = 9944; 21 | /// The remote port HTTP to access the RPC 22 | pub const RPC_HTTP_PORT: u16 = 9933; 23 | -------------------------------------------------------------------------------- /crates/provider/src/shared/helpers.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | /// Check if we are running in `CI` by checking the 'RUN_IN_CI' env var 4 | pub fn running_in_ci() -> bool { 5 | env::var("RUN_IN_CI").unwrap_or_default() == "1" 6 | } 7 | 8 | #[cfg(test)] 9 | mod tests { 10 | use super::*; 11 | 12 | #[test] 13 | fn check_runing_in_ci_env_var() { 14 | assert!(!running_in_ci()); 15 | // now set the env var 16 | env::set_var("RUN_IN_CI", "1"); 17 | assert!(running_in_ci()); 18 | // reset 19 | env::set_var("RUN_IN_CI", ""); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /crates/provider/src/shared/scripts/helper-binaries-downloader.sh: -------------------------------------------------------------------------------- 1 | #!/bin/ash 2 | 3 | 
log() { 4 | echo "$(date +"%F %T") $1" 5 | } 6 | 7 | # used to handle the distinction where /cfg is used for k8s and /helpers for docker/podman 8 | # to share a volume across nodes containing helper binaries and independent from /cfg 9 | # where some node files are stored 10 | OUTDIR=$([ -d /helpers ] && echo "/helpers" || echo "/cfg") 11 | 12 | # Allow to use our image and just cp'd the binaries. 13 | 14 | if [ -f /tmp/curl ]; then 15 | cp /tmp/curl $OUTDIR/curl 16 | log "curl copied" 17 | else 18 | wget github.com/moparisthebest/static-curl/releases/download/v7.83.1/curl-amd64 -O "$OUTDIR/curl" 19 | log "curl downloaded" 20 | fi; 21 | 22 | chmod +x "$OUTDIR/curl" 23 | log "curl chmoded" 24 | 25 | if [ -f /tmp/coreutils ]; then 26 | cp /tmp/coreutils $OUTDIR/coreutils 27 | log "coreutils copied" 28 | else 29 | wget -qO- github.com/uutils/coreutils/releases/download/0.0.17/coreutils-0.0.17-x86_64-unknown-linux-musl.tar.gz | tar -xz -C $OUTDIR --strip-components=1 coreutils-0.0.17-x86_64-unknown-linux-musl/coreutils 30 | log "coreutils downloaded" 31 | fi; 32 | 33 | chmod +x "$OUTDIR/coreutils" 34 | log "coreutils chmoded" 35 | -------------------------------------------------------------------------------- /crates/provider/src/shared/scripts/zombie-wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -uxo pipefail 3 | 4 | if [ -f /cfg/coreutils ]; then 5 | RM="/cfg/coreutils rm" 6 | MKFIFO="/cfg/coreutils mkfifo" 7 | MKNOD="/cfg/coreutils mknod" 8 | LS="/cfg/coreutils ls" 9 | KILL="/cfg/coreutils kill" 10 | SLEEP="/cfg/coreutils sleep" 11 | ECHO="/cfg/coreutils echo" 12 | elif [ -f /helpers/coreutils ]; then 13 | # used for docker/podman to have a single volume sharing helper binaries 14 | # across nodes independent from the /cfg where some files are stored 15 | # by the node itself 16 | RM="/helpers/coreutils rm" 17 | MKFIFO="/helpers/coreutils mkfifo" 18 | MKNOD="/helpers/coreutils mknod" 19 | 
LS="/helpers/coreutils ls" 20 | KILL="/helpers/coreutils kill" 21 | SLEEP="/helpers/coreutils sleep" 22 | ECHO="/helpers/coreutils echo" 23 | else 24 | RM="rm" 25 | MKFIFO="mkfifo" 26 | MKNOD="mknod" 27 | LS="ls" 28 | KILL="kill" 29 | SLEEP="sleep" 30 | ECHO="echo" 31 | fi 32 | 33 | echo "COMMANDS DEFINED" 34 | 35 | # add /cfg as the first `looking dir` to allow overriding commands. 36 | export PATH="/cfg":$PATH 37 | 38 | echo "EXPORT PATH" 39 | 40 | # setup pipe 41 | pipe=/tmp/zombiepipe 42 | trap "$RM -f $pipe" EXIT 43 | 44 | # try mkfifo first and allow to fail 45 | if [[ ! -p $pipe ]]; then 46 | $MKFIFO $pipe 47 | fi 48 | 49 | # from here on, exit immediately on any non-zero exit code 50 | set -e 51 | 52 | # if fails try mknod 53 | if [[ ! -p $pipe ]]; then 54 | $MKNOD $pipe p 55 | fi 56 | 57 | echo "PIPE CREATED" 58 | 59 | # init empty 60 | child_pid="" 61 | 62 | # get the command to exec 63 | CMD=($@) 64 | 65 | # File to store CMD (and update from there) 66 | ZOMBIE_CMD_FILE=/tmp/zombie.cmd 67 | ZOMBIE_CMD_PID=/tmp/zombie.pid 68 | 69 | # Store the cmd and make it available for later usage 70 | # NOTE: echo without a newline to allow customizing the cmd later 71 | $ECHO -n "${CMD[@]}" > $ZOMBIE_CMD_FILE 72 | 73 | echo "COMMAND TO RUN IS: $CMD" 74 | 75 | start() { 76 | # redirect the output to be exported to loki 77 | "${CMD[@]}" >> /proc/1/fd/1 2>> /proc/1/fd/2 & 78 | if [[ "$CMD" != "cat" ]]; then 79 | child_pid="$!" 80 | 81 | $ECHO $(cat $ZOMBIE_CMD_FILE) 82 | # store pid 83 | $ECHO ${child_pid} > $ZOMBIE_CMD_PID 84 | 85 | # sleep a few secs to detect errors bootstrapping the node 86 | sleep 3 87 | 88 | # check if the process is running 89 | if ! $LS /proc/$child_pid > /dev/null 2>&1 ; then 90 | echo "child process doesn't exist, quiting..."; 91 | exit 1; 92 | else 93 | echo "PID: $child_pid alive"; 94 | fi; 95 | else 96 | echo "Process not started, PID not stored, since was 'cat'"; 97 | fi; 98 | 99 | } 100 | 101 | restart() { 102 | if [ ! 
-z "${child_pid}" ]; then 103 | $KILL -9 "$child_pid" 104 | fi 105 | 106 | # check if we have timeout 107 | if [[ "$1" ]]; then 108 | $SLEEP "$1" 109 | fi 110 | 111 | start 112 | } 113 | 114 | pause() { 115 | if [ ! -z "${child_pid}" ]; then 116 | echo "send -STOP to process $child_pid" 117 | $KILL -STOP "$child_pid" 118 | echo "result $?" 119 | fi 120 | } 121 | 122 | resume() { 123 | if [ ! -z "${child_pid}" ]; then 124 | echo "send -CONT to process $child_pid" 125 | $KILL -CONT "$child_pid" 126 | echo "result $?" 127 | fi 128 | } 129 | 130 | 131 | # keep listening from the pipe 132 | while read line <$pipe 133 | echo "read line: ${line}" 134 | do 135 | if [[ "$line" == "start" ]]; then 136 | start 137 | elif [[ "$line" == "quit" ]]; then 138 | break 139 | elif [[ "$line" =~ "restart" ]]; then 140 | # check if we have timeout between restart 141 | if [[ $line =~ [^0-9]+([0-9]+) ]]; then 142 | restart "${BASH_REMATCH[1]}" 143 | else 144 | restart 0 145 | fi; 146 | elif [[ "$line" == "pause" ]]; then 147 | pause 148 | elif [[ "$line" == "resume" ]]; then 149 | resume 150 | fi 151 | done 152 | 153 | exit 0 154 | -------------------------------------------------------------------------------- /crates/sdk/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zombienet-sdk" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | rust-version.workspace = true 7 | publish = true 8 | license.workspace = true 9 | repository.workspace = true 10 | description = "Zombienet SDK, entrypoint for using zombienet" 11 | keywords = ["zombienet", "sdk"] 12 | 13 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 14 | 15 | [dependencies] 16 | async-trait = { workspace = true } 17 | tokio = { workspace = true } 18 | futures = { workspace = true } 19 | lazy_static = { workspace = true } 20 | subxt = { workspace = true } 21 | subxt-signer = { 
workspace = true, features = ["subxt"] } 22 | 23 | # Zombienet deps 24 | configuration = { workspace = true } 25 | orchestrator = { workspace = true } 26 | provider = { workspace = true } 27 | support = { workspace = true } 28 | 29 | [dev-dependencies] 30 | tracing-subscriber = { workspace = true } 31 | kube = { workspace = true, features = ["ws", "runtime"] } 32 | k8s-openapi = { workspace = true, features = ["v1_27"] } 33 | serde_json = {workspace = true } 34 | 35 | [features] 36 | pjs = ["orchestrator/pjs"] 37 | -------------------------------------------------------------------------------- /crates/sdk/src/environment.rs: -------------------------------------------------------------------------------- 1 | //! Helpers functions to get configuration (e.g. Provider and images) from the env vars 2 | use std::{env, future::Future, pin::Pin}; 3 | 4 | use crate::{LocalFileSystem, Network, NetworkConfig, NetworkConfigExt, OrchestratorError}; 5 | 6 | const DEFAULT_POLKADOT_IMAGE: &str = "docker.io/parity/polkadot:latest"; 7 | const DEFAULT_CUMULUS_IMAGE: &str = "docker.io/parity/polkadot-parachain:latest"; 8 | 9 | #[derive(Debug, Default)] 10 | pub struct Images { 11 | pub polkadot: String, 12 | pub cumulus: String, 13 | } 14 | 15 | pub enum Provider { 16 | Native, 17 | K8s, 18 | Docker, 19 | } 20 | 21 | impl Provider { 22 | pub fn get_spawn_fn( 23 | &self, 24 | ) -> fn(NetworkConfig) -> Pin + Send>> { 25 | match self { 26 | Provider::Native => NetworkConfigExt::spawn_native, 27 | Provider::K8s => NetworkConfigExt::spawn_k8s, 28 | Provider::Docker => NetworkConfigExt::spawn_docker, 29 | } 30 | } 31 | } 32 | 33 | // Use `docker` as default provider 34 | impl From for Provider { 35 | fn from(value: String) -> Self { 36 | match value.to_ascii_lowercase().as_ref() { 37 | "native" => Provider::Native, 38 | "k8s" => Provider::K8s, 39 | _ => Provider::Docker, // default provider 40 | } 41 | } 42 | } 43 | 44 | pub fn get_images_from_env() -> Images { 45 | let polkadot = 
env::var("POLKADOT_IMAGE").unwrap_or(DEFAULT_POLKADOT_IMAGE.into()); 46 | let cumulus = env::var("CUMULUS_IMAGE").unwrap_or(DEFAULT_CUMULUS_IMAGE.into()); 47 | Images { polkadot, cumulus } 48 | } 49 | 50 | pub fn get_provider_from_env() -> Provider { 51 | env::var("ZOMBIE_PROVIDER").unwrap_or_default().into() 52 | } 53 | 54 | pub type SpawnResult = Result, OrchestratorError>; 55 | pub fn get_spawn_fn() -> fn(NetworkConfig) -> Pin + Send>> { 56 | let provider = get_provider_from_env(); 57 | 58 | match provider { 59 | Provider::Native => NetworkConfigExt::spawn_native, 60 | Provider::K8s => NetworkConfigExt::spawn_k8s, 61 | Provider::Docker => NetworkConfigExt::spawn_docker, 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /crates/sdk/src/lib.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | pub use configuration::{ 3 | GlobalSettings, GlobalSettingsBuilder, NetworkConfig, NetworkConfigBuilder, 4 | RegistrationStrategy, WithRelaychain, 5 | }; 6 | #[cfg(feature = "pjs")] 7 | pub use orchestrator::pjs_helper::PjsResult; 8 | pub use orchestrator::{ 9 | errors::OrchestratorError, 10 | network::{node::NetworkNode, Network}, 11 | AddCollatorOptions, AddNodeOptions, Orchestrator, 12 | }; 13 | 14 | // Helpers used for interact with the network 15 | pub mod tx_helper { 16 | pub use orchestrator::{ 17 | network::chain_upgrade::ChainUpgrade, shared::types::RuntimeUpgradeOptions, 18 | }; 19 | } 20 | 21 | use provider::{DockerProvider, KubernetesProvider, NativeProvider}; 22 | pub use support::fs::local::LocalFileSystem; 23 | 24 | pub mod environment; 25 | pub const PROVIDERS: [&str; 3] = ["k8s", "native", "docker"]; 26 | 27 | // re-export subxt 28 | pub use subxt; 29 | pub use subxt_signer; 30 | 31 | #[async_trait] 32 | pub trait NetworkConfigExt { 33 | /// Spawns a network using the native or k8s provider. 
34 | /// 35 | /// # Example: 36 | /// ```rust 37 | /// # use zombienet_sdk::{NetworkConfig, NetworkConfigExt}; 38 | /// # async fn example() -> Result<(), zombienet_sdk::OrchestratorError> { 39 | /// let network = NetworkConfig::load_from_toml("config.toml")? 40 | /// .spawn_native() 41 | /// .await?; 42 | /// # Ok(()) 43 | /// # } 44 | /// ``` 45 | async fn spawn_native(self) -> Result, OrchestratorError>; 46 | async fn spawn_k8s(self) -> Result, OrchestratorError>; 47 | async fn spawn_docker(self) -> Result, OrchestratorError>; 48 | } 49 | 50 | #[async_trait] 51 | impl NetworkConfigExt for NetworkConfig { 52 | async fn spawn_native(self) -> Result, OrchestratorError> { 53 | let filesystem = LocalFileSystem; 54 | let provider = NativeProvider::new(filesystem.clone()); 55 | let orchestrator = Orchestrator::new(filesystem, provider); 56 | orchestrator.spawn(self).await 57 | } 58 | 59 | async fn spawn_k8s(self) -> Result, OrchestratorError> { 60 | let filesystem = LocalFileSystem; 61 | let provider = KubernetesProvider::new(filesystem.clone()).await; 62 | let orchestrator = Orchestrator::new(filesystem, provider); 63 | orchestrator.spawn(self).await 64 | } 65 | 66 | async fn spawn_docker(self) -> Result, OrchestratorError> { 67 | let filesystem = LocalFileSystem; 68 | let provider = DockerProvider::new(filesystem.clone()).await; 69 | let orchestrator = Orchestrator::new(filesystem, provider); 70 | orchestrator.spawn(self).await 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /crates/sdk/tests/smoke-native.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use configuration::{NetworkConfig, NetworkConfigBuilder}; 4 | use zombienet_sdk::environment::get_spawn_fn; 5 | 6 | fn small_network() -> NetworkConfig { 7 | NetworkConfigBuilder::new() 8 | .with_relaychain(|r| { 9 | r.with_chain("rococo-local") 10 | .with_default_command("polkadot") 11 | 
.with_default_image("docker.io/parity/polkadot:v1.7.0") 12 | .with_node(|node| node.with_name("alice")) 13 | .with_node(|node| node.with_name("bob")) 14 | }) 15 | .build() 16 | .unwrap() 17 | } 18 | 19 | #[tokio::test(flavor = "multi_thread")] 20 | async fn ci_native_smoke_should_works() { 21 | tracing_subscriber::fmt::init(); 22 | const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; 23 | let now = Instant::now(); 24 | let config = small_network(); 25 | let spawn_fn = get_spawn_fn(); 26 | 27 | let network = spawn_fn(config).await.unwrap(); 28 | 29 | let elapsed = now.elapsed(); 30 | println!("🚀🚀🚀🚀 network deployed in {:.2?}", elapsed); 31 | 32 | // Get a ref to the node 33 | let alice = network.get_node("alice").unwrap(); 34 | // wait 10 blocks 35 | alice 36 | .wait_metric(BEST_BLOCK_METRIC, |x| x > 9_f64) 37 | .await 38 | .unwrap(); 39 | } 40 | -------------------------------------------------------------------------------- /crates/sdk/tests/smoke.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use configuration::{NetworkConfig, NetworkConfigBuilder}; 4 | use futures::{stream::StreamExt, try_join}; 5 | use orchestrator::{AddCollatorOptions, AddNodeOptions}; 6 | #[cfg(feature = "pjs")] 7 | use serde_json::json; 8 | use zombienet_sdk::environment::get_spawn_fn; 9 | 10 | fn small_network() -> NetworkConfig { 11 | NetworkConfigBuilder::new() 12 | .with_relaychain(|r| { 13 | r.with_chain("rococo-local") 14 | .with_default_command("polkadot") 15 | .with_default_image("docker.io/parity/polkadot:v1.7.0") 16 | .with_node(|node| node.with_name("alice")) 17 | .with_node(|node| node.with_name("bob")) 18 | }) 19 | .with_parachain(|p| { 20 | p.with_id(2000).cumulus_based(true).with_collator(|n| { 21 | n.with_name("collator") 22 | .with_command("polkadot-parachain") 23 | .with_image("docker.io/parity/polkadot-parachain:1.7.0") 24 | }) 25 | }) 26 | .build() 27 | .unwrap() 28 | } 29 | 30 | 
#[tokio::test(flavor = "multi_thread")] 31 | async fn ci_k8s_basic_functionalities_should_works() { 32 | tracing_subscriber::fmt::init(); 33 | const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; 34 | let now = Instant::now(); 35 | 36 | let config = small_network(); 37 | let spawn_fn = get_spawn_fn(); 38 | 39 | let mut network = spawn_fn(config).await.unwrap(); 40 | // Optionally detach the network 41 | // network.detach().await; 42 | 43 | let elapsed = now.elapsed(); 44 | println!("🚀🚀🚀🚀 network deployed in {:.2?}", elapsed); 45 | 46 | // Get a ref to the node 47 | let alice = network.get_node("alice").unwrap(); 48 | 49 | // timeout connecting ws 50 | let c = network.get_node("collator").unwrap(); 51 | let r = c 52 | .wait_client_with_timeout::(1_u32) 53 | .await; 54 | assert!(r.is_err()); 55 | 56 | let (_best_block_pass, client) = try_join!( 57 | alice.wait_metric(BEST_BLOCK_METRIC, |x| x > 5_f64), 58 | alice.wait_client::() 59 | ) 60 | .unwrap(); 61 | 62 | alice 63 | .wait_log_line_count("*rted #1*", true, 10) 64 | .await 65 | .unwrap(); 66 | 67 | // check best block through metrics with timeout 68 | assert!(alice 69 | .wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 10_f64, 45_u32) 70 | .await 71 | .is_ok()); 72 | 73 | // ensure timeout error 74 | let best_block = alice.reports(BEST_BLOCK_METRIC).await.unwrap(); 75 | let res = alice 76 | .wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > (best_block * 2_f64), 10_u32) 77 | .await; 78 | 79 | assert!(res.is_err()); 80 | 81 | // get single metric 82 | let role = alice.reports("node_roles").await.unwrap(); 83 | println!("Role is {role}"); 84 | assert_eq!(role, 4.0); 85 | 86 | // subxt 87 | // wait 3 blocks 88 | let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3); 89 | while let Some(block) = blocks.next().await { 90 | println!("Block #{}", block.unwrap().header().number); 91 | } 92 | 93 | // drop the client 94 | drop(client); 95 | 96 | // check best block through metrics 97 
| let best_block = alice 98 | .reports("block_height{status=\"best\"}") 99 | .await 100 | .unwrap(); 101 | 102 | assert!(best_block >= 2.0, "Current best {}", best_block); 103 | 104 | #[cfg(feature = "pjs")] 105 | { 106 | // pjs 107 | let para_is_registered = r#" 108 | const paraId = arguments[0]; 109 | const parachains: number[] = (await api.query.paras.parachains()) || []; 110 | const isRegistered = parachains.findIndex((id) => id.toString() == paraId.toString()) >= 0; 111 | return isRegistered; 112 | "#; 113 | 114 | let is_registered = alice 115 | .pjs(para_is_registered, vec![json!(2000)], None) 116 | .await 117 | .unwrap() 118 | .unwrap(); 119 | assert_eq!(is_registered, json!(true)); 120 | 121 | // run pjs with code 122 | let query_paras = r#" 123 | const parachains: number[] = (await api.query.paras.parachains()) || []; 124 | return parachains.toJSON() 125 | "#; 126 | 127 | let paras = alice.pjs(query_paras, vec![], None).await.unwrap(); 128 | 129 | println!("parachains registered: {:?}", paras); 130 | } 131 | 132 | // collator 133 | let collator = network.get_node("collator").unwrap(); 134 | let client = collator 135 | .wait_client::() 136 | .await 137 | .unwrap(); 138 | 139 | // wait 3 blocks 140 | let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3); 141 | while let Some(block) = blocks.next().await { 142 | println!("Block (para) #{}", block.unwrap().header().number); 143 | } 144 | 145 | // add node 146 | let opts = AddNodeOptions { 147 | rpc_port: Some(9444), 148 | is_validator: true, 149 | ..Default::default() 150 | }; 151 | 152 | network.add_node("new1", opts).await.unwrap(); 153 | 154 | // add collator 155 | let col_opts = AddCollatorOptions { 156 | command: Some("polkadot-parachain".try_into().unwrap()), 157 | image: Some( 158 | "docker.io/parity/polkadot-parachain:1.7.0" 159 | .try_into() 160 | .unwrap(), 161 | ), 162 | ..Default::default() 163 | }; 164 | 165 | network 166 | .add_collator("new-col-1", col_opts, 2000) 167 | 
.await 168 | .unwrap(); 169 | 170 | // pause / resume 171 | let alice = network.get_node("alice").unwrap(); 172 | alice.pause().await.unwrap(); 173 | let res_err = alice 174 | .wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 5_f64, 5_u32) 175 | .await; 176 | 177 | assert!(res_err.is_err()); 178 | 179 | alice.resume().await.unwrap(); 180 | alice 181 | .wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 5_f64, 5_u32) 182 | .await 183 | .unwrap(); 184 | 185 | // tear down (optional if you don't detach the network) 186 | // network.destroy().await.unwrap(); 187 | } 188 | -------------------------------------------------------------------------------- /crates/sdk/tests/two-paras-same-id.rs: -------------------------------------------------------------------------------- 1 | use zombienet_sdk::{environment::get_spawn_fn, NetworkConfigBuilder}; 2 | 3 | #[tokio::test(flavor = "multi_thread")] 4 | async fn two_paras_same_id() { 5 | tracing_subscriber::fmt::init(); 6 | let spawn_fn = get_spawn_fn(); 7 | let config = NetworkConfigBuilder::new() 8 | .with_relaychain(|r| { 9 | r.with_chain("rococo-local") 10 | .with_default_command("polkadot") 11 | .with_default_image("docker.io/parity/polkadot:v1.7.0") 12 | .with_node(|node| node.with_name("alice")) 13 | .with_node(|node| node.with_name("bob")) 14 | }) 15 | .with_parachain(|p| { 16 | p.with_id(2000) 17 | .with_default_command("polkadot-parachain") 18 | .with_default_image("docker.io/parity/polkadot-parachain:1.7.0") 19 | .with_collator(|n| n.with_name("collator")) 20 | }) 21 | .with_parachain(|p| { 22 | p.with_id(2000) 23 | .with_default_command("polkadot-parachain") 24 | .with_default_image("docker.io/parity/polkadot-parachain:1.7.0") 25 | .with_registration_strategy(zombienet_sdk::RegistrationStrategy::Manual) 26 | .with_collator(|n| n.with_name("collator1")) 27 | }) 28 | .build() 29 | .unwrap(); 30 | 31 | let network = spawn_fn(config).await.unwrap(); 32 | 33 | assert!(network.get_node("collator").is_ok()); 34 | 
assert!(network.get_node("collator1").is_ok()); 35 | 36 | // First parachain (out of two) is fetched 37 | assert_eq!(network.parachain(2000).unwrap().unique_id(), "2000"); 38 | 39 | // First and second parachain hav the same para_id 40 | assert_eq!( 41 | network.parachain_by_unique_id("2000").unwrap().para_id(), 42 | network.parachain_by_unique_id("2000-1").unwrap().para_id(), 43 | ); 44 | } 45 | -------------------------------------------------------------------------------- /crates/support/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /crates/support/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zombienet-support" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | rust-version.workspace = true 7 | publish = true 8 | license.workspace = true 9 | repository.workspace = true 10 | description = "Support crates with common traits/structs and helpers" 11 | keywords = ["zombienet"] 12 | 13 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 14 | 15 | [dependencies] 16 | thiserror = { workspace = true } 17 | anyhow = { workspace = true } 18 | async-trait = { workspace = true } 19 | futures = { workspace = true } 20 | reqwest = { workspace = true } 21 | tokio = { workspace = true, features = ["full"] } 22 | uuid = { workspace = true, features = ["v4"] } 23 | nix = { workspace = true, features = ["signal"] } 24 | rand = { workspace = true } 25 | regex = { workspace = true } 26 | tracing = { workspace = true } 27 | lazy_static = { workspace = true } 28 | serde_json = { workspace = true } 29 | -------------------------------------------------------------------------------- /crates/support/src/constants.rs: 
-------------------------------------------------------------------------------- 1 | pub const VALID_REGEX: &str = "regex should be valid "; 2 | pub const BORROWABLE: &str = "must be borrowable as mutable "; 3 | pub const RELAY_NOT_NONE: &str = "typestate should ensure the relaychain isn't None at this point "; 4 | pub const SHOULD_COMPILE: &str = "should compile with success "; 5 | pub const INFAILABLE: &str = "infaillible "; 6 | pub const NO_ERR_DEF_BUILDER: &str = "should have no errors for default builder "; 7 | pub const RW_FAILED: &str = "should be able to read/write - failed "; 8 | pub const DEFAULT_TYPESTATE: &str = "'default' overriding should be ensured by typestate "; 9 | pub const VALIDATION_CHECK: &str = "validation failed "; 10 | 11 | pub const PREFIX_CANT_BE_NONE: &str = "name prefix can't be None if a value exists "; 12 | 13 | pub const THIS_IS_A_BUG: &str = 14 | "- this is a bug please report it: https://github.com/paritytech/zombienet-sdk/issues"; 15 | -------------------------------------------------------------------------------- /crates/support/src/fs.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use async_trait::async_trait; 4 | 5 | pub mod in_memory; 6 | pub mod local; 7 | 8 | #[derive(Debug, thiserror::Error)] 9 | #[error(transparent)] 10 | pub struct FileSystemError(#[from] anyhow::Error); 11 | 12 | impl From for FileSystemError { 13 | fn from(error: std::io::Error) -> Self { 14 | Self(error.into()) 15 | } 16 | } 17 | 18 | pub type FileSystemResult = Result; 19 | 20 | #[async_trait] 21 | pub trait FileSystem { 22 | async fn create_dir
<P>
(&self, path: P) -> FileSystemResult<()> 23 | where 24 | P: AsRef<Path> + Send; 25 | 26 | async fn create_dir_all
<P>
(&self, path: P) -> FileSystemResult<()> 27 | where 28 | P: AsRef<Path> + Send; 29 | 30 | async fn read
<P>
(&self, path: P) -> FileSystemResult<Vec<u8>> 31 | where 32 | P: AsRef<Path> + Send; 33 | 34 | async fn read_to_string
<P>
(&self, path: P) -> FileSystemResult<String> 35 | where 36 | P: AsRef<Path> + Send; 37 | 38 | async fn write<P, C>(&self, path: P, contents: C) -> FileSystemResult<()> 39 | where 40 | P: AsRef<Path> + Send, 41 | C: AsRef<[u8]> + Send; 42 | 43 | async fn append<P, C>(&self, path: P, contents: C) -> FileSystemResult<()> 44 | where 45 | P: AsRef<Path> + Send, 46 | C: AsRef<[u8]> + Send; 47 | 48 | async fn copy<P1, P2>(&self, from: P1, to: P2) -> FileSystemResult<()> 49 | where 50 | P1: AsRef<Path> + Send, 51 | P2: AsRef<Path> + Send; 52 | 53 | async fn set_mode
<P>
(&self, path: P, perm: u32) -> FileSystemResult<()> 54 | where 55 | P: AsRef<Path> + Send; 56 | 57 | async fn exists
<P>
(&self, path: P) -> bool 58 | where 59 | P: AsRef + Send; 60 | } 61 | -------------------------------------------------------------------------------- /crates/support/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod constants; 2 | pub mod fs; 3 | pub mod net; 4 | pub mod replacer; 5 | -------------------------------------------------------------------------------- /crates/support/src/net.rs: -------------------------------------------------------------------------------- 1 | use std::{io::Cursor, str::FromStr, time::Duration}; 2 | 3 | use reqwest::{Method, Request, StatusCode, Url}; 4 | use tracing::trace; 5 | 6 | use crate::constants::THIS_IS_A_BUG; 7 | 8 | type Result = std::result::Result>; 9 | 10 | pub async fn download_file(url: String, dest: String) -> Result<()> { 11 | let response = reqwest::get(url).await?; 12 | let mut file = std::fs::File::create(dest)?; 13 | let mut content = Cursor::new(response.bytes().await?); 14 | std::io::copy(&mut content, &mut file)?; 15 | Ok(()) 16 | } 17 | 18 | pub async fn wait_ws_ready(url: &str) -> Result<()> { 19 | let mut parsed = Url::from_str(url)?; 20 | parsed 21 | .set_scheme("http") 22 | .map_err(|_| anyhow::anyhow!("Can not set the scheme, {}", THIS_IS_A_BUG))?; 23 | 24 | let http_client = reqwest::Client::new(); 25 | loop { 26 | let req = Request::new(Method::OPTIONS, parsed.clone()); 27 | let res = http_client.execute(req).await; 28 | match res { 29 | Ok(res) => { 30 | if res.status() == StatusCode::OK { 31 | // ready to go! 32 | break; 33 | } 34 | 35 | trace!("http_client status: {}, continuing...", res.status()); 36 | }, 37 | Err(e) => { 38 | if !skip_err_while_waiting(&e) { 39 | return Err(e.into()); 40 | } 41 | 42 | trace!("http_client err: {}, continuing... 
", e.to_string()); 43 | }, 44 | } 45 | 46 | tokio::time::sleep(Duration::from_secs(1)).await; 47 | } 48 | 49 | Ok(()) 50 | } 51 | 52 | pub fn skip_err_while_waiting(e: &reqwest::Error) -> bool { 53 | // if the error is connecting/request could be the case that the node 54 | // is not listening yet, so we keep waiting 55 | // Skipped errs like: 56 | // 'tcp connect error: Connection refused (os error 61)' 57 | // 'operation was canceled: connection closed before message completed' 58 | // 'connection error: Connection reset by peer (os error 54)' 59 | e.is_connect() || e.is_request() 60 | } 61 | -------------------------------------------------------------------------------- /crates/support/src/replacer.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use lazy_static::lazy_static; 4 | use regex::{Captures, Regex}; 5 | use tracing::{trace, warn}; 6 | 7 | use crate::constants::{SHOULD_COMPILE, THIS_IS_A_BUG}; 8 | 9 | lazy_static! 
{ 10 | static ref RE: Regex = Regex::new(r#"\{\{([a-zA-Z0-9_]*)\}\}"#) 11 | .unwrap_or_else(|_| panic!("{}, {}", SHOULD_COMPILE, THIS_IS_A_BUG)); 12 | static ref TOKEN_PLACEHOLDER: Regex = Regex::new(r#"\{\{ZOMBIE:(.*?):(.*?)\}\}"#) 13 | .unwrap_or_else(|_| panic!("{}, {}", SHOULD_COMPILE, THIS_IS_A_BUG)); 14 | static ref PLACEHOLDER_COMPAT: HashMap<&'static str, &'static str> = { 15 | let mut m = HashMap::new(); 16 | m.insert("multiAddress", "multiaddr"); 17 | m.insert("wsUri", "ws_uri"); 18 | m.insert("prometheusUri", "prometheus_uri"); 19 | 20 | m 21 | }; 22 | } 23 | 24 | /// Return true if the text contains any TOKEN_PLACEHOLDER 25 | pub fn has_tokens(text: &str) -> bool { 26 | TOKEN_PLACEHOLDER.is_match(text) 27 | } 28 | 29 | pub fn apply_replacements(text: &str, replacements: &HashMap<&str, &str>) -> String { 30 | let augmented_text = RE.replace_all(text, |caps: &Captures| { 31 | if let Some(replacements_value) = replacements.get(&caps[1]) { 32 | replacements_value.to_string() 33 | } else { 34 | caps[0].to_string() 35 | } 36 | }); 37 | 38 | augmented_text.to_string() 39 | } 40 | 41 | pub fn apply_env_replacements(text: &str) -> String { 42 | let augmented_text = RE.replace_all(text, |caps: &Captures| { 43 | if let Ok(replacements_value) = std::env::var(&caps[1]) { 44 | replacements_value 45 | } else { 46 | caps[0].to_string() 47 | } 48 | }); 49 | 50 | augmented_text.to_string() 51 | } 52 | 53 | pub fn apply_running_network_replacements(text: &str, network: &serde_json::Value) -> String { 54 | let augmented_text = TOKEN_PLACEHOLDER.replace_all(text, |caps: &Captures| { 55 | trace!("appling replacements for caps: {caps:#?}"); 56 | if let Some(node) = network.get(&caps[1]) { 57 | trace!("caps1 {} - node: {node}", &caps[1]); 58 | let field = *PLACEHOLDER_COMPAT.get(&caps[2]).unwrap_or(&&caps[2]); 59 | if let Some(val) = node.get(field) { 60 | trace!("caps2 {} - node: {node}", field); 61 | val.as_str().unwrap_or("Invalid string").to_string() 62 | } else { 63 | 
warn!( 64 | "⚠️ The node with name {} doesn't have the value {} in context", 65 | &caps[1], &caps[2] 66 | ); 67 | caps[0].to_string() 68 | } 69 | } else { 70 | warn!("⚠️ No node with name {} in context", &caps[1]); 71 | caps[0].to_string() 72 | } 73 | }); 74 | 75 | augmented_text.to_string() 76 | } 77 | #[cfg(test)] 78 | mod tests { 79 | use serde_json::json; 80 | 81 | use super::*; 82 | 83 | #[test] 84 | fn replace_should_works() { 85 | let text = "some {{namespace}}"; 86 | let mut replacements = HashMap::new(); 87 | replacements.insert("namespace", "demo-123"); 88 | let res = apply_replacements(text, &replacements); 89 | assert_eq!("some demo-123".to_string(), res); 90 | } 91 | 92 | #[test] 93 | fn replace_env_should_works() { 94 | let text = "some {{namespace}}"; 95 | std::env::set_var("namespace", "demo-123"); 96 | // let mut replacements = HashMap::new(); 97 | // replacements.insert("namespace", "demo-123"); 98 | let res = apply_env_replacements(text); 99 | assert_eq!("some demo-123".to_string(), res); 100 | } 101 | 102 | #[test] 103 | fn replace_multiple_should_works() { 104 | let text = r#"some {{namespace}} 105 | other is {{other}}"#; 106 | let augmented_text = r#"some demo-123 107 | other is other-123"#; 108 | 109 | let mut replacements = HashMap::new(); 110 | replacements.insert("namespace", "demo-123"); 111 | replacements.insert("other", "other-123"); 112 | let res = apply_replacements(text, &replacements); 113 | assert_eq!(augmented_text, res); 114 | } 115 | 116 | #[test] 117 | fn replace_multiple_with_missing_should_works() { 118 | let text = r#"some {{namespace}} 119 | other is {{other}}"#; 120 | let augmented_text = r#"some demo-123 121 | other is {{other}}"#; 122 | 123 | let mut replacements = HashMap::new(); 124 | replacements.insert("namespace", "demo-123"); 125 | 126 | let res = apply_replacements(text, &replacements); 127 | assert_eq!(augmented_text, res); 128 | } 129 | 130 | #[test] 131 | fn 
replace_without_replacement_should_leave_text_unchanged() { 132 | let text = "some {{namespace}}"; 133 | let mut replacements = HashMap::new(); 134 | replacements.insert("other", "demo-123"); 135 | let res = apply_replacements(text, &replacements); 136 | assert_eq!(text.to_string(), res); 137 | } 138 | 139 | #[test] 140 | fn replace_running_network_should_work() { 141 | let network = json!({ 142 | "alice" : { 143 | "multiaddr": "some/demo/127.0.0.1" 144 | } 145 | }); 146 | 147 | let res = apply_running_network_replacements("{{ZOMBIE:alice:multiaddr}}", &network); 148 | assert_eq!(res.as_str(), "some/demo/127.0.0.1"); 149 | } 150 | 151 | #[test] 152 | fn replace_running_network_with_compat_should_work() { 153 | let network = json!({ 154 | "alice" : { 155 | "multiaddr": "some/demo/127.0.0.1" 156 | } 157 | }); 158 | 159 | let res = apply_running_network_replacements("{{ZOMBIE:alice:multiAddress}}", &network); 160 | assert_eq!(res.as_str(), "some/demo/127.0.0.1"); 161 | } 162 | 163 | #[test] 164 | fn replace_running_network_with_missing_field_should_not_replace_nothing() { 165 | let network = json!({ 166 | "alice" : { 167 | "multiaddr": "some/demo/127.0.0.1" 168 | } 169 | }); 170 | 171 | let res = apply_running_network_replacements("{{ZOMBIE:alice:someField}}", &network); 172 | assert_eq!(res.as_str(), "{{ZOMBIE:alice:someField}}"); 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /crates/test-runner/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /crates/test-runner/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-runner" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | 
[dependencies] 9 | -------------------------------------------------------------------------------- /crates/test-runner/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub fn add(left: usize, right: usize) -> usize { 2 | left + right 3 | } 4 | 5 | #[cfg(test)] 6 | mod tests { 7 | use super::*; 8 | 9 | #[test] 10 | fn it_works() { 11 | let result = add(2, 2); 12 | assert_eq!(result, 4); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # https://rust-lang.github.io/rustfmt/?version=v1.7.0 2 | 3 | # general 4 | indent_style = "Block" 5 | 6 | # rewriting 7 | condense_wildcard_suffixes = true 8 | match_block_trailing_comma = true 9 | use_field_init_shorthand = true 10 | use_try_shorthand = true 11 | 12 | # normalization 13 | normalize_comments = true 14 | normalize_doc_attributes = true 15 | 16 | # reordering 17 | reorder_impl_items = true 18 | reorder_imports = true 19 | reorder_modules = true 20 | imports_granularity = "Crate" 21 | group_imports = "StdExternalCrate" 22 | 23 | # additional formating 24 | format_code_in_doc_comments = true 25 | format_macro_matchers = true 26 | format_macro_bodies = true --------------------------------------------------------------------------------